gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""Consts for the OpenWeatherMap."""
from homeassistant.components.weather import (
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_EXCEPTIONAL,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
ATTR_CONDITION_WINDY_VARIANT,
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
)
from homeassistant.const import (
DEGREE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
LENGTH_MILLIMETERS,
PERCENTAGE,
PRESSURE_HPA,
SPEED_METERS_PER_SECOND,
TEMP_CELSIUS,
)
# Integration domain and user-facing defaults.
DOMAIN = "openweathermap"
DEFAULT_NAME = "OpenWeatherMap"
DEFAULT_LANGUAGE = "en"
ATTRIBUTION = "Data provided by OpenWeatherMap"
CONF_LANGUAGE = "language"
# Schema version of the config entry; bump when stored entry data changes.
CONFIG_FLOW_VERSION = 2
# Keys used to store per-entry objects in hass.data[DOMAIN].
ENTRY_NAME = "name"
ENTRY_WEATHER_COORDINATOR = "weather_coordinator"
# Keys of the normalised weather data produced by the update coordinator.
ATTR_API_PRECIPITATION = "precipitation"
ATTR_API_DATETIME = "datetime"
ATTR_API_WEATHER = "weather"
ATTR_API_TEMPERATURE = "temperature"
ATTR_API_WIND_SPEED = "wind_speed"
ATTR_API_WIND_BEARING = "wind_bearing"
ATTR_API_HUMIDITY = "humidity"
ATTR_API_PRESSURE = "pressure"
ATTR_API_CONDITION = "condition"
ATTR_API_CLOUDS = "clouds"
ATTR_API_RAIN = "rain"
ATTR_API_SNOW = "snow"
ATTR_API_WEATHER_CODE = "weather_code"
ATTR_API_FORECAST = "forecast"
# Keys describing one sensor definition inside the *_SENSOR_TYPES mappings.
SENSOR_NAME = "sensor_name"
SENSOR_UNIT = "sensor_unit"
SENSOR_DEVICE_CLASS = "sensor_device_class"
UPDATE_LISTENER = "update_listener"
# Platforms set up for each config entry.
COMPONENTS = ["sensor", "weather"]
# Supported forecast modes (which OWM endpoint/granularity to use).
FORECAST_MODE_HOURLY = "hourly"
FORECAST_MODE_DAILY = "daily"
# NOTE(review): defined but intentionally absent from FORECAST_MODES below --
# presumably a legacy mode kept for backwards compatibility; confirm.
FORECAST_MODE_FREE_DAILY = "freedaily"
FORECAST_MODE_ONECALL_HOURLY = "onecall_hourly"
FORECAST_MODE_ONECALL_DAILY = "onecall_daily"
FORECAST_MODES = [
    FORECAST_MODE_HOURLY,
    FORECAST_MODE_DAILY,
    FORECAST_MODE_ONECALL_HOURLY,
    FORECAST_MODE_ONECALL_DAILY,
]
DEFAULT_FORECAST_MODE = FORECAST_MODE_ONECALL_DAILY
# Current-weather keys for which monitoring sensors are created.
MONITORED_CONDITIONS = [
    ATTR_API_WEATHER,
    ATTR_API_TEMPERATURE,
    ATTR_API_WIND_SPEED,
    ATTR_API_WIND_BEARING,
    ATTR_API_HUMIDITY,
    ATTR_API_PRESSURE,
    ATTR_API_CLOUDS,
    ATTR_API_RAIN,
    ATTR_API_SNOW,
    ATTR_API_CONDITION,
    ATTR_API_WEATHER_CODE,
]
# Forecast keys for which forecast sensors are created; every entry should
# have a matching definition in FORECAST_SENSOR_TYPES.
FORECAST_MONITORED_CONDITIONS = [
    ATTR_FORECAST_CONDITION,
    ATTR_FORECAST_PRECIPITATION,
    ATTR_FORECAST_TEMP,
    ATTR_FORECAST_TEMP_LOW,
    ATTR_FORECAST_TIME,
    ATTR_FORECAST_WIND_BEARING,
    ATTR_FORECAST_WIND_SPEED,
]
# Language codes accepted by the OpenWeatherMap API (kept alphabetically
# sorted; used to validate the CONF_LANGUAGE option).
LANGUAGES = [
    "af",
    "al",
    "ar",
    "az",
    "bg",
    "ca",
    "cz",
    "da",
    "de",
    "el",
    "en",
    "es",
    "eu",
    "fa",
    "fi",
    "fr",
    "gl",
    "he",
    "hi",
    "hr",
    "hu",
    "id",
    "it",
    "ja",
    "kr",
    "la",
    "lt",
    "mk",
    "nl",
    "no",
    "pl",
    "pt",
    "pt_br",
    "ro",
    "ru",
    "se",
    "sk",
    "sl",
    "sp",
    "sr",
    "sv",
    "th",
    "tr",
    "ua",
    "uk",
    "vi",
    "zh_cn",
    "zh_tw",
    "zu",
]
# OWM condition code 800 ("clear sky"); mapped to the "sunny" condition here.
# The name suggests a clear-night variant is decided elsewhere -- confirm in
# the weather entity.
WEATHER_CODE_SUNNY_OR_CLEAR_NIGHT = 800
# Map each Home Assistant weather condition to the OpenWeatherMap condition
# codes that should be translated to it.
CONDITION_CLASSES = {
    ATTR_CONDITION_CLOUDY: [803, 804],
    ATTR_CONDITION_FOG: [701, 741],
    ATTR_CONDITION_HAIL: [906],
    ATTR_CONDITION_LIGHTNING: [210, 211, 212, 221],
    ATTR_CONDITION_LIGHTNING_RAINY: [200, 201, 202, 230, 231, 232],
    ATTR_CONDITION_PARTLYCLOUDY: [801, 802],
    ATTR_CONDITION_POURING: [504, 314, 502, 503, 522],
    ATTR_CONDITION_RAINY: [300, 301, 302, 310, 311, 312, 313, 500, 501, 520, 521],
    ATTR_CONDITION_SNOWY: [600, 601, 602, 611, 612, 620, 621, 622],
    ATTR_CONDITION_SNOWY_RAINY: [511, 615, 616],
    ATTR_CONDITION_SUNNY: [WEATHER_CODE_SUNNY_OR_CLEAR_NIGHT],
    ATTR_CONDITION_WINDY: [905, 951, 952, 953, 954, 955, 956, 957],
    ATTR_CONDITION_WINDY_VARIANT: [958, 959, 960, 961],
    # Atmospheric/extreme codes (smoke, dust, tornado, ...) fall back to
    # "exceptional"; the 9xx values are legacy OWM codes.
    ATTR_CONDITION_EXCEPTIONAL: [
        711,
        721,
        731,
        751,
        761,
        762,
        771,
        900,
        901,
        962,
        903,
        904,
    ],
}
# Sensor definitions for the current-weather sensors: display name, optional
# unit and optional device class, keyed by the ATTR_API_* data keys above.
WEATHER_SENSOR_TYPES = {
    ATTR_API_WEATHER: {SENSOR_NAME: "Weather"},
    ATTR_API_TEMPERATURE: {
        SENSOR_NAME: "Temperature",
        SENSOR_UNIT: TEMP_CELSIUS,
        SENSOR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
    },
    ATTR_API_WIND_SPEED: {
        SENSOR_NAME: "Wind speed",
        SENSOR_UNIT: SPEED_METERS_PER_SECOND,
    },
    ATTR_API_WIND_BEARING: {SENSOR_NAME: "Wind bearing", SENSOR_UNIT: DEGREE},
    ATTR_API_HUMIDITY: {
        SENSOR_NAME: "Humidity",
        SENSOR_UNIT: PERCENTAGE,
        SENSOR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
    },
    ATTR_API_PRESSURE: {
        SENSOR_NAME: "Pressure",
        SENSOR_UNIT: PRESSURE_HPA,
        SENSOR_DEVICE_CLASS: DEVICE_CLASS_PRESSURE,
    },
    ATTR_API_CLOUDS: {SENSOR_NAME: "Cloud coverage", SENSOR_UNIT: PERCENTAGE},
    ATTR_API_RAIN: {SENSOR_NAME: "Rain", SENSOR_UNIT: LENGTH_MILLIMETERS},
    ATTR_API_SNOW: {SENSOR_NAME: "Snow", SENSOR_UNIT: LENGTH_MILLIMETERS},
    ATTR_API_CONDITION: {SENSOR_NAME: "Condition"},
    ATTR_API_WEATHER_CODE: {SENSOR_NAME: "Weather Code"},
}
# Sensor definitions for the forecast sensors, keyed by the ATTR_FORECAST_*
# keys listed in FORECAST_MONITORED_CONDITIONS. The wind entries previously
# used the ATTR_API_* constants; they are switched to the matching
# ATTR_FORECAST_* constants so every key in FORECAST_MONITORED_CONDITIONS
# resolves in this mapping (the underlying string values are unchanged).
FORECAST_SENSOR_TYPES = {
    ATTR_FORECAST_CONDITION: {SENSOR_NAME: "Condition"},
    ATTR_FORECAST_PRECIPITATION: {SENSOR_NAME: "Precipitation"},
    ATTR_FORECAST_TEMP: {
        SENSOR_NAME: "Temperature",
        SENSOR_UNIT: TEMP_CELSIUS,
        SENSOR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
    },
    ATTR_FORECAST_TEMP_LOW: {
        SENSOR_NAME: "Temperature Low",
        SENSOR_UNIT: TEMP_CELSIUS,
        SENSOR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
    },
    ATTR_FORECAST_TIME: {
        SENSOR_NAME: "Time",
        SENSOR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
    },
    ATTR_FORECAST_WIND_BEARING: {SENSOR_NAME: "Wind bearing", SENSOR_UNIT: DEGREE},
    ATTR_FORECAST_WIND_SPEED: {
        SENSOR_NAME: "Wind speed",
        SENSOR_UNIT: SPEED_METERS_PER_SECOND,
    },
}
| |
# License: BSD 3 clause
"""
Tests related to ablation experiments.
:author: Michael Heilman (mheilman@ets.org)
:author: Nitin Madnani (nmadnani@ets.org)
:author: Dan Blanchard (dblanchard@ets.org)
:author: Aoife Cahill (acahill@ets.org)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import csv
import glob
import json
import os
from collections import OrderedDict
from io import open
from os.path import abspath, dirname, exists, join
import numpy as np
from nose.tools import eq_
from skll.data import FeatureSet, NDJWriter
from skll.experiments import run_configuration
from skll.learner import _DEFAULT_PARAM_GRIDS
from utils import fill_in_config_paths
# All learner names for which SKLL defines a default parameter grid.
_ALL_MODELS = list(_DEFAULT_PARAM_GRIDS.keys())

# Directory containing this test module; train/test/output live beneath it.
_my_dir = abspath(dirname(__file__))
def setup():
    """
    Create the train/test/output directories the ablation tests rely on.
    """
    for subdir in ('train', 'test', 'output'):
        dir_path = join(_my_dir, subdir)
        if not exists(dir_path):
            os.makedirs(dir_path)
def tearDown():
    """
    Remove ablation outputs and any generated config files after the tests.
    """
    output_dir = join(_my_dir, 'output')
    config_dir = join(_my_dir, 'configs')

    # Every artefact written by the ablation runs starts with 'ablation_cv_'.
    for stale_output in glob.glob(join(output_dir, 'ablation_cv_*')):
        os.unlink(stale_output)

    generated_configs = ['test_ablation.cfg',
                         'test_ablation_feature_hasher.cfg',
                         'test_ablation_sampler.cfg',
                         'test_ablation_feature_hasher_sampler.cfg']
    for cfg_name in generated_configs:
        cfg_path = join(config_dir, cfg_name)
        if exists(cfg_path):
            os.unlink(cfg_path)
def make_ablation_data():
    """
    Generate one .jsonlines training file per feature (f0..f4) with 1000
    examples of alternating "dog"/"cat" labels and random integer features.

    Note: the data depends on the exact sequence of np.random calls after the
    fixed seed, so the statement order here must not change.
    """
    # Remove old CV data
    for old_file in glob.glob(join(_my_dir, 'output',
                                   'ablation_cv_*.results')):
        os.remove(old_file)

    num_examples = 1000

    # Fixed seed so the generated featuresets are reproducible across runs.
    np.random.seed(1234567890)

    # Create lists we will write files from
    ids = []
    features = []
    labels = []
    for j in range(num_examples):
        # Alternate labels so classes are perfectly balanced.
        y = "dog" if j % 2 == 0 else "cat"
        ex_id = "{}{}".format(y, j)
        # Five integer features in [0, 4) per example.
        x = {"f{}".format(feat_num): np.random.randint(0, 4) for feat_num in
             range(5)}
        # OrderedDict so feature order is deterministic in the output files.
        x = OrderedDict(sorted(x.items(), key=lambda t: t[0]))
        ids.append(ex_id)
        labels.append(y)
        features.append(x)

    # Write one file per feature, each containing only that single feature
    # (the ablation machinery combines the files into featuresets).
    for i in range(5):
        train_path = join(_my_dir, 'train', 'f{}.jsonlines'.format(i))
        sub_features = []
        for example_num in range(num_examples):
            feat_num = i
            x = {"f{}".format(feat_num):
                 features[example_num]["f{}".format(feat_num)]}
            sub_features.append(x)
        train_fs = FeatureSet('ablation_cv', ids, features=sub_features,
                              labels=labels)
        writer = NDJWriter(train_path, train_fs)
        writer.write()
def check_ablation_rows(reader):
    """
    Verify the ``ablated_features`` and ``featureset`` columns of every row
    in an ablation summary file against its ``featureset_name``.

    :returns: number of rows consumed from ``reader``
    """
    count = 0
    for count, entry in enumerate(reader, 1):
        name = entry['featureset_name']
        if entry['ablated_features']:
            # "<fs>_minus_<removed>" rows carry a JSON list of removed features.
            base, removed_str = name.split('_minus_')
            observed_removed = json.loads(entry['ablated_features'])
        else:
            # "<fs>_all" rows have nothing ablated.
            base, removed_str = name.split('_all')
            observed_removed = []
        removed = removed_str.split('+') if removed_str else []
        remaining = sorted(set(base.split('+')) - set(removed))
        eq_(removed, observed_removed)
        eq_(remaining, json.loads(entry['featureset']))
    return count
def test_ablation_cv():
    """
    Test if ablation works with cross-validate
    """
    make_ablation_data()

    config_template_path = join(_my_dir, 'configs',
                                'test_ablation.template.cfg')
    config_path = fill_in_config_paths(config_template_path)

    run_configuration(config_path, quiet=True, ablation=1)

    # read in the summary file and make sure it has
    # 7 ablated featuresets * (10 folds + 1 average line) * 2 learners = 154
    # lines
    with open(join(_my_dir, 'output', 'ablation_cv_summary.tsv')) as f:
        reader = csv.DictReader(f, dialect=csv.excel_tab)
        num_rows = check_ablation_rows(reader)
        eq_(num_rows, 154)

    # make sure there are 7 ablated featuresets * 2 learners = 14 results
    # files (comment corrected: it previously claimed 6 * 2 = 12, which
    # contradicted both the assertion below and the summary count above)
    num_result_files = len(glob.glob(join(_my_dir, 'output',
                                          'ablation_cv_*.results')))
    eq_(num_result_files, 14)
def test_ablation_cv_all_combos():
    """
    Test to validate whether ablation all-combos works with cross-validate
    """
    make_ablation_data()

    template_path = join(_my_dir, 'configs', 'test_ablation.template.cfg')
    run_configuration(fill_in_config_paths(template_path), quiet=True,
                      ablation=None)

    # The summary should contain
    # 10 ablated featuresets * (10 folds + 1 average line) * 2 learners = 220
    # rows.
    summary_path = join(_my_dir, 'output', 'ablation_cv_summary.tsv')
    with open(summary_path) as summary_file:
        tsv_reader = csv.DictReader(summary_file, dialect=csv.excel_tab)
        eq_(check_ablation_rows(tsv_reader), 220)

    # 10 ablated featuresets * 2 learners = 20 results files.
    result_pattern = join(_my_dir, 'output', 'ablation_cv_*results')
    eq_(len(glob.glob(result_pattern)), 20)
def test_ablation_cv_feature_hasher():
    """
    Test if ablation works with cross-validate and feature_hasher
    """
    make_ablation_data()

    config_template_path = join(_my_dir, 'configs',
                                'test_ablation_feature_hasher.template.cfg')
    config_path = fill_in_config_paths(config_template_path)

    run_configuration(config_path, quiet=True, ablation=1)

    # read in the summary file and make sure it has
    # 7 ablated featuresets * (10 folds + 1 average line) * 2 learners = 154
    # lines
    with open(join(_my_dir, 'output',
                   'ablation_cv_feature_hasher_summary.tsv')) as f:
        reader = csv.DictReader(f, dialect=csv.excel_tab)
        num_rows = check_ablation_rows(reader)
        eq_(num_rows, 154)

    # make sure there are 7 ablated featuresets * 2 learners = 14 results
    # files (comment corrected: it previously claimed 6 * 2 = 12, which
    # contradicted the assertion below)
    num_result_files = len(glob.glob(join(_my_dir, 'output',
                                          ('ablation_cv_feature_hasher_'
                                           '*.results'))))
    eq_(num_result_files, 14)
def test_ablation_cv_feature_hasher_all_combos():
    """
    Test if ablation all-combos works with cross-validate and feature_hasher
    """
    make_ablation_data()

    template_path = join(_my_dir, 'configs',
                         'test_ablation_feature_hasher.template.cfg')
    run_configuration(fill_in_config_paths(template_path), quiet=True,
                      ablation=None)

    # The summary should contain
    # 10 ablated featuresets * (10 folds + 1 average line) * 2 learners = 220
    # rows.
    summary_path = join(_my_dir, 'output',
                        'ablation_cv_feature_hasher_summary.tsv')
    with open(summary_path) as summary_file:
        tsv_reader = csv.DictReader(summary_file, dialect=csv.excel_tab)
        eq_(check_ablation_rows(tsv_reader), 220)

    # 10 ablated featuresets * 2 learners = 20 results files.
    result_pattern = join(_my_dir, 'output',
                          'ablation_cv_feature_hasher_*results')
    eq_(len(glob.glob(result_pattern)), 20)
def test_ablation_cv_sampler():
    """
    Test to validate whether ablation works with cross-validate and samplers
    """
    make_ablation_data()

    config_template_path = join(_my_dir, 'configs',
                                'test_ablation_sampler.template.cfg')
    config_path = fill_in_config_paths(config_template_path)

    run_configuration(config_path, quiet=True, ablation=1)

    # read in the summary file and make sure it has
    # 7 ablated featuresets * (10 folds + 1 average line) * 2 learners = 154
    # lines
    with open(join(_my_dir, 'output', 'ablation_cv_summary.tsv')) as f:
        reader = csv.DictReader(f, dialect=csv.excel_tab)
        num_rows = check_ablation_rows(reader)
        eq_(num_rows, 154)

    # make sure there are 7 ablated featuresets * 2 learners = 14 results
    # files (comment corrected: it previously claimed 6 * 2 = 12, which
    # contradicted the assertion below)
    num_result_files = len(glob.glob(join(_my_dir, 'output',
                                          'ablation_cv_*.results')))
    eq_(num_result_files, 14)
def test_ablation_cv_all_combos_sampler():
    """
    Test to validate whether ablation all-combos works with cross-validate
    and samplers
    """
    make_ablation_data()

    template_path = join(_my_dir, 'configs',
                         'test_ablation_sampler.template.cfg')
    run_configuration(fill_in_config_paths(template_path), quiet=True,
                      ablation=None)

    # The summary should contain
    # 10 ablated featuresets * (10 folds + 1 average line) * 2 learners = 220
    # rows.
    summary_path = join(_my_dir, 'output', 'ablation_cv_summary.tsv')
    with open(summary_path) as summary_file:
        tsv_reader = csv.DictReader(summary_file, dialect=csv.excel_tab)
        eq_(check_ablation_rows(tsv_reader), 220)

    # 10 ablated featuresets * 2 learners = 20 results files.
    result_pattern = join(_my_dir, 'output', 'ablation_cv_*results')
    eq_(len(glob.glob(result_pattern)), 20)
def test_ablation_cv_feature_hasher_sampler():
    """
    Test to validate whether ablation works with cross-validate
    and feature_hasher
    """
    make_ablation_data()

    config_template_path = join(_my_dir, 'configs', ('test_ablation_feature_'
                                                     'hasher_sampler.template'
                                                     '.cfg'))
    config_path = fill_in_config_paths(config_template_path)

    run_configuration(config_path, quiet=True, ablation=1)

    # read in the summary file and make sure it has
    # 7 ablated featuresets * (10 folds + 1 average line) * 2 learners = 154
    # lines
    with open(join(_my_dir, 'output',
                   'ablation_cv_feature_hasher_summary.tsv')) as f:
        reader = csv.DictReader(f, dialect=csv.excel_tab)
        num_rows = check_ablation_rows(reader)
        eq_(num_rows, 154)

    # make sure there are 7 ablated featuresets * 2 learners = 14 results
    # files (comment corrected: it previously claimed 6 * 2 = 12, which
    # contradicted the assertion below)
    num_result_files = len(glob.glob(join(_my_dir, 'output',
                                          ('ablation_cv_feature_hasher_'
                                           '*.results'))))
    eq_(num_result_files, 14)
def test_ablation_cv_feature_hasher_all_combos_sampler():
    """
    Test to validate whether ablation all-combos works with cross-validate,
    feature_hasher and samplers
    """
    make_ablation_data()

    template_path = join(_my_dir, 'configs', ('test_ablation_feature_'
                                              'hasher_sampler.template'
                                              '.cfg'))
    run_configuration(fill_in_config_paths(template_path), quiet=True,
                      ablation=None)

    # The summary should contain
    # 10 ablated featuresets * (10 folds + 1 average line) * 2 learners = 220
    # rows.
    summary_path = join(_my_dir, 'output',
                        'ablation_cv_feature_hasher_summary.tsv')
    with open(summary_path) as summary_file:
        tsv_reader = csv.DictReader(summary_file, dialect=csv.excel_tab)
        eq_(check_ablation_rows(tsv_reader), 220)

    # 10 ablated featuresets * 2 learners = 20 results files.
    result_pattern = join(_my_dir, 'output',
                          'ablation_cv_feature_hasher_*results')
    eq_(len(glob.glob(result_pattern)), 20)
| |
import numpy as np
from pytz import timezone, country_timezones
from simulator.customer_abstract import AbstractCustomer
class BaseCustomer(AbstractCustomer):
    """
    Base class for customers/fraudsters that support uni-modal authentication.

    Transaction timing is modelled with per-month/monthday/weekday/hour
    probability vectors, perturbed per-customer with Gaussian noise.
    NOTE(review): ``self.params``, ``self.random_state``, ``self.stay``,
    ``self.country``, ``self.currency`` and ``self.model`` come from
    AbstractCustomer -- the ``initialise_*`` methods are presumably invoked
    by its constructor; confirm against the superclass.
    """

    def __init__(self, transaction_model, fraudster):
        """
        Base class for customers/fraudsters that support uni-modal authentication.
        :param transaction_model: the simulation model this customer belongs to
        :param fraudster: whether this customer is a fraudster (also used as a
            0/1 index into the per-group parameter tables)
        """
        unique_id = transaction_model.get_next_customer_id(fraudster)
        super().__init__(unique_id, transaction_model, fraudster)

        # initialise probability of making a transaction per month/hour/...
        self.noise_level = self.params['noise_level']

        # average number of transaction per hour in general; varies per customer
        self.avg_trans_per_hour = self.initialise_avg_trans_per_hour()

        # initialise transaction probabilities per month/monthday/weekday/hour
        self.trans_prob_month, self.trans_prob_monthday, self.trans_prob_weekday, self.trans_prob_hour = self.initialise_transaction_probabilities()

        # whether the current transaction was cancelled by the customer
        self.curr_trans_cancelled = False

    def decide_making_transaction(self):
        """Return True if the customer makes a transaction this step."""
        # reset that the current transaction was not cancelled
        self.curr_trans_cancelled = False
        if self.stay:
            # Bernoulli draw against the time-dependent transaction rate.
            make_transaction = self.get_transaction_prob() > self.random_state.uniform(0, 1)
        else:
            # customers that have left never transact again
            make_transaction = False
        return make_transaction

    def post_process_transaction(self):
        """Hook called after a transaction; decides whether to stay."""
        # decide whether to stay
        self.stay = self.stay_after_transaction()

    def get_transaction_prob(self):
        """
        Return the probability of transacting in the current (local) hour.

        Each per-period probability is rescaled by the number of periods
        (12 months, 24 hours, ~30.5 days/month, 7 weekdays) so that a uniform
        distribution leaves the base hourly rate unchanged.
        """
        # get the current local time
        self.local_datetime = self.get_local_datetime()
        # get the average transactions per hour
        trans_prob = self.avg_trans_per_hour
        # now weigh by probabilities of transactions per month/week/...
        trans_prob *= 12 * self.trans_prob_month[self.local_datetime.month - 1]
        trans_prob *= 24 * self.trans_prob_hour[self.local_datetime.hour]
        trans_prob *= 30.5 * self.trans_prob_monthday[self.local_datetime.day - 1]
        trans_prob *= 7 * self.trans_prob_weekday[self.local_datetime.weekday()]
        return trans_prob

    def get_local_datetime(self):
        """Convert the model's global datetime to this customer's local time."""
        # convert global to local date (first add global timezone info, then convert to local)
        local_datetime = self.model.curr_global_date
        # uses the first timezone listed for the customer's country
        local_datetime = local_datetime.astimezone(timezone(country_timezones(self.country)[0]))
        return local_datetime

    def get_curr_merchant(self):
        """
        Can be called at each transaction; will select a merchant to buy from.
        :return: merchant ID
        """
        # merchant distribution conditioned on the customer's currency
        merchant_prob = self.params['merchant_per_currency'][self.fraudster]
        merchant_prob = merchant_prob.loc[self.currency]
        merchant_ID = self.random_state.choice(merchant_prob.index.values, p=merchant_prob.values.flatten())
        return next(m for m in self.model.merchants if m.unique_id == merchant_ID)

    def get_curr_amount(self):
        # the merchant decides the amount distribution
        return self.curr_merchant.get_amount(self)

    def stay_after_transaction(self):
        """Bernoulli draw: True with probability get_staying_prob()."""
        return self.get_staying_prob() > self.random_state.uniform(0, 1)

    def get_staying_prob(self):
        return self.params['stay_prob'][self.fraudster]

    def initialise_country(self):
        """Draw a home country from the per-group country distribution."""
        country_frac = self.params['country_frac']
        return self.random_state.choice(country_frac.index.values, p=country_frac.iloc[:, self.fraudster].values)

    def initialise_currency(self):
        """Draw a currency conditioned on the customer's country."""
        currency_prob = self.params['currency_per_country'][self.fraudster]
        currency_prob = currency_prob.loc[self.country]
        return self.random_state.choice(currency_prob.index.values, p=currency_prob.values.flatten())

    def initialise_card_id(self):
        """Ask the model for a fresh card ID."""
        return self.model.get_next_card_id()

    def initialise_transaction_probabilities(self):
        """
        Return noisy per-customer copies of the population-level
        (month, monthday, weekday, hour) transaction probability vectors.
        Noise is i.i.d. Gaussian (via multivariate_normal with a diagonal
        covariance); negative entries are clipped to zero.
        """
        # transaction probability per month
        trans_prob_month = self.params['frac_month'][:, self.fraudster]
        trans_prob_month = self.random_state.multivariate_normal(trans_prob_month, np.eye(12) * self.noise_level / 1200)
        trans_prob_month[trans_prob_month < 0] = 0

        # transaction probability per day in month
        trans_prob_monthday = self.params['frac_monthday'][:, self.fraudster]
        trans_prob_monthday = self.random_state.multivariate_normal(trans_prob_monthday, np.eye(31) * self.noise_level / 305)
        trans_prob_monthday[trans_prob_monthday < 0] = 0

        # transaction probability per weekday (we assume this differs per individual)
        trans_prob_weekday = self.params['frac_weekday'][:, self.fraudster]
        trans_prob_weekday = self.random_state.multivariate_normal(trans_prob_weekday, np.eye(7) * self.noise_level / 70)
        trans_prob_weekday[trans_prob_weekday < 0] = 0

        # transaction probability per hour (we assume this differs per individual)
        trans_prob_hour = self.params['frac_hour'][:, self.fraudster]
        trans_prob_hour = self.random_state.multivariate_normal(trans_prob_hour, np.eye(24) * self.noise_level / 240)
        trans_prob_hour[trans_prob_hour < 0] = 0

        return trans_prob_month, trans_prob_monthday, trans_prob_weekday, trans_prob_hour

    def initialise_avg_trans_per_hour(self):
        """
        Return this customer's average hourly transaction rate: the yearly
        population rate, perturbed with Gaussian noise (only if the result
        stays positive), divided over 366 * 24 hours and scaled by the
        group's transaction motivation.
        """
        trans_per_year = self.params['trans_per_year'][self.fraudster]
        rand_addition = self.random_state.normal(0, self.noise_level * trans_per_year)
        # only apply the noise if it does not make the rate non-positive
        if trans_per_year + rand_addition > 0:
            trans_per_year += rand_addition
        avg_trans_per_hour = trans_per_year / 366. / 24.
        avg_trans_per_hour *= self.params['transaction_motivation'][self.fraudster]
        return avg_trans_per_hour
class GenuineCustomer(BaseCustomer):
    """A non-fraudulent customer whose behaviour is modulated by satisfaction."""

    def __init__(self, transaction_model, satisfaction=1):
        super().__init__(transaction_model, fraudster=False)
        # whether a fraudster has corrupted this customer's credit card
        self.card_corrupted = False
        # number of authentication steps taken for the current transaction
        self.curr_auth_step = 0
        # the customer's patience, drawn optimistically from Beta(10, 2)
        self.patience = self.random_state.beta(10, 2)
        # the customer's current satisfaction in [0, 1]
        self.satisfaction = satisfaction

    def stay_after_transaction(self):
        """Stay with probability satisfaction * stay_prob."""
        effective_stay_prob = self.satisfaction * self.params['stay_prob'][self.fraudster]
        leave_threshold = 1 - effective_stay_prob
        return leave_threshold <= self.random_state.uniform(0, 1)

    def card_got_corrupted(self):
        """Mark this customer's card as stolen/used by a fraudster."""
        self.card_corrupted = True

    def get_transaction_prob(self):
        # unhappy customers transact less often
        return self.satisfaction * super().get_transaction_prob()

    def decide_making_transaction(self):
        """
        For a genuine customer, we add the option of leaving
        when the customer's card was subject to fraud
        :return:
        """
        # a new transaction starts with zero authentication steps
        self.curr_auth_step = 0
        if self.card_corrupted:
            # possibly leave the platform entirely...
            if self.params['stay_after_fraud'] < self.random_state.uniform(0, 1):
                self.stay = False
            # ...and in any case make no transaction this step
            # (skips the super().decide_making_transaction() computation)
            return False
        return super().decide_making_transaction()

    def post_process_transaction(self):
        # satisfaction must be updated before the stay decision uses it
        self.update_satisfaction()
        super().post_process_transaction()

    def update_satisfaction(self):
        """
        Adjust the satisfaction of the user after a transaction was made.
        :return:
        """
        if self.curr_trans_cancelled:
            # a cancelled transaction costs 5% satisfaction
            factor = 0.95
        elif self.curr_auth_step == 0:
            # a frictionless transaction gains 1%
            factor = 1.01
        else:
            # any authentication friction costs 1%
            factor = 0.99
        # clamp the result to [0, 1]
        self.satisfaction = max([0, min([1, self.satisfaction * factor])])

    def give_authentication(self):
        """
        Authenticate self; this can be called several times per transaction.
        Returns the authentication quality (1), or None if the customer
        loses patience and cancels the transaction.
        :return:
        """
        # willingness mixes base patience with the relative transaction size
        amount_ratio = self.curr_amount / self.curr_merchant.max_amount
        willingness = 0.8 * self.patience + 0.2 * amount_ratio
        roll = self.random_state.uniform(0, 1)
        self.curr_auth_step += 1
        if willingness > roll:
            return 1
        # cancel the transaction
        self.curr_trans_cancelled = True
        return None
class FraudulentCustomer(BaseCustomer):
    """A fraudster; transacts with stolen cards and cannot re-authenticate."""

    def __init__(self, transaction_model):
        super().__init__(transaction_model, fraudster=True)

    def initialise_card_id(self):
        """
        Pick a card either by using a card from an existing user,
        or a completely new one (i.e., from customers unnknown to the processing platform)
        :return:
        """
        draw = self.random_state.uniform(0, 1)
        if self.params['fraud_cards_in_genuine'] > draw:
            # the fraudster picks a victim...
            # ... (1) from a country fraudsters operate in
            country_frac = self.params['country_frac']
            known_countries = country_frac.index[country_frac['fraud'] != 0].values
            # ... (2) with a currency fraudsters use
            known_currencies = self.params['currency_per_country'][1].index.get_level_values(1).unique()
            # ... (3) that has already made a transaction
            active_customers = [c for c in self.model.customers if c.card_id is not None]
            candidates = [c for c in active_customers
                          if (c.country in known_countries) and (c.currency in known_currencies)]
            try:
                victim = self.random_state.choice(candidates)
            except ValueError:
                # no suitable target: fall back to a fresh card of our own
                return super().initialise_card_id()
            # steal the victim's card together with its country/currency
            self.country = victim.country
            self.currency = victim.currency
            return victim.card_id
        return super().initialise_card_id()

    def give_authentication(self):
        """
        Authenticate self; this can be called several times per transaction.
        Returns the authentication quality.
        :return:
        """
        # we assume that the fraudster cannot provide a second authentication
        self.curr_trans_cancelled = True
        return None
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional user callback receiving (pipeline response, deserialized model,
# response headers); when supplied via the ``cls`` kwarg its return value
# replaces the deserialized result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteGatewaysOperations:
"""ExpressRouteGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # pipeline client used to build and send HTTP requests
    self._client = client
    # msrest serializer/deserializer pair for request bodies and responses
    self._serialize = serializer
    self._deserialize = deserializer
    # service client configuration (holds e.g. subscription_id, polling_interval)
    self._config = config
async def list_by_subscription(
    self,
    **kwargs: Any
) -> "_models.ExpressRouteGatewayList":
    """Lists ExpressRoute gateways under a given subscription.

    NOTE: auto-generated (AutoRest) code -- edits will be lost on regeneration.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteGatewayList, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteGatewayList
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGatewayList"]
    # map auth/not-found/conflict statuses to typed azure-core exceptions;
    # callers may extend/override via the ``error_map`` kwarg
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-06-01"
    accept = "application/json"

    # Construct URL (template stored on the method's ``metadata`` attribute)
    url = self.list_by_subscription.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # raises a mapped exception if the status is in error_map,
        # otherwise falls through to the generic HttpResponseError
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteGateways'}  # type: ignore
async def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> "_models.ExpressRouteGatewayList":
    """Lists ExpressRoute gateways in a given resource group.

    NOTE: auto-generated (AutoRest) code -- edits will be lost on regeneration.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteGatewayList, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteGatewayList
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGatewayList"]
    # map auth/not-found/conflict statuses to typed azure-core exceptions;
    # callers may extend/override via the ``error_map`` kwarg
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-06-01"
    accept = "application/json"

    # Construct URL (template stored on the method's ``metadata`` attribute)
    url = self.list_by_resource_group.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    express_route_gateway_name: str,
    put_express_route_gateway_parameters: "_models.ExpressRouteGateway",
    **kwargs: Any
) -> "_models.ExpressRouteGateway":
    """Issue the initial PUT of the create-or-update long-running operation
    and return the deserialized gateway (200 = updated, 201 = created).

    NOTE: auto-generated (AutoRest) code -- edits will be lost on regeneration.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
    # map auth/not-found/conflict statuses to typed azure-core exceptions;
    # callers may extend/override via the ``error_map`` kwarg
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL (template stored on the method's ``metadata`` attribute)
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # serialize the gateway model as the PUT request body
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(put_express_route_gateway_parameters, 'ExpressRouteGateway')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # both success codes deserialize to the same model type
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        express_route_gateway_name: str,
        put_express_route_gateway_parameters: "_models.ExpressRouteGateway",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteGateway"]:
        """Creates or updates a ExpressRoute gateway in a specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_gateway_name: The name of the ExpressRoute gateway.
        :type express_route_gateway_name: str
        :param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT
         operation.
        :type put_express_route_gateway_parameters: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.ExpressRouteGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Issue the initial PUT.  The `cls` lambda passes the raw pipeline
            # response through unchanged so that deserialization happens in the
            # poller's callback below, not in the initial call.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                express_route_gateway_name=express_route_gateway_name,
                put_express_route_gateway_parameters=put_express_route_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial request; drop them so they
        # are not forwarded to the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model type.
            deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # PUT LROs for this resource track completion via Azure-AsyncOperation.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs: Any
) -> "_models.ExpressRouteGateway":
"""Fetches the details of a ExpressRoute gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ExpressRouteGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        express_route_gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway
        resource can only be deleted when there are no connection subresources.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_gateway_name: The name of the ExpressRoute gateway.
        :type express_route_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the DELETE; the raw pipeline response feeds the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                express_route_gateway_name=express_route_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke a custom `cls` hook if given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Delete LROs for this resource track completion via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new LRO.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Scheduler."""
import collections
import copy
from datetime import datetime
from unittest import mock
import ddt
from oslo_config import cfg
from cinder.common import constants
from cinder import context
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.scheduler import driver
from cinder.scheduler import manager
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.scheduler import fakes as fake_scheduler
from cinder.tests.unit import test
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
@ddt.ddt
class SchedulerManagerTestCase(test.TestCase):
    """Test case for scheduler manager."""

    # Overridable hooks so subclasses can exercise alternate manager/driver
    # implementations with the same test bodies.
    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'cinder.scheduler.driver.Scheduler'

    class AnException(Exception):
        # Sentinel exception type for tests that need a unique error class.
        pass

    def setUp(self):
        """Build a SchedulerManager with the startup delay disabled."""
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        self.manager = self.manager_cls()
        self.manager._startup_delay = False
        self.context = context.get_admin_context()
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}

    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertIsInstance(manager.driver, self.driver_cls)
    @mock.patch('cinder.scheduler.driver.Scheduler.is_first_receive')
    @mock.patch('eventlet.sleep')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities')
    def test_init_host_with_rpc_delay_after_3_tries(self,
                                                    publish_capabilities_mock,
                                                    sleep_mock,
                                                    is_first_receive_mock):
        """init_host_with_rpc keeps sleeping until capabilities arrive."""
        self.manager._startup_delay = True
        # Two misses, then success: expect exactly two 1-second sleeps.
        is_first_receive_mock.side_effect = [False, False, True]
        self.manager.init_host_with_rpc()
        publish_capabilities_mock.assert_called_once_with(mock.ANY)
        calls = [mock.call(1)] * 2
        sleep_mock.assert_has_calls(calls)
        self.assertEqual(2, sleep_mock.call_count)
        self.assertFalse(self.manager._startup_delay)

    @mock.patch('cinder.scheduler.driver.Scheduler.is_first_receive')
    @mock.patch('eventlet.sleep')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities')
    @ddt.data(71, 17)
    def test_init_host_with_rpc_delay_uses_new_config(
            self, new_cfg_value, publish_capabilities_mock, sleep_mock,
            is_first_receive_mock):
        """Total wait is bounded by scheduler_driver_init_wait_time."""
        # previously used CONF.periodic_interval; see Bug #1828748
        new_cfg_name = 'scheduler_driver_init_wait_time'
        self.addCleanup(CONF.clear_override, new_cfg_name)
        CONF.set_override(new_cfg_name, new_cfg_value)
        # Capabilities never arrive, so the loop runs out the full wait time.
        is_first_receive_mock.return_value = False
        self.manager.init_host_with_rpc()
        self.assertEqual(new_cfg_value, sleep_mock.call_count)
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch(
        'cinder.scheduler.host_manager.BackendState.consume_from_volume')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
    def test_manage_existing_snapshot(self, mock_manage_existing_snapshot,
                                      mock_consume, mock_backend_passes):
        """Scheduling manage_existing_snapshot consumes space and casts RPC."""
        volume = fake_volume.fake_volume_obj(self.context, **{'size': 1})
        fake_backend = fake_scheduler.FakeBackendState('host1', {})
        mock_backend_passes.return_value = fake_backend
        self.manager.manage_existing_snapshot(self.context, volume,
                                              'fake_snapshot', 'fake_ref',
                                              None)
        mock_consume.assert_called_once_with({'size': 1})
        mock_manage_existing_snapshot.assert_called_once_with(
            self.context, 'fake_snapshot', 'fake_ref',
            volume.service_topic_queue)

    @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
    @mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
    @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'})
    def test_reset(self, get_min_obj, get_min_rpc):
        """reset() refreshes RPC/OVO version caps on the volume rpcapi."""
        old_version = objects.base.OBJ_VERSIONS.versions[-2]
        # Build the manager while pinned to an older object version so reset()
        # has something to upgrade from.
        with mock.patch('cinder.rpc.LAST_OBJ_VERSIONS',
                        {'cinder-volume': old_version,
                         'cinder-scheduler': old_version,
                         'cinder-backup': old_version}):
            mgr = self.manager_cls()
        volume_rpcapi = mgr.driver.volume_rpcapi
        self.assertEqual('1.3', volume_rpcapi.client.version_cap)
        self.assertEqual(old_version,
                         volume_rpcapi.client.serializer._base.version_cap)
        get_min_obj.return_value = self.latest_ovo_version
        mgr.reset()
        # After reset, the caps reflect the (mocked) minimum service versions.
        volume_rpcapi = mgr.driver.volume_rpcapi
        self.assertEqual(get_min_rpc.return_value,
                         volume_rpcapi.client.version_cap)
        self.assertEqual(get_min_obj.return_value,
                         volume_rpcapi.client.serializer._base.version_cap)
        self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)

    @mock.patch('cinder.message.api.API.cleanup_expired_messages')
    def test_clean_expired_messages(self, mock_clean):
        """Periodic task delegates to the message API cleanup."""
        self.manager._clean_expired_messages(self.context)
        mock_clean.assert_called_once_with(self.context)
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch(
        'cinder.scheduler.host_manager.BackendState.consume_from_volume')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume')
    def test_extend_volume(self, mock_extend,
                           mock_consume, mock_backend_passes):
        """Happy-path extend: consume capacity and cast to the volume service."""
        volume = fake_volume.fake_volume_obj(self.context, **{'size': 1})
        fake_backend = fake_scheduler.FakeBackendState('host1', {})
        mock_backend_passes.return_value = fake_backend
        self.manager.extend_volume(self.context, volume, 2, 'fake_reservation')
        mock_consume.assert_called_once_with({'size': 1})
        mock_extend.assert_called_once_with(
            self.context, volume, 2, 'fake_reservation')

    @ddt.data({'key': 'value'},
              objects.RequestSpec(volume_id=fake.VOLUME2_ID))
    def test_append_operation_decorator(self, rs):
        """append_operation_type stamps 'operation' into the request_spec."""

        @manager.append_operation_type()
        def _fake_schedule_method1(request_spec=None):
            return request_spec

        @manager.append_operation_type(name='_fake_schedule_method22')
        def _fake_schedule_method2(request_spec=None):
            return request_spec

        @manager.append_operation_type()
        def _fake_schedule_method3(request_spec2=None):
            return request_spec2

        result1 = _fake_schedule_method1(request_spec=copy.deepcopy(rs))
        result2 = _fake_schedule_method2(request_spec=copy.deepcopy(rs))
        result3 = _fake_schedule_method3(request_spec2=copy.deepcopy(rs))
        self.assertEqual('_fake_schedule_method1', result1['operation'])
        self.assertEqual('_fake_schedule_method22', result2['operation'])
        # Only a kwarg literally named 'request_spec' is stamped; others
        # pass through untouched.
        self.assertEqual(rs, result3)

    @ddt.data([{'key1': 'value1'}, {'key1': 'value2'}],
              [objects.RequestSpec(volume_id=fake.VOLUME_ID),
               objects.RequestSpec(volume_id=fake.VOLUME2_ID)])
    def test_append_operation_decorator_with_list(self, rs_list):
        """Every spec in a request_spec_list gets the operation stamp."""

        @manager.append_operation_type()
        def _fake_schedule_method(request_spec_list=None):
            return request_spec_list

        result1 = _fake_schedule_method(request_spec_list=rs_list)
        for rs in result1:
            self.assertEqual('_fake_schedule_method', rs['operation'])
    @ddt.data('available', 'in-use')
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch(
        'cinder.scheduler.host_manager.BackendState.consume_from_volume')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume')
    @mock.patch('cinder.quota.QUOTAS.rollback')
    @mock.patch('cinder.message.api.API.create')
    def test_extend_volume_no_valid_host(self, status, mock_create,
                                         mock_rollback,
                                         mock_extend, mock_consume,
                                         mock_backend_passes):
        """NoValidBackend on extend: quota rollback, status restore, message."""
        volume = fake_volume.fake_volume_obj(self.context,
                                             **{'size': 1,
                                                'previous_status': status})
        no_valid_backend = exception.NoValidBackend(reason='')
        mock_backend_passes.side_effect = [no_valid_backend]

        with mock.patch.object(self.manager,
                               '_set_volume_state_and_notify') as mock_notify:
            self.manager.extend_volume(self.context, volume, 2,
                                       'fake_reservation')
            # The volume goes back to its previous status and the failure is
            # surfaced via notifier, quota rollback and a user-facing message;
            # no capacity is consumed and no RPC cast is made.
            mock_notify.assert_called_once_with(
                'extend_volume', {'volume_state': {'status': status,
                                                   'previous_status': None}},
                self.context, no_valid_backend, None)
            mock_rollback.assert_called_once_with(
                self.context, 'fake_reservation', project_id=volume.project_id)
            mock_consume.assert_not_called()
            mock_extend.assert_not_called()
            mock_create.assert_called_once_with(
                self.context,
                message_field.Action.EXTEND_VOLUME,
                resource_uuid=volume.id,
                exception=no_valid_backend)

    @mock.patch('cinder.quota.QuotaEngine.expire')
    def test_clean_expired_reservation(self, mock_clean):
        """Periodic task delegates to the quota engine's expire()."""
        self.manager._clean_expired_reservation(self.context)
        mock_clean.assert_called_once_with(self.context)
    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities_empty_dict(self, _mock_update_cap):
        # Test no capabilities passes empty dictionary
        service = 'fake_service'
        host = 'fake_host'
        self.manager.update_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host)
        _mock_update_cap.assert_called_once_with(service, host, {}, None, None)

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities_correct(self, _mock_update_cap):
        # Test capabilities passes correctly
        service = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        self.manager.update_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host,
                                                 capabilities=capabilities)
        _mock_update_cap.assert_called_once_with(service, host, capabilities,
                                                 None, None)

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'notify_service_capabilities')
    def test_notify_service_capabilities_no_timestamp(self, _mock_notify_cap):
        """Test old interface that receives host."""
        service = 'volume'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        self.manager.notify_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host,
                                                 capabilities=capabilities)
        _mock_notify_cap.assert_called_once_with(service, host, capabilities,
                                                 None)

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'notify_service_capabilities')
    def test_notify_service_capabilities_timestamp(self, _mock_notify_cap):
        """Test new interface that receives backend and timestamp."""
        service = 'volume'
        backend = 'fake_cluster'
        capabilities = {'fake_capability': 'fake_value'}
        timestamp = '1970-01-01T00:00:00.000000'
        self.manager.notify_service_capabilities(self.context,
                                                 service_name=service,
                                                 backend=backend,
                                                 capabilities=capabilities,
                                                 timestamp=timestamp)
        # The ISO timestamp string is parsed into a datetime before it
        # reaches the driver.
        _mock_notify_cap.assert_called_once_with(service, backend,
                                                 capabilities,
                                                 datetime(1970, 1, 1))
    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.message.api.API.create')
    @mock.patch('cinder.db.volume_update')
    def test_create_volume_exception_puts_volume_in_error_state(
            self, _mock_volume_update, _mock_message_create,
            _mock_sched_create):
        # Test NoValidBackend exception behavior for create_volume.
        # Puts the volume in 'error' state and eats the exception.
        _mock_sched_create.side_effect = exception.NoValidBackend(reason="")
        volume = fake_volume.fake_volume_obj(self.context, use_quota=True)
        request_spec = {'volume_id': volume.id,
                        'volume': {'id': volume.id, '_name_id': None,
                                   'metadata': {}, 'admin_metadata': {},
                                   'glance_metadata': {}}}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_volume_update.assert_called_once_with(self.context,
                                                    volume.id,
                                                    {'status': 'error'})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        # A user-visible message is recorded for the scheduling failure.
        _mock_message_create.assert_called_once_with(
            self.context, message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
            resource_uuid=volume.id,
            exception=mock.ANY)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('eventlet.sleep')
    def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create):
        """With the startup delay disabled, scheduling happens immediately."""
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        self.assertFalse(_mock_sleep.called)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('eventlet.sleep')
    def test_create_volume_set_worker(self, _mock_sleep, _mock_sched_create):
        """Make sure that the worker is created when creating a volume."""
        volume = tests_utils.create_volume(self.context, status='creating')
        request_spec = {'volume_id': volume.id}
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec,
                                   filter_properties={})
        volume.set_worker.assert_called_once_with()

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
    @mock.patch('eventlet.sleep')
    def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep,
                                                         _mock_is_ready,
                                                         _mock_sched_create):
        """During startup delay, create waits until the driver is ready."""
        self.manager._startup_delay = True
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        # Two not-ready polls, then ready: two 1-second sleeps expected.
        _mock_is_ready.side_effect = [False, False, True]
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        calls = [mock.call(1)] * 2
        _mock_sleep.assert_has_calls(calls)
        self.assertEqual(2, _mock_sleep.call_count)

    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
    @mock.patch('eventlet.sleep')
    def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep,
                                                    _mock_is_ready,
                                                    _mock_sched_create):
        """Driver already ready: no sleeps before scheduling."""
        self.manager._startup_delay = True
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id}
        request_spec_obj = objects.RequestSpec.from_primitives(request_spec)
        _mock_is_ready.return_value = True
        self.manager.create_volume(self.context, volume,
                                   request_spec=request_spec_obj,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context,
                                                   request_spec_obj, {})
        self.assertFalse(_mock_sleep.called)
    @mock.patch('cinder.db.volume_get')
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_backend_passes,
            _mock_volume_get):
        # Test NoValidBackend exception behavior for migrate_volume_to_host.
        # Puts the volume in 'error_migrating' state and eats the exception.
        fake_updates = {'migration_status': 'error'}
        self._test_migrate_volume_exception_returns_volume_state(
            _mock_volume_update, _mock_backend_passes, _mock_volume_get,
            'available', fake_updates)

    @mock.patch('cinder.db.volume_get')
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state_maintenance(
            self, _mock_volume_update, _mock_backend_passes,
            _mock_volume_get):
        """A 'maintenance' volume also gets its status reset on failure."""
        fake_updates = {'status': 'available',
                        'migration_status': 'error'}
        self._test_migrate_volume_exception_returns_volume_state(
            _mock_volume_update, _mock_backend_passes, _mock_volume_get,
            'maintenance', fake_updates)

    def _test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_backend_passes,
            _mock_volume_get, status, fake_updates):
        """Shared body: drive migrate_volume_to_host into NoValidBackend."""
        volume = tests_utils.create_volume(self.context,
                                           status=status,
                                           previous_status='available')
        fake_volume_id = volume.id
        request_spec = {'volume_id': fake_volume_id}
        _mock_backend_passes.side_effect = exception.NoValidBackend(reason="")
        _mock_volume_get.return_value = volume
        self.manager.migrate_volume_to_host(self.context, volume, 'host', True,
                                            request_spec=request_spec,
                                            filter_properties={})
        _mock_volume_update.assert_called_once_with(self.context,
                                                    fake_volume_id,
                                                    fake_updates)
        _mock_backend_passes.assert_called_once_with(self.context, 'host',
                                                     request_spec, {})
@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.db.volume_attachment_get_all_by_volume_id')
@mock.patch('cinder.quota.QUOTAS.rollback')
def test_retype_volume_exception_returns_volume_state(
self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update):
# Test NoValidBackend exception behavior for retype.
# Puts the volume in original state and eats the exception.
volume = tests_utils.create_volume(self.context,
status='retyping',
previous_status='in-use')
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume_attach = tests_utils.attach_volume(self.context, volume.id,
instance_uuid, None,
'/dev/fake')
_mock_vol_attachment_get.return_value = [volume_attach]
reservations = mock.sentinel.reservations
request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3},
'migration_policy': 'on-demand',
'quota_reservations': reservations}
_mock_vol_update.return_value = {'status': 'in-use'}
_mock_find_retype_backend = mock.Mock(
side_effect=exception.NoValidBackend(reason=""))
orig_retype = self.manager.driver.find_retype_backend
self.manager.driver.find_retype_backend = _mock_find_retype_backend
self.manager.retype(self.context, volume, request_spec=request_spec,
filter_properties={})
_mock_find_retype_backend.assert_called_once_with(self.context,
request_spec, {},
'on-demand')
quota_rollback.assert_called_once_with(self.context, reservations)
_mock_vol_update.assert_called_once_with(self.context, volume.id,
{'status': 'in-use'})
self.manager.driver.find_retype_host = orig_retype
    def test_do_cleanup(self):
        """Cleaning a volume stuck in 'creating' moves it to 'error'."""
        vol = tests_utils.create_volume(self.context, status='creating')
        self.manager._do_cleanup(self.context, vol)
        vol.refresh()
        self.assertEqual('error', vol.status)

    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI'
                '.determine_rpc_version_cap', mock.Mock(return_value='2.0'))
    def test_upgrading_cloud(self):
        """A pinned (older) RPC version cap means the cloud is upgrading."""
        self.assertTrue(self.manager.upgrading_cloud)

    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI'
                '.determine_rpc_version_cap')
    def test_upgrading_cloud_not(self, cap_mock):
        # Cap equal to our own RPC version => not upgrading.
        cap_mock.return_value = self.manager.RPC_API_VERSION
        self.assertFalse(self.manager.upgrading_cloud)

    def test_cleanup_destination_scheduler(self):
        """Scheduler services are cleaned via the scheduler rpcapi."""
        service = objects.Service(id=1, host='hostname',
                                  binary='cinder-scheduler')
        result = self.manager._cleanup_destination(None, service)
        expected = self.manager.sch_api.do_cleanup, None, service.host
        self.assertEqual(expected, result)

    def test_cleanup_destination_volume(self):
        """Non-clustered volume services are cleaned via the volume rpcapi."""
        service = objects.Service(id=1, host='hostname', cluster_name=None,
                                  binary=constants.VOLUME_BINARY)
        result = self.manager._cleanup_destination(None, service)
        expected = self.manager.volume_api.do_cleanup, service, service.host
        self.assertEqual(expected, result)

    def test_cleanup_destination_volume_cluster_cache_hit(self):
        """A clustered service resolves its cluster from the provided cache."""
        cluster = objects.Cluster(id=1, name='mycluster',
                                  binary=constants.VOLUME_BINARY)
        service = objects.Service(id=2, host='hostname',
                                  cluster_name=cluster.name,
                                  binary=constants.VOLUME_BINARY)
        cluster_cache = {'cinder-volume': {'mycluster': cluster}}
        result = self.manager._cleanup_destination(cluster_cache, service)
        expected = self.manager.volume_api.do_cleanup, cluster, cluster.name
        self.assertEqual(expected, result)

    @mock.patch('cinder.objects.Cluster.get_by_id')
    def test_cleanup_destination_volume_cluster_cache_miss(self, get_mock):
        """On a cache miss the cluster is loaded via Cluster.get_by_id."""
        cluster = objects.Cluster(id=1, name='mycluster',
                                  binary=constants.VOLUME_BINARY)
        service = objects.Service(self.context,
                                  id=2, host='hostname',
                                  cluster_name=cluster.name,
                                  binary=constants.VOLUME_BINARY)
        get_mock.return_value = cluster
        cluster_cache = collections.defaultdict(dict)
        result = self.manager._cleanup_destination(cluster_cache, service)
        expected = self.manager.volume_api.do_cleanup, cluster, cluster.name
        self.assertEqual(expected, result)
    @mock.patch('cinder.scheduler.manager.SchedulerManager.upgrading_cloud')
    def test_work_cleanup_upgrading(self, upgrading_mock):
        """work_cleanup refuses to run while the cloud is mid-upgrade."""
        cleanup_request = objects.CleanupRequest(host='myhost')
        upgrading_mock.return_value = True
        self.assertRaises(exception.UnavailableDuringUpgrade,
                          self.manager.work_cleanup,
                          self.context,
                          cleanup_request)

    @mock.patch('cinder.objects.Cluster.is_up', True)
    @mock.patch('cinder.objects.Service.is_up', False)
    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.do_cleanup')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.do_cleanup')
    @mock.patch('cinder.objects.ServiceList.get_all')
    def test_work_cleanup(self, get_mock, vol_clean_mock, sch_clean_mock):
        """Services are split into (cleaning, unable-to-clean) lists."""
        args = dict(service_id=1, cluster_name='cluster_name', host='host',
                    binary=constants.VOLUME_BINARY, is_up=False, disabled=True,
                    resource_id=fake.VOLUME_ID, resource_type='Volume')

        cluster = objects.Cluster(id=1, name=args['cluster_name'],
                                  binary=constants.VOLUME_BINARY)
        # One clustered volume service, one scheduler, one stand-alone volume.
        services = [objects.Service(self.context,
                                    id=2, host='hostname',
                                    cluster_name=cluster.name,
                                    binary=constants.VOLUME_BINARY,
                                    cluster=cluster),
                    objects.Service(self.context,
                                    id=3, host='hostname',
                                    cluster_name=None,
                                    binary=constants.SCHEDULER_BINARY),
                    objects.Service(self.context,
                                    id=4, host='hostname',
                                    cluster_name=None,
                                    binary=constants.VOLUME_BINARY)]
        get_mock.return_value = services

        cleanup_request = objects.CleanupRequest(self.context, **args)
        res = self.manager.work_cleanup(self.context, cleanup_request)
        # First two services accept the cleanup; the last one cannot.
        self.assertEqual((services[:2], services[2:]), res)
        self.assertEqual(1, vol_clean_mock.call_count)
        self.assertEqual(1, sch_clean_mock.call_count)
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch('cinder.objects.backup.Backup.save')
@mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host')
@mock.patch('cinder.db.volume_get')
def test_create_backup(self, mock_volume_get, mock_host, mock_save,
mock_create):
volume = fake_volume.fake_db_volume()
mock_volume_get.return_value = volume
mock_host.return_value = 'cinder-backup'
backup = fake_backup.fake_backup_obj(self.context)
self.manager.create_backup(self.context, backup=backup)
mock_save.assert_called_once()
mock_host.assert_called_once_with(volume)
mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
mock_create.assert_called_once_with(self.context, backup)
@mock.patch('cinder.volume.volume_utils.update_backup_error')
@mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host')
@mock.patch('cinder.db.volume_get')
@mock.patch('cinder.db.volume_update')
def test_create_backup_no_service(self, mock_volume_update,
mock_volume_get, mock_host, mock_error):
volume = fake_volume.fake_db_volume()
volume['status'] = 'backing-up'
volume['previous_status'] = 'available'
mock_volume_get.return_value = volume
mock_host.side_effect = exception.ServiceNotFound(
service_id='cinder-volume')
backup = fake_backup.fake_backup_obj(self.context)
self.manager.create_backup(self.context, backup=backup)
mock_host.assert_called_once_with(volume)
mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
mock_volume_update.assert_called_once_with(
self.context,
backup.volume_id,
{'status': 'available',
'previous_status': 'backing-up'})
mock_error.assert_called_once_with(
backup, 'Service not found for creating backup.')
class SchedulerTestCase(test.TestCase):
    """Test case for base scheduler driver class."""

    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler

    def setUp(self):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
        self.topic = 'fake_topic'

    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities(self, _mock_update_cap):
        """Capability updates are forwarded verbatim to the driver hook."""
        svc = 'fake_service'
        backend = 'fake_host'
        caps = {'fake_capability': 'fake_value'}
        self.driver.update_service_capabilities(svc, backend, caps, None)
        _mock_update_cap.assert_called_once_with(svc, backend, caps, None)

    @mock.patch('cinder.scheduler.host_manager.HostManager.'
                'has_all_capabilities', return_value=False)
    def test_is_ready(self, _mock_has_caps):
        """Driver readiness mirrors HostManager.has_all_capabilities()."""
        self.assertFalse(self.driver.is_ready())
        _mock_has_caps.assert_called_once_with()
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test schedule driver class.

    Test cases for base scheduler driver class methods
    that will fail if the driver is changed.
    """

    def test_unimplemented_schedule(self):
        # The base driver leaves schedule() abstract, so any call must raise.
        positional = (1, 2, 3)
        keyword = {'cat': 'meow'}
        self.assertRaises(NotImplementedError, self.driver.schedule,
                          self.context, self.topic, 'schedule_something',
                          *positional, **keyword)
class SchedulerDriverModuleTestCase(test.TestCase):
    """Test case for scheduler driver module methods."""

    def setUp(self):
        super(SchedulerDriverModuleTestCase, self).setUp()
        self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)

    @mock.patch('cinder.db.volume_update')
    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update):
        """volume_update_db() persists host/cluster and a scheduled_at stamp."""
        vol = fake_volume.fake_volume_obj(self.context, use_quota=True)
        _mock_volume_get.return_value = vol

        driver.volume_update_db(self.context, vol.id, 'fake_host',
                                'fake_cluster')

        # The driver strips the timezone before writing to the DB.
        naive_scheduled_at = vol.scheduled_at.replace(tzinfo=None)
        expected_updates = {'host': 'fake_host',
                            'cluster_name': 'fake_cluster',
                            'scheduled_at': naive_scheduled_at,
                            'availability_zone': None}
        _mock_vol_update.assert_called_once_with(self.context, vol.id,
                                                 expected_updates)
| |
import sys
import CppHeaderParser
import os
def createLUABindings(inputPath, prefix, mainInclude, libSmallName, libName, apiPath, apiClassPath, includePath, sourcePath):
    """Generate Lua bindings for every C++ header found in inputPath.

    Parses each header with CppHeaderParser and writes four artifacts:
      * includePath/<prefix>LUA.h          -- extern "C" declaration of
        luaopen_<prefix>
      * includePath/<prefix>LUAWrappers.h  -- one static C wrapper function
        per exposed constructor/method/property accessor
      * sourcePath/<prefix>LUA.cpp         -- the luaL_reg table and the
        luaopen_<prefix> body
      * apiPath/<prefix>.lua and apiClassPath/<Class>.lua -- Lua-side class
        files that forward to the C wrappers through a __ptr light userdata

    NOTE(review): Python 2 only (print statements, `except X, e`, has_key).
    Output files are opened without explicit close(); CPython refcounting
    presumably flushes them at interpreter exit -- confirm before porting.
    """
    out = ""    # accumulates <prefix>LUAWrappers.h (C wrapper functions)
    sout = ""   # accumulates <prefix>LUA.cpp (registration table / luaopen)
    lfout = ""  # accumulates the master <prefix>.lua "require" list
    sout += "#include \"%sLUA.h\"\n" % (prefix)
    sout += "#include \"%sLUAWrappers.h\"\n\n" % (prefix)
    sout += "int luaopen_%s(lua_State *L) {\n" % (prefix)
    if prefix != "Polycode":
        # Non-core modules receive the core's CoreServices pointer as arg 1.
        sout += "CoreServices *inst = (CoreServices*)lua_topointer(L, 1);\n"
        sout += "CoreServices::setInstance(inst);\n"
    sout += "\tstatic const struct luaL_reg %sLib [] = {" % (libSmallName)
    out += "#pragma once\n\n"
    out += "#include <%s>\n\n" % (mainInclude)
    out += "extern \"C\" {\n\n"
    out += "#include <stdio.h>\n"
    out += "#include \"lua.h\"\n"
    out += "#include \"lualib.h\"\n"
    out += "#include \"lauxlib.h\"\n\n"
    if prefix == "Polycode":
        # Core module only: helper that forwards C++ events to the Lua
        # wrapper object stored in the Lua registry at wrapperIndex.
        out += "class LuaEventHandler : public EventHandler {\n"
        out += "public:\n"
        out += "	LuaEventHandler() : EventHandler() {}\n"
        out += "	~LuaEventHandler();\n"
        out += "	void handleEvent(Event *e) {\n"
        out += "		lua_rawgeti( L, LUA_REGISTRYINDEX, wrapperIndex );\n"
        out += "		lua_getfield(L, -1, \"__handleEvent\");\n"
        out += "		lua_rawgeti( L, LUA_REGISTRYINDEX, wrapperIndex );\n"
        out += "		lua_pushlightuserdata(L, e);\n"
        out += "		lua_call(L, 2, 0);\n"
        out += "	}\n"
        out += "	int wrapperIndex;\n"
        out += "	lua_State *L;\n"
        out += "};\n"
    files = os.listdir(inputPath)
    for fileName in files:
        # Classes listed here get their "require" generated against the
        # current module prefix instead of the core Polycode module.
        inheritInModule = ["PhysicsSceneEntity", "CollisionScene", "CollisionSceneEntity"]
        # Platform/renderer headers that must never be bound.
        ignore = ["PolyGLSLProgram", "PolyGLSLShader", "PolyGLSLShaderModule", "PolyWinCore", "PolyCocoaCore", "PolyAGLCore", "PolyGLES1Renderer", "PolyGLRenderer", "tinyxml", "tinystr", "OpenGLCubemap", "PolyiPhoneCore", "PolyGLES1Texture", "PolyGLTexture", "PolyGLVertexBuffer", "PolyThreaded"]
        if fileName.split(".")[1] == "h" and fileName.split(".")[0] not in ignore:
            headerFile = "%s/%s" % (inputPath, fileName)
            print "Parsing %s" % fileName
            try:
                f = open(headerFile)
                # _PolyExport is a DLL-export macro; strip it so the parser
                # sees plain declarations.
                contents = f.read().replace("_PolyExport", "")
                cppHeader = CppHeaderParser.CppHeader(contents, "string")
                ignore_classes = ["PolycodeShaderModule", "Object", "Threaded", "OpenGLCubemap"]
                for ckey in cppHeader.classes:
                    print ">> Parsing class %s" % ckey
                    c = cppHeader.classes[ckey]
                    # if ckey == "ParticleEmitter":
                    #	print c
                    lout = ""           # per-class Lua source
                    inherits = False
                    if len(c["inherits"]) > 0:
                        if c["inherits"][0]["class"] not in ignore_classes:
                            # Only single inheritance is supported; require
                            # the parent's Lua file first.
                            if c["inherits"][0]["class"] in inheritInModule:
                                lout += "require \"%s/%s\"\n\n" % (prefix, c["inherits"][0]["class"])
                            else:
                                lout += "require \"Polycode/%s\"\n\n" % (c["inherits"][0]["class"])
                            lout += "class \"%s\" (%s)\n\n" % (ckey, c["inherits"][0]["class"])
                            inherits = True
                    if inherits == False:
                        lout += "class \"%s\"\n\n" % ckey
                    # Skip trivial classes and the explicit ignore list.
                    if len(c["methods"]["public"]) < 2 or ckey in ignore_classes:
                        continue
                    if ckey == "OSFileEntry":
                        # Debug output left in by the original author.
                        print c["methods"]["public"]
                    parsed_methods = []
                    ignore_methods = ["readByte32", "readByte16", "getCustomEntitiesByType", "Core", "Renderer", "Shader", "Texture", "handleEvent", "secondaryHandler"]
                    lout += "\n\n"
                    pps = []            # bindable public properties
                    for pp in c["properties"]["public"]:
                        if pp["type"].find("static ") != -1:
                            # "defaltValue" (sic) is CppHeaderParser's actual
                            # key name for a member's default value.
                            if "defaltValue" in pp:
                                lout += "%s = %s\n" % (pp["name"], pp["defaltValue"])
                        else:
                            #there are some bugs in the class parser that cause it to return junk
                            if pp["type"].find("*") == -1 and pp["type"].find("vector") == -1 and pp["name"] != "16" and pp["name"] != "setScale" and pp["name"] != "setPosition" and pp["name"] != "BUFFER_CACHE_PRECISION":
                                pps.append(pp)
                            #if pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool":
                            #	pps.append(pp)
                            #else:
                            #	print(">>> Skipping %s[%s %s]" % (ckey, pp["type"], pp["name"]))
                    pidx = 0
                    # hack to fix the lack of multiple inheritance
                    #if ckey == "ScreenParticleEmitter" or ckey == "SceneParticleEmitter":
                    #	pps.append({"name": "emitter", "type": "ParticleEmitter"})
                    # Emit the Lua __index__ metamethod plus one C getter
                    # wrapper per property.
                    if len(pps) > 0:
                        lout += "function %s:__index__(name)\n" % ckey
                        for pp in pps:
                            if pidx == 0:
                                lout += "\tif name == \"%s\" then\n" % (pp["name"])
                            else:
                                lout += "\telseif name == \"%s\" then\n" % (pp["name"])
                            if pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool":
                                # Primitive: return the raw value directly.
                                lout += "\t\treturn %s.%s_get_%s(self.__ptr)\n" % (libName, ckey, pp["name"])
                            elif (ckey == "ScreenParticleEmitter" or ckey == "SceneParticleEmitter") and pp["name"] == "emitter":
                                # Special-cased fake "emitter" property that
                                # aliases the object's own pointer.
                                lout += "\t\tlocal ret = %s(\"__skip_ptr__\")\n" % (pp["type"])
                                lout += "\t\tret.__ptr = self.__ptr\n"
                                lout += "\t\treturn ret\n"
                            else:
                                # Object-valued property: wrap the returned
                                # pointer, caching wrappers in __ptr_lookup.
                                lout += "\t\tretVal = %s.%s_get_%s(self.__ptr)\n" % (libName, ckey, pp["name"])
                                lout += "\t\tif Polycore.__ptr_lookup[retVal] ~= nil then\n"
                                lout += "\t\t\treturn Polycore.__ptr_lookup[retVal]\n"
                                lout += "\t\telse\n"
                                lout += "\t\t\tPolycore.__ptr_lookup[retVal] = %s(\"__skip_ptr__\")\n" % (pp["type"])
                                lout += "\t\t\tPolycore.__ptr_lookup[retVal].__ptr = retVal\n"
                                lout += "\t\t\treturn Polycore.__ptr_lookup[retVal]\n"
                                lout += "\t\tend\n"
                            if not ((ckey == "ScreenParticleEmitter" or ckey == "SceneParticleEmitter") and pp["name"] == "emitter"):
                                # C getter wrapper + registration entry.
                                sout += "\t\t{\"%s_get_%s\", %s_%s_get_%s},\n" % (ckey, pp["name"], libName, ckey, pp["name"])
                                out += "static int %s_%s_get_%s(lua_State *L) {\n" % (libName, ckey, pp["name"])
                                out += "\tluaL_checktype(L, 1, LUA_TLIGHTUSERDATA);\n"
                                out += "\t%s *inst = (%s*)lua_topointer(L, 1);\n" % (ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"), ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                                outfunc = "lua_pushlightuserdata"
                                retFunc = ""
                                if pp["type"] == "Number":
                                    outfunc = "lua_pushnumber"
                                if pp["type"] == "String":
                                    outfunc = "lua_pushstring"
                                    retFunc = ".c_str()"
                                if pp["type"] == "int":
                                    outfunc = "lua_pushinteger"
                                if pp["type"] == "bool":
                                    outfunc = "lua_pushboolean"
                                if pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool":
                                    out += "\t%s(L, inst->%s%s);\n" % (outfunc, pp["name"], retFunc)
                                else:
                                    # Non-primitive: push a pointer to the member.
                                    out += "\t%s(L, &inst->%s%s);\n" % (outfunc, pp["name"], retFunc)
                                out += "\treturn 1;\n"
                                out += "}\n\n"
                            pidx = pidx + 1
                        lout += "\tend\n"
                        lout += "end\n"
                        lout += "\n\n"
                    pidx = 0
                    # Emit the Lua __set_callback plus one C setter wrapper
                    # per primitive property (objects are not settable).
                    if len(pps) > 0:
                        lout += "function %s:__set_callback(name,value)\n" % ckey
                        for pp in pps:
                            if pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool":
                                if pidx == 0:
                                    lout += "\tif name == \"%s\" then\n" % (pp["name"])
                                else:
                                    lout += "\telseif name == \"%s\" then\n" % (pp["name"])
                                lout += "\t\t%s.%s_set_%s(self.__ptr, value)\n" % (libName, ckey, pp["name"])
                                lout += "\t\treturn true\n"
                                sout += "\t\t{\"%s_set_%s\", %s_%s_set_%s},\n" % (ckey, pp["name"], libName, ckey, pp["name"])
                                out += "static int %s_%s_set_%s(lua_State *L) {\n" % (libName, ckey, pp["name"])
                                out += "\tluaL_checktype(L, 1, LUA_TLIGHTUSERDATA);\n"
                                out += "\t%s *inst = (%s*)lua_topointer(L, 1);\n" % (ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"), ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                                outfunc = "lua_topointer"
                                if pp["type"] == "Number":
                                    outfunc = "lua_tonumber"
                                if pp["type"] == "String":
                                    outfunc = "lua_tostring"
                                if pp["type"] == "int":
                                    outfunc = "lua_tointeger"
                                if pp["type"] == "bool":
                                    outfunc = "lua_toboolean"
                                out += "\t%s param = %s(L, 2);\n" % (pp["type"], outfunc)
                                out += "\tinst->%s = param;\n" % (pp["name"])
                                out += "\treturn 0;\n"
                                out += "}\n\n"
                                pidx = pidx + 1
                        if pidx != 0:
                            lout += "\tend\n"
                            lout += "\treturn false\n"
                            lout += "end\n"
                            lout += "\n\n"
                    # Emit one C wrapper + one Lua forwarding method per
                    # public method (first overload only -- later overloads
                    # are skipped via parsed_methods).
                    for pm in c["methods"]["public"]:
                        if pm["name"] in parsed_methods or pm["name"].find("operator") > -1 or pm["name"] in ignore_methods:
                            continue
                        if pm["name"] == "~"+ckey or pm["rtnType"].find("<") > -1:
                            # Destructors and template return types are skipped.
                            out += ""
                        else:
                            basicType = False
                            voidRet = False
                            if pm["name"] == ckey:
                                # Constructor wrapper.
                                sout += "\t\t{\"%s\", %s_%s},\n" % (ckey, libName, ckey)
                                out += "static int %s_%s(lua_State *L) {\n" % (libName, ckey)
                                idx = 1
                            else:
                                sout += "\t\t{\"%s_%s\", %s_%s_%s},\n" % (ckey, pm["name"], libName, ckey, pm["name"])
                                out += "static int %s_%s_%s(lua_State *L) {\n" % (libName, ckey, pm["name"])
                                if pm["rtnType"].find("static ") == -1:
                                    out += "\tluaL_checktype(L, 1, LUA_TLIGHTUSERDATA);\n"
                                    out += "\t%s *inst = (%s*)lua_topointer(L, 1);\n" % (ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"), ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                                    idx = 2
                                # NOTE(review): for static (non-constructor)
                                # methods idx is never set here, so the value
                                # left over from a previous wrapper is used --
                                # looks like a latent bug; confirm.
                            paramlist = []   # C-side argument expressions
                            lparamlist = []  # Lua-side argument expressions
                            for param in pm["parameters"]:
                                if not param.has_key("type"):
                                    continue
                                if param["type"] == "0":
                                    continue
                                # Rename params that collide with Lua keywords.
                                param["name"] = param["name"].replace("end", "_end").replace("repeat", "_repeat")
                                if "type" in param:
                                    # Defaults assume an object passed as
                                    # light userdata.
                                    luatype = "LUA_TLIGHTUSERDATA"
                                    checkfunc = "lua_islightuserdata"
                                    if param["type"].find("*") > -1:
                                        luafunc = "(%s)lua_topointer" % (param["type"].replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                                    elif param["type"].find("&") > -1:
                                        luafunc = "*(%s*)lua_topointer" % (param["type"].replace("const", "").replace("&", "").replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                                    else:
                                        luafunc = "*(%s*)lua_topointer" % (param["type"].replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                                    lend = ".__ptr"
                                    if param["type"] == "int" or param["type"] == "unsigned int":
                                        luafunc = "lua_tointeger"
                                        luatype = "LUA_TNUMBER"
                                        checkfunc = "lua_isnumber"
                                        lend = ""
                                    if param["type"] == "bool":
                                        luafunc = "lua_toboolean"
                                        luatype = "LUA_TBOOLEAN"
                                        checkfunc = "lua_isboolean"
                                        lend = ""
                                    if param["type"] == "Number" or param["type"] == "float" or param["type"] == "double":
                                        luatype = "LUA_TNUMBER"
                                        luafunc = "lua_tonumber"
                                        checkfunc = "lua_isnumber"
                                        lend = ""
                                    if param["type"] == "String":
                                        luatype = "LUA_TSTRING"
                                        luafunc = "lua_tostring"
                                        checkfunc = "lua_isstring"
                                        lend = ""
                                    param["type"] = param["type"].replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle")
                                    if "defaltValue" in param:
                                        if checkfunc != "lua_islightuserdata" or (checkfunc == "lua_islightuserdata" and param["defaltValue"] == "NULL"):
                                            # Repair values mangled by the
                                            # header parser (e.g. "0 3f").
                                            param["defaltValue"] = param["defaltValue"].replace(" 0f", ".0f")
                                            param["defaltValue"] = param["defaltValue"].replace(": :", "::")
                                            param["defaltValue"] = param["defaltValue"].replace("0 ", "0.")
                                            # Optional arg: fall back to the
                                            # C++ default if absent.
                                            out += "\t%s %s;\n" % (param["type"], param["name"])
                                            out += "\tif(%s(L, %d)) {\n" % (checkfunc, idx)
                                            out += "\t\t%s = %s(L, %d);\n" % (param["name"], luafunc, idx)
                                            out += "\t} else {\n"
                                            out += "\t\t%s = %s;\n" % (param["name"], param["defaltValue"])
                                            out += "\t}\n"
                                        else:
                                            out += "\tluaL_checktype(L, %d, %s);\n" % (idx, luatype);
                                            if param["type"] == "String":
                                                out += "\t%s %s = String(%s(L, %d));\n" % (param["type"], param["name"], luafunc, idx)
                                            else:
                                                out += "\t%s %s = %s(L, %d);\n" % (param["type"], param["name"], luafunc, idx)
                                    else:
                                        out += "\tluaL_checktype(L, %d, %s);\n" % (idx, luatype);
                                        if param["type"] == "String":
                                            out += "\t%s %s = String(%s(L, %d));\n" % (param["type"], param["name"], luafunc, idx)
                                        else:
                                            out += "\t%s %s = %s(L, %d);\n" % (param["type"], param["name"], luafunc, idx)
                                    paramlist.append(param["name"])
                                    lparamlist.append(param["name"]+lend)
                                    idx = idx +1
                            if pm["name"] == ckey:
                                # Constructor body: allocate and return the
                                # new instance as light userdata.
                                if ckey == "EventHandler":
                                    # EventHandler is backed by the special
                                    # LuaEventHandler shim defined above.
                                    out += "\tLuaEventHandler *inst = new LuaEventHandler();\n"
                                    out += "\tinst->wrapperIndex = luaL_ref(L, LUA_REGISTRYINDEX );\n"
                                    out += "\tinst->L = L;\n"
                                else:
                                    out += "\t%s *inst = new %s(%s);\n" % (ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"), ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"), ", ".join(paramlist))
                                out += "\tlua_pushlightuserdata(L, (void*)inst);\n"
                                out += "\treturn 1;\n"
                            else:
                                # Regular method: build the call expression,
                                # then marshal the return value.
                                if pm["rtnType"].find("static ") == -1:
                                    call = "inst->%s(%s)" % (pm["name"], ", ".join(paramlist))
                                else:
                                    call = "%s::%s(%s)" % (ckey, pm["name"], ", ".join(paramlist))
                                if pm["rtnType"] == "void" or pm["rtnType"] == "static void" or pm["rtnType"] == "virtual void" or pm["rtnType"] == "inline void":
                                    out += "\t%s;\n" % (call)
                                    basicType = True
                                    voidRet = True
                                    out += "\treturn 0;\n"
                                else:
                                    outfunc = "lua_pushlightuserdata"
                                    retFunc = ""
                                    basicType = False
                                    if pm["rtnType"] == "Number" or pm["rtnType"] == "inline Number":
                                        outfunc = "lua_pushnumber"
                                        basicType = True
                                    if pm["rtnType"] == "String" or pm["rtnType"] == "static String":
                                        outfunc = "lua_pushstring"
                                        basicType = True
                                        retFunc = ".c_str()"
                                    if pm["rtnType"] == "int" or pm["rtnType"] == "static int" or pm["rtnType"] == "size_t" or pm["rtnType"] == "static size_t" or pm["rtnType"] == "long" or pm["rtnType"] == "unsigned int" or pm["rtnType"] == "static long":
                                        outfunc = "lua_pushinteger"
                                        basicType = True
                                    if pm["rtnType"] == "bool" or pm["rtnType"] == "static bool" or pm["rtnType"] == "virtual bool":
                                        outfunc = "lua_pushboolean"
                                        basicType = True
                                    if pm["rtnType"].find("*") > -1:
                                        # Pointer return: nil for NULL.
                                        out += "\tvoid *ptrRetVal = (void*)%s%s;\n" % (call, retFunc)
                                        out += "\tif(ptrRetVal == NULL) {\n"
                                        out += "\t\tlua_pushnil(L);\n"
                                        out += "\t} else {\n"
                                        out += "\t\t%s(L, ptrRetVal);\n" % (outfunc)
                                        out += "\t}\n"
                                    elif basicType == True:
                                        out += "\t%s(L, %s%s);\n" % (outfunc, call, retFunc)
                                    else:
                                        # By-value object return: heap-copy it
                                        # so the pointer outlives the call.
                                        className = pm["rtnType"].replace("const", "").replace("&", "").replace("inline", "").replace("virtual", "").replace("static", "")
                                        if className == "Polygon":
                                            className = "Polycode::Polygon"
                                        if className == "Rectangle":
                                            className = "Polycode::Rectangle"
                                        out += "\t%s *retInst = new %s();\n" % (className, className)
                                        out += "\t*retInst = %s;\n" % (call)
                                        out += "\t%s(L, retInst);\n" % (outfunc)
                                    out += "\treturn 1;\n"
                                out += "}\n\n"
                            # Lua-side forwarding function.
                            if pm["name"] == ckey:
                                lout += "function %s:%s(...)\n" % (ckey, ckey)
                                if inherits:
                                    # Copy-construction from the parent class.
                                    lout += "\tif type(arg[1]) == \"table\" and count(arg) == 1 then\n"
                                    lout += "\t\tif \"\"..arg[1]:class() == \"%s\" then\n" % (c["inherits"][0]["class"])
                                    lout += "\t\t\tself.__ptr = arg[1].__ptr\n"
                                    lout += "\t\t\treturn\n"
                                    lout += "\t\tend\n"
                                    lout += "\tend\n"
                                # Unwrap table arguments to raw pointers.
                                lout += "\tfor k,v in pairs(arg) do\n"
                                lout += "\t\tif type(v) == \"table\" then\n"
                                lout += "\t\t\tif v.__ptr ~= nil then\n"
                                lout += "\t\t\t\targ[k] = v.__ptr\n"
                                lout += "\t\t\tend\n"
                                lout += "\t\tend\n"
                                lout += "\tend\n"
                                lout += "\tif self.__ptr == nil and arg[1] ~= \"__skip_ptr__\" then\n"
                                if ckey == "EventHandler":
                                    lout += "\t\tself.__ptr = %s.%s(self)\n" % (libName, ckey)
                                else:
                                    lout += "\t\tself.__ptr = %s.%s(unpack(arg))\n" % (libName, ckey)
                                lout += "\t\tPolycore.__ptr_lookup[self.__ptr] = self\n"
                                lout += "\tend\n"
                                lout += "end\n\n"
                            else:
                                lout += "function %s:%s(%s)\n" % (ckey, pm["name"], ", ".join(paramlist))
                                if pm["rtnType"].find("static ") == -1:
                                    if len(lparamlist):
                                        lout += "\tlocal retVal = %s.%s_%s(self.__ptr, %s)\n" % (libName, ckey, pm["name"], ", ".join(lparamlist))
                                    else:
                                        lout += "\tlocal retVal = %s.%s_%s(self.__ptr)\n" % (libName, ckey, pm["name"])
                                else:
                                    if len(lparamlist):
                                        lout += "\tlocal retVal = %s.%s_%s(%s)\n" % (libName, ckey, pm["name"], ", ".join(lparamlist))
                                    else:
                                        lout += "\tlocal retVal = %s.%s_%s()\n" % (libName, ckey, pm["name"])
                                if not voidRet:
                                    if basicType == True:
                                        lout += "\treturn retVal\n"
                                    else:
                                        # Wrap returned pointers, caching
                                        # wrapper objects in __ptr_lookup.
                                        className = pm["rtnType"].replace("const", "").replace("&", "").replace("inline", "").replace("virtual", "").replace("static", "").replace("*","").replace(" ", "")
                                        lout += "\tif retVal == nil then return nil end\n"
                                        lout += "\tif Polycore.__ptr_lookup[retVal] ~= nil then\n"
                                        lout += "\t\treturn Polycore.__ptr_lookup[retVal]\n"
                                        lout += "\telse\n"
                                        lout += "\t\tPolycore.__ptr_lookup[retVal] = %s(\"__skip_ptr__\")\n" % (className)
                                        lout += "\t\tPolycore.__ptr_lookup[retVal].__ptr = retVal\n"
                                        lout += "\t\treturn Polycore.__ptr_lookup[retVal]\n"
                                        lout += "\tend\n"
                                lout += "end\n\n"
                            parsed_methods.append(pm["name"])
                    #cleanup
                    sout += "\t\t{\"delete_%s\", %s_delete_%s},\n" % (ckey, libName, ckey)
                    out += "static int %s_delete_%s(lua_State *L) {\n" % (libName, ckey)
                    out += "\tluaL_checktype(L, 1, LUA_TLIGHTUSERDATA);\n"
                    out += "\t%s *inst = (%s*)lua_topointer(L, 1);\n" % (ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"), ckey.replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
                    out += "\tdelete inst;\n"
                    out += "\treturn 0;\n"
                    out += "}\n\n"
                    lout += "\n\n"
                    lout += "function %s:__delete()\n" % (ckey)
                    lout += "\tPolycore.__ptr_lookup[self.__ptr] = nil\n"
                    lout += "\t%s.delete_%s(self.__ptr)\n" % (libName, ckey)
                    lout += "end\n"
                    if ckey == "EventHandler":
                        # Bridge called by LuaEventHandler::handleEvent.
                        lout += "\n\n"
                        lout += "function EventHandler:__handleEvent(event)\n"
                        lout += "\tevt = Event(\"__skip_ptr__\")\n"
                        lout += "\tevt.__ptr = event\n"
                        lout += "\tself:handleEvent(evt)\n"
                        #lout += "\tself:handleEvent(event)\n"
                        lout += "end\n"
                    lfout += "require \"%s/%s\"\n" % (prefix, ckey)
                    fout = open("%s/%s.lua" % (apiClassPath, ckey), "w")
                    fout.write(lout)
            except CppHeaderParser.CppParseError, e:
                print e
                sys.exit(1)
    out += "}"
    sout += "\t\t{NULL, NULL}\n"
    sout += "\t};\n"
    sout += "\tluaL_openlib(L, \"%s\", %sLib, 0);\n" % (libName, libSmallName)
    sout += "\treturn 1;\n"
    sout += "}"
    # Public header declaring the module's luaopen entry point.
    shout = ""
    shout += "#pragma once\n"
    shout += "#include <%s>\n" % (mainInclude)
    shout += "#include \"%sLUAWrappers.h\"\n" % (prefix)
    shout += "extern \"C\" {\n"
    shout += "#include <stdio.h>\n"
    shout += "#include \"lua.h\"\n"
    shout += "#include \"lualib.h\"\n"
    shout += "#include \"lauxlib.h\"\n"
    shout += "int _PolyExport luaopen_%s(lua_State *L);\n" % (prefix)
    shout += "}\n"
    fout = open("%s/%sLUA.h" % (includePath, prefix), "w")
    fout.write(shout)
    fout = open("%s/%s.lua" % (apiPath, prefix), "w")
    fout.write(lfout)
    fout = open("%s/%sLUAWrappers.h" % (includePath, prefix), "w")
    fout.write(out)
    fout = open("%s/%sLUA.cpp" % (sourcePath, prefix), "w")
    fout.write(sout)
    #print cppHeader
createLUABindings(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9])
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from bisect_results import BisectResults
import source_control
class MockDepotRegistry(object):
  """Depot registry double; switching depot directories is a no-op."""

  def ChangeToDepotDir(self, depot):
    return None
class MockRevisionState(object):
  """Lightweight stand-in for a bisect RevisionState record.

  Carries only the attributes the code under test reads; no behavior.
  """

  def __init__(self, revision, index, depot='chromium', value=None,
               perf_time=0, build_time=0, passed='?', external=None):
    self.__dict__.update(
        depot=depot,
        revision=revision,
        index=index,
        value=value,
        perf_time=perf_time,
        build_time=build_time,
        passed=passed,
        external=external)
class MockBisectState(object):
  """Bisect state double: two failing revisions followed by three passing.

  Revisions 'a'/'b' share one "bad" value dict, 'c'/'d'/'e' share one
  "good" value dict, mirroring the shapes BisectResults consumes.
  """

  def __init__(self):
    bad_value = {'values': [100, 105, 95]}
    good_value = {'values': [1, 2, 3]}
    states = [MockRevisionState(rev, i, value=bad_value, passed=0)
              for i, rev in enumerate(['a', 'b'])]
    states += [MockRevisionState(rev, i, value=good_value, passed=1)
               for i, rev in enumerate(['c', 'd', 'e'], start=2)]
    self.mock_revision_states = states

  def GetRevisionStates(self):
    return self.mock_revision_states
class MockBisectOptions(object):
  """Bisect options double exposing only the repeat count."""

  def __init__(self):
    # Three repeats is the minimum BisectResults accepts without warning.
    self.repeat_test_count = 3
class BisectResultsTest(unittest.TestCase):
def setUp(self):
self.mock_bisect_state = MockBisectState()
self.mock_depot_registry = MockDepotRegistry()
self.mock_opts = MockBisectOptions()
self.mock_warnings = []
self.original_getcwd = os.getcwd
self.original_chdir = os.chdir
self.original_query_revision_info = source_control.QueryRevisionInfo
os.getcwd = lambda: '/path'
os.chdir = lambda _: None
revision_infos = {'b': {'test': 'b'}, 'c': {'test': 'c'}}
source_control.QueryRevisionInfo = lambda rev: revision_infos[rev]
def tearDown(self):
os.getcwd = self.original_getcwd
os.chdir = self.original_chdir
source_control.QueryRevisionInfo = self.original_query_revision_info
def _AssertConfidence(self, score, bad_values, good_values):
"""Checks whether the given sets of values have a given confidence score.
The score represents our confidence that the two sets of values wouldn't
be as different as they are just by chance; that is, that some real change
occurred between the two sets of values.
Args:
score: Expected confidence score.
bad_values: First list of numbers.
good_values: Second list of numbers.
"""
confidence = BisectResults.ConfidenceScore(bad_values, good_values)
self.assertEqual(score, confidence)
def testConfidenceScoreIsZeroOnTooFewLists(self):
self._AssertConfidence(0.0, [], [1, 2])
self._AssertConfidence(0.0, [1, 2], [])
self._AssertConfidence(0.0, [1], [1, 2])
self._AssertConfidence(0.0, [1, 2], [1])
def testConfidenceScore_ZeroConfidence(self):
# The good and bad sets contain the same values, so the confidence that
# they're different should be zero.
self._AssertConfidence(0.0, [4, 5, 7, 6, 8, 7], [8, 7, 6, 7, 5, 4])
def testConfidenceScore_MediumConfidence(self):
self._AssertConfidence(80.0, [0, 1, 1, 1, 2, 2], [1, 1, 1, 3, 3, 4])
def testConfidenceScore_HighConfidence(self):
self._AssertConfidence(95.0, [0, 1, 1, 1, 2, 2], [1, 2, 2, 3, 3, 4])
def testConfidenceScore_VeryHighConfidence(self):
# Confidence is high if the two sets of values have no internal variance.
self._AssertConfidence(99.9, [1, 1, 1, 1], [1.2, 1.2, 1.2, 1.2])
self._AssertConfidence(99.9, [1, 1, 1, 1], [1.01, 1.01, 1.01, 1.01])
def testConfidenceScore_UnbalancedSampleSize(self):
# The second set of numbers only contains one number, so confidence is 0.
self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2], [1.4])
def testConfidenceScore_EmptySample(self):
# Confidence is zero if either or both samples are empty.
self._AssertConfidence(0.0, [], [])
self._AssertConfidence(0.0, [], [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3])
self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3], [])
def testConfidenceScore_FunctionalTestResults(self):
self._AssertConfidence(80.0, [1, 1, 0, 1, 1, 1, 0, 1], [0, 0, 1, 0, 1, 0])
self._AssertConfidence(99.9, [1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0])
def testConfidenceScore_RealWorldCases(self):
"""This method contains a set of data from actual bisect results.
The confidence scores asserted below were all copied from the actual
results, so the purpose of this test method is mainly to show what the
results for real cases are, and compare when we change the confidence
score function in the future.
"""
self._AssertConfidence(80, [133, 130, 132, 132, 130, 129], [129, 129, 125])
self._AssertConfidence(99.5, [668, 667], [498, 498, 499])
self._AssertConfidence(80, [67, 68], [65, 65, 67])
self._AssertConfidence(0, [514], [514])
self._AssertConfidence(90, [616, 613, 607, 615], [617, 619, 619, 617])
self._AssertConfidence(0, [3.5, 5.8, 4.7, 3.5, 3.6], [2.8])
self._AssertConfidence(90, [3, 3, 3], [2, 2, 2, 3])
self._AssertConfidence(0, [1999004, 1999627], [223355])
self._AssertConfidence(90, [1040, 934, 961], [876, 875, 789])
self._AssertConfidence(90, [309, 305, 304], [302, 302, 299, 303, 298])
def testCorrectlyFindsBreakingRange(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = 1
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[2], results.first_working_revision)
self.assertEqual(revision_states[1], results.last_broken_revision)
def testCorrectlyFindsBreakingRangeNotInOrder(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 1
revision_states[2].passed = 0
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[1], results.first_working_revision)
self.assertEqual(revision_states[2], results.last_broken_revision)
def testCorrectlyFindsBreakingRangeIncompleteBisect(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = '?'
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[3], results.first_working_revision)
self.assertEqual(revision_states[1], results.last_broken_revision)
def testFindBreakingRangeAllPassed(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 1
revision_states[1].passed = 1
revision_states[2].passed = 1
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[0], results.first_working_revision)
self.assertIsNone(results.last_broken_revision)
def testFindBreakingRangeNonePassed(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = 0
revision_states[3].passed = 0
revision_states[4].passed = 0
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertIsNone(results.first_working_revision)
self.assertEqual(revision_states[4], results.last_broken_revision)
def testCorrectlyComputesRegressionStatistics(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[0].value = {'values': [1000, 999, 998]}
revision_states[1].passed = 0
revision_states[1].value = {'values': [980, 1000, 999]}
revision_states[2].passed = 1
revision_states[2].value = {'values': [50, 45, 55]}
revision_states[3].passed = 1
revision_states[3].value = {'values': [45, 56, 45]}
revision_states[4].passed = 1
revision_states[4].value = {'values': [51, 41, 58]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertAlmostEqual(99.9, results.confidence)
self.assertAlmostEqual(1909.86547085, results.regression_size)
self.assertAlmostEqual(7.16625904, results.regression_std_err)
def testFindsCulpritRevisions(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[1].depot = 'chromium'
revision_states[2].depot = 'webkit'
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.culprit_revisions))
self.assertEqual(('b', {'test': 'b'}, 'chromium'),
results.culprit_revisions[0])
def testFindsOtherRegressions(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[0].value = {'values': [100, 100, 100]}
revision_states[1].passed = 0
revision_states[1].value = {'values': [100, 100, 100]}
revision_states[2].passed = 1
revision_states[2].value = {'values': [10, 10, 10]}
revision_states[3].passed = 1
revision_states[3].value = {'values': [100, 100, 100]}
revision_states[4].passed = 1
revision_states[4].value = {'values': [60, 60, 60]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
expected_regressions = [[revision_states[2], revision_states[1], 99.9],
[revision_states[4], revision_states[3], 80.0]]
self.assertEqual(expected_regressions, results.other_regressions)
def testNoResultBasedWarningsForNormalState(self):
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(0, len(results.warnings))
def testWarningForMultipleCulpritRevisions(self):
self.mock_bisect_state.mock_revision_states[2].passed = 'Skipped'
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.warnings))
def testWarningForTooLowRetryLimit(self):
self.mock_opts.repeat_test_count = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.warnings))
def testWarningForTooLowConfidence(self):
    """Low but non-zero confidence results still carry a warning."""
    mock_states = self.mock_bisect_state.mock_revision_states
    for index in (2, 3, 4):
        # A fresh dict per state, as the original assignments created.
        mock_states[index].value = {'values': [95, 90, 90]}
    bisect_results = BisectResults(
        self.mock_bisect_state, self.mock_depot_registry,
        self.mock_opts, self.mock_warnings)
    self.assertGreater(bisect_results.confidence, 0)
    self.assertEqual(1, len(bisect_results.warnings))
def testWarningForZeroConfidence(self):
    """Identical distributions on both sides yield zero confidence plus a warning."""
    mock_states = self.mock_bisect_state.mock_revision_states
    for index in (2, 3, 4):
        mock_states[index].value = {'values': [100, 105, 95]}
    bisect_results = BisectResults(
        self.mock_bisect_state, self.mock_depot_registry,
        self.mock_opts, self.mock_warnings)
    self.assertEqual(0, bisect_results.confidence)
    self.assertEqual(1, len(bisect_results.warnings))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
line.client
~~~~~~~~~~~
LineClient for sending and receiving message from LINE server.
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
import re
import requests
import sys
from api import LineAPI
from models import LineGroup, LineContact, LineRoom, LineMessage
from curve.ttypes import TalkException, ToType, OperationType, Provider
# Python 2 only: force the process-wide default encoding to UTF-8 so
# LINE's non-ASCII payloads survive implicit str/unicode conversions.
# reload() is required because site.py deletes sys.setdefaultencoding.
reload(sys)
sys.setdefaultencoding("utf-8")
# Rough pattern used only to distinguish an email login from a NAVER id.
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
class LineClient(LineAPI):
    """High-level LINE messaging client built on the low-level LineAPI.

    Maintains local caches of the account profile, contacts, chat rooms
    and groups, and offers lookup, refresh and send/receive helpers.

    Fixes over the previous revision:
      * ``createGroupWithIds`` referenced an undefined ``name`` and raised
        NameError on every call; ``name`` is now an explicit parameter.
      * Mutable default arguments (``[]``) replaced with ``None`` sentinels.
      * ``getMessageBox`` no longer swallows KeyboardInterrupt/SystemExit.
    """

    # Caches populated after login; rebuilt by the refresh* methods.
    profile = None
    contacts = []
    rooms = []
    groups = []

    def __init__(self, id=None, password=None, authToken=None, is_mac=True,
                 com_name="carpedm20"):
        """Provide a way to communicate with LINE server.

        :param id: `NAVER id` or `LINE email`
        :param password: LINE account password
        :param authToken: LINE session key
        :param is_mac: (optional) os setting
        :param com_name: (optional) name of your system

        >>> client = LineClient("carpedm20", "xxxxxxxxxx")
        Enter PinCode '9779' to your mobile phone in 2 minutes
        >>> client = LineClient("carpedm20@gmail.com", "xxxxxxxxxx")
        Enter PinCode '7390' to your mobile phone in 2 minutes
        """
        if not (authToken or id and password):
            self.raise_error("id and password or authToken is needed")

        # Identify as an official desktop client; the server rejects
        # unrecognized applications.
        if is_mac:
            os_version = "10.9.4-MAVERICKS-x64"
            user_agent = "DESKTOP:MAC:%s(%s)" % (os_version, self.version)
            app = "DESKTOPMAC\t%s\tMAC\t%s" % (self.version, os_version)
        else:
            os_version = "5.1.2600-XP-x64"
            user_agent = "DESKTOP:WIN:%s(%s)" % (os_version, self.version)
            app = "DESKTOPWIN\t%s\tWINDOWS\t%s" % (self.version, os_version)

        if com_name:
            self.com_name = com_name

        self._headers['User-Agent'] = user_agent
        self._headers['X-Line-Application'] = app

        if authToken:
            # Resume an existing session instead of a fresh login.
            self.authToken = self._headers['X-Line-Access'] = authToken
            self.tokenLogin()
            #self.ready()
        else:
            # The identity provider is inferred from the shape of the id.
            if EMAIL_REGEX.match(id):
                self.provider = Provider.LINE  # LINE email account
            else:
                self.provider = Provider.NAVER_KR  # NAVER id

            self.id = id
            self.password = password
            self.is_mac = is_mac

            self.login()
            self.ready()

        self.revision = self._getLastOpRevision()
        self.getProfile()
        self.refreshGroups()
        self.refreshContacts()
        self.refreshActiveRooms()

    def getProfile(self):
        """Get `profile` of LINE account, caching it on the client."""
        if self._check_auth():
            self.profile = LineContact(self, self._getProfile())
            return self.profile
        return None

    def getContactByName(self, name):
        """Get a `contact` by name.

        :param name: name of a `contact`
        """
        for contact in self.contacts:
            if name == contact.name:
                return contact
        return None

    def getContactById(self, id):
        """Get a `contact` by id.

        :param id: id of a `contact`
        """
        for contact in self.contacts:
            if contact.id == id:
                return contact
        return None

    def getContactOrRoomOrGroupById(self, id):
        """Get a `contact` or `room` or `group` by its id.

        :param id: id of an instance
        """
        return self.getContactById(id) \
            or self.getRoomById(id) \
            or self.getGroupById(id)

    def refreshGroups(self):
        """Refresh groups of LineClient (joined plus invited)."""
        if self._check_auth():
            self.groups = []
            self.addGroupsWithIds(self._getGroupIdsJoined())
            self.addGroupsWithIds(self._getGroupIdsInvited(), False)

    def addGroupsWithIds(self, group_ids, is_joined=True):
        """Fetch the given group ids and append them to the local cache.

        :param group_ids: ids of the groups to fetch
        :param is_joined: whether this account has joined these groups
        """
        if self._check_auth():
            for group in self._getGroups(group_ids):
                self.groups.append(LineGroup(self, group, is_joined))
            self.groups.sort()

    def refreshContacts(self):
        """Refresh contacts of LineClient."""
        if self._check_auth():
            contact_ids = self._getAllContactIds()
            contacts = self._getContacts(contact_ids)
            self.contacts = [LineContact(self, contact)
                             for contact in contacts]
            self.contacts.sort()

    def refreshActiveRooms(self):
        """Refresh active chat rooms by paging through the message boxes."""
        if self._check_auth():
            start = 1
            count = 50
            self.rooms = []
            while True:
                channel = self._getMessageBoxCompactWrapUpList(start, count)
                for box in channel.messageBoxWrapUpList:
                    if box.messageBox.midType == ToType.ROOM:
                        room = LineRoom(self, self._getRoom(box.messageBox.id))
                        self.rooms.append(room)
                # A full page means there may be more message boxes.
                if len(channel.messageBoxWrapUpList) == count:
                    start += count
                else:
                    break

    def createGroupWithIds(self, name, ids=None):
        """Create a group with contact ids.

        :param name: name of group
        :param ids: list of contact ids

        Note: ``name`` was referenced but missing from the original
        signature, so every call raised NameError; it is now an explicit
        parameter, mirroring createGroupWithContacts.
        """
        if self._check_auth():
            try:
                group = LineGroup(self, self._createGroup(name, ids or []))
                self.groups.append(group)
                return group
            except Exception as e:
                self.raise_error(e)
            return None

    def createGroupWithContacts(self, name, contacts=None):
        """Create a group with contacts.

        :param name: name of group
        :param contacts: list of contacts
        """
        if self._check_auth():
            try:
                contact_ids = [contact.id for contact in contacts or []]
                group = LineGroup(self, self._createGroup(name, contact_ids))
                self.groups.append(group)
                return group
            except Exception as e:
                self.raise_error(e)
            return None

    def getGroupByName(self, name):
        """Get a group by name.

        :param name: name of a group
        """
        for group in self.groups:
            if name == group.name:
                return group
        return None

    def getGroupById(self, id):
        """Get a group by id.

        :param id: id of a group
        """
        for group in self.groups:
            if group.id == id:
                return group
        return None

    def inviteIntoGroup(self, group, contacts=None):
        """Invite contacts into group.

        :param group: LineGroup instance
        :param contacts: LineContact instances to invite
        """
        if self._check_auth():
            contact_ids = [contact.id for contact in contacts or []]
            self._inviteIntoGroup(group.id, contact_ids)

    def acceptGroupInvitation(self, group):
        """Accept a group invitation.

        :param group: LineGroup instance
        """
        if self._check_auth():
            try:
                self._acceptGroupInvitation(group.id)
                return True
            except Exception as e:
                self.raise_error(e)
            return False

    def leaveGroup(self, group):
        """Leave a group.

        :param group: LineGroup instance to leave
        """
        if self._check_auth():
            try:
                self._leaveGroup(group.id)
                self.groups.remove(group)
                return True
            except Exception as e:
                self.raise_error(e)
            return False

    def createRoomWithIds(self, ids=None):
        """Create a chat room with contact ids."""
        if self._check_auth():
            try:
                room = LineRoom(self, self._createRoom(ids or []))
                self.rooms.append(room)
                return room
            except Exception as e:
                self.raise_error(e)
            return None

    def createRoomWithContacts(self, contacts=None):
        """Create a chat room with contacts."""
        if self._check_auth():
            try:
                contact_ids = [contact.id for contact in contacts or []]
                room = LineRoom(self, self._createRoom(contact_ids))
                self.rooms.append(room)
                return room
            except Exception as e:
                self.raise_error(e)
            return None

    def getRoomById(self, id):
        """Get a room by id.

        :param id: id of a room
        """
        for room in self.rooms:
            if room.id == id:
                return room
        return None

    def inviteIntoRoom(self, room, contacts=None):
        """Invite contacts into room.

        :param room: LineRoom instance
        :param contacts: LineContact instances to invite
        """
        if self._check_auth():
            contact_ids = [contact.id for contact in contacts or []]
            self._inviteIntoRoom(room.id, contact_ids)

    def leaveRoom(self, room):
        """Leave a room.

        :param room: LineRoom instance to leave
        """
        if self._check_auth():
            try:
                self._leaveRoom(room.id)
                self.rooms.remove(room)
                return True
            except Exception as e:
                self.raise_error(e)
            return False

    def sendMessage(self, message, seq=0):
        """Send a message.

        :param message: LineMessage instance to send
        :param seq: request sequence number
        """
        if self._check_auth():
            self._sendMessage(message, seq)

    def getMessageBox(self, id):
        """Get MessageBox by id; returns None when the lookup fails.

        :param id: `contact` id or `group` id or `room` id
        """
        if self._check_auth():
            try:
                messageBoxWrapUp = self._getMessageBoxCompactWrapUp(id)
                return messageBoxWrapUp.messageBox
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            except Exception:
                return None

    def getRecentMessages(self, messageBox, count):
        """Get recent messages from MessageBox.

        :param messageBox: MessageBox object
        :param count: number of messages to fetch
        """
        if self._check_auth():
            messages = self._getRecentMessages(messageBox.id, count)
            return self.getLineMessageFromMessage(messages)

    def longPoll(self, count=50):
        """Receive a list of operations that have to be processed by the
        original Line client.

        :param count: number of operations to fetch per poll
        :returns: a generator which yields (sender, receiver, message)

        >>> for op in client.longPoll():
                sender = op[0]
                receiver = op[1]
                message = op[2]
                print "%s->%s : %s" % (sender, receiver, message)
        """
        if self._check_auth():
            # Check whether there are any operations from the LINE server.
            OT = OperationType
            try:
                operations = self._fetchOperations(self.revision, count)
            except EOFError:
                return
            except TalkException as e:
                if e.code == 9:
                    # Error 9: the session was taken over on another device.
                    self.raise_error("user logged in to another machine")
                else:
                    return

            for operation in operations:
                if operation.type == OT.END_OF_OPERATION:
                    pass
                elif operation.type == OT.SEND_MESSAGE:
                    pass
                elif operation.type == OT.RECEIVE_MESSAGE:
                    message = LineMessage(self, operation.message)

                    raw_sender = operation.message._from
                    raw_receiver = operation.message.to

                    sender = self.getContactOrRoomOrGroupById(raw_sender)
                    receiver = self.getContactOrRoomOrGroupById(raw_receiver)

                    # An unknown peer usually means stale caches: refresh
                    # everything once and retry the lookup.
                    if sender is None or receiver is None:
                        self.refreshGroups()
                        self.refreshContacts()
                        self.refreshActiveRooms()

                        sender = self.getContactOrRoomOrGroupById(raw_sender)
                        receiver = self.getContactOrRoomOrGroupById(raw_receiver)

                    yield (sender, receiver, message)
                else:
                    print("[*] %s" % OT._VALUES_TO_NAMES[operation.type])
                    print(operation)

                self.revision = max(operation.revision, self.revision)

    def createContactOrRoomOrGroupByMessage(self, message):
        """Placeholder dispatch on the message's destination type (no-op)."""
        if message.toType == ToType.USER:
            pass
        elif message.toType == ToType.ROOM:
            pass
        elif message.toType == ToType.GROUP:
            pass

    def getLineMessageFromMessage(self, messages=None):
        """Change Message objects to LineMessage objects.

        :param messages: list of Message objects
        """
        return [LineMessage(self, message) for message in messages or []]

    def _check_auth(self):
        """Return True if logged in; otherwise raise via raise_error."""
        if self.authToken:
            return True
        self.raise_error("you need to login")
| |
#!/usr/bin/env python
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Coal Mine CLI
"""
import argparse
from configparser import SafeConfigParser
import copy
import os
import pprint
import re
import requests
try:
from simplejson.errors import JSONDecodeError as JSONError
except ImportError:
JSONError = ValueError
import sys
# Default location of the CLI's INI configuration file and the section
# inside it that stores connection settings.
config_file = '~/.coal-mine.ini'
config_section = 'coal-mine'
def parse_args(args, config_file):
    """Build the Coal Mine CLI argument parser and parse *args*.

    Connection defaults (host, port, auth key) are seeded from the
    ``coal-mine`` section of *config_file* when present.

    :param args: argument list (typically ``sys.argv[1:]``)
    :param config_file: path of the INI file to read defaults from
    :returns: parsed namespace, augmented with a computed ``url``
        attribute pointing at the server's canary endpoint
    """
    # NOTE(review): SafeConfigParser is a deprecated alias of ConfigParser
    # (removed in Python 3.12); migrating requires updating the
    # module-level import as well.
    config = SafeConfigParser()
    config.read([config_file])
    try:
        section = config[config_section]
    except KeyError:
        # Consistency fix: use the config_section constant here instead of
        # the hard-coded 'coal-mine' literal the original used, so renaming
        # the section cannot silently diverge.
        config[config_section] = {}
        section = config[config_section]

    # Options shared by every subcommand that talks to the server.
    connect_parser = argparse.ArgumentParser(add_help=False)
    host_default = section.get('host', 'localhost')
    connect_parser.add_argument('--host', action='store',
                                help="Server host name or URL (default {})".
                                format(host_default), default=host_default)
    port_default = section.get('port', None)
    connect_parser.add_argument('--port', action='store', type=int,
                                help='Server port', default=port_default)
    auth_key_group = connect_parser.add_mutually_exclusive_group()
    auth_key_default = section.get('auth-key', None)
    auth_key_group.add_argument('--auth-key', action='store',
                                help='Authentication key (default {})'.format(
                                    '<hidden>' if auth_key_default else None),
                                default=auth_key_default)
    auth_key_group.add_argument('--no-auth-key', action='store_true',
                                help='Disable authentication',
                                default=False)

    parser = argparse.ArgumentParser(description="CLI wrapper for Coal "
                                     "Mine's HTTP API")
    subparsers = parser.add_subparsers()

    configure_parser = subparsers.add_parser('configure', help='Save '
                                             'configuration from command line '
                                             'to ' + config_file,
                                             parents=[connect_parser])
    configure_parser.set_defaults(func=handle_configure,
                                  config_parser=config,
                                  config_file=config_file)

    create_parser = subparsers.add_parser('create', help='Create canary',
                                          parents=[connect_parser])
    create_parser.add_argument('--name', action='store', required=True)
    create_parser.add_argument('--periodicity', action='store',
                               type=periodicity, required=True)
    create_parser.add_argument('--description', action='store')
    create_parser.add_argument('--email', action='append')
    create_parser.add_argument('--paused', action='store_true', default=False)
    create_parser.set_defaults(func=handle_create)

    # Shared "identify one canary" options: exactly one of name/slug/id.
    id_parser = argparse.ArgumentParser(add_help=False)
    id_parser_group = id_parser.add_mutually_exclusive_group(required=True)
    id_parser_group.add_argument('--name', action='store')
    id_parser_group.add_argument('--slug', action='store')
    id_parser_group.add_argument('--id', action='store')

    delete_parser = subparsers.add_parser('delete', help='Delete canary',
                                          parents=[connect_parser, id_parser])
    delete_parser.set_defaults(func=handle_delete)

    update_parser = subparsers.add_parser('update', help='Update canary',
                                          parents=[connect_parser])
    # 'update' may rename a canary, so --name is not mutually exclusive
    # with --slug/--id here (handle_update resolves the ambiguity).
    update_parser.add_argument('--name', action='store')
    update_parser_group = update_parser.add_mutually_exclusive_group()
    update_parser_group.add_argument('--slug', action='store')
    update_parser_group.add_argument('--id', action='store')
    update_parser.add_argument('--no-history', '--terse', action='store_true',
                               help='Omit history in output')
    update_parser.add_argument('--periodicity', action='store',
                               type=periodicity)
    update_parser.add_argument('--description', action='store')
    update_parser.add_argument('--email', action='append', help='Specify "-" '
                               'to clear existing email(s)')
    update_parser.set_defaults(func=handle_update)

    get_parser = subparsers.add_parser('get', help='Get canary',
                                       parents=[connect_parser, id_parser])
    get_parser.add_argument('--no-history', '--terse', action='store_true',
                            help='Omit history in output')
    get_parser.set_defaults(func=handle_get)

    list_parser = subparsers.add_parser('list', help='List canaries',
                                        parents=[connect_parser])
    list_parser.add_argument('--verbose', action='store_true', default=None)
    list_parser.add_argument('--no-history', '--terse', action='store_true',
                             help='Omit history in output')
    # Tri-state flags: None means "don't filter on this attribute".
    paused_group = list_parser.add_mutually_exclusive_group()
    paused_group.add_argument('--paused', action='store_true', default=None)
    paused_group.add_argument('--no-paused', dest='paused',
                              action='store_false', default=None)
    late_group = list_parser.add_mutually_exclusive_group()
    late_group.add_argument('--late', action='store_true', default=None)
    late_group.add_argument('--no-late', dest='late',
                            action='store_false', default=None)
    list_parser.add_argument('--search', action='store', default=None,
                             help='Regular expression to match against name, '
                             'slug, identifier, and email addresses')
    list_parser.set_defaults(func=handle_list)

    trigger_parser = subparsers.add_parser('trigger', help='Trigger canary',
                                           parents=[connect_parser, id_parser])
    trigger_parser.add_argument('--comment', action='store')
    trigger_parser.set_defaults(func=handle_trigger)

    pause_parser = subparsers.add_parser('pause', help='Pause canary',
                                         parents=[connect_parser, id_parser])
    pause_parser.add_argument('--no-history', '--terse', action='store_true',
                              help='Omit history in output')
    pause_parser.add_argument('--comment', action='store')
    pause_parser.set_defaults(func=handle_pause)

    unpause_parser = subparsers.add_parser('unpause', help='Unpause canary',
                                           parents=[connect_parser, id_parser])
    unpause_parser.add_argument('--no-history', '--terse', action='store_true',
                                help='Omit history in output')
    unpause_parser.add_argument('--comment', action='store')
    unpause_parser.set_defaults(func=handle_unpause)

    args = parser.parse_args(args)
    if 'func' not in args:
        # Subcommands are optional on Python 3; require one explicitly.
        parser.error("No command specified")

    # Assemble the base URL of the canary API from host and port.
    url = ''
    if not re.match(r'^https?:', args.host):
        url += 'http://'
    url += args.host
    if args.port:
        url += ':{}'.format(args.port)
    url += '/coal-mine/v1/canary/'
    args.url = url

    if args.no_auth_key:
        args.auth_key = None
    if args.func is not handle_configure:
        # Only 'configure' needs to see the --no-auth-key flag afterwards.
        del args.no_auth_key
    return args
def doit(args, config_file):
    """Parse *args* against *config_file*, then run the chosen subcommand."""
    namespace = parse_args(args, config_file)
    namespace.func(namespace)
def handle_configure(args):
    """Persist connection settings from the command line into the INI file."""
    settings = args.config_parser[config_section]
    settings['host'] = args.host
    if args.port:
        settings['port'] = str(args.port)
    if args.auth_key:
        settings['auth-key'] = args.auth_key
    elif args.no_auth_key:
        # Explicitly disabling auth removes any stored key.
        settings.pop('auth-key', None)
    with open(args.config_file, 'w') as stream:
        args.config_parser.write(stream)
def handle_create(args):
    """Handle the ``create`` subcommand by delegating to the HTTP API."""
    call('create', args)
def handle_delete(args):
    """Handle the ``delete`` subcommand by delegating to the HTTP API."""
    call('delete', args)
def get_no_history_filter(d):
    """Return *d* with canary history removed, deep-copying before mutation.

    Handles both response shapes — a single ``'canary'`` dict and a
    ``'canaries'`` list; anything else is returned unchanged.
    """
    if 'canary' in d:
        trimmed = copy.deepcopy(d)
        del trimmed['canary']['history']
        return trimmed
    if 'canaries' in d:  # pragma: no cover (should always be true)
        trimmed = copy.deepcopy(d)
        for canary in trimmed['canaries']:
            del canary['history']
        return trimmed
    return d  # pragma: no cover
def handle_update(args):
    """Handle ``update``: resolve --name into an id when needed, then call."""
    if not (args.name or args.id or args.slug):
        sys.exit('Must specify --name, --id, or --slug')
    if args.name and not (args.id or args.slug):
        # 'update' can rename, so a bare --name means "look the canary up
        # first", then address it by id.
        found = call('get', args, {'name': args.name}, action='return')
        del args.name
        args.id = found['canary']['id']
    terse = vars(args).pop('no_history', None)
    call('update', args, filter=get_no_history_filter if terse else None)
def handle_get(args):
    """Handle ``get``, optionally stripping history from the output."""
    terse = vars(args).pop('no_history', None)
    call('get', args, filter=get_no_history_filter if terse else None)
def handle_list(args):
    """Handle ``list``: drop unset tri-state filters, then call the API."""
    for flag in ('paused', 'late', 'search'):
        # None means "no filter"; remove it so it is not sent as a param.
        if getattr(args, flag) is None:
            delattr(args, flag)
    terse = vars(args).pop('no_history', None)
    call('list', args, filter=get_no_history_filter if terse else None)
def handle_trigger(args):
    """Handle ``trigger``; triggering a canary requires no auth key."""
    del args.auth_key
    call('trigger', args)
def handle_pause(args):
    """Handle ``pause``, optionally stripping history from the output."""
    terse = vars(args).pop('no_history', None)
    call('pause', args, filter=get_no_history_filter if terse else None)
def handle_unpause(args):
    """Handle ``unpause``, optionally stripping history from the output."""
    terse = vars(args).pop('no_history', None)
    call('unpause', args, filter=get_no_history_filter if terse else None)
def call(command, args, payload=None, action='print', filter=None):
    """Perform an HTTP GET against the server's *command* endpoint.

    :param command: API endpoint name appended to ``args.url``
    :param args: parsed namespace; also mined for query parameters
    :param payload: explicit query dict; when omitted, one is built from
        the namespace attributes
    :param action: 'print' pretty-prints the JSON body; 'return' returns it
    :param filter: optional transform applied to the body before printing
    """
    url = args.url + command
    if payload:
        if args.auth_key:
            payload['auth_key'] = args.auth_key
    else:
        # Build the query from the remaining namespace attributes.
        # 'email' may repeat, so it is passed through as a list rather
        # than stringified; empty email lists are omitted entirely.
        payload = {key: (getattr(args, key) if key == 'email'
                         else str(getattr(args, key)))
                   for key in dir(args)
                   if key not in ('host', 'port', 'func', 'url') and
                   not key.startswith('_') and
                   getattr(args, key) is not None and
                   not (key == 'email' and getattr(args, key) == [])}
    response = requests.get(url, params=payload)
    if response.status_code != 200:
        # Report the HTTP failure, preferring the JSON error body when
        # the server returned one.
        sys.stderr.write('{} {}\n'.format(
            response.status_code, response.reason))
        try:
            sys.exit(pprint.pformat(response.json()).strip())
        except JSONError:
            sys.exit(response.text)
    if action == 'print':
        try:
            content = response.json()
            if filter:
                content = filter(content)
            pprint.pprint(content)
        except BrokenPipeError:  # pragma: no cover
            pass
    elif action == 'return':
        return response.json()
    else:  # pragma: no cover
        raise Exception('Unrecognized action: {}'.format(action))
def periodicity(value):
    """argparse type for --periodicity: numeric strings become floats.

    A numeric value is a fixed period; any other string is passed through
    verbatim (interpreted server-side as a scheduling expression).
    The parameter was renamed from ``str``, which shadowed the builtin.

    :param value: raw command-line string
    :returns: float for purely-numeric input, otherwise the original string
    """
    if re.match(r'[0-9.]+$', value):
        return float(value)
    return value
def main():  # pragma: no cover
    """CLI entry point: run with argv and the expanded default config path."""
    doit(sys.argv[1:], os.path.expanduser(config_file))

if __name__ == '__main__':  # pragma: no cover
    main()
| |
from __future__ import division, absolute_import, print_function
import pytest
import warnings
import sys
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns, assert_almost_equal
)
from numpy.compat import long
class TestAtleast1d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(long(3)).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d(object):
    """Tests for atleast_2d across inputs of increasing dimensionality."""

    def test_0D_array(self):
        scalars = [array(1), array(2)]
        outcome = [atleast_2d(s) for s in scalars]
        assert_array_equal(outcome, [array([[1]]), array([[2]])])

    def test_1D_array(self):
        vectors = [array([1, 2]), array([2, 3])]
        outcome = [atleast_2d(v) for v in vectors]
        assert_array_equal(outcome, [array([[1, 2]]), array([[2, 3]])])

    def test_2D_array(self):
        mats = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        outcome = [atleast_2d(m) for m in mats]
        # Already >= 2-D: returned unchanged.
        assert_array_equal(outcome, mats)

    def test_3D_array(self):
        base_a = array([[1, 2], [1, 2]])
        base_b = array([[2, 3], [2, 3]])
        cubes = [array([base_a, base_a]), array([base_b, base_b])]
        outcome = [atleast_2d(c) for c in cubes]
        assert_array_equal(outcome, cubes)

    def test_r2array(self):
        """ Test to make sure equivalent Travis O's r2array function
        """
        assert_(atleast_2d(3).shape == (1, 1))
        assert_(atleast_2d([3j, 1]).shape == (1, 2))
        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d(object):
    """Tests for atleast_3d across inputs of increasing dimensionality."""

    def test_0D_array(self):
        scalars = [array(1), array(2)]
        outcome = [atleast_3d(s) for s in scalars]
        assert_array_equal(outcome, [array([[[1]]]), array([[[2]]])])

    def test_1D_array(self):
        vectors = [array([1, 2]), array([2, 3])]
        outcome = [atleast_3d(v) for v in vectors]
        assert_array_equal(outcome, [array([[[1], [2]]]), array([[[2], [3]]])])

    def test_2D_array(self):
        mats = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        outcome = [atleast_3d(m) for m in mats]
        # 2-D input gains one trailing axis.
        assert_array_equal(outcome, [m[:, :, None] for m in mats])

    def test_3D_array(self):
        base_a = array([[1, 2], [1, 2]])
        base_b = array([[2, 3], [2, 3]])
        cubes = [array([base_a, base_a]), array([base_b, base_b])]
        outcome = [atleast_3d(c) for c in cubes]
        assert_array_equal(outcome, cubes)
class TestHstack(object):
    """Tests for hstack: input validation, stacking, generator deprecation."""

    def test_non_iterable(self):
        assert_raises(TypeError, hstack, 1)

    def test_empty_input(self):
        assert_raises(ValueError, hstack, ())

    def test_0D_array(self):
        stacked = hstack([array(1), array(2)])
        assert_array_equal(stacked, array([1, 2]))

    def test_1D_array(self):
        stacked = hstack([array([1]), array([2])])
        assert_array_equal(stacked, array([1, 2]))

    def test_2D_array(self):
        stacked = hstack([array([[1], [2]]), array([[1], [2]])])
        assert_array_equal(stacked, array([[1, 1], [2, 2]]))

    def test_generator(self):
        # Generator input is deprecated and must warn.
        with assert_warns(FutureWarning):
            hstack((np.arange(3) for _ in range(2)))
        if sys.version_info.major > 2:
            # map returns a list on Python 2
            with assert_warns(FutureWarning):
                hstack(map(lambda x: x, np.ones((3, 2))))
class TestVstack(object):
    """Tests for vstack: input validation, stacking, generator deprecation."""

    def test_non_iterable(self):
        assert_raises(TypeError, vstack, 1)

    def test_empty_input(self):
        assert_raises(ValueError, vstack, ())

    def test_0D_array(self):
        stacked = vstack([array(1), array(2)])
        assert_array_equal(stacked, array([[1], [2]]))

    def test_1D_array(self):
        stacked = vstack([array([1]), array([2])])
        assert_array_equal(stacked, array([[1], [2]]))

    def test_2D_array(self):
        stacked = vstack([array([[1], [2]]), array([[1], [2]])])
        assert_array_equal(stacked, array([[1], [2], [1], [2]]))

    def test_2D_array2(self):
        stacked = vstack([array([1, 2]), array([1, 2])])
        assert_array_equal(stacked, array([[1, 2], [1, 2]]))

    def test_generator(self):
        # Generator input is deprecated and must warn.
        with assert_warns(FutureWarning):
            vstack((np.arange(3) for _ in range(2)))
class TestConcatenate(object):
    """Tests for np.concatenate: copy semantics, argument validation,
    axis handling (including axis=None), and the ``out=`` parameter."""

    def test_returns_copy(self):
        # Concatenating a single array must still copy its data.
        a = np.eye(3)
        b = np.concatenate([a])
        b[0, 0] = 2
        assert b[0, 0] != a[0, 0]

    def test_exceptions(self):
        # test axis must be in bounds
        for ndim in [1, 2, 3]:
            a = np.ones((1,)*ndim)
            np.concatenate((a, a), axis=0)  # OK
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
        # Scalars cannot be concatenated
        assert_raises(ValueError, concatenate, (0,))
        assert_raises(ValueError, concatenate, (np.array(0),))
        # test shapes must match except for concatenation axis
        a = np.ones((1, 2, 3))
        b = np.ones((2, 2, 3))
        axis = list(range(3))
        for i in range(3):
            np.concatenate((a, b), axis=axis[0])  # OK
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
            # Rotate the axes so every position is exercised as the
            # (only) mismatched dimension.
            a = np.moveaxis(a, -1, 0)
            b = np.moveaxis(b, -1, 0)
            axis.append(axis.pop(0))
        # No arrays to concatenate raises ValueError
        assert_raises(ValueError, concatenate, ())

    def test_concatenate_axis_None(self):
        # axis=None flattens every input before concatenating.
        a = np.arange(4, dtype=np.float64).reshape((2, 2))
        b = list(range(3))
        c = ['x']
        r = np.concatenate((a, a), axis=None)
        assert_equal(r.dtype, a.dtype)
        assert_equal(r.ndim, 1)
        r = np.concatenate((a, b), axis=None)
        assert_equal(r.size, a.size + len(b))
        assert_equal(r.dtype, a.dtype)
        # Mixing in strings promotes everything to a string dtype.
        r = np.concatenate((a, b, c), axis=None)
        d = array(['0.0', '1.0', '2.0', '3.0',
                   '0', '1', '2', 'x'])
        assert_array_equal(r, d)
        out = np.zeros(a.size + len(b))
        r = np.concatenate((a, b), axis=None)
        rout = np.concatenate((a, b), axis=None, out=out)
        assert_(out is rout)
        assert_equal(r, rout)

    def test_large_concatenate_axis_None(self):
        # When no axis is given, concatenate uses flattened versions.
        # This also had a bug with many arrays (see gh-5979).
        x = np.arange(1, 100)
        r = np.concatenate(x, None)
        assert_array_equal(x, r)
        # This should probably be deprecated:
        r = np.concatenate(x, 100)  # axis is >= MAXDIMS
        assert_array_equal(x, r)

    def test_concatenate(self):
        # Test concatenate function
        # One sequence returns unmodified (but as array)
        r4 = list(range(4))
        assert_array_equal(concatenate((r4,)), r4)
        # Any sequence
        assert_array_equal(concatenate((tuple(r4),)), r4)
        assert_array_equal(concatenate((array(r4),)), r4)
        # 1D default concatenation
        r3 = list(range(3))
        assert_array_equal(concatenate((r4, r3)), r4 + r3)
        # Mixed sequence types
        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
        # Explicit axis specification
        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
        # Including negative
        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
        # 2D
        a23 = array([[10, 11, 12], [13, 14, 15]])
        a13 = array([[0, 1, 2]])
        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
        assert_array_equal(concatenate((a23, a13)), res)
        assert_array_equal(concatenate((a23, a13), 0), res)
        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays much match shape
        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
        # 3D
        res = arange(2 * 3 * 7).reshape((2, 3, 7))
        a0 = res[..., :4]
        a1 = res[..., 4:6]
        a2 = res[..., 6:]
        assert_array_equal(concatenate((a0, a1, a2), 2), res)
        assert_array_equal(concatenate((a0, a1, a2), -1), res)
        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
        # Concatenation into a preallocated output array.
        out = res.copy()
        rout = concatenate((a0, a1, a2), 2, out=out)
        assert_(out is rout)
        assert_equal(res, rout)

    def test_bad_out_shape(self):
        # out= must have exactly the result's shape, not merely its size.
        a = array([1, 2])
        b = array([3, 4])
        assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
        assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
        assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
        concatenate((a, b), out=np.empty(4))

    def test_out_dtype(self):
        # Safe casts into out are allowed; unsafe ones raise TypeError.
        out = np.empty(4, np.float32)
        res = concatenate((array([1, 2]), array([3, 4])), out=out)
        assert_(out is res)
        out = np.empty(4, np.complex64)
        res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
        assert_(out is res)
        # invalid cast
        out = np.empty(4, np.int32)
        assert_raises(TypeError, concatenate,
                      (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
def test_stack():
    """Exercise np.stack: validation, axis handling, result shapes, and
    the generator-input deprecation."""
    # non-iterable input
    assert_raises(TypeError, stack, 1)
    # 0d input
    for input_ in [(1, 2, 3),
                   [np.int32(1), np.int32(2), np.int32(3)],
                   [np.array(1), np.array(2), np.array(3)]]:
        assert_array_equal(stack(input_), [1, 2, 3])
    # 1d input examples
    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    r1 = array([[1, 2, 3], [4, 5, 6]])
    assert_array_equal(np.stack((a, b)), r1)
    assert_array_equal(np.stack((a, b), axis=1), r1.T)
    # all input types
    assert_array_equal(np.stack(list([a, b])), r1)
    assert_array_equal(np.stack(array([a, b])), r1)
    # all shapes for 1d input
    arrays = [np.random.randn(3) for _ in range(10)]
    axes = [0, 1, -1, -2]
    expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    # out-of-bounds axes raise AxisError
    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
    # all shapes for 2d input
    arrays = [np.random.randn(3, 4) for _ in range(10)]
    axes = [0, 1, 2, -1, -2, -3]
    expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
                       (3, 4, 10), (3, 10, 4), (10, 3, 4)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    # empty arrays
    assert_(stack([[], [], []]).shape == (3, 0))
    assert_(stack([[], [], []], axis=1).shape == (0, 3))
    # edge cases
    assert_raises_regex(ValueError, 'need at least one array', stack, [])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [1, np.arange(3)])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(2), np.arange(3)])
    # generator is deprecated
    with assert_warns(FutureWarning):
        result = stack((x for x in range(3)))
    assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock(object):
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
    """Parametrized fixture yielding one of three block() implementations."""
    # blocking small arrays and large arrays go through different paths.
    # the algorithm is triggered depending on the number of element
    # copies required.
    # We define a test fixture that forces most tests to go through
    # both code paths.
    # Ultimately, this should be removed if a single algorithm is found
    # to be faster for both small and large arrays.
    def _block_force_concatenate(arrays):
        arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
        return _block_concatenate(arrays, list_ndim, result_ndim)

    def _block_force_slicing(arrays):
        arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
        return _block_slicing(arrays, list_ndim, result_ndim)

    if request.param == 'force_concatenate':
        return _block_force_concatenate
    elif request.param == 'force_slicing':
        return _block_force_slicing
    elif request.param == 'block':
        return block
    else:
        raise ValueError('Unknown blocking request. There is a typo in the tests.')
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
[1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
# # # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
[1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
# # # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3],
[2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1],
[1, 1],
[2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
result = block([[one_2d, two_2d],
[three_2d],
[four_1d],
[five_0d, six_1d],
[zero_2d]])
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([
[
block([
[one],
[three],
[four]
]),
two
],
[five, six],
[zero]
])
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
expected = array([[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]]])
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [[np.ones((2,3)), np.ones((2,2))],
[np.ones((2,2)), np.ones((2,2))]]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
def test_empty_lists(self, block):
assert_raises_regex(ValueError, 'empty', block, [])
assert_raises_regex(ValueError, 'empty', block, [[]])
assert_raises_regex(ValueError, 'empty', block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,)*3, order='C')
arr_f = np.zeros((3,)*3, order='F')
b_c = [[[arr_c, arr_c],
[arr_c, arr_c]],
[[arr_c, arr_c],
[arr_c, arr_c]]]
b_f = [[[arr_f, arr_f],
[arr_f, arr_f]],
[[arr_f, arr_f],
[arr_f, arr_f]]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
arr_c = np.zeros((3, 3), order='C')
arr_f = np.zeros((3, 3), order='F')
# 2D
b_c = [[arr_c, arr_c],
[arr_c, arr_c]]
b_f = [[arr_f, arr_f],
[arr_f, arr_f]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
def test_block_dispatcher():
    """_block_dispatcher flattens nested lists depth-first and passes
    anything that is not a list (including tuples) through untouched."""
    class ArrayLike(object):
        pass
    a = ArrayLike()
    b = ArrayLike()
    c = ArrayLike()
    def dispatched(arg):
        # materialize the dispatcher's generator so it can be compared
        return list(_block_dispatcher(arg))
    # a bare object yields itself
    assert_equal(dispatched(a), [a])
    # lists are recursed into, preserving left-to-right order
    assert_equal(dispatched([a]), [a])
    assert_equal(dispatched([a, b]), [a, b])
    assert_equal(dispatched([[a], [b, [c]]]), [a, b, c])
    # tuples are not treated as nesting
    assert_equal(dispatched((a, b)), [(a, b)])
| |
"""
The :class:`.CDNClient` class provides a simple API for downloading Steam content from SteamPipe
Initializing :class:`.CDNClient` requires a logged in :class:`.SteamClient` instance
.. warning::
This module uses :mod:`requests` library, which is not gevent cooperative by default.
   It is highly recommended that you use :meth:`steam.monkey.patch_minimal()`.
See example below
.. code:: python
import steam.monkey
steam.monkey.patch_minimal()
from steam.client import SteamClient, EMsg
from steam.client.cdn import CDNClient
mysteam = SteamClient()
mysteam.cli_login()
...
mycdn = CDNClient(mysteam)
Getting depot manifests for an app
.. code:: python
>>> mycdn.get_manifests(570)
[<CDNDepotManifest('Dota 2 Content', app_id=570, depot_id=373301, gid=6397590570861788404, creation_time='2019-06-29 16:03:11')>,
<CDNDepotManifest('Dota 2 Content 2', app_id=570, depot_id=381451, gid=5769691971272474272, creation_time='2019-06-29 00:19:02')>,
<CDNDepotManifest('Dota 2 Content 3', app_id=570, depot_id=381452, gid=3194393866044592918, creation_time='2019-06-27 00:05:38')>,
<CDNDepotManifest('Dota 2 Content 4', app_id=570, depot_id=381453, gid=8005824150061180163, creation_time='2019-06-08 07:49:57')>,
<CDNDepotManifest('Dota 2 Content 5', app_id=570, depot_id=381454, gid=9003299908441378336, creation_time='2019-06-26 18:56:19')>,
<CDNDepotManifest('Dota 2 Content 6', app_id=570, depot_id=381455, gid=8000458746487720619, creation_time='2019-06-29 00:19:43')>,
<CDNDepotManifest('Dota 2 Win32', app_id=570, depot_id=373302, gid=3561463682334619841, creation_time='2019-06-29 00:16:28')>,
<CDNDepotManifest('Dota 2 Win64', app_id=570, depot_id=373303, gid=6464064782313084040, creation_time='2019-06-29 00:16:43')>,
<CDNDepotManifest('Dota 2 Mac', app_id=570, depot_id=373304, gid=5979018571482579541, creation_time='2019-06-29 00:16:59')>,
<CDNDepotManifest('Dota 2 English', app_id=570, depot_id=373305, gid=4435851250675935801, creation_time='2015-06-01 20:15:37')>,
<CDNDepotManifest('Dota 2 Linux', app_id=570, depot_id=373306, gid=4859464855297921815, creation_time='2019-06-29 00:17:25')>,
<CDNDepotManifest('Dota 2 Korean', app_id=570, depot_id=373308, gid=8598853793233320583, creation_time='2019-03-05 17:16:49')>,
<CDNDepotManifest('Dota 2 Simplified Chinese', app_id=570, depot_id=373309, gid=6975893321745168138, creation_time='2019-06-25 21:40:37')>,
<CDNDepotManifest('Dota 2 Russian', app_id=570, depot_id=381456, gid=5425063725991897591, creation_time='2019-03-05 17:19:53')>,
<CDNDepotManifest('Dota 2 Workshop tools', app_id=570, depot_id=381450, gid=8629205096668418087, creation_time='2019-06-29 16:04:18')>,
<CDNDepotManifest('Dota 2 OpenGL Windows', app_id=570, depot_id=401531, gid=6502316736107281444, creation_time='2019-06-07 19:04:08')>,
<CDNDepotManifest('Dota 2 Vulkan Common', app_id=570, depot_id=401535, gid=6405492872419215600, creation_time='2019-06-07 19:04:11')>,
<CDNDepotManifest('Dota 2 Vulkan Win64', app_id=570, depot_id=401536, gid=3821288251412129608, creation_time='2019-06-25 21:42:29')>,
<CDNDepotManifest('Dota 2 Vulkan Linux64', app_id=570, depot_id=401537, gid=3144805829218032316, creation_time='2019-06-17 16:54:43')>,
<CDNDepotManifest('Dota 2 VR', app_id=570, depot_id=313255, gid=706332602567268673, creation_time='2017-10-04 18:52:14')>,
<CDNDepotManifest('Dota 2 Vulkan Mac', app_id=570, depot_id=401538, gid=2223235822414824351, creation_time='2019-06-11 19:37:19')>]
>>> mycdn.get_manifests(570, filter_func=lambda depot_id, info: 'Dota 2 Content' in info['name'])
[<CDNDepotManifest('Dota 2 Content', app_id=570, depot_id=373301, gid=6397590570861788404, creation_time='2019-06-29 16:03:11')>,
<CDNDepotManifest('Dota 2 Content 2', app_id=570, depot_id=381451, gid=5769691971272474272, creation_time='2019-06-29 00:19:02')>,
<CDNDepotManifest('Dota 2 Content 3', app_id=570, depot_id=381452, gid=3194393866044592918, creation_time='2019-06-27 00:05:38')>,
<CDNDepotManifest('Dota 2 Content 4', app_id=570, depot_id=381453, gid=8005824150061180163, creation_time='2019-06-08 07:49:57')>,
<CDNDepotManifest('Dota 2 Content 5', app_id=570, depot_id=381454, gid=9003299908441378336, creation_time='2019-06-26 18:56:19')>,
<CDNDepotManifest('Dota 2 Content 6', app_id=570, depot_id=381455, gid=8000458746487720619, creation_time='2019-06-29 00:19:43')>]
Listing files
.. code:: python
>>> file_list = mycdn.iter_files(570)
>>> list(file_list)[:10]
[<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\dungeon\\particles\\test_particle\\generic_attack_crit_blur_rope.vpcf_c', 2134)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\dungeon\\materials\\blends\\mud_brick_normal_psd_5cc4fe8b.vtex_c', 351444)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\hero_demo\\scripts\\vscripts\\la_spawn_enemy_at_target.lua', 1230)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\winter_2018\\particles\\dark_moon\\darkmoon_last_hit_effect_damage_flash_b.vpcf_c', 1386)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\dungeon\\scripts\\vscripts\\abilities\\siltbreaker_line_wave.lua', 3305)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\dungeon\\materials\\models\\heroes\\broodmother\\broodmother_body_poison.vmat_c', 10888)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota\\resource\\cursor\\workshop\\sltv_shaker_cursor_pack\\cursor_spell_default.ani', 4362)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\overthrow\\panorama\\images\\custom_game\\team_icons\\team_icon_tiger_01_png.vtex_c', 18340)>,
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota\\resource\\cursor\\valve\\ti7\\cursor_attack_illegal.bmp', 4152)>,
     <CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota_addons\\winter_2018\\models\\creeps\\ice_biome\\undeadtusk\\undead_tuskskeleton01.vmdl_c', 13516)>]
Reading a file directly from SteamPipe
.. code:: python
>>> file_list = mycdn.iter_files(570, r'game\dota\gameinfo.gi')
>>> myfile = next(file_list)
<CDNDepotFile(570, 373301, 6397590570861788404, 'game\\dota\\gameinfo.gi', 6808)>
>>> print(myfile.read(80).decode('utf-8'))
"GameInfo"
{
game "Dota 2"
title "Dota 2"
gamelogo 1
type multiplayer_only
...
"""
from zipfile import ZipFile
from io import BytesIO
from collections import OrderedDict, deque
from six import itervalues, iteritems
from binascii import crc32, unhexlify
from datetime import datetime
import logging
import struct
import vdf
from gevent.pool import Pool as GPool
from cachetools import LRUCache
from steam import webapi
from steam.exceptions import SteamError
from steam.core.msg import MsgProto
from steam.enums import EResult, EType
from steam.enums.emsg import EMsg
from steam.utils.web import make_requests_session
from steam.core.crypto import symmetric_decrypt, symmetric_decrypt_ecb
from steam.core.manifest import DepotManifest, DepotFile
from steam.protobufs.content_manifest_pb2 import ContentManifestPayload
try:
import lzma
except ImportError:
from backports import lzma
def decrypt_manifest_gid_2(encrypted_gid, password):
    """Decrypt manifest gid v2 bytes

    :param encrypted_gid: encrypted gid v2 bytes
    :type  encrypted_gid: bytes
    :param password: encryption password
    :type  password: bytes
    :return: manifest gid
    :rtype: int
    """
    plaintext = symmetric_decrypt_ecb(encrypted_gid, password)
    # the decrypted payload is a single little-endian uint64
    (gid,) = struct.unpack('<Q', plaintext)
    return gid
def get_content_servers_from_cs(cell_id, host='cs.steamcontent.com', port=80, num_servers=20, session=None):
    """Get a list of CS servers from a single CS server

    :param cell_id: location cell id
    :type cell_id: bytes
    :param host: CS server host
    :type host: str
    :param port: server port number
    :type port: int
    :param num_servers: number of servers to return
    :type num_servers: int
    :param session: requests Session instance
    :type session: :class:`requests.Session`
    :return: list of CS servers
    :rtype: :class:`list` [:class:`.ContentServer`]
    """
    scheme = 'https' if port == 443 else 'http'
    url = '%s://%s:%s/serverlist/%s/%s/' % (scheme, host, port, cell_id, num_servers)
    if session is None:
        session = make_requests_session()
    resp = session.get(url)
    if resp.status_code != 200:
        return []
    kv = vdf.loads(resp.text, mapper=OrderedDict)
    # a 'deferred' response carries no server list
    if kv.get('deferred') == '1':
        return []
    result = []
    for entry in itervalues(kv['serverlist']):
        server = ContentServer()
        server.type = entry['type']
        server.https = entry['https_support'] == 'mandatory'
        server.host = entry['Host']
        server.vhost = entry['vhost']
        server.port = 443 if server.https else 80
        server.cell_id = entry['cell']
        server.load = entry['load']
        server.weighted_load = entry['weightedload']
        result.append(server)
    return result
def get_content_servers_from_webapi(cell_id, num_servers=20):
    """Get a list of CS servers from Steam WebAPI

    :param cell_id: location cell id
    :type cell_id: bytes
    :param num_servers: number of servers to return
    :type num_servers: int
    :return: list of CS servers
    :rtype: :class:`list` [:class:`.ContentServer`]
    """
    resp = webapi.get('IContentServerDirectoryService', 'GetServersForSteamPipe',
                      params={'cell_id': cell_id, 'max_servers': num_servers})
    result = []
    for entry in resp['response']['servers']:
        server = ContentServer()
        server.type = entry['type']
        server.https = entry['https_support'] == 'mandatory'
        server.host = entry['host']
        server.vhost = entry['vhost']
        server.port = 443 if server.https else 80
        # cell_id may be absent from the WebAPI response
        server.cell_id = entry.get('cell_id', 0)
        server.load = entry['load']
        server.weighted_load = entry['weighted_load']
        result.append(server)
    return result
class ContentServer(object):
    """Plain record describing a single SteamPipe content (CS) server."""
    https = False          # whether HTTPS is mandatory for this server
    host = None            # server hostname
    vhost = None           # virtual host header value
    port = None            # port number (443 when https, else 80)
    type = None            # server type string as reported by Steam
    cell_id = 0            # location cell id
    load = None            # raw load metric
    weighted_load = None   # weighted load metric
    def __repr__(self):
        scheme = 'https' if self.https else 'http'
        return "<%s('%s://%s:%s', type=%s, cell_id=%s)>" % (
            self.__class__.__name__,
            scheme,
            self.host,
            self.port,
            repr(self.type),
            repr(self.cell_id),
        )
class CDNDepotFile(DepotFile):
    def __init__(self, manifest, file_mapping):
        """File-like object proxy for content files located on SteamPipe

        :param manifest: parent manifest instance
        :type manifest: :class:`.CDNDepotManifest`
        :param file_mapping: file mapping instance from manifest
        :type file_mapping: ContentManifestPayload.FileMapping
        """
        # BUGFIX: the error message previously named CDNDepotFile instead of
        # the type actually required, CDNDepotManifest
        if not isinstance(manifest, CDNDepotManifest):
            raise TypeError("Expected 'manifest' to be of type CDNDepotManifest")
        if not isinstance(file_mapping, ContentManifestPayload.FileMapping):
            raise TypeError("Expected 'file_mapping' to be of type ContentManifestPayload.FileMapping")
        DepotFile.__init__(self, manifest, file_mapping)
        self.offset = 0     # current read position within the file
        self._lc = None     # metadata of the last chunk fetched (cache key)
        self._lcbuff = b''  # decrypted contents of the last chunk fetched
    def __repr__(self):
        return "<%s(%s, %s, %s, %s, %s)>" % (
            self.__class__.__name__,
            self.manifest.app_id,
            self.manifest.depot_id,
            self.manifest.gid,
            repr(self.filename_raw),
            'is_directory=True' if self.is_directory else self.size,
            )
    @property
    def seekable(self):
        """Whether the file supports :meth:`seek`/:meth:`tell` (regular files only)

        :type: bool
        """
        return self.is_file
    def tell(self):
        """Current read offset

        :type: int
        :raises ValueError: if the entry is not seekable (directory or symlink)
        """
        if not self.seekable:
            raise ValueError("This file is not seekable, probably because its directory or symlink")
        return self.offset
    def seek(self, offset, whence=0):
        """Seek to a position in the file

        :param offset: file offset
        :type offset: int
        :param whence: offset mode, see :meth:`io.IOBase.seek`
        :type whence: int
        :raises ValueError: if the entry is not seekable, or whence is invalid
        """
        if not self.seekable:
            raise ValueError("This file is not seekable, probably because its directory or symlink")
        if whence == 0:
            if offset < 0:
                raise IOError("Invalid argument")
        elif whence == 1:
            offset = self.offset + offset
        elif whence == 2:
            offset = self.size + offset
        else:
            raise ValueError("Invalid value for whence")
        # clamp to [0, size] rather than erroring on overshoot
        self.offset = max(0, min(self.size, offset))
    def _get_chunk(self, chunk):
        # download (or reuse from single-slot cache) the decrypted chunk data
        if not self._lc or self._lc.sha != chunk.sha:
            self._lcbuff = self.manifest.cdn_client.get_chunk(
                self.manifest.app_id,
                self.manifest.depot_id,
                chunk.sha.hex(),
                )
            self._lc = chunk
        return self._lcbuff
    def __iter__(self):
        return self
    def __next__(self):
        return self.next()
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        pass
    def next(self):
        """Return the next line, raising StopIteration at end of file"""
        line = self.readline()
        if line == b'':
            raise StopIteration
        return line
    def read(self, length=-1):
        """Read bytes from the file

        :param length: number of bytes to read. Read the whole file if not set
        :type length: int
        :returns: file data
        :rtype: bytes
        """
        if length == -1:
            length = self.size - self.offset
        if length == 0 or self.offset >= self.size or self.size == 0:
            return b''
        end_offset = self.offset + length
        # we cache last chunk to allow small length reads and local seek
        if (self._lc
           and self.offset >= self._lc.offset
           and end_offset <= self._lc.offset + self._lc.cb_original):
            data = self._lcbuff[self.offset - self._lc.offset:self.offset - self._lc.offset + length]
        # if we need to read outside the bounds of the cached chunk
        # we go to loop over chunks to determine which to download
        else:
            data = BytesIO()
            start_offset = None
            # Manifest orders the chunks by offset in ascending order
            for chunk in self.chunks:
                if chunk.offset >= end_offset:
                    break
                chunk_start = chunk.offset
                chunk_end = chunk_start + chunk.cb_original
                # fetch any chunk that overlaps the requested [offset, end_offset) range
                if (  chunk_start <= self.offset < chunk_end
                   or chunk_start < end_offset <= chunk_end):
                    if start_offset is None:
                        start_offset = chunk.offset
                    data.write(self._get_chunk(chunk))
            data.seek(self.offset - start_offset)
            data = data.read(length)
        self.offset = min(self.size, end_offset)
        return data
    def readline(self):
        """Read a single line

        :return: single file line
        :rtype: bytes
        """
        buf = b''
        for chunk in iter(lambda: self.read(256), b''):
            pos = chunk.find(b'\n')
            if pos > -1:
                pos += 1  # include \n
                buf += chunk[:pos]
                # rewind past the bytes read beyond the newline
                self.seek(self.offset - (len(chunk) - pos))
                break
            buf += chunk
        return buf
    def readlines(self):
        """Get file contents as list of lines

        :return: list of lines
        :rtype: :class:`list` [:class:`bytes`]
        """
        return [line for line in self]
class CDNDepotManifest(DepotManifest):
    """Depot manifest bound to a :class:`.CDNClient` for chunk retrieval."""
    DepotFileClass = CDNDepotFile
    name = None  #: set only by :meth:`CDNClient.get_manifests`
    def __init__(self, cdn_client, app_id, data):
        """Holds manifest metadata and file list.

        :param cdn_client: CDNClient instance
        :type cdn_client: :class:`.CDNClient`
        :param app_id: App ID
        :type app_id: int
        :param data: serialized manifest data
        :type data: bytes
        """
        self.cdn_client = cdn_client
        self.app_id = app_id
        DepotManifest.__init__(self, data)
    def __repr__(self):
        created = datetime.utcfromtimestamp(self.metadata.creation_time)
        parts = []
        if self.name:
            parts.append(repr(self.name))
        parts.append("app_id=" + str(self.app_id))
        parts.append("depot_id=" + str(self.depot_id))
        parts.append("gid=" + str(self.gid))
        parts.append("creation_time=" + repr(created.isoformat().replace('T', ' ')))
        if self.filenames_encrypted:
            parts.append("filenames_encrypted=True")
        return "<%s(%s)>" % (
            self.__class__.__name__,
            ', '.join(parts),
            )
    def deserialize(self, data):
        """Deserialize manifest data and normalize chunk ordering"""
        DepotManifest.deserialize(self, data)
        # CDNDepotFile.read() walks chunks assuming ascending offsets,
        # so order them once here after deserialization
        for mapping in self.payload.mappings:
            mapping.chunks.sort(key=lambda chunk: chunk.offset)
class CDNClient(object):
DepotManifestClass = CDNDepotManifest
_LOG = logging.getLogger("CDNClient")
servers = deque() #: CS Server list
_chunk_cache = LRUCache(20)
cell_id = 0 #: Cell ID to use, initialized from SteamClient instance
def __init__(self, client):
"""CDNClient allows loading and reading of manifests for Steam apps are used
to list and download content
:param client: logged in SteamClient instance
:type client: :class:`.SteamClient`
"""
self.gpool = GPool(8) #: task pool
self.steam = client #: SteamClient instance
if self.steam:
self.cell_id = self.steam.cell_id
self.web = make_requests_session()
self.depot_keys = {} #: depot decryption keys
self.manifests = {} #: CDNDepotManifest instances
self.app_depots = {} #: app depot info
self.beta_passwords = {} #: beta branch decryption keys
self.licensed_app_ids = set() #: app_ids that the SteamClient instance has access to
self.licensed_depot_ids = set() #: depot_ids that the SteamClient instance has access to
if not self.servers:
self.fetch_content_servers()
self.load_licenses()
def clear_cache(self):
"""Cleared cached information. Next call on methods with caching will return fresh data"""
self.manifests.clear()
self.app_depots.clear()
self.beta_passwords.clear()
def load_licenses(self):
"""Read licenses from SteamClient instance, required for determining accessible content"""
self.licensed_app_ids.clear()
self.licensed_depot_ids.clear()
if self.steam.steam_id.type == EType.AnonUser:
packages = [17906]
else:
if not self.steam.licenses:
self._LOG.debug("No steam licenses found on SteamClient instance")
return
packages = list(map(lambda l: {'packageid': l.package_id, 'access_token': l.access_token},
itervalues(self.steam.licenses)))
for package_id, info in iteritems(self.steam.get_product_info(packages=packages)['packages']):
self.licensed_app_ids.update(info['appids'].values())
self.licensed_depot_ids.update(info['depotids'].values())
def fetch_content_servers(self, num_servers=20):
"""Update CS server list
:param num_servers: numbers of CS server to fetch
:type num_servers: int
"""
self.servers.clear()
self._LOG.debug("Trying to fetch content servers from Steam API")
servers = get_content_servers_from_webapi(self.cell_id)
servers = filter(lambda server: server.type != 'OpenCache', servers) # see #264
self.servers.extend(servers)
if not self.servers:
raise SteamError("Failed to fetch content servers")
def get_content_server(self, rotate=False):
"""Get a CS server for content download
:param rotate: forcefully rotate server list and get a new server
:type rotate: bool
"""
if rotate:
self.servers.rotate(-1)
return self.servers[0]
def get_depot_key(self, app_id, depot_id):
"""Get depot key, which is needed to decrypt files
:param app_id: app id
:type app_id: int
:param depot_id: depot id
:type depot_id: int
:return: returns decryption key
:rtype: bytes
:raises SteamError: error message
"""
if depot_id not in self.depot_keys:
msg = self.steam.get_depot_key(app_id, depot_id)
if msg and msg.eresult == EResult.OK:
self.depot_keys[depot_id] = msg.depot_encryption_key
else:
raise SteamError("Failed getting depot key",
EResult.Timeout if msg is None else EResult(msg.eresult))
return self.depot_keys[depot_id]
def cdn_cmd(self, command, args):
"""Run CDN command request
:param command: command name
:type command: str
:param args: args
:type args: str
:returns: requests response
:rtype: :class:`requests.Response`
:raises SteamError: on error
"""
server = self.get_content_server()
while True:
url = "%s://%s:%s/%s/%s" % (
'https' if server.https else 'http',
server.host,
server.port,
command,
args,
)
try:
resp = self.web.get(url, timeout=10)
except Exception as exp:
self._LOG.debug("Request error: %s", exp)
else:
if resp.ok:
return resp
elif 400 <= resp.status_code < 500:
self._LOG.debug("Got HTTP %s", resp.status_code)
raise SteamError("HTTP Error %s" % resp.status_code)
self.steam.sleep(0.5)
server = self.get_content_server(rotate=True)
def get_chunk(self, app_id, depot_id, chunk_id):
"""Download a single content chunk
:param app_id: App ID
:type app_id: int
:param depot_id: Depot ID
:type depot_id: int
:param chunk_id: Chunk ID
:type chunk_id: int
:returns: chunk data
:rtype: bytes
:raises SteamError: error message
"""
if (depot_id, chunk_id) not in self._chunk_cache:
resp = self.cdn_cmd('depot', '%s/chunk/%s' % (depot_id, chunk_id))
data = symmetric_decrypt(resp.content, self.get_depot_key(app_id, depot_id))
if data[:2] == b'VZ':
if data[-2:] != b'zv':
raise SteamError("VZ: Invalid footer: %s" % repr(data[-2:]))
if data[2:3] != b'a':
raise SteamError("VZ: Invalid version: %s" % repr(data[2:3]))
vzfilter = lzma._decode_filter_properties(lzma.FILTER_LZMA1, data[7:12])
vzdec = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[vzfilter])
checksum, decompressed_size = struct.unpack('<II', data[-10:-2])
# decompress_size is needed since lzma will sometime produce longer output
# [12:-9] is need as sometimes lzma will produce shorter output
# together they get us the right data
data = vzdec.decompress(data[12:-9])[:decompressed_size]
if crc32(data) != checksum:
raise SteamError("VZ: CRC32 checksum doesn't match for decompressed data")
else:
with ZipFile(BytesIO(data)) as zf:
data = zf.read(zf.filelist[0])
self._chunk_cache[(depot_id, chunk_id)] = data
return self._chunk_cache[(depot_id, chunk_id)]
def get_manifest(self, app_id, depot_id, manifest_gid, decrypt=True):
"""Download a manifest file
:param app_id: App ID
:type app_id: int
:param depot_id: Depot ID
:type depot_id: int
:param manifest_gid: Manifest gid
:type manifest_gid: int
:param decrypt: Decrypt manifest filenames
:type decrypt: bool
:returns: manifest instance
:rtype: :class:`.CDNDepotManifest`
"""
if (app_id, depot_id, manifest_gid) not in self.manifests:
resp = self.cdn_cmd('depot', '%s/manifest/%s/5' % (depot_id, manifest_gid))
if resp.ok:
manifest = self.DepotManifestClass(self, app_id, resp.content)
if decrypt:
manifest.decrypt_filenames(self.get_depot_key(app_id, depot_id))
self.manifests[(app_id, depot_id, manifest_gid)] = manifest
return self.manifests[(app_id, depot_id, manifest_gid)]
def check_beta_password(self, app_id, password):
"""Check branch beta password to unlock encrypted branches
:param app_id: App ID
:type app_id: int
:param password: beta password
:type password: str
:returns: result
:rtype: :class:`.EResult`
"""
resp = self.steam.send_job_and_wait(MsgProto(EMsg.ClientCheckAppBetaPassword),
{'app_id': app_id, 'betapassword': password})
if resp.eresult == EResult.OK:
self._LOG.debug("Unlocked following beta branches: %s",
', '.join(map(lambda x: x.betaname.lower(), resp.betapasswords)))
for entry in resp.betapasswords:
self.beta_passwords[(app_id, entry.betaname.lower())] = unhexlify(entry.betapassword)
else:
self._LOG.debug("App beta password check failed. %r" % EResult(resp.eresult))
return EResult(resp.eresult)
def get_app_depot_info(self, app_id):
if app_id not in self.app_depots:
self.app_depots[app_id] = self.steam.get_product_info([app_id])['apps'][app_id]['depots']
return self.app_depots[app_id]
def get_manifests(self, app_id, branch='public', password=None, filter_func=None, decrypt=True):
"""Get a list of CDNDepotManifest for app
:param app_id: App ID
:type app_id: int
:param branch: branch name
:type branch: str
:param password: branch password for locked branches
:type password: str
:param filter_func:
Function to filter depots. ``func(depot_id, depot_info)``
:returns: list of :class:`.CDNDepotManifest`
:rtype: :class:`list` [:class:`.CDNDepotManifest`]
:raises SteamError: error message
"""
depots = self.get_app_depot_info(app_id)
is_enc_branch = False
if branch not in depots.get('branches', {}):
raise SteamError("No branch named %s for app_id %s" % (repr(branch), app_id))
elif int(depots['branches'][branch].get('pwdrequired', 0)) > 0:
is_enc_branch = True
if (app_id, branch) not in self.beta_passwords:
if not password:
raise SteamError("Branch %r requires a password" % branch)
result = self.check_beta_password(app_id, password)
if result != EResult.OK:
raise SteamError("Branch password is not valid. %r" % result)
if (app_id, branch) not in self.beta_passwords:
raise SteamError("Incorrect password for branch %r" % branch)
def async_fetch_manifest(app_id, depot_id, manifest_gid, decrypt, name):
manifest = self.get_manifest(app_id, depot_id, manifest_gid, decrypt)
manifest.name = name
return manifest
tasks = []
shared_depots = {}
for depot_id, depot_info in iteritems(depots):
if not depot_id.isdigit():
continue
depot_id = int(depot_id)
# if filter_func set, use it to filter the list the depots
if filter_func and not filter_func(depot_id, depot_info):
continue
# if we have no license for the depot, no point trying as we won't get depot_key
if (decrypt
and depot_id not in self.licensed_depot_ids
and depot_id not in self.licensed_app_ids):
self._LOG.debug("No license for depot %s (%s). Skipping...",
repr(depot_info['name']),
depot_id,
)
continue
# accumulate the shared depots
if 'depotfromapp' in depot_info:
shared_depots.setdefault(int(depot_info['depotfromapp']), set()).add(depot_id)
continue
# process depot, and get manifest for branch
if is_enc_branch:
egid = depot_info.get('encryptedmanifests', {}).get(branch, {}).get('encrypted_gid_2')
if egid is not None:
manifest_gid = decrypt_manifest_gid_2(unhexlify(egid),
self.beta_passwords[(app_id, branch)])
else:
manifest_gid = depot_info.get('manifests', {}).get('public')
else:
manifest_gid = depot_info.get('manifests', {}).get(branch)
if manifest_gid is not None:
tasks.append(self.gpool.spawn(async_fetch_manifest,
app_id,
depot_id,
manifest_gid,
decrypt,
depot_info['name'],
))
# collect results
manifests = []
for task in tasks:
manifests.append(task.get())
# try:
# result = task.get()
# except SteamError as exp:
# self._LOG.error("Error: %s", exp)
# raise
# else:
# if isinstance(result, list):
# manifests.extend(result)
# else:
# manifests.append(result)
# load shared depot manifests
for app_id, depot_ids in iteritems(shared_depots):
def nested_ffunc(depot_id, depot_info, depot_ids=depot_ids, ffunc=filter_func):
return (int(depot_id) in depot_ids
and (ffunc is None or ffunc(depot_id, depot_info)))
manifests += self.get_manifests(app_id, filter_func=nested_ffunc)
return manifests
def iter_files(self, app_id, filename_filter=None, branch='public', password=None, filter_func=None):
    """Like :meth:`.get_manifests` but returns an iterator that goes through all the files
    in all the manifests.

    :param app_id: App ID
    :type  app_id: int
    :param filename_filter: wildcard filter for file paths
    :type  filename_filter: str
    :param branch: branch name
    :type  branch: str
    :param password: branch password for locked branches
    :type  password: str
    :param filter_func:
        Function to filter depots. ``func(depot_id, depot_info)``
    :returns: generator of CDN files
    :rtype: [:class:`.CDNDepotFile`]
    """
    # Fetch every matching manifest first, then lazily walk their files so
    # the caller can stop early without iterating everything.
    for manifest in self.get_manifests(app_id, branch, password, filter_func):
        for fp in manifest.iter_files(filename_filter):
            yield fp
def get_manifest_for_workshop_item(self, item_id):
    """Get the manifest file for a workshop item that is hosted on SteamPipe

    :param item_id: Workshop ID
    :type  item_id: int
    :returns: manifest instance
    :rtype: :class:`.CDNDepotManifest`
    :raises SteamError: error message
    """
    # Ask the backend for the published file details; all optional payload
    # sections are disabled since only the content handle and title are needed.
    resp = self.steam.send_um_and_wait('PublishedFile.GetDetails#1', {
        'publishedfileids': [item_id],
        'includetags': False,
        'includeadditionalpreviews': False,
        'includechildren': False,
        'includekvtags': False,
        'includevotes': False,
        'short_description': True,
        'includeforsaledata': False,
        'includemetadata': False,
        'language': 0
    }, timeout=7)

    # Overall request failure (transport/service level)
    if resp.header.eresult != EResult.OK:
        raise SteamError(resp.header.error_message or 'No message', resp.header.eresult)

    # NOTE(review): resp cannot be None here since resp.header was already
    # accessed above; these None guards appear to be dead defensive code.
    wf = None if resp is None else resp.body.publishedfiledetails[0]

    # Per-file failure (the request succeeded but this item is in error)
    if wf is None or wf.result != EResult.OK:
        raise SteamError("Failed getting workshop file info",
                         EResult.Timeout if resp is None else EResult(wf.result))
    elif not wf.hcontent_file:
        raise SteamError("Workshop file is not on SteamPipe", EResult.FileNotFound)

    # Workshop content is served from a depot whose id equals the consumer app id.
    app_id = ws_app_id = wf.consumer_appid

    manifest = self.get_manifest(app_id, ws_app_id, wf.hcontent_file)
    manifest.name = wf.title
    return manifest
| |
from doajtest.helpers import DoajTestCase, with_es
from portality import models
from doajtest.fixtures import ApplicationFixtureFactory, ArticleFixtureFactory, JournalFixtureFactory
from copy import deepcopy
import json
class TestCrudReturnValues(DoajTestCase):
    """Exercise the public CRUD API, checking HTTP status codes and that every
    response - including errors - is served with a JSON mimetype."""

    @with_es(indices=[models.Account.__type__])
    def test_01_all_crud(self):
        # Every API version must answer unknown endpoints with a JSON 404 for
        # all HTTP verbs, and answer its root with a JSON success response.
        self.make_account()

        with self.app_test.test_client() as t_client:
            for route in ['', '/v1', '/v2', '/v3']:
                # we should get a JSON 404 if we try to hit a nonexistent endpoint
                response = t_client.get('/api{0}/not_valid'.format(route))
                assert response.status_code == 404
                assert response.mimetype == 'application/json'

                response = t_client.post('/api{0}/not_valid'.format(route))
                assert response.status_code == 404
                assert response.mimetype == 'application/json'

                response = t_client.put('/api{0}/not_valid'.format(route))
                assert response.status_code == 404
                assert response.mimetype == 'application/json'

                response = t_client.delete('/api{0}/not_valid'.format(route))
                assert response.status_code == 404
                assert response.mimetype == 'application/json'

                response = t_client.patch('/api{0}/not_valid'.format(route))
                assert response.status_code == 404
                assert response.mimetype == 'application/json'

                response = t_client.head('/api{0}/not_valid'.format(route))
                assert response.status_code == 404
                assert response.mimetype == 'application/json'

                # All versions of the API should respond to their root
                response = t_client.get('/api{0}/'.format(route), follow_redirects=True)
                assert response.status_code < 400
                assert response.mimetype == 'application/json'

    @with_es(indices=[models.Account.__type__, models.Application.__type__, models.Journal.__type__, models.Article.__type__,
                      models.Lock.__type__, models.News.__type__])
    def test_02_applications_crud(self):
        # Full create / retrieve / update / delete cycle for applications.
        self.make_account()

        # add some data to the index with a Create
        user_data = ApplicationFixtureFactory.incoming_application()
        del user_data["admin"]["current_journal"]

        with self.app_test.test_client() as t_client:
            # log into the app as our user
            self.login(t_client, 'test', 'password123')

            # CREATE a new application
            response = t_client.post('/api/applications?api_key=' + self.api_key, data=json.dumps(user_data))
            assert response.status_code == 201, response.status_code  # 201 "Created"
            assert response.mimetype == 'application/json'

            # Check it gives back a newly created application, with an ID
            new_app_id = json.loads(response.data.decode("utf-8"))['id']
            new_app_loc = json.loads(response.data.decode("utf-8"))['location']
            assert new_app_id is not None
            assert new_app_id in new_app_loc

            # RETRIEVE the same application using the ID
            response = t_client.get('/api/applications/{0}?api_key={1}'.format(new_app_id, self.api_key))
            assert response.status_code == 200, response.status_code  # 200 "OK"
            assert response.mimetype == 'application/json'

            retrieved_application = json.loads(response.data.decode("utf-8"))
            new_app_title = retrieved_application['bibjson']['title']
            assert new_app_title == user_data['bibjson']['title']

            # UPDATE the title of the application
            updated_data = deepcopy(user_data)
            updated_data['bibjson']['title'] = 'This is a new title for this application'
            response = t_client.put('/api/applications/{0}?api_key={1}'.format(new_app_id, self.api_key), data=json.dumps(updated_data))
            assert response.status_code == 204, response.status_code  # 204 "No Content"
            assert response.mimetype == 'application/json'

            response = t_client.get('/api/applications/{0}?api_key={1}'.format(new_app_id, self.api_key))
            retrieved_application = json.loads(response.data.decode("utf-8"))
            new_app_title = retrieved_application['bibjson']['title']
            assert new_app_title == updated_data['bibjson']['title']
            assert new_app_title != user_data['bibjson']['title']

            # DELETE the application
            assert models.Suggestion.pull(new_app_id) is not None
            response = t_client.delete('/api/applications/{0}?api_key={1}'.format(new_app_id, self.api_key))
            assert response.status_code == 204  # 204 "No Content"
            assert response.mimetype == 'application/json'

            # Try to RETRIEVE the Application again - check it isn't there anymore
            response = t_client.get('/api/applications/{0}?api_key={1}'.format(new_app_id, self.api_key))
            assert response.status_code == 404
            assert response.mimetype == 'application/json'

            self.logout(t_client)

    @with_es(indices=[models.Account.__type__, models.Application.__type__, models.Journal.__type__, models.Article.__type__,
                      models.Lock.__type__],
             warm_mappings=[models.Article.__type__])
    def test_03_articles_crud(self):
        # Full create / retrieve / update / delete cycle for articles.
        self.make_account()

        # add some data to the index with a Create
        user_data = ArticleFixtureFactory.make_article_source()

        # Add a journal so we can assign articles to it
        journal = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))
        journal.set_owner(self.account.id)
        journal.save(blocking=True)

        with self.app_test.test_client() as t_client:
            # log into the app as our user
            self.login(t_client, 'test', 'password123')

            # CREATE a new article
            response = t_client.post('/api/articles?api_key=' + self.api_key, data=json.dumps(user_data))
            assert response.status_code == 201, response.status_code  # 201 "Created"
            assert response.mimetype == 'application/json'

            # Check it gives back a newly created article, with an ID
            new_ar_id = json.loads(response.data.decode("utf-8"))['id']
            new_ar_loc = json.loads(response.data.decode("utf-8"))['location']
            assert new_ar_id is not None
            assert new_ar_id in new_ar_loc

            # RETRIEVE the same article using the ID
            response = t_client.get('/api/articles/{0}?api_key={1}'.format(new_ar_id, self.api_key))
            assert response.status_code == 200  # 200 "OK"
            assert response.mimetype == 'application/json'

            retrieved_article = json.loads(response.data.decode("utf-8"))
            new_ar_title = retrieved_article['bibjson']['title']
            assert new_ar_title == user_data['bibjson']['title']

            # UPDATE the title of the article
            updated_data = deepcopy(user_data)
            updated_data['bibjson']['title'] = 'This is a new title for this article'
            response = t_client.put('/api/articles/{0}?api_key={1}'.format(new_ar_id, self.api_key), data=json.dumps(updated_data))
            assert response.status_code == 204  # 204 "No Content"
            assert response.mimetype == 'application/json'

            response = t_client.get('/api/articles/{0}?api_key={1}'.format(new_ar_id, self.api_key))
            retrieved_article = json.loads(response.data.decode("utf-8"))
            new_ar_title = retrieved_article['bibjson']['title']
            assert new_ar_title == updated_data['bibjson']['title']
            assert new_ar_title != user_data['bibjson']['title']

            # DELETE the article
            assert models.Article.pull(new_ar_id) is not None
            response = t_client.delete('/api/articles/{0}?api_key={1}'.format(new_ar_id, self.api_key))
            assert response.status_code == 204  # 204 "No Content"
            assert response.mimetype == 'application/json'

            # Try to RETRIEVE the article again - check it isn't there anymore
            # NOTE(review): this hits the /applications endpoint, not /articles;
            # it still proves a 404 but may not target the intended route - confirm.
            response = t_client.get('/api/applications/{0}?api_key={1}'.format(new_ar_id, self.api_key))
            assert response.status_code == 404
            assert response.mimetype == 'application/json'

    @with_es(indices=[models.Account.__type__, models.Journal.__type__, models.Article.__type__,
                      models.Lock.__type__],
             warm_mappings=[models.Article.__type__])
    def test_04_article_structure_exceptions(self):
        # Malformed payloads must produce JSON 400 responses with useful errors.
        self.make_account()

        # add some data to the index with a Create
        user_data = ArticleFixtureFactory.make_article_source()

        with self.app_test.test_client() as t_client:
            # log into the app as our user
            self.login(t_client, 'test', 'password123')

            # attempt to CREATE a new article with invalid JSON
            bad_data = json.dumps(user_data) + 'blarglrandomblah'
            response = t_client.post('/api/articles?api_key=' + self.api_key, data=bad_data)
            assert response.status_code == 400, response.status_code  # 400 "Bad Request"
            assert response.mimetype == 'application/json'
            assert 'Supplied data was not valid JSON' in response.json['error']

            # limit removed - https://github.com/DOAJ/doajPM/issues/2950 - should now succeed
            # attempt to CREATE a new article with too many keywords (exception propagates from DataObj)
            # too_many_kwds = deepcopy(user_data)
            # too_many_kwds['bibjson']['keywords'] = ['one', 'two', 'three', 'four', 'five', 'six', 'SEVEN']
            #
            # response = t_client.post('/api/articles?api_key=' + self.api_key, data=json.dumps(too_many_kwds))
            # assert response.status_code == 400  # 400 "Bad Request"
            # assert response.mimetype == 'application/json'
            # assert 'maximum of 6 keywords' in response.json['error']

            # attempt to CREATE an article with a missing required field (exception propagates from DataObj)
            missing_title = deepcopy(user_data)
            del missing_title['bibjson']['title']

            response = t_client.post('/api/articles?api_key=' + self.api_key, data=json.dumps(missing_title))
            assert response.status_code == 400  # 400 "Bad Request"
            assert response.mimetype == 'application/json'
            assert "Field 'title' is required but not present" in response.json['error']

    @staticmethod
    def login(app, username, password):
        # Authenticate via the regular login form using the given test client.
        return app.post('/account/login',
                        data=dict(username=username, password=password),
                        follow_redirects=True)

    @staticmethod
    def logout(app):
        # End the current session via the logout endpoint.
        return app.get('/account/logout', follow_redirects=True)

    def make_account(self):
        # Create and persist a publisher account with API access; stores the
        # account and its API key on the test instance for use by the tests.
        account = models.Account.make_account(email="test@test.com", username="test", name="Tester",
                                              roles=["publisher", "api"],
                                              associated_journal_ids=['abcdefghijk_journal'])
        account.set_password('password123')
        self.api_key = account.api_key
        self.account = account
        account.save(blocking=True)
| |
import statistics
from copy import deepcopy
from collections import defaultdict, namedtuple
from data_structs import VprSwitch, MuxEdgeTiming, DriverTiming, SinkTiming
from utils import yield_muxes, add_named_item
# =============================================================================
def linear_regression(xs, ys):
    """
    Computes simple linear regression coefficients.
    https://en.wikipedia.org/wiki/Simple_linear_regression

    Returns the ``(a, b)`` coefficients of the model ``f(x) = a * x + b``.

    :param xs: independent variable samples (non-empty sequence of numbers)
    :param ys: dependent variable samples, same length as ``xs``
    :returns: tuple ``(a, b)`` of floats

    When all ``xs`` are identical (including the single-sample case) the
    slope is undefined; previously this raised ``ZeroDivisionError``. We
    now return a flat line through the mean of ``ys`` (``a=0``), which is
    the natural degenerate fit for the delay models built on top of this.
    """
    x_mean = statistics.mean(xs)
    y_mean = statistics.mean(ys)

    num = sum((x - x_mean) * (y - y_mean) for x, y in zip(xs, ys))
    den = sum((x - x_mean) ** 2 for x in xs)

    # Degenerate case: zero variance in xs -> slope undefined, use flat line.
    if den == 0.0:
        return 0.0, y_mean

    a = num / den
    b = y_mean - a * x_mean

    return a, b
# =============================================================================
def create_vpr_switch(type, tdel, r, c):
    """
    Builds a VPR switch whose name encodes its timing parameters.

    The VPR switch parameters are:
    - type: Switch type. See VPR docs for the available types
    - tdel: Constant propagation delay [s]
    - r:    Internal resistance [ohm]
    - c:    Internal capacitance (active only when the switch is "on") [F]
    """
    # Encode tdel in ns and c in pF so the fixed-width fields stay readable.
    switch_name = "_".join([
        "sw",
        "T{:>08.6f}".format(tdel * 1e9),
        "R{:>08.6f}".format(r),
        "C{:>010.6f}".format(c * 1e12),
    ])

    # Only the internal capacitance is modeled; pin capacitances are zero.
    return VprSwitch(
        name=switch_name,
        type=type,
        t_del=tdel,
        r=r,
        c_in=0.0,
        c_out=0.0,
        c_int=c,
    )
def compute_switchbox_timing_model(switchbox, timing_data):
    """
    Processes switchbox timing data into per-driver RC timing models.

    The timing data is provided in a form of delays for each mux edge (path
    from its input pin to the output pin). The delay varies with number of
    active loads of the source.

    This data is used to compute driver resistances and load capacitances
    as well as constant propagation delays.

    The timing model assumes that each output of a mux has a certain resistance
    and constant propagation time. Then, every load has a capacitance which is
    connected when it is active. All capacitances are identical. The input
    timing data does not allow to distinguish between them. Additionally, each
    load can have a constant propagation delay.

    For multiplexers that are driven by switchbox inputs, fake drivers are
    assumed solely for the purpose of the timing model.

    :param switchbox: switchbox description with connections and input pins
    :param timing_data: nested dict timing_data[stage][switch][mux][pin] ->
        {load_count: [delays in ns]}
    :returns: tuple (driver_timing, sink_map)
    """
    # A helper struct holding the fitted model for one driver
    Timing = namedtuple("Timing", "driver_r driver_tdel sink_c sink_tdel")

    # Delay scaling factor
    FACTOR = 1.0
    # Error threshold (for reporting) in ns
    ERROR_THRESHOLD = 0.4 * 1e-9

    # Build a map of sinks for each driver
    # For internal drivers key = (stage_id, switch_id, mux_id)
    # For external drivers key = (stage_id, input_name)
    sink_map = defaultdict(lambda: [])

    # Internal driver -> sink edges come from explicit connections
    for connection in switchbox.connections:
        src = connection.src
        dst = connection.dst

        dst_key = (dst.stage_id, dst.switch_id, dst.mux_id, dst.pin_id)
        src_key = (src.stage_id, src.switch_id, src.mux_id)
        sink_map[src_key].append(dst_key)

    # External (input pin) drivers: one fake driver per (stage, input name)
    for pin in switchbox.inputs.values():
        for loc in pin.locs:
            dst_key = (loc.stage_id, loc.switch_id, loc.mux_id, loc.pin_id)
            src_key = (loc.stage_id, pin.name)
            sink_map[src_key].append(dst_key)

    # Compute timing model for each driver
    driver_timing = {}
    for driver, sinks in sink_map.items():

        # Collect timing data for each sink edge
        edge_timings = {}
        for stage_id, switch_id, mux_id, pin_id in sinks:
            # Try getting timing data. If not found then probably we are
            # computing timing for VCC or GND input.
            try:
                data = timing_data[stage_id][switch_id][mux_id][pin_id]
            except KeyError:
                continue

            # Sanity check. The number of load counts must be equal to the
            # number of sinks for the driver.
            assert len(data) == len(sinks)

            # Take the worst case (max), convert ns to seconds.
            data = {n: max(d) * 1e-9 for n, d in data.items()}

            # Store
            key = (stage_id, switch_id, mux_id, pin_id)
            edge_timings[key] = data

        # No timing data, probably it is a VCC or GND input
        if not len(edge_timings):
            continue

        # Compute linear regression for each sink data
        # NOTE(review): this indexes edge_timings[sink] for every sink; if only
        # some sinks had timing data above this would raise KeyError - it
        # appears the data is expected to be all-or-nothing per driver. Confirm.
        coeffs = {}
        for sink in sinks:
            xs = sorted(edge_timings[sink].keys())
            ys = [edge_timings[sink][x] for x in xs]
            a, b = linear_regression(xs, ys)

            # Cannot have a < 0 (decreasing relation). If such thing happens
            # force the regression line to be flat.
            if a < 0.0:
                print(
                    "WARNING: For '{} {}' the delay model slope is negative! (a={:.2e})"
                    .format(switchbox.type, sink, a)
                )
                a = 0.0

            # Cannot have any delay higher than the model. Check if all delays
            # lie below the regression line and if not then shift the line up
            # accordingly.
            for x, y in zip(xs, ys):
                t = a * x + b
                if y > t:
                    b += y - t

            coeffs[sink] = (a, b)

        # Assumed driver resistance [ohm]
        driver_r = 1.0

        # Driver Tdel: smallest intercept over all sinks (shared delay part)
        driver_tdel = min([cfs[1] for cfs in coeffs.values()])

        # Per-sink Tdel: the remainder of each intercept beyond the driver's
        sink_tdel = {s: cfs[1] - driver_tdel for s, cfs in coeffs.items()}

        # Compute sink capacitance. Since we have multiple edge timings that
        # should yield the same capacitance, compute one for each timing and
        # then choose the worst case (max).
        # NOTE(review): the formula subtracts sink_tdel (seconds) from
        # slope/resistance (farads); verify the intended units here.
        sink_cs = {
            s: (cfs[0] / (FACTOR * driver_r) - sink_tdel[s])
            for s, cfs in coeffs.items()
        }
        sink_c = max(sink_cs.values())

        # Sanity check (``sink`` here is the last value from the loop above)
        assert sink_c >= 0.0, (switchbox.type, sink, sink_c)

        # Compute error of the delay model
        for sink in sinks:

            # Compute for this sink
            error = {}
            for n, true_delay in edge_timings[sink].items():
                model_delay = driver_tdel + FACTOR * driver_r * sink_c * n + sink_tdel[
                    sink]
                error[n] = true_delay - model_delay

            max_error = max([abs(e) for e in error.values()])

            # Report the error
            if max_error > ERROR_THRESHOLD:
                print(
                    "WARNING: Error of the timing model of '{} {}' is too high:"
                    .format(switchbox.type, sink)
                )
                print("--------------------------------------------")
                print("| # loads | actual   | model    | error    |")
                print("|---------+----------+----------+----------|")
                for n in edge_timings[sink].keys():
                    print(
                        "| {:<8}| {:<9.3f}| {:<9.3f}| {:<9.3f}|".format(
                            n, 1e9 * edge_timings[sink][n],
                            1e9 * (edge_timings[sink][n] - error[n]),
                            1e9 * error[n]
                        )
                    )
                print("--------------------------------------------")
                print("")

        # Store the data
        driver_timing[driver] = Timing(
            driver_r=driver_r,
            driver_tdel=driver_tdel,
            sink_tdel={s: d
                       for s, d in sink_tdel.items()},
            sink_c=sink_c
        )

    return driver_timing, sink_map
def populate_switchbox_timing(
        switchbox, driver_timing, sink_map, vpr_switches
):
    """
    Populates the switchbox timing model by annotating its muxes with the timing
    data. Creates new VPR switches with required parameters or uses existing
    ones if already created.

    :param switchbox: switchbox whose mux pins get annotated
    :param driver_timing: per-driver Timing tuples from
        :func:`compute_switchbox_timing_model`
    :param sink_map: driver -> list of (stage, switch, mux, pin) sink keys
    :param vpr_switches: shared name->VprSwitch registry (mutated in place)
    """
    # Populate timing data to the switchbox
    for driver, timing in driver_timing.items():

        # One VPR switch models the driver; tdel and r come from the fit,
        # capacitance is carried on the sink side instead.
        driver_vpr_switch = create_vpr_switch(
            type="mux",
            tdel=timing.driver_tdel,
            r=timing.driver_r,
            c=0.0,
        )
        # Deduplicate by name - reuse an identical switch if already registered.
        driver_vpr_switch = add_named_item(
            vpr_switches, driver_vpr_switch, driver_vpr_switch.name
        )

        # Annotate all driver's edges
        for sink in sink_map[driver]:
            stage_id, switch_id, mux_id, pin_id = sink

            # Sink VPR switch: per-sink constant delay plus the shared load C.
            sink_vpr_switch = create_vpr_switch(
                type="mux",
                tdel=timing.sink_tdel[sink],
                r=0.0,
                c=timing.sink_c,
            )
            sink_vpr_switch = add_named_item(
                vpr_switches, sink_vpr_switch, sink_vpr_switch.name
            )

            # Locate the mux this edge terminates at
            stage = switchbox.stages[stage_id]
            switch = stage.switches[switch_id]
            mux = switch.muxes[mux_id]

            # Each mux pin may be annotated only once
            assert pin_id not in mux.timing
            # NOTE(review): SinkTiming receives the whole per-sink dict
            # (timing.sink_tdel) while the switch above used the scalar
            # timing.sink_tdel[sink] - confirm which one SinkTiming expects.
            mux.timing[pin_id] = MuxEdgeTiming(
                driver=DriverTiming(
                    tdel=timing.driver_tdel,
                    r=timing.driver_r,
                    vpr_switch=driver_vpr_switch.name
                ),
                sink=SinkTiming(
                    tdel=timing.sink_tdel,
                    c=timing.sink_c,
                    vpr_switch=sink_vpr_switch.name
                )
            )
def copy_switchbox_timing(src_switchbox, dst_switchbox):
    """
    Clones every mux's timing annotations from ``src_switchbox`` into the
    corresponding mux of ``dst_switchbox``.
    """
    # Walk the destination muxes and mirror each one from the source by id.
    for stage, switch, mux in yield_muxes(dst_switchbox):
        source_mux = (
            src_switchbox.stages[stage.id]
            .switches[switch.id]
            .muxes[mux.id]
        )
        # Deep copy so later edits on one switchbox don't leak into the other.
        mux.timing = deepcopy(source_mux.timing)
# =============================================================================
def add_vpr_switches_for_cell(cell_type, cell_timings):
    """
    Creates VPR switches for IOPATH delays read from SDF file(s) for the given
    cell type.

    Returns a dict mapping switch name to :class:`VprSwitch`.
    """
    vpr_switches = {}

    for timing_key, instances in cell_timings.items():
        # Only consider timing entries belonging to the requested cell type.
        if not timing_key.startswith(cell_type):
            continue

        for instance, inst_data in instances.items():
            for _, timing_entry in inst_data.items():
                # Only IOPATH entries become switches.
                if timing_entry["type"].lower() != "iopath":
                    continue

                # Name encodes cell type, instance and the pin-to-pin path.
                switch_name = "{}.{}.{}.{}".format(
                    cell_type, instance, timing_entry["from_pin"],
                    timing_entry["to_pin"]
                )

                # Use the average of the "slow" corner as the constant delay.
                vpr_switches[switch_name] = VprSwitch(
                    name=switch_name,
                    type="mux",
                    t_del=timing_entry["delay_paths"]["slow"]["avg"],
                    r=0.0,
                    c_in=0.0,
                    c_out=0.0,
                    c_int=0.0,
                )

    return vpr_switches
| |
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Context, Node, Template, TemplateSyntaxError
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR, TextNode)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from zhiliao.conf import settings
from zhiliao.core.fields import RichTextField
from zhiliao.core.forms import get_edit_form
from zhiliao.utils.cache import nevercache_token, cache_installed
from zhiliao.utils.html import decode_entities
from zhiliao.utils.importing import import_dotted_path
from zhiliao.utils.sites import current_site_id, has_site_permission
from zhiliao.utils.urls import admin_url
from zhiliao.utils.views import is_editable
from zhiliao import template
# Tag/filter registry for this module; every @register.* below attaches to it.
register = template.Library()
# Register the real django-compressor ``compress`` tag when available,
# otherwise a pass-through stand-in so templates work either way.
if "compressor" in settings.INSTALLED_APPS:
    @register.tag
    def compress(parser, token):
        """
        Shadows django-compressor's compress tag so it can be
        loaded from ``mezzanine_tags``, allowing us to provide
        a dummy version when django-compressor isn't installed.
        """
        from compressor.templatetags.compress import compress
        return compress(parser, token)
else:
    @register.to_end_tag
    def compress(parsed, context, token):
        """
        Dummy tag for fallback when django-compressor isn't installed.
        """
        # No compressor installed: emit the enclosed content unchanged.
        return parsed
# ``nevercache`` only needs its two-phase machinery when caching is active;
# without a cache it degrades to a pass-through tag.
if cache_installed():
    @register.tag
    def nevercache(parser, token):
        """
        Tag for two phased rendering. Converts enclosed template
        code and content into text, which gets rendered separately
        in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
        This is to bypass caching for the enclosed code and content.
        """
        text = []
        end_tag = "endnevercache"
        # How to re-serialize each token type back into template syntax.
        tag_mapping = {
            TOKEN_TEXT: ("", ""),
            TOKEN_VAR: ("{{", "}}"),
            TOKEN_BLOCK: ("{%", "%}"),
            TOKEN_COMMENT: ("{#", "#}"),
        }
        delimiter = nevercache_token()
        # Consume tokens up to the end tag, rebuilding their raw template
        # text so the middleware can render it later, outside the cache.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
                return TextNode(delimiter + "".join(text) + delimiter)
            start, end = tag_mapping[token.token_type]
            text.append("%s%s%s" % (start, token.contents, end))
        # Ran out of tokens without seeing the end tag - raise a syntax error.
        parser.unclosed_block_tag(end_tag)
else:
    @register.to_end_tag
    def nevercache(parsed, context, token):
        """
        Dummy fallback ``nevercache`` for when caching is not
        configured.
        """
        return parsed
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
    """
    Render the given form's fields using ``template`` (overridable per call).
    """
    # Expose the form to the fields template under a dedicated name.
    context["form_for_fields"] = form
    field_template = get_template(template)
    return field_template.render(Context(context))
@register.inclusion_tag("includes/form_errors.html", takes_context=True)
def errors_for(context, form):
    """
    Renders an alert if the form has any errors.
    The template itself decides whether to show anything.
    """
    context["form"] = form
    return context
@register.filter
def sort_by(items, attr):
    """
    General sort filter - sorts ``items`` by either attribute or key ``attr``.
    """
    def sort_key(entry):
        # Prefer attribute access; fall back to mapping/key access.
        try:
            return getattr(entry, attr)
        except AttributeError:
            try:
                return entry[attr]
            except TypeError:
                # Not subscriptable either - re-trigger the original
                # AttributeError, which is the more meaningful error here.
                getattr(entry, attr)

    return sorted(items, key=sort_key)
@register.filter
def is_installed(app_name):
    """
    Deprecated filter: ``True`` when ``app_name`` appears in the
    ``INSTALLED_APPS`` setting. Prefer the ``ifinstalled`` tag.
    """
    from warnings import warn
    warn("The is_installed filter is deprecated. Please use the tag "
         "{% ifinstalled appname %}{% endifinstalled %}")
    return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
    """
    Old-style ``if`` tag that renders contents if the given app is
    installed. The main use case is:

    {% ifinstalled app_name %}
    {% include "app_name/template.html" %}
    {% endifinstalled %}

    so we need to manually pull out all tokens if the app isn't
    installed, since if we used a normal ``if`` tag with a False arg,
    the include tag will still try and find the template to include.
    """
    try:
        tag, app = token.split_contents()
    except ValueError:
        raise TemplateSyntaxError("ifinstalled should be in the form: "
                                  "{% ifinstalled app_name %}"
                                  "{% endifinstalled %}")

    end_tag = "end" + tag
    # Tracks nesting depth of ifinstalled tags while scanning ahead.
    unmatched_end_tag = 1
    if app.strip("\"'") not in settings.INSTALLED_APPS:
        # App not installed: discard every token up to our matching end tag
        # so nothing inside (e.g. {% include %}) is ever parsed/rendered.
        while unmatched_end_tag:
            token = parser.tokens.pop(0)
            if token.token_type == TOKEN_BLOCK:
                block_name = token.contents.split()[0]
                if block_name == tag:
                    unmatched_end_tag += 1
                if block_name == end_tag:
                    unmatched_end_tag -= 1
        # Push the end tag back so the normal parse below can consume it.
        parser.tokens.insert(0, token)
    # At this point the token stream holds either the full contents (app
    # installed) or just the end tag (app missing, contents discarded).
    nodelist = parser.parse((end_tag,))
    parser.delete_first_token()

    class IfInstalledNode(Node):
        def render(self, context):
            return nodelist.render(context)

    return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
    """
    Sets the ``short_url`` attribute of the given model for share
    links in the template. Renders nothing.
    """
    # The tag's single argument names a context variable holding the object.
    var_name = token.split_contents()[1]
    instance = context[var_name]
    instance.set_short_url()
    return ""
@register.simple_tag
def gravatar_url(email, size=32):
    """
    Return the full Gravatar URL for ``email`` at ``size`` pixels.
    """
    # Gravatar hashes the lower-cased email address with MD5.
    email_hash = md5(email.lower().encode("utf-8")).hexdigest()
    return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % (email_hash, size)
@register.to_end_tag
def metablock(parsed):
    """
    Remove HTML tags, entities and superfluous characters from
    meta blocks.
    """
    # Collapse newlines/whitespace runs and tidy space-before-comma,
    # then strip markup and escape what's left for safe meta output.
    collapsed = parsed.replace("\n", "")
    collapsed = " ".join(collapsed.split()).replace(" ,", ",")
    return escape(strip_tags(decode_entities(collapsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
    """
    Include the pagination template and data for persisting querystring
    in pagination links. Can also contain a comma separated string of
    var names in the current querystring to exclude from the pagination
    links, via the ``exclude_vars`` arg.
    """
    params = context["request"].GET.copy()
    # Drop the requested vars plus the page var itself, so each link can
    # append its own page number cleanly.
    removals = [name for name in exclude_vars.split(",") if name]
    removals.append(page_var)
    for name in removals:
        if name in params:
            del params[name]
    return {
        "current_page": current_page,
        "querystring": params.urlencode(),
        "page_var": page_var,
    }
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
    """
    Includes the search form with a list of models to use as choices
    for filtering the search by. Models should be a string with models
    in the format ``app_label.model_name`` separated by spaces. The
    string ``all`` can also be used, in which case the models defined
    by the ``SEARCH_MODEL_CHOICES`` setting will be used.
    """
    # Resolve the tag argument into a list of "app_label.model_name" strings.
    if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
        names = []
    elif search_model_names == "all":
        names = list(settings.SEARCH_MODEL_CHOICES)
    else:
        names = search_model_names.split(" ")

    # Keep only names that resolve to actual models; label each with its
    # capitalized plural verbose name for display.
    choices = []
    for model_name in names:
        try:
            model = apps.get_model(*model_name.split(".", 1))
        except LookupError:
            continue
        label = model._meta.verbose_name_plural.capitalize()
        choices.append((label, model_name))

    context["search_model_choices"] = sorted(choices)
    return context
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
              top=.5, padding=False, padding_color="#fff"):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height are zero then original
    ratio is maintained. When ``upscale`` is False, images smaller than
    the given size will not be grown to fill that size. The given width
    and height thus act as maximum dimensions.

    On any failure (missing PIL, unreadable image, save error) the
    original ``image_url`` is returned unchanged.
    """
    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        return ""

    # Normalize the URL: decode, strip any querystring, and make it relative
    # to MEDIA_URL so it can be used as a storage path.
    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    # Encode all non-default options into the thumbnail filename so each
    # distinct combination gets its own cached file.
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if not upscale:
        thumb_name += "-no-upscale"
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)

    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        try:
            os.makedirs(thumb_dir)
        except OSError:
            # Another request may have created it concurrently.
            pass

    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image that was saved to a filesystem with utf-8 support,
        # but somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()

    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except:  # NOTE(review): bare except - narrow to Exception when touching this.
        # Invalid image format.
        return image_url

    image_info = image.info

    # Requested and source dimensions.
    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]

    if not upscale:
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)

    # Set dimensions: a zero width/height means "derive from aspect ratio".
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    if image.mode not in ("P", "L", "RGBA"):
        try:
            image = image.convert("RGBA")
        except:  # NOTE(review): bare except - narrow to Exception when touching this.
            return image_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)

    # Padding: letterbox the source onto a canvas of the target aspect ratio
    # instead of cropping it.
    if padding and to_width and to_height:
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container

    # Create the thumbnail: crop/fit centered on (left, top).
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is
        # absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(thumb_url, File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
context["has_site_permission"] = has_site_permission(user)
if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]:
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
try:
context["editable_obj"]
except KeyError:
context["editable_obj"] = context.get("page", None)
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filters(content):
    """
    Run WYSIWYG-edited content through every function named in the
    ``RICHTEXT_FILTERS`` setting, honouring the deprecated singular
    ``RICHTEXT_FILTER`` setting as a fallback.
    """
    names = settings.RICHTEXT_FILTERS
    if not names:
        try:
            names = [settings.RICHTEXT_FILTER]
        except AttributeError:
            pass
        else:
            from warnings import warn
            warn("The `RICHTEXT_FILTER` setting is deprecated in favor of "
                 "the new plural setting `RICHTEXT_FILTERS`.")
    for name in names:
        content = import_dotted_path(name)(content)
    return content
@register.filter
def richtext_filter(content):
    """
    Deprecated singular alias for ``richtext_filters``.
    """
    from warnings import warn
    message = ("The `richtext_filter` template tag is deprecated in favor of "
               "the new plural tag `richtext_filters`.")
    warn(message)
    return richtext_filters(content)
@register.to_end_tag
def editable(parsed, context, token):
    """
    Add the required HTML to the parsed content for in-line editing,
    such as the icon and edit form if the object is deemed to be
    editable - either it has an ``editable`` method which returns
    ``True``, or the logged in user has change permissions for the
    model.
    """
    def parse_field(field):
        # Resolve a dotted "var.attr[.attr...]" token from the context into
        # an (object, attribute_name) pair: the leading parts are traversed
        # (calling any callables along the way), the final part is the
        # attribute to edit.
        field = field.split(".")
        obj = context.get(field.pop(0), None)
        attr = field.pop()
        while field:
            obj = getattr(obj, field.pop(0))
            if callable(obj):
                # Allows {% editable page.get_content_model.content %}
                obj = obj()
        return obj, attr
    fields = [parse_field(f) for f in token.split_contents()[1:]]
    if fields:
        # Keep only fields that belong to the same object as the first one.
        fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
    if not parsed.strip():
        # No tag body supplied: render the raw field values instead.
        try:
            parsed = "".join([str(getattr(*field)) for field in fields])
        except AttributeError:
            pass
    if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
        obj = fields[0][0]
        if isinstance(obj, Model) and is_editable(obj, context["request"]):
            # Wrap the output in the editable form template.
            field_names = ",".join([f[1] for f in fields])
            context["editable_form"] = get_edit_form(obj, field_names)
            context["original"] = parsed
            t = get_template("includes/editable_form.html")
            return t.render(Context(context))
    return parsed
@register.simple_tag
def try_url(url_name):
    """
    Mimics Django's ``url`` template tag but fails silently, returning
    an empty string when the name cannot be reversed. Used for url
    names in admin templates as these won't resolve when admin tests
    are running.
    """
    from warnings import warn
    warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
    try:
        return reverse(url_name)
    except NoReverseMatch:
        return ""
def admin_app_list(request):
    """
    Adopted from ``django.contrib.admin.sites.AdminSite.index``.
    Returns a list of lists of models grouped and ordered according to
    ``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
    ``admin_dropdown_menu`` template tag as well as the ``app_list``
    dashboard widget.
    """
    app_dict = {}
    # Model or view --> (group index, group title, item index, item title).
    menu_order = {}
    for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
        group_title, items = group
        for (item_index, item) in enumerate(items):
            # Items may be plain names or (title, name) pairs.
            if isinstance(item, (tuple, list)):
                item_title, item = item
            else:
                item_title = None
            menu_order[item] = (group_index, group_title,
                                item_index, item_title)
    # Add all registered models, using group and title from menu order.
    for (model, model_admin) in admin.site._registry.items():
        opts = model._meta
        # A ModelAdmin may opt out of the menu via an in_menu() hook.
        in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
        if in_menu and request.user.has_module_perms(opts.app_label):
            perms = model_admin.get_model_perms(request)
            admin_url_name = ""
            if perms["change"]:
                admin_url_name = "changelist"
                change_url = admin_url(model, admin_url_name)
            else:
                change_url = None
            if perms["add"]:
                admin_url_name = "add"
                add_url = admin_url(model, admin_url_name)
            else:
                add_url = None
            # admin_url_name is only truthy when the user has at least one
            # of the change/add permissions.
            if admin_url_name:
                model_label = "%s.%s" % (opts.app_label, opts.object_name)
                try:
                    app_index, app_title, model_index, model_title = \
                        menu_order[model_label]
                except KeyError:
                    # Not mentioned in ADMIN_MENU_ORDER: fall back to the
                    # app's verbose name and unordered placement.
                    app_index = None
                    app_title = opts.app_config.verbose_name.title()
                    model_index = None
                    model_title = None
                else:
                    # Consume the entry so only non-model names remain below.
                    del menu_order[model_label]
                if not model_title:
                    model_title = capfirst(model._meta.verbose_name_plural)
                if app_title not in app_dict:
                    app_dict[app_title] = {
                        "index": app_index,
                        "name": app_title,
                        "models": [],
                    }
                app_dict[app_title]["models"].append({
                    "index": model_index,
                    "perms": model_admin.get_model_perms(request),
                    "name": model_title,
                    "admin_url": change_url,
                    "add_url": add_url
                })
    # Menu may also contain view or url pattern names given as (title, name).
    for (item_url, item) in menu_order.items():
        app_index, app_title, item_index, item_title = item
        try:
            item_url = reverse(item_url)
        except NoReverseMatch:
            continue
        if app_title not in app_dict:
            app_dict[app_title] = {
                "index": app_index,
                "name": app_title,
                "models": [],
            }
        app_dict[app_title]["models"].append({
            "index": item_index,
            "perms": {"custom": True},
            "name": item_title,
            "admin_url": item_url,
        })
    app_list = list(app_dict.values())
    # Ordered entries first (by index), unordered ones (index None) last.
    sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
    for app in app_list:
        app["models"].sort(key=sort)
    app_list.sort(key=sort)
    return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
user = context["request"].user
if user.is_staff:
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
if user.is_superuser:
sites = Site.objects.all()
else:
sites = user.sitepermissions.sites.all()
context["dropdown_menu_sites"] = list(sites)
context["dropdown_menu_selected_site_id"] = current_site_id()
return context
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context
@register.render_tag
def dashboard_column(context, token):
    """
    Takes an index for retrieving the sequence of template tags from
    ``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
    dashboard.
    """
    column_index = int(token.split_contents()[1])
    rendered = []
    for tag_path in settings.DASHBOARD_TAGS[column_index]:
        # Each entry is "library.tag_name"; load the library, run the tag.
        source = "{%% load %s %%}{%% %s %%}" % tuple(tag_path.split("."))
        rendered.append(Template(source).render(Context(context)))
    return "".join(rendered)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
    """
    Translates the current URL for the given language code, eg:
    {% translate_url de %}
    """
    try:
        request = context["request"]
    except KeyError:
        # No request in context (e.g. rendered outside a view).
        return ""
    view = resolve(request.path)
    current_language = translation.get_language()
    # Temporarily activate the target language so reverse() resolves the
    # translated URL pattern; the original language is restored below.
    translation.activate(language)
    try:
        url = reverse(view.func, args=view.args, kwargs=view.kwargs)
    except NoReverseMatch:
        try:
            # Fall back to the (possibly namespaced) URL name.
            url_name = (view.url_name if not view.namespace
                        else '%s:%s' % (view.namespace, view.url_name))
            url = reverse(url_name, args=view.args, kwargs=view.kwargs)
        except NoReverseMatch:
            # Last resort: assume it is an admin view.
            url_name = "admin:" + view.url_name
            url = reverse(url_name, args=view.args, kwargs=view.kwargs)
    translation.activate(current_language)
    # Preserve any query string from the original request.
    if context['request'].META["QUERY_STRING"]:
        url += "?" + context['request'].META["QUERY_STRING"]
    return url
| |
import numpy as np
from ndnn.lstm.lstm_loss import LogLoss, HingeLoss, HingeLossOutput
from ndnn.lstm.lstm_node import Attention
from ndnn.graph import Graph
from ndnn.init import Xavier, Zero
from ndnn.node import Concat, Sigmoid, Add, Dot, Tanh, Mul, Collect, Embed, SoftMax, MDEmbed, Average, ArgMax
class LSTMGraph(Graph):
    """
    Base computation graph holding the parameters of a single LSTM layer.

    Inputs and parameters are registered on the graph in a fixed order in
    ``__init__``; ``resetNum`` then records how many nodes are permanent so
    that ``reset()`` can discard the per-batch nodes added by subclasses'
    ``build_graph``.
    """
    def __init__(self, loss, update, dict_size, hidden_dim):
        super().__init__(loss, update)
        self.dict_size = dict_size
        self.hidden_dim = hidden_dim
        # Initial hidden/cell state inputs, filled per batch.
        self.h0 = self.input()
        self.c0 = self.input()
        # Token embedding table.
        self.embed = self.param_of([dict_size, hidden_dim], Xavier())
        # Gate weights/biases; each gate consumes [h, x] (2 * hidden_dim wide).
        self.wf = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.bf = self.param_of([hidden_dim], Zero())
        self.wi = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.bi = self.param_of([hidden_dim], Zero())
        self.wc = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.bc = self.param_of([hidden_dim], Zero())
        self.wo = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.bo = self.param_of([hidden_dim], Zero())
        # Projection from hidden state to vocabulary scores.
        self.v2c = self.param_of([hidden_dim, dict_size], Xavier())
        # Permanent node count; reset() truncates the node list back to this.
        self.resetNum = len(self.nodes)
    def lstm_cell(self, x, h, c):
        """One LSTM step: returns (h_next, c_next) for input x and state (h, c)."""
        concat = Concat(h, x)
        # Forget Gate
        f_gate = Sigmoid(Add(Dot(concat, self.wf), self.bf))
        # Input Gate
        i_gate = Sigmoid(Add(Dot(concat, self.wi), self.bi))
        # Temp Vars
        c_temp = Tanh(Add(Dot(concat, self.wc), self.bc))
        o_temp = Sigmoid(Add(Dot(concat, self.wo), self.bo))
        # Output
        c_next = Add(Mul(f_gate, c), Mul(i_gate, c_temp))
        h_next = Mul(o_temp, Tanh(c_next))
        return h_next, c_next
class LogGraph(LSTMGraph):
    """Language-model graph trained with a softmax/log loss."""
    def __init__(self, update, dict_size, hidden_dim):
        super().__init__(LogLoss(), update, dict_size, hidden_dim)
    def build_graph(self, batch):
        """Unroll the LSTM over batch.data, predicting each next token."""
        self.reset()
        # Build Computation Graph according to length
        bsize, length = batch.data.shape
        self.h0.value = np.zeros([bsize, self.hidden_dim])
        self.c0.value = np.zeros([bsize, self.hidden_dim])
        h = self.h0
        c = self.c0
        outputs = []
        for idx in range(length - 1):
            in_i = self.input()
            in_i.value = batch.data[:, idx]  # Get value from batch
            x = Embed(in_i, self.embed)
            h, c = self.lstm_cell(x, h, c)
            out_i = SoftMax(Dot(h, self.v2c))
            outputs.append(out_i)
        self.output(Collect(outputs))
        # Targets are the input tokens shifted by one position.
        self.expect(batch.data[:, 1:])
class HingeGraph(LSTMGraph):
    """
    Language-model graph trained with a hinge loss over sampled negatives.

    ``num_neg_sample`` negative token ids are drawn uniformly (with
    replacement) per batch; ``-1`` means score against the whole
    vocabulary. When ``sep_embed`` is true a separate embedding table is
    used for scoring samples, otherwise the input embedding is shared.
    """
    def __init__(self, update, dict_size, hidden_dim, num_neg_sample, sep_embed):
        super().__init__(HingeLoss(), update, dict_size, hidden_dim)
        self.num_neg_sample = num_neg_sample
        self.neg_sample = self.input()
        if sep_embed:
            self.sample_embed = self.param_of([dict_size, hidden_dim], Xavier())
        else:
            self.sample_embed = self.embed
        # Re-record the permanent node count so the nodes added above
        # survive reset().
        self.resetNum = len(self.nodes)
    def build_graph(self, batch):
        """Unroll the LSTM over batch.data; collect hidden states for hinge scoring."""
        self.reset()
        # Build Computation Graph according to length
        bsize, length = batch.data.shape
        if self.num_neg_sample == -1:
            # Score against the entire vocabulary.
            neg_sample_idx = np.arange(self.dict_size)
        else:
            # Vectorized uniform sampling with replacement (replaces a
            # Python-level comprehension of per-element randint calls).
            neg_sample_idx = np.random.randint(
                low=0, high=self.dict_size, size=self.num_neg_sample)
        self.neg_sample.value = np.int32(neg_sample_idx)
        self.h0.value = np.zeros([bsize, self.hidden_dim])
        self.c0.value = np.zeros([bsize, self.hidden_dim])
        h = self.h0
        c = self.c0
        outputs = []
        for idx in range(length - 1):
            in_i = self.input()
            in_i.value = batch.data[:, idx]  # Get value from batch
            x = Embed(in_i, self.embed)
            h, c = self.lstm_cell(x, h, c)
            outputs.append(h)
        self.output(HingeLossOutput(Collect(outputs), self.sample_embed,
                                    self.neg_sample))
        self.expect(batch.data[:, 1:])
class LSTMEncodeGraph(Graph):
    """
    Sequence-to-sequence graph: an encoder LSTM feeding a decoder LSTM.

    Encoder parameters are prefixed ``e``; decoder parameters ``d``.
    Parameters are registered in a fixed order and ``resetNum`` snapshots
    the permanent node count used by ``reset()``.
    """
    def __init__(self, loss, update, dict_size, hidden_dim):
        super().__init__(loss, update)
        self.dict_size = dict_size
        self.hidden_dim = hidden_dim
        # Initial encoder state inputs, filled per batch.
        self.h0 = self.input()
        self.c0 = self.input()
        # Encoder parameters.
        self.eembed = self.param_of([dict_size, hidden_dim], Xavier())
        self.ewf = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.ebf = self.param_of([hidden_dim], Zero())
        self.ewi = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.ebi = self.param_of([hidden_dim], Zero())
        self.ewc = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.ebc = self.param_of([hidden_dim], Zero())
        self.ewo = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.ebo = self.param_of([hidden_dim], Zero())
        self.ev2c = self.param_of([hidden_dim, dict_size], Xavier())
        # Decoder parameters.
        self.dembed = self.param_of([dict_size, hidden_dim], Xavier())
        self.dwf = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbf = self.param_of([hidden_dim], Zero())
        self.dwi = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbi = self.param_of([hidden_dim], Zero())
        self.dwc = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbc = self.param_of([hidden_dim], Zero())
        self.dwo = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbo = self.param_of([hidden_dim], Zero())
        self.dv2c = self.param_of([hidden_dim, dict_size], Xavier())
        # Permanent node count; reset() truncates back to this.
        self.resetNum = len(self.nodes)
    def enc_lstm_cell(self, x, h, c):
        """One encoder LSTM step; returns (h_next, c_next)."""
        concat = Concat(h, x)
        # Forget Gate
        f_gate = Sigmoid(Add(Dot(concat, self.ewf), self.ebf))
        # Input Gate
        i_gate = Sigmoid(Add(Dot(concat, self.ewi), self.ebi))
        # Temp Vars
        c_temp = Tanh(Add(Dot(concat, self.ewc), self.ebc))
        o_temp = Sigmoid(Add(Dot(concat, self.ewo), self.ebo))
        # Output
        c_next = Add(Mul(f_gate, c), Mul(i_gate, c_temp))
        h_next = Mul(o_temp, Tanh(c_next))
        return h_next, c_next
    def dec_lstm_cell(self, x, h, c):
        """One decoder LSTM step; returns (h_next, c_next)."""
        concat = Concat(h, x)
        # Forget Gate
        f_gate = Sigmoid(Add(Dot(concat, self.dwf), self.dbf))
        # Input Gate
        i_gate = Sigmoid(Add(Dot(concat, self.dwi), self.dbi))
        # Temp Vars
        c_temp = Tanh(Add(Dot(concat, self.dwc), self.dbc))
        o_temp = Sigmoid(Add(Dot(concat, self.dwo), self.dbo))
        # Output
        c_next = Add(Mul(f_gate, c), Mul(i_gate, c_temp))
        h_next = Mul(o_temp, Tanh(c_next))
        return h_next, c_next
    def build_graph(self, batch):
        """Encode batch.data[0]; decode batch.data[1] with teacher forcing."""
        enc_data = batch.data[0]
        dec_data = batch.data[1]
        self.reset()
        bsize, enc_length = enc_data.shape
        dec_length = dec_data.shape[1]
        outputs = []
        # Build Encode Graph
        self.h0.value = np.zeros([bsize, self.hidden_dim])
        self.c0.value = np.zeros([bsize, self.hidden_dim])
        h = self.h0
        c = self.c0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, idx]  # Get value from batch
            x = Embed(in_i, self.eembed)
            h, c = self.enc_lstm_cell(x, h, c)
            # out_i = SoftMax(Dot(h, graph.ev2c))
            # outputs.append(out_i)
        self.encoded_h = h
        self.encoded_c = c
        # Build Decode Graph: decoder continues from the final encoder state.
        for idx in range(dec_length - 1):
            in_i = self.input()
            in_i.value = dec_data[:, idx]
            x = Embed(in_i, self.dembed)
            h, c = self.dec_lstm_cell(x, h, c)
            out_i = SoftMax(Dot(h, self.dv2c))
            outputs.append(out_i)
        self.output(Collect(outputs))
        self.expect(dec_data[:, 1:])
    def encode_result(self):
        """Return the final encoder cell state value."""
        # return np.concatenate((self.encoded_h.value, self.encoded_c.value), axis=1)
        return self.encoded_c.value
class LSTMDecodeGraph(LSTMEncodeGraph):
    """Inference variant: encode, then greedily decode ``predict_len`` tokens."""
    def __init__(self, loss, dict_size, hidden_dim, predict_len):
        # No updater: this graph is used for prediction only.
        super().__init__(loss, None, dict_size, hidden_dim)
        self.predict_len = predict_len
    def build_graph(self, batch):
        """Encode batch.data, then feed ArgMax predictions back as decoder input."""
        enc_data = batch.data
        self.reset()
        bsize, enc_length = enc_data.shape
        outputs = []
        # Build Encode Graph
        self.h0.value = np.zeros([bsize, self.hidden_dim])
        self.c0.value = np.zeros([bsize, self.hidden_dim])
        h = self.h0
        c = self.c0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, idx]  # Get value from batch
            x = Embed(in_i, self.eembed)
            h, c = self.enc_lstm_cell(x, h, c)
            # out_i = SoftMax(Dot(h, graph.ev2c))
            # outputs.append(out_i)
        self.encoded_h = h
        self.encoded_c = c
        # Build Decode Graph, starting from token id 0 as the seed input.
        decode_in = self.input()
        decode_in.value = np.zeros([bsize])
        decode_embed = Embed(decode_in, self.dembed)
        x = decode_embed
        for idx in range(self.predict_len):
            h, c = self.dec_lstm_cell(x, h, c)
            out_i = ArgMax(SoftMax(Dot(h, self.dv2c)))
            outputs.append(out_i)
            # Previous prediction becomes the next decoder input.
            x = Embed(out_i, self.dembed)
        self.output(Collect(outputs))
        # Dummy expectation; real targets are unknown at prediction time.
        self.expect(np.zeros([bsize, self.predict_len]))
class BiLSTMEncodeGraph(Graph):
    """
    Seq2seq graph with a bidirectional encoder and a unidirectional decoder.

    Forward-encoder parameters are prefixed ``fe``, backward-encoder ``be``,
    decoder ``d``. Each encoder direction uses ``hidden_dim / 2`` units so
    the concatenated state matches the decoder's ``hidden_dim``.
    """
    def __init__(self, loss, update, dict_size, hidden_dim):
        super().__init__(loss, update)
        half_dim = int(hidden_dim / 2)
        self.dict_size = dict_size
        self.hidden_dim = hidden_dim
        self.half_dim = half_dim
        # Forward-encoder initial states and parameters.
        self.feh0 = self.input()
        self.fec0 = self.input()
        self.feembed = self.param_of([dict_size, half_dim], Xavier())
        self.fewf = self.param_of([2 * half_dim, half_dim], Xavier())
        self.febf = self.param_of([half_dim], Zero())
        self.fewi = self.param_of([2 * half_dim, half_dim], Xavier())
        self.febi = self.param_of([half_dim], Zero())
        self.fewc = self.param_of([2 * half_dim, half_dim], Xavier())
        self.febc = self.param_of([half_dim], Zero())
        self.fewo = self.param_of([2 * half_dim, half_dim], Xavier())
        self.febo = self.param_of([half_dim], Zero())
        self.fev2c = self.param_of([half_dim, dict_size], Xavier())
        # Backward-encoder initial states and parameters.
        self.beh0 = self.input()
        self.bec0 = self.input()
        self.beembed = self.param_of([dict_size, half_dim], Xavier())
        self.bewf = self.param_of([2 * half_dim, half_dim], Xavier())
        self.bebf = self.param_of([half_dim], Zero())
        self.bewi = self.param_of([2 * half_dim, half_dim], Xavier())
        self.bebi = self.param_of([half_dim], Zero())
        self.bewc = self.param_of([2 * half_dim, half_dim], Xavier())
        self.bebc = self.param_of([half_dim], Zero())
        self.bewo = self.param_of([2 * half_dim, half_dim], Xavier())
        self.bebo = self.param_of([half_dim], Zero())
        self.bev2c = self.param_of([half_dim, dict_size], Xavier())
        # Decoder parameters (full hidden_dim).
        self.dembed = self.param_of([dict_size, hidden_dim], Xavier())
        self.dwf = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbf = self.param_of([hidden_dim], Zero())
        self.dwi = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbi = self.param_of([hidden_dim], Zero())
        self.dwc = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbc = self.param_of([hidden_dim], Zero())
        self.dwo = self.param_of([2 * hidden_dim, hidden_dim], Xavier())
        self.dbo = self.param_of([hidden_dim], Zero())
        self.dv2c = self.param_of([hidden_dim, dict_size], Xavier())
        # Permanent node count; reset() truncates back to this.
        self.resetNum = len(self.nodes)
    def fenc_lstm_cell(self, x, h, c):
        """One forward-encoder LSTM step; returns (h_next, c_next)."""
        concat = Concat(h, x)
        # Forget Gate
        f_gate = Sigmoid(Add(Dot(concat, self.fewf), self.febf))
        # Input Gate
        i_gate = Sigmoid(Add(Dot(concat, self.fewi), self.febi))
        # Temp Vars
        c_temp = Tanh(Add(Dot(concat, self.fewc), self.febc))
        o_temp = Sigmoid(Add(Dot(concat, self.fewo), self.febo))
        # Output
        c_next = Add(Mul(f_gate, c), Mul(i_gate, c_temp))
        h_next = Mul(o_temp, Tanh(c_next))
        return h_next, c_next
    def benc_lstm_cell(self, x, h, c):
        """One backward-encoder LSTM step; returns (h_next, c_next)."""
        concat = Concat(h, x)
        # Forget Gate
        f_gate = Sigmoid(Add(Dot(concat, self.bewf), self.bebf))
        # Input Gate
        i_gate = Sigmoid(Add(Dot(concat, self.bewi), self.bebi))
        # Temp Vars
        c_temp = Tanh(Add(Dot(concat, self.bewc), self.bebc))
        o_temp = Sigmoid(Add(Dot(concat, self.bewo), self.bebo))
        # Output
        c_next = Add(Mul(f_gate, c), Mul(i_gate, c_temp))
        h_next = Mul(o_temp, Tanh(c_next))
        return h_next, c_next
    def dec_lstm_cell(self, x, h, c):
        """One decoder LSTM step; returns (h_next, c_next)."""
        concat = Concat(h, x)
        # Forget Gate
        f_gate = Sigmoid(Add(Dot(concat, self.dwf), self.dbf))
        # Input Gate
        i_gate = Sigmoid(Add(Dot(concat, self.dwi), self.dbi))
        # Temp Vars
        c_temp = Tanh(Add(Dot(concat, self.dwc), self.dbc))
        o_temp = Sigmoid(Add(Dot(concat, self.dwo), self.dbo))
        # Output
        c_next = Add(Mul(f_gate, c), Mul(i_gate, c_temp))
        h_next = Mul(o_temp, Tanh(c_next))
        return h_next, c_next
    def build_graph(self, batch):
        """Encode batch.data[0] in both directions; decode batch.data[1]."""
        enc_data = batch.data[0]
        dec_data = batch.data[1]
        self.reset()
        bsize, enc_length = enc_data.shape
        dec_length = dec_data.shape[1]
        outputs = []
        # Build Fwd Encode Graph
        self.feh0.value = np.zeros([bsize, self.half_dim])
        self.fec0.value = np.zeros([bsize, self.half_dim])
        fh = self.feh0
        fc = self.fec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, idx]  # Get value from batch
            x = Embed(in_i, self.feembed)
            fh, fc = self.fenc_lstm_cell(x, fh, fc)
        # Build Bwd Encode Graph (sequence consumed right-to-left).
        self.beh0.value = np.zeros([bsize, self.half_dim])
        self.bec0.value = np.zeros([bsize, self.half_dim])
        bh = self.beh0
        bc = self.bec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, enc_length - 1 - idx]  # Get value from batch
            x = Embed(in_i, self.beembed)
            bh, bc = self.benc_lstm_cell(x, bh, bc)
        # Build Decode Graph from the concatenated final states.
        h = Concat(fh, bh)
        c = Concat(fc, bc)
        self.encoded_h = h
        self.encoded_c = c
        for idx in range(dec_length - 1):
            in_i = self.input()
            in_i.value = dec_data[:, idx]
            x = Embed(in_i, self.dembed)
            h, c = self.dec_lstm_cell(x, h, c)
            out_i = SoftMax(Dot(h, self.dv2c))
            outputs.append(out_i)
        self.output(Collect(outputs))
        self.expect(dec_data[:, 1:])
    def encode_result(self):
        """Return the concatenated encoder cell-state value."""
        # return np.concatenate((self.encoded_h.value, self.encoded_c.value), axis=1)
        return self.encoded_c.value
class BiLSTMDecodeGraph(BiLSTMEncodeGraph):
    """Inference variant of the bi-LSTM graph: greedy decode of predict_len tokens."""
    def __init__(self, loss, dict_size, hidden_dim, predict_len):
        # No updater: prediction only.
        super().__init__(loss, None, dict_size, hidden_dim)
        self.predict_len = predict_len
    def build_graph(self, batch):
        """Bidirectionally encode batch.data, then feed back ArgMax predictions."""
        enc_data = batch.data
        self.reset()
        bsize, enc_length = enc_data.shape
        outputs = []
        # Build Fwd Encode Graph
        self.feh0.value = np.zeros([bsize, self.half_dim])
        self.fec0.value = np.zeros([bsize, self.half_dim])
        fh = self.feh0
        fc = self.fec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, idx]  # Get value from batch
            x = Embed(in_i, self.feembed)
            fh, fc = self.fenc_lstm_cell(x, fh, fc)
        # Build Bwd Encode Graph (sequence consumed right-to-left).
        self.beh0.value = np.zeros([bsize, self.half_dim])
        self.bec0.value = np.zeros([bsize, self.half_dim])
        bh = self.beh0
        bc = self.bec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, enc_length - 1 - idx]  # Get value from batch
            x = Embed(in_i, self.beembed)
            bh, bc = self.benc_lstm_cell(x, bh, bc)
        # Build Decode Graph
        h = Concat(fh, bh)
        c = Concat(fc, bc)
        self.encoded_h = h
        self.encoded_c = c
        # Build Decode Graph, seeded with token id 0.
        decode_in = self.input()
        decode_in.value = np.zeros([bsize])
        decode_embed = Embed(decode_in, self.dembed)
        x = decode_embed
        for idx in range(self.predict_len):
            h, c = self.dec_lstm_cell(x, h, c)
            out_i = ArgMax(SoftMax(Dot(h, self.dv2c)))
            outputs.append(out_i)
            # Previous prediction becomes the next decoder input.
            x = Embed(out_i, self.dembed)
        self.output(Collect(outputs))
        # Dummy expectation; real targets are unknown at prediction time.
        self.expect(np.zeros([bsize, self.predict_len]))
class AttentionGraph(BiLSTMEncodeGraph):
    """
    Bi-LSTM encoder/decoder where the decoder's cell state is produced each
    step by attending over all (fwd+bwd concatenated) encoder hidden states.
    """
    def __init__(self, loss, update, dict_size, hidden_dim):
        super().__init__(loss, update, dict_size, hidden_dim)
    def build_graph(self, batch):
        """Encode batch.data[0] in both directions; decode batch.data[1] with attention."""
        enc_data = batch.data[0]
        dec_data = batch.data[1]
        self.reset()
        bsize, enc_length = enc_data.shape
        dec_length = dec_data.shape[1]
        outputs = []
        # Per-position encoder hidden states, kept for the attention step.
        fwd_encode_result = [None] * enc_length
        bwd_encode_result = [None] * enc_length
        # Build Fwd Encode Graph
        self.feh0.value = np.zeros([bsize, self.half_dim])
        self.fec0.value = np.zeros([bsize, self.half_dim])
        fh = self.feh0
        fc = self.fec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, idx]  # Get value from batch
            x = Embed(in_i, self.feembed)
            fh, fc = self.fenc_lstm_cell(x, fh, fc)
            fwd_encode_result[idx] = fh
        # Build Bwd Encode Graph (sequence consumed right-to-left).
        self.beh0.value = np.zeros([bsize, self.half_dim])
        self.bec0.value = np.zeros([bsize, self.half_dim])
        bh = self.beh0
        bc = self.bec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, enc_length - 1 - idx]  # Get value from batch
            x = Embed(in_i, self.beembed)
            bh, bc = self.benc_lstm_cell(x, bh, bc)
            bwd_encode_result[enc_length - 1 - idx] = bh
        # Build Decode Graph
        h = Concat(fh, bh)
        # c = Concat(fc, bc)
        self.encoded_h = h
        # self.encoded_c = c
        # Concatenate fwd/bwd states per position for the attention input.
        encode_result = []
        for idx in range(enc_length):
            encode_result.append(Concat(fwd_encode_result[idx], bwd_encode_result[idx]))
        encode_state = Collect(encode_result)
        for idx in range(dec_length - 1):
            in_i = self.input()
            in_i.value = dec_data[:, idx]
            x = Embed(in_i, self.dembed)
            # Cell state is recomputed each step by attending over the encoder.
            c = Attention(encode_state, h)
            h, c = self.dec_lstm_cell(x, h, c)
            out_i = SoftMax(Dot(h, self.dv2c))
            outputs.append(out_i)
        self.output(Collect(outputs))
        self.expect(dec_data[:, 1:])
class AttentionDecodeGraph(BiLSTMEncodeGraph):
    """Inference variant of the attention graph: greedy decode of predict_len tokens."""
    def __init__(self, loss, dict_size, hidden_dim, predict_len):
        # No updater: prediction only.
        super().__init__(loss, None, dict_size, hidden_dim)
        self.predict_len = predict_len
    def build_graph(self, batch):
        """Bidirectionally encode batch.data; decode with attention, feeding back ArgMax."""
        enc_data = batch.data
        self.reset()
        bsize, enc_length = enc_data.shape
        outputs = []
        # Per-position encoder hidden states, kept for the attention step.
        fwd_encode_result = [None] * enc_length
        bwd_encode_result = [None] * enc_length
        # Build Fwd Encode Graph
        self.feh0.value = np.zeros([bsize, self.half_dim])
        self.fec0.value = np.zeros([bsize, self.half_dim])
        fh = self.feh0
        fc = self.fec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, idx]  # Get value from batch
            x = Embed(in_i, self.feembed)
            fh, fc = self.fenc_lstm_cell(x, fh, fc)
            fwd_encode_result[idx] = fh
        # Build Bwd Encode Graph (sequence consumed right-to-left).
        self.beh0.value = np.zeros([bsize, self.half_dim])
        self.bec0.value = np.zeros([bsize, self.half_dim])
        bh = self.beh0
        bc = self.bec0
        for idx in range(enc_length):
            in_i = self.input()
            in_i.value = enc_data[:, enc_length - 1 - idx]  # Get value from batch
            x = Embed(in_i, self.beembed)
            bh, bc = self.benc_lstm_cell(x, bh, bc)
            bwd_encode_result[enc_length - 1 - idx] = bh
        # Build Decode Graph
        h = Concat(fh, bh)
        # c = Concat(fc, bc)
        self.encoded_h = h
        # self.encoded_c = c
        # Concatenate fwd/bwd states per position for the attention input.
        encode_result = []
        for idx in range(enc_length):
            encode_result.append(Concat(fwd_encode_result[idx], bwd_encode_result[idx]))
        encode_state = Collect(encode_result)
        # Seed decoding with token id 0.
        init = self.input()
        init.value = np.zeros([bsize])
        x = Embed(init, self.dembed)
        for idx in range(self.predict_len):
            # Cell state is recomputed each step by attending over the encoder.
            c = Attention(encode_state, h)
            h, c = self.dec_lstm_cell(x, h, c)
            out_i = ArgMax(SoftMax(Dot(h, self.dv2c)))
            outputs.append(out_i)
            # Previous prediction becomes the next decoder input.
            x = Embed(out_i, self.dembed)
        self.output(Collect(outputs))
        # Dummy expectation; real targets are unknown at prediction time.
        self.expect(np.zeros([bsize, self.predict_len]))
class BowEncodeGraph(LSTMGraph):
    """
    Encoder that averages token embeddings (bag of words) and uses the
    average as both the initial hidden and cell state of the LSTM decoder.
    """
    def __init__(self, loss, update, dict_size, hidden_dim):
        super().__init__(loss, update, dict_size, hidden_dim)
    def bow_encode(self, data):
        """Average the embeddings of *data*'s tokens; returns (h, c) (same node)."""
        # The inherited zero-state inputs are unused in this graph.
        self.h0.value = None
        self.c0.value = None
        h0c0 = self.input()
        h0c0.value = data
        emb = MDEmbed(h0c0, self.embed)
        avg = Average(emb)
        self.encoded_h = avg
        self.encoded_c = avg
        return avg, avg
    def build_graph(self, batch):
        """Encode batch.data[0] as a bag of words; decode batch.data[1]."""
        data = batch.data[1]
        self.reset()
        # Build Computation Graph according to length
        bsize, length = data.shape
        h, c = self.bow_encode(batch.data[0])
        outputs = []
        for idx in range(length - 1):
            in_i = self.input()
            in_i.value = data[:, idx]  # Get value from batch
            x = Embed(in_i, self.embed)
            h, c = self.lstm_cell(x, h, c)
            out_i = SoftMax(Dot(h, self.v2c))
            outputs.append(out_i)
        self.output(Collect(outputs))
        self.expect(data[:, 1:])
    def encode_result(self):
        """Return the averaged-embedding encoding value."""
        # return np.concatenate((self.encoded_h.value, self.encoded_c.value), axis=1)
        return self.encoded_c.value
| |
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import textwrap
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.metadata.services import base
from cloudbaseinit.metadata.services import opennebulaservice
from cloudbaseinit.models import network as network_model
from cloudbaseinit.tests import testutils
MAC = "54:EE:75:19:F4:61" # output must be upper
ADDRESS = "192.168.122.101"
NETMASK = "255.255.255.0"
BROADCAST = "192.168.122.255"
GATEWAY = "192.168.122.1"
DNSNS = "8.8.8.8 8.8.4.4"
PUBLIC_KEY = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDJitRvac/fr1jWrZw"
"j6mgDxlrBN2xAtKExtm5cPkexQUuxTma61ZijP/aWiQg9Q93baSwsBi"
"IPM0SO1ro0szv84cC9GmSHWVOnCVWGY3nojplqL5VfV9NDLlmSceFc5"
"cLpUTMnoUiXt8QXfDm50gh/5vGgJJXuMz1BKwfJH232ajM5r9xUfKDZ"
"jzhTVooPlWoJJmn6xJDOJG7cjszZpv2N+Xzq7GRo6fa7ygTASOnES5t"
"vbcqM8432P6Bg7Hkr2bOjQF11RyJofFcOvECKfbX4jQ9JGzbocNnepw"
"2YlV08UYa/8aoFgzyo/FiR6cc/jQupbFIe92xBSNiMEioeZ26nTac6C"
"oRQXEKrb95Ntg7ysYUqjKQFWJdx6AW7hlE8mMjA6nRqvswXsp1atNdU"
"DylyVxlvUHo9rEHEs3GKjkO4tr8KKR0N+oWVAO8S2RfSaD/wFcTokW8"
"DeLz2Fnc04pyqOnCjdG7b7HqQVUupuxJNc3EUxZEjbUYiDi22MWF0Oa"
"vM7e0xZHMOsdhUPUUnBWngETuOTVSo26bRfzOcUzjwyv2n5PS9rvzYz"
"ooXIqcK4BdJ8TLh4OQZwV862PjiafxxWC1L90Tou+BkMTFvwoiWDGMc"
"ckPkjvg6p9E2viSFgaKMq2S6EjbzsHG/9BilLBDHLOcbhUU6E76dqGk"
"4jl0ZzQ== jfontan@zooloo")
HOST_NAME = "ws2012r2"
USER_DATA = """#cloud-config
bootcmd:
- ifdown -a
runcmd:
- curl http://10.0.1.1:8999/I_am_alive
write_files:
- encoding: b64
content: RG9lcyBpdCB3b3JrPwo=
owner: root:root
path: /etc/test_file
permissions: '\''0644'\''
packages:
- ruby2.0"""
CONTEXT = """
DISK_ID='1'
ETH0_DNS='{dnsns}'
ETH0_GATEWAY='{gateway}'
ETH0_IP='{address}'
ETH0_MASK='{netmask}'
ETH0_MAC='{mac}'
ETH0_SEARCH_DOMAIN='example.org'
NETWORK='YES'
SET_HOSTNAME='{host_name}'
SSH_PUBLIC_KEY='{public_key}'
TARGET='hda'
USER_DATA='{user_data}'
""".format(
dnsns=DNSNS,
gateway=GATEWAY,
address=ADDRESS,
netmask=NETMASK,
mac=MAC.lower(), # warning: mac is in lowercase
host_name=HOST_NAME,
public_key=PUBLIC_KEY,
user_data=USER_DATA
)
CONTEXT2 = ("""
ETH1_DNS='{dnsns}'
ETH1_GATEWAY='{gateway}'
ETH1_IP='{address}'
ETH1_MASK='{netmask}'
ETH1_MAC='{mac}'
""" + CONTEXT).format(
dnsns=DNSNS,
gateway=GATEWAY,
address=ADDRESS,
netmask=NETMASK,
mac=MAC.lower()
)
OPEN = mock.mock_open(read_data=CONTEXT.encode())
def _get_nic_details(iid=0):
    """Build the NetworkDetails tuple expected for NIC number *iid*."""
    nic_name = opennebulaservice.IF_FORMAT.format(iid=iid)
    return network_model.NetworkDetails(
        nic_name,
        MAC,
        ADDRESS,
        None,
        NETMASK,
        None,
        BROADCAST,
        GATEWAY,
        None,
        DNSNS.split(" ")
    )
class _TestOpenNebulaService(unittest.TestCase):
    """Base test case providing a fresh OpenNebulaService per test."""
    def setUp(self):
        self._service = opennebulaservice.OpenNebulaService()
@mock.patch("six.moves.builtins.open", new=OPEN)
class TestOpenNebulaService(_TestOpenNebulaService):
@classmethod
def setUpClass(cls):
OPEN.return_value.read.return_value = CONTEXT.encode()
def _test_parse_shell_variables(self, crlf=False, comment=False):
content = textwrap.dedent("""
VAR1='1'
var2='abcdef'
VAR_VAR3='aaa.bbb.123.ccc'
# suddenly, a comment
VaR4='aaa
bbb
x -- c
d: e
'
ivar=10
""")
if comment:
content += "# A simple comment\n"
if crlf:
content = content.replace("\n", "\r\n")
pairs = self._service._parse_shell_variables(content.encode())
_pairs = {
"VAR1": b"1",
"var2": b"abcdef",
"VAR_VAR3": b"aaa.bbb.123.ccc",
"VaR4": b"aaa\nbbb\nx -- c\nd: e\n",
"ivar": 10
}
if crlf:
for key, value in _pairs.items():
if isinstance(value, bytes):
_pairs[key] = value.replace(b"\n", b"\r\n")
self.assertEqual(_pairs, pairs)
def test_parse_shell_variables(self):
# 1. no CRLF, no comment
# 2. CRLF, no comment
# 3. no CRLF, comment
for crlf, comment in (
(False, False),
(True, False),
(False, True)):
self._test_parse_shell_variables(crlf=crlf, comment=comment)
def test_calculate_netmask(self):
address, gateway, _netmask = (
"192.168.0.10",
"192.168.1.1",
"255.255.0.0"
)
netmask = self._service._calculate_netmask(address, gateway)
self.assertEqual(_netmask, netmask)
def test_compute_broadcast(self):
address, netmask, _broadcast = (
"192.168.0.10",
"255.255.0.0",
"192.168.255.255"
)
broadcast = self._service._compute_broadcast(address, netmask)
self.assertEqual(_broadcast, broadcast)
@mock.patch("cloudbaseinit.metadata.services"
".opennebulaservice.os.path")
@mock.patch("cloudbaseinit.metadata.services"
".opennebulaservice.osutils_factory")
def _test_load(self, mock_osutils_factory, mock_os_path, level=0):
# fake data
fakes = {
"drive": "mount_point",
"label": "fake_label",
"context_path": "fake_path",
"context_data": "fake_data"
}
# mocking part
mock_osutils = mock.MagicMock()
mock_osutils_factory.get_os_utils.return_value = mock_osutils
mock_osutils.get_cdrom_drives.return_value = []
# custom mocking according to level of testing
if level > 1:
mock_osutils.get_cdrom_drives.return_value = [fakes["drive"]]
mock_osutils.get_volume_label.return_value = fakes["label"]
mock_os_path.join.return_value = fakes["context_path"]
mock_os_path.isfile.return_value = False
if level > 2:
mock_os_path.isfile.return_value = True
# run the method being tested
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'opennebulaservice'):
ret = self._service.load()
# check calls
if level > 0:
mock_osutils_factory.get_os_utils.assert_called_once_with()
mock_osutils.get_cdrom_drives.assert_called_once_with()
if level > 1:
(mock_osutils.get_volume_label
.assert_called_once_with(fakes["drive"]))
mock_os_path.join.assert_called_once_with(
"mount_point", opennebulaservice.CONTEXT_FILE)
mock_os_path.isfile.assert_called_once_with("fake_path")
# check response and members
if level in (1, 2):
self.assertFalse(ret)
elif level == 3:
self.assertTrue(ret)
self.assertEqual(fakes["context_path"],
self._service._context_path)
    def test_load_no_drives(self):
        # level=1: no cdrom drives at all -> load() returns False.
        self._test_load(level=1)
    def test_load_no_relevant_drive(self):
        # level=2: drives exist, but none holds the context file.
        self._test_load(level=2)
    def test_load_relevant_drive(self):
        # level=3: a drive with the OpenNebula context file is found.
        self._test_load(level=3)
    def test_parse_context(self):
        """Parsing fails without a context path, succeeds once set."""
        with self.assertRaises(base.NotExistingMetadataException):
            self._service._parse_context()
        self._service._context_path = "path"
        self._service._parse_context()
        # NOTE(review): the builtin `open` must be patched by the test
        # fixture for this assertion to work — confirm against setUp.
        open.assert_called_with("path", "rb")
        self.assertTrue(self._service._dict_content)
    def test_get_data(self):
        """Unknown keys raise; a known address variable is returned."""
        self._service._context_path = "path"
        self._service._parse_context()
        with self.assertRaises(base.NotExistingMetadataException):
            self._service._get_data("smt")
        # First address variable name for NIC 0, e.g. formatted with iid=0.
        var = opennebulaservice.ADDRESS[0].format(iid=0)
        ret = self._service._get_data(var).decode()
        self.assertEqual(ADDRESS, ret)
class TestLoadedOpenNebulaService(_TestOpenNebulaService):
    """Tests that run against a service pre-loaded with a fake context."""
    def setUp(self):
        super(TestLoadedOpenNebulaService, self).setUp()
        self.load_context()
    def load_context(self, context=CONTEXT):
        # Feed the raw shell-style context into the service and cache the
        # parsed variable dictionary, as load() would normally do.
        self._service._raw_content = context.encode()
        vardict = self._service._parse_shell_variables(
            self._service._raw_content
        )
        self._service._dict_content = vardict
    def test_get_cache_data(self):
        # None of the names exist -> error; once a valid name is appended
        # the first matching value is returned.
        names = ["smt"]
        with self.assertRaises(base.NotExistingMetadataException):
            self._service._get_cache_data(names)
        names.append(opennebulaservice.ADDRESS[0].format(iid=0))
        ret = self._service._get_cache_data(names).decode()
        self.assertEqual(ADDRESS, ret)
    def test_get_instance_id(self):
        self.assertEqual(
            opennebulaservice.INSTANCE_ID,
            self._service.get_instance_id()
        )
    def test_get_host_name(self):
        self.assertEqual(
            HOST_NAME,
            self._service.get_host_name()
        )
    def test_get_user_data(self):
        self.assertEqual(
            USER_DATA.encode(),
            self._service.get_user_data()
        )
    def test_get_public_keys(self):
        self.assertEqual(
            [PUBLIC_KEY],
            self._service.get_public_keys()
        )
    def _test_get_network_details(self, netmask=True):
        # With netmask=False the ETH0_MASK variable is stripped from the
        # context, forcing the service to predict the netmask itself.
        if not netmask:
            context = re.sub(r"ETH0_MASK='(\d+\.){3}\d+'", "", CONTEXT)
            self.load_context(context=context)
        details = _get_nic_details()
        self.assertEqual(
            [details],
            self._service.get_network_details()
        )
    def test_get_network_details(self):
        self._test_get_network_details(netmask=True)
    def test_get_network_details_predict(self):
        self._test_get_network_details(netmask=False)
    def test_multiple_nics(self):
        # CONTEXT2 describes two NICs; both must be reported in order.
        self.load_context(context=CONTEXT2)
        nic0 = _get_nic_details(iid=0)
        nic1 = _get_nic_details(iid=1)
        network_details = [nic0, nic1]
        self.assertEqual(
            network_details,
            self._service.get_network_details()
        )
    @mock.patch("cloudbaseinit.metadata.services"
                ".opennebulaservice.OpenNebulaService._get_cache_data")
    def test_get_network_details_exception(self, mock_get_cache):
        # When cache lookups start raising, the NIC is skipped and an
        # empty list is returned instead of propagating the error.
        mock_mac = mock_address = mock.MagicMock()
        mock_mac.upper.return_value = None
        mock_address.side_effect = None
        exc = base.NotExistingMetadataException
        mock_get_cache.side_effect = [mock_mac, mock_address, exc, exc]
        result_details = self._service.get_network_details()
        self.assertEqual(result_details, [])
| |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from tempest.cmd import verify_tempest_config
from tempest import config
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_config
class TestGetAPIVersions(base.TestCase):
    """Tests for stripping the version suffix from endpoint URLs."""
    def _assert_unversioned(self, base_url, expected):
        # Shared check: the helper must drop the trailing /vX/ segment.
        self.assertEqual(
            expected,
            verify_tempest_config._get_unversioned_endpoint(base_url))
    def test_url_grab_versioned_nova_nossl(self):
        self._assert_unversioned('http://127.0.0.1:8774/v2/',
                                 'http://127.0.0.1:8774')
    def test_url_grab_versioned_nova_ssl(self):
        self._assert_unversioned('https://127.0.0.1:8774/v3/',
                                 'https://127.0.0.1:8774')
class TestDiscovery(base.TestCase):
    """Tests for API-version and extension discovery in verify_tempest_config.

    All HTTP traffic is stubbed through httplib2 mocks, so every test runs
    entirely offline against canned JSON version documents.
    """
    def setUp(self):
        super(TestDiscovery, self).setUp()
        # Use the fake tempest configuration so no real config file is read.
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
    def test_get_keystone_api_versions(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        # Keystone wraps its version list inside a 'values' key.
        fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
        self.assertIn('v2.0', versions)
        self.assertIn('v3.0', versions)
    def test_get_cinder_api_versions(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
        self.assertIn('v1.0', versions)
        self.assertIn('v2.0', versions)
    def test_get_nova_versions(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
        self.assertIn('v2.0', versions)
        self.assertIn('v3.0', versions)
    def test_verify_api_versions(self):
        # Each known service must be dispatched to its own verify helper.
        api_services = ['cinder', 'glance', 'keystone']
        fake_os = mock.MagicMock()
        for svc in api_services:
            m = 'verify_%s_api_versions' % svc
            with mock.patch.object(verify_tempest_config, m) as verify_mock:
                verify_tempest_config.verify_api_versions(fake_os, svc, True)
                verify_mock.assert_called_once_with(fake_os, True)
    def test_verify_api_versions_not_implemented(self):
        # An unknown service name must not trigger any verify helper.
        api_services = ['cinder', 'glance', 'keystone']
        fake_os = mock.MagicMock()
        for svc in api_services:
            m = 'verify_%s_api_versions' % svc
            with mock.patch.object(verify_tempest_config, m) as verify_mock:
                verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
                self.assertFalse(verify_mock.called)
    def test_verify_keystone_api_versions_no_v3(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v3',
                                           'identity_feature_enabled',
                                           False, True)
    def test_verify_keystone_api_versions_no_v2(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v2',
                                           'identity_feature_enabled',
                                           False, True)
    def test_verify_cinder_api_versions_no_v2(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': [{'id': 'v1.0'}]}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
                                           False, True)
    def test_verify_cinder_api_versions_no_v1(self):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': [{'id': 'v2.0'}]}
        fake_resp = json.dumps(fake_resp)
        self.useFixture(mockpatch.Patch('httplib2.Http.request',
                                        return_value=(None, fake_resp)))
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
                                           False, True)
    def test_verify_glance_version_no_v2_with_v1_1(self):
        # Glance exposes versions via its image client, not over HTTP here.
        def fake_get_versions():
            return (['v1.1'])
        fake_os = mock.MagicMock()
        fake_os.image_client.get_versions = fake_get_versions
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_glance_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
                                           False, True)
    def test_verify_glance_version_no_v2_with_v1_0(self):
        def fake_get_versions():
            return (['v1.0'])
        fake_os = mock.MagicMock()
        fake_os.image_client.get_versions = fake_get_versions
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_glance_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
                                           False, True)
    def test_verify_glance_version_no_v1(self):
        def fake_get_versions():
            return (['v2.0'])
        fake_os = mock.MagicMock()
        fake_os.image_client.get_versions = fake_get_versions
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_glance_api_versions(fake_os, True)
        print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
                                           False, True)
    def test_verify_extensions_neutron(self):
        # With an explicit extension list, every configured and discovered
        # alias gets a True/False verdict in the result map.
        def fake_list_extensions():
            return {'extensions': [{'alias': 'fake1'},
                                   {'alias': 'fake2'},
                                   {'alias': 'not_fake'}]}
        fake_os = mock.MagicMock()
        fake_os.network_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['fake1', 'fake2', 'fake3'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'neutron', {})
        self.assertIn('neutron', results)
        self.assertIn('fake1', results['neutron'])
        self.assertTrue(results['neutron']['fake1'])
        self.assertIn('fake2', results['neutron'])
        self.assertTrue(results['neutron']['fake2'])
        self.assertIn('fake3', results['neutron'])
        self.assertFalse(results['neutron']['fake3'])
        self.assertIn('not_fake', results['neutron'])
        self.assertFalse(results['neutron']['not_fake'])
    def test_verify_extensions_neutron_all(self):
        # With 'all' configured, the discovered aliases are listed verbatim.
        def fake_list_extensions():
            return {'extensions': [{'alias': 'fake1'},
                                   {'alias': 'fake2'},
                                   {'alias': 'not_fake'}]}
        fake_os = mock.MagicMock()
        fake_os.network_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['all'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'neutron', {})
        self.assertIn('neutron', results)
        self.assertIn('extensions', results['neutron'])
        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
                         sorted(results['neutron']['extensions']))
    def test_verify_extensions_cinder(self):
        def fake_list_extensions():
            return {'extensions': [{'alias': 'fake1'},
                                   {'alias': 'fake2'},
                                   {'alias': 'not_fake'}]}
        fake_os = mock.MagicMock()
        fake_os.volumes_extension_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['fake1', 'fake2', 'fake3'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'cinder', {})
        self.assertIn('cinder', results)
        self.assertIn('fake1', results['cinder'])
        self.assertTrue(results['cinder']['fake1'])
        self.assertIn('fake2', results['cinder'])
        self.assertTrue(results['cinder']['fake2'])
        self.assertIn('fake3', results['cinder'])
        self.assertFalse(results['cinder']['fake3'])
        self.assertIn('not_fake', results['cinder'])
        self.assertFalse(results['cinder']['not_fake'])
    def test_verify_extensions_cinder_all(self):
        def fake_list_extensions():
            return {'extensions': [{'alias': 'fake1'},
                                   {'alias': 'fake2'},
                                   {'alias': 'not_fake'}]}
        fake_os = mock.MagicMock()
        fake_os.volumes_extension_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['all'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'cinder', {})
        self.assertIn('cinder', results)
        self.assertIn('extensions', results['cinder'])
        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
                         sorted(results['cinder']['extensions']))
    def test_verify_extensions_nova(self):
        # Nova returns a plain list of extension dicts.
        def fake_list_extensions():
            return ([{'alias': 'fake1'}, {'alias': 'fake2'},
                     {'alias': 'not_fake'}])
        fake_os = mock.MagicMock()
        fake_os.extensions_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['fake1', 'fake2', 'fake3'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'nova', {})
        self.assertIn('nova', results)
        self.assertIn('fake1', results['nova'])
        self.assertTrue(results['nova']['fake1'])
        self.assertIn('fake2', results['nova'])
        self.assertTrue(results['nova']['fake2'])
        self.assertIn('fake3', results['nova'])
        self.assertFalse(results['nova']['fake3'])
        self.assertIn('not_fake', results['nova'])
        self.assertFalse(results['nova']['not_fake'])
    def test_verify_extensions_nova_all(self):
        def fake_list_extensions():
            return ({'extensions': [{'alias': 'fake1'},
                                    {'alias': 'fake2'},
                                    {'alias': 'not_fake'}]})
        fake_os = mock.MagicMock()
        fake_os.extensions_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['all'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'nova', {})
        self.assertIn('nova', results)
        self.assertIn('extensions', results['nova'])
        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
                         sorted(results['nova']['extensions']))
    def test_verify_extensions_swift(self):
        # Swift capabilities come back as a (resp, body-dict) tuple; the
        # 'swift' core entry is ignored by verify_extensions.
        def fake_list_extensions():
            return (None, {'fake1': 'metadata',
                           'fake2': 'metadata',
                           'not_fake': 'metadata',
                           'swift': 'metadata'})
        fake_os = mock.MagicMock()
        fake_os.account_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['fake1', 'fake2', 'fake3'])))
        results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
        self.assertIn('swift', results)
        self.assertIn('fake1', results['swift'])
        self.assertTrue(results['swift']['fake1'])
        self.assertIn('fake2', results['swift'])
        self.assertTrue(results['swift']['fake2'])
        self.assertIn('fake3', results['swift'])
        self.assertFalse(results['swift']['fake3'])
        self.assertIn('not_fake', results['swift'])
        self.assertFalse(results['swift']['not_fake'])
    def test_verify_extensions_swift_all(self):
        def fake_list_extensions():
            return (None, {'fake1': 'metadata',
                           'fake2': 'metadata',
                           'not_fake': 'metadata',
                           'swift': 'metadata'})
        fake_os = mock.MagicMock()
        fake_os.account_client.list_extensions = fake_list_extensions
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=(['all'])))
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          'swift', {})
        self.assertIn('swift', results)
        self.assertIn('extensions', results['swift'])
        self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
                         sorted(results['swift']['extensions']))
| |
"""
Statistical functions and tests, following scipy.stats.
Some differences
- We don't handle missing values at all
"""
# This is lightly adapted from scipy.stats 0.19
# https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py
# The original copyright notice follows:
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
import math
from collections import namedtuple
import numpy as np
import dask.array as da
from dask import delayed
from dask.array.ufunc import wrap_elemwise
from dask.utils import derived_from
try:
import scipy.stats
except ImportError as e:
raise ImportError("`dask.array.stats` requires `scipy` to be installed.") from e
from scipy import special
from scipy.stats import distributions
# copied from https://github.com/scipy/scipy/blob/v1.8.0/scipy/stats/_stats_py.py since
# these are all private after v1.8.0
# Result containers mirroring the (statistic, pvalue) tuples scipy returns.
F_onewayResult = namedtuple("F_onewayResult", ("statistic", "pvalue"))
KurtosistestResult = namedtuple("KurtosistestResult", ("statistic", "pvalue"))
NormaltestResult = namedtuple("NormaltestResult", ("statistic", "pvalue"))
Power_divergenceResult = namedtuple("Power_divergenceResult", ("statistic", "pvalue"))
SkewtestResult = namedtuple("SkewtestResult", ("statistic", "pvalue"))
Ttest_1sampResult = namedtuple("Ttest_1sampResult", ("statistic", "pvalue"))
Ttest_indResult = namedtuple("Ttest_indResult", ("statistic", "pvalue"))
Ttest_relResult = namedtuple("Ttest_relResult", ("statistic", "pvalue"))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
    "pearson": 1,
    "log-likelihood": 0,
    "freeman-tukey": -0.5,
    "mod-log-likelihood": -1,
    "neyman": -2,
    "cressie-read": 2 / 3,
}
# Public API of this module.
__all__ = [
    "ttest_ind",
    "ttest_1samp",
    "ttest_rel",
    "chisquare",
    "power_divergence",
    "skew",
    "skewtest",
    "kurtosis",
    "kurtosistest",
    "normaltest",
    "f_oneway",
    "moment",
]
@derived_from(scipy.stats)
def ttest_ind(a, b, axis=0, equal_var=True):
    """Two-sample t-test on the means of *a* and *b* along *axis*."""
    # Unbiased sample variances (dask versions of the numpy calls
    # scipy uses).
    var_a = da.var(a, axis, ddof=1)
    var_b = da.var(b, axis, ddof=1)
    size_a = a.shape[axis]
    size_b = b.shape[axis]
    if equal_var:
        df, denom = _equal_var_ttest_denom(var_a, size_a, var_b, size_b)
    else:
        df, denom = _unequal_var_ttest_denom(var_a, size_a, var_b, size_b)
    stat, pvalue = _ttest_ind_from_stats(
        da.mean(a, axis), da.mean(b, axis), denom, df
    )
    return delayed(Ttest_indResult, nout=2)(stat, pvalue)
@derived_from(scipy.stats)
def ttest_1samp(a, popmean, axis=0, nan_policy="propagate"):
    """One-sample t-test that the mean of *a* along *axis* equals *popmean*."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    sample_size = a.shape[axis]
    dof = sample_size - 1
    mean_diff = da.mean(a, axis) - popmean
    # Standard error of the mean from the unbiased variance estimate.
    std_err = da.sqrt(da.var(a, axis, ddof=1) / float(sample_size))
    with np.errstate(divide="ignore", invalid="ignore"):
        stat = da.divide(mean_diff, std_err)
    stat, pvalue = _ttest_finish(dof, stat)
    return delayed(Ttest_1sampResult, nout=2)(stat, pvalue)
@derived_from(scipy.stats)
def ttest_rel(a, b, axis=0, nan_policy="propagate"):
    """Paired t-test on two related samples along *axis*."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    sample_size = a.shape[axis]
    dof = float(sample_size - 1)
    # Work on the elementwise differences, promoted to float64.
    diffs = (a - b).astype(np.float64)
    variance = da.var(diffs, axis, ddof=1)
    mean_diff = da.mean(diffs, axis)
    std_err = da.sqrt(variance / float(sample_size))
    with np.errstate(divide="ignore", invalid="ignore"):
        stat = da.divide(mean_diff, std_err)
    stat, pvalue = _ttest_finish(dof, stat)
    return delayed(Ttest_relResult, nout=2)(stat, pvalue)
@derived_from(scipy.stats)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    # Chi-squared is the Pearson special case of the power divergence test.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_="pearson")
@derived_from(scipy.stats)
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """Cressie-Read power divergence goodness-of-fit statistic and p-value."""
    if isinstance(lambda_, str):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError(
                f"invalid string for lambda_: {lambda_!r}. "
                f"Valid strings are {names}"
            )
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        # Default is Pearson's chi-squared.
        lambda_ = 1
    if f_exp is not None:
        # f_exp = np.atleast_1d(np.asanyarray(f_exp))
        pass
    else:
        # Default expected frequencies: the mean of the observations.
        f_exp = f_obs.mean(axis=axis, keepdims=True)
    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic. We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp) ** 2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * _xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * _xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp) ** lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)
    stat = terms.sum(axis=axis)
    num_obs = _count(terms, axis=axis)
    # ddof = asarray(ddof)
    # p-value from the chi-squared survival function, evaluated lazily.
    p = delayed(distributions.chi2.sf)(stat, num_obs - 1 - ddof)
    return delayed(Power_divergenceResult, nout=2)(stat, p)
@derived_from(scipy.stats)
def skew(a, axis=0, bias=True, nan_policy="propagate"):
    """Sample skewness (third standardized moment) of *a* along *axis*."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    n = a.shape[axis]  # noqa; for bias
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    zero = m2 == 0
    # Where the variance is zero the skewness is defined as 0.
    vals = da.where(~zero, m3 / m2**1.5, 0.0)
    # vals = da.where(~zero, (m2, m3),
    #                 lambda m2, m3: m3 / m2**1.5,
    #                 0.)
    if not bias:
        # Need a version of np.place
        raise NotImplementedError("bias=False is not implemented.")
    if vals.ndim == 0:
        # TODO: scalar, min is a workaround
        return vals.min()
    return vals
@derived_from(scipy.stats)
def skewtest(a, axis=0, nan_policy="propagate"):
    """Test whether the skewness of *a* differs from that of a normal."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    b2 = skew(a, axis)
    n = float(a.shape[axis])
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n)
        )
    # Transformation of the sample skewness to an approximately
    # standard-normal statistic (same formulas as scipy's skewtest).
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (
        3.0
        * (n**2 + 27 * n - 70)
        * (n + 1)
        * (n + 3)
        / ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))
    )
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # Avoid log(0) below when the skewness is exactly zero.
    y = np.where(y == 0, 1, y)
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha) ** 2 + 1))
    # Two-sided p-value from the standard normal survival function.
    return delayed(SkewtestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))
@derived_from(scipy.stats)
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy="propagate"):
    """Kurtosis of *a* along *axis* (Fisher's definition subtracts 3)."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    n = a.shape[axis]  # noqa; for bias
    m2 = moment(a, 2, axis)
    m4 = moment(a, 4, axis)
    zero = m2 == 0
    # Suppress divide warnings; zero-variance positions are forced to 0.
    olderr = np.seterr(all="ignore")
    try:
        vals = da.where(zero, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        # need a version of np.place
        raise NotImplementedError("bias=False is not implemented.")
    if fisher:
        # Fisher's definition: normal distribution has kurtosis 0.
        return vals - 3
    else:
        if vals.ndim == 0:
            # TODO: scalar, min is a workaround
            return vals.min()
        return vals
@derived_from(scipy.stats)
def kurtosistest(a, axis=0, nan_policy="propagate"):
    """Test whether the kurtosis of *a* matches that of a normal.

    The [1]_ equation references follow scipy's implementation, from which
    this code is adapted.
    """
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    n = float(a.shape[axis])
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0 * (n - 1) / (n + 1)
    varb2 = (
        24.0 * n * (n - 2) * (n - 3) / ((n + 1) * (n + 1.0) * (n + 3) * (n + 5))
    )  # [1]_ Eq. 1
    x = (b2 - E) / np.sqrt(varb2)  # [1]_ Eq. 4
    # [1]_ Eq. 2:
    sqrtbeta1 = (
        6.0
        * (n * n - 5 * n + 2)
        / ((n + 7) * (n + 9))
        * np.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))
    )
    # [1]_ Eq. 3:
    A = 6.0 + 8.0 / sqrtbeta1 * (2.0 / sqrtbeta1 + np.sqrt(1 + 4.0 / (sqrtbeta1**2)))
    term1 = 1 - 2 / (9.0 * A)
    denom = 1 + x * np.sqrt(2 / (A - 4.0))
    # Sentinel 99 marks negative denominators; those positions are zeroed
    # out of Z below.
    denom = np.where(denom < 0, 99, denom)
    term2 = np.where(denom < 0, term1, np.power((1 - 2.0 / A) / denom, 1 / 3.0))
    Z = (term1 - term2) / np.sqrt(2 / (9.0 * A))  # [1]_ Eq. 5
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        Z = Z[()]
    # zprob uses upper tail, so Z needs to be positive
    return delayed(KurtosistestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))
@derived_from(scipy.stats)
def normaltest(a, axis=0, nan_policy="propagate"):
    """Omnibus normality test combining the skew and kurtosis statistics."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    skew_stat, _ = skewtest(a, axis)
    kurt_stat, _ = kurtosistest(a, axis)
    # Sum of the two squared z-scores is chi-squared with 2 dof.
    k2 = skew_stat * skew_stat + kurt_stat * kurt_stat
    pvalue = delayed(distributions.chi2.sf)(k2, 2)
    return delayed(NormaltestResult, nout=2)(k2, pvalue)
@derived_from(scipy.stats)
def f_oneway(*args):
    """One-way ANOVA: each positional argument is one group's sample."""
    # args = [np.asarray(arg, dtype=float) for arg in args]
    # ANOVA on N groups, each in its own array
    num_groups = len(args)
    alldata = da.concatenate(args)
    bign = len(alldata)
    # Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
    # to a shift in location, and centering all data around zero vastly
    # improves numerical stability.
    offset = alldata.mean()
    alldata -= offset
    sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
    ssbn = 0
    for a in args:
        ssbn += _square_of_sums(a - offset) / float(len(a))
    # Naming: variables ending in bn/b are for "between treatments", wn/w are
    # for "within treatments"
    ssbn -= _square_of_sums(alldata) / float(bign)
    sswn = sstot - ssbn
    dfbn = num_groups - 1
    dfwn = bign - num_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = _fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
    return delayed(F_onewayResult, nout=2)(f, prob)
@derived_from(scipy.stats)
def moment(a, moment=1, axis=0, nan_policy="propagate"):
    """Central moment of order *moment* along *axis* (delegates to dask)."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    return da.moment(a, moment, axis=axis)
# -------
# Helpers
# -------
# Don't really want to do all of scipy.special (or do we?)
# Elementwise dask wrappers around the scipy.special ufuncs used above.
_xlogy = wrap_elemwise(special.xlogy, source=special)
_fdtrc = wrap_elemwise(special.fdtrc, source=special)
def _equal_var_ttest_denom(v1, n1, v2, n2):
    """Pooled-variance t-test denominator and degrees of freedom."""
    df = n1 + n2 - 2.0
    # Pooled variance, weighted by each sample's degrees of freedom.
    pooled = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
    denom = da.sqrt(pooled * (1.0 / n1 + 1.0 / n2))
    return df, denom
def _unequal_var_ttest_denom(v1, n1, v2, n2):
    """Welch (unequal-variance) t-test denominator and degrees of freedom."""
    sem1 = v1 / n1
    sem2 = v2 / n2
    with np.errstate(divide="ignore", invalid="ignore"):
        # Welch-Satterthwaite approximation of the degrees of freedom.
        df = (sem1 + sem2) ** 2 / (sem1**2 / (n1 - 1) + sem2**2 / (n2 - 1))
    # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
    # Hence it doesn't matter what df is as long as it's not NaN.
    df = da.where(da.isnan(df), 1, df)
    return df, da.sqrt(sem1 + sem2)
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """t statistic and p-value from precomputed means and denominator."""
    with np.errstate(divide="ignore", invalid="ignore"):
        stat = da.divide(mean1 - mean2, denom)
    return _ttest_finish(df, stat)
def _ttest_finish(df, t):
    """Common tail of all three t-test functions.

    The survival function of |t| gives the upper-tail probability; doubling
    it yields the two-sided p-value.  Evaluated lazily via delayed.
    """
    prob = 2 * delayed(distributions.t.sf)(da.absolute(t), df)
    if t.ndim == 0:
        # Unwrap 0-d arrays into scalars, as scipy does.
        t = t[()]
    return t, prob
def _count(x, axis=None):
if axis is None:
return x.size
else:
return x.shape[axis]
def _sum_of_squares(a, axis=0):
    """Return the sum of the squared elements of *a* along *axis*.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate; 0 by default.  None computes over
        the whole array.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    _square_of_sums : the square of the sum (the opposite operation).
    """
    squared = a * a
    return da.sum(squared, axis)
def _square_of_sums(a, axis=0):
    """Return the square of the sum of *a* along *axis*.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate; 0 by default.  None computes over
        the whole array.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See also
    --------
    _sum_of_squares : the sum of squares (the opposite operation).
    """
    total = da.sum(a, axis)
    return total * total
| |
import numpy as np
def topattern(pd, patlen, patternlength):
    """Convert pattern start times into a binary activation matrix.

    Parameters
    ----------
    pd : dict
        Maps pattern ID -> iterable of start times (in timesteps).
    patlen : dict
        Maps pattern ID -> pattern length (in timesteps).
    patternlength : int
        Number of columns (timesteps) of the output matrix.

    Returns
    -------
    numpy.ndarray
        Matrix of shape (len(pd), patternlength); entry [i, t] is 1 when
        pattern i is active at timestep t, else 0.  Activations that run
        past the end of the matrix are clipped.
    """
    m = np.zeros((len(pd), patternlength))
    for i, pattern_id in enumerate(pd.keys()):
        length = patlen[pattern_id]
        for t in pd[pattern_id]:
            # Clip the active window [t, t + length) to the matrix width.
            # (The original computed the same bounds via a confusing mix
            # of `start` and `t + le`.)
            start = min(t, patternlength)
            stop = min(t + length, patternlength)
            m[i, start:stop] = 1
    return m
class PatternManager:
"""
PatternManager is a container of different patterns, in charge of creating sequence of overlapping unsync patterns in time.
The distribution of overlapping patterns that PatternManager creates is determined by mixing probabilities.
    If the mixing probabilities have k elements, then each element represents the probability that one of the N available patterns in PatternManager (each with a possibly different length in time) is presented in the input.
E.g. for some simulation time T and a mixing probability [0.9, 0.8, 0.7] PatternManager creates distribution of patterns in time such that:
- there are at most 3 different patterns in the input at any given time (non-sync)
- probability that there is no pattern present in the input is (1 - 0.9) * (1 - 0.8) * (1 - 0.7)
- probability that some 3 (out of N) of patterns are present at the same time is 0.9 * 0.8 * 0.7
- probability that there is exactly 1 pattern present is:
0.9 * (1 - 0.8) * (1 - 0.7) + (1 - 0.9) * 0.8 * (1 - 0.7) + (1 - 0.9) * (1 - 0.8) * 0.7
- etc
"""
def __init__(self, dt):
self.dt = dt
self.patterns = {}
self.npatterns = 0
self.patlen = {}
def addPatterns(self, patterns, IDs):
"""
Add patterns with IDs
Patterns should be a list or an 2d array (nchannels x pattern_length)
"""
for ind, p in enumerate(patterns):
ID = IDs[ind]
if ID not in self.patterns.keys():
self.patterns[ID] = p
self.patlen[ID] = p.shape[1]
self.npatterns += 1
    def getPatternsIDs(self):
        # Return the IDs of all registered patterns (dict keys; this module
        # appears to target Python 2, where keys() is a list — NOTE(review):
        # on Python 3 callers would receive a view instead).
        return self.patterns.keys()
    # onoff: periods of patterns on and patterns off -> when they are shown and when they are not
    # first param gives a range for patterns on: [0.5, 0.7] means patterns are on for a random time between 0.5 and 0.7 s
    # second param gives a range for patterns off: [0.3, 0.5] means patterns are off for a random time between 0.3 and 0.5 s
def createUnsyncPatterns(self, simulationtime, IDs, mixingprob, onoff, offset=0):
    """
    Distribute (possibly overlapping, non-synchronized) patterns over time.

    -> simulationtime : total simulation time in seconds
    -> IDs : pattern IDs to use (IDs not registered are silently dropped)
    -> mixingprob : per-slot activation probabilities; its length is the
       maximum number of simultaneously active patterns
    -> onoff : [on, off] durations in seconds; each entry is either a
       scalar or a [min, max] range (both entries must be the same kind)
    -> offset : timestep offset added to every reported start time
    <- dict mapping pattern ID -> sorted list of start times (timesteps)
    """
    onoff_isRange_on = isinstance(onoff[0], list)
    onoff_isRange_off = isinstance(onoff[1], list)
    # either both entries are ranges or both are scalars
    assert not (onoff_isRange_on ^ onoff_isRange_off)
    simulationtimeTS = int(np.ceil(simulationtime / self.dt))  # sim time in timesteps
    # build the on/off mask: 1 while patterns may be shown, 0 otherwise
    onofftimes = np.zeros(simulationtimeTS)
    t = 0
    onoroff = 0  # 0 -> currently in an "on" period, 1 -> "off" period
    while t < simulationtimeTS:
        # duration of the current on/off period
        if onoff_isRange_on:
            lo, hi = onoff[onoroff][0], onoff[onoroff][1]
            onofftime = lo + np.random.rand() * (hi - lo)
        else:
            onofftime = onoff[onoroff]
        steps = int(np.ceil(onofftime / self.dt))
        steps = min(steps, simulationtimeTS - t)
        onofftimes[t: t + steps] = 1 - onoroff
        t += steps
        onoroff = 1 - onoroff
    # check whether all IDs exist; keep only registered ones
    pIDs = []
    patlen = []
    for ID in IDs:
        if ID in self.patterns:
            pIDs.append(ID)
            patlen.append(self.patlen[ID])
    npatterns = len(pIDs)
    maxnpatterns = len(mixingprob)  # max overlap of patterns
    patact = np.zeros((maxnpatterns, simulationtimeTS), dtype='int')
    # probability of mixing channels (each can contain any pattern)
    pa = np.array(mixingprob, dtype=float)  # active fraction per slot
    apatlen = sum(patlen) / float(len(patlen))  # average pattern length
    # convert "fraction of time active" into a per-timestep start probability
    pa /= (apatlen - pa * (apatlen - 1))
    r = np.random.rand(maxnpatterns, simulationtimeTS)
    # now place patterns
    for t in range(simulationtimeTS):
        if onofftimes[t] == 1:  # pattern time
            for p in range(maxnpatterns):
                if patact[p, t] == 0 and pa[p] > r[p, t]:
                    # choose a pattern that is not active in any slot at t
                    s = list(range(1, npatterns + 1))
                    for pp in patact[:, t]:
                        if pp > 0 and pp in s:
                            s.remove(pp)
                    if not s:
                        # every pattern is already active; the original
                        # would crash on an empty draw here
                        continue
                    # np.random.randint: high bound is exclusive
                    # (np.random.random_integers was removed from numpy)
                    rp = s[np.random.randint(len(s))]  # 1-based pattern index
                    patact[p, t: t + min(patlen[rp - 1], simulationtimeTS - t)] = rp
    # report the empirical distribution of overlap counts
    sp = sum(patact > 0)
    k = np.zeros(maxnpatterns + 1)
    for i in range(maxnpatterns + 1):
        k[i] = sum(sp == i) / float(simulationtimeTS)
    print("Distribution of number of overlapping patterns [ 0 to %d ]" % maxnpatterns)
    print(k)
    # convert slot activity into per-ID lists of start times
    # (conversion of 1-based slot values back to pattern IDs)
    pd = dict()
    for ID in pIDs:
        pd[ID] = []
    for p in range(maxnpatterns):
        t = 0
        while t < simulationtimeTS:
            if patact[p, t] > 0:  # start of a pattern instance
                ID = pIDs[patact[p, t] - 1]
                pd[ID] += [t + offset]
                t += self.patlen[ID]
            else:
                t += 1
    for ID in pd:
        pd[ID].sort()
    return pd
class TPattern(object):
    """
    General (abstract) base class for rate patterns.

    Subclasses are expected to set:
      npatterns : number of patterns
      nchannels : number of channels per pattern
      length    : pattern length in seconds
      lengthTS  : pattern length in timesteps
      rates     : rate description (subclass specific)
      patterns  : rate array, shape (npatterns, nchannels, lengthTS)
    NOTE: derives from object so that super()-based calls in subclasses
    also work under Python 2 (old-style classes break super()).
    """

    def info(self):
        """Print some info about the pattern (class)."""
        print("Number of patterns: %s" % (self.npatterns,))
        print("Number of channels in pattern: %s" % (self.nchannels,))
        print("Length of pattern(sec): %s" % (self.length,))
        print("Rates: %s" % (self.rates,))

    def createFromRates(self, rates):
        """
        Create one (Poisson) spike pattern from rates.

        -> rates : array([nchannels, time]); time == 1 means a constant
           rate per channel, which is tiled over lengthTS steps.
           A spike is drawn where rate > uniform(0, 1000); assumes rates
           in Hz with a millisecond-scale timestep -- TODO confirm.
        <- [spikes, nspikes]: per-channel arrays of spike-time indices
           and the total spike count
        """
        if rates.shape[1] == 1:  # constant rate: replicate over time
            trates = np.tile(rates, (1, self.lengthTS))
        else:
            trates = rates
        r = np.random.rand(self.nchannels, self.lengthTS) * 1000.
        spikes = []
        bsp = trates > r
        for ch in range(self.nchannels):
            spikes.append(bsp[ch, :].nonzero()[0])
        return [spikes, bsp.sum()]

    def limitRates(self, minrate, maxrate):
        """
        Clip all pattern rates into [minrate, maxrate] in place.

        -> minrate : minimum rate
        -> maxrate : maximum rate
        """
        self.patterns[self.patterns < minrate] = minrate
        self.patterns[self.patterns > maxrate] = maxrate
class BarRatePatterns(TPattern):
    """
    Rate-based bar pattern generator.

    Internally builds a set of patterns defined by rates (self.patterns)
    and binary masks marking which channels are high-rate (self.masks).
    """

    def __init__(self, patternshape, bars, rates, length, dt):
        """
        Inits class:
        -> patternshape : shape of pattern [height, width]
        -> bars : indices of the bars (vertical first, then horizontal)
           to create as patterns; an empty list means "all bars"
        -> rates : dictionary with 'low' and 'high' rates
        -> length : length of pattern in sec
        -> dt : simulation timestep
        """
        self.patternshape = patternshape
        self.rates = rates
        self.length = length
        self.dt = dt
        # cast to int: np.ceil returns a float, which is not a valid
        # array dimension / repeat count
        self.lengthTS = int(np.ceil(self.length / dt))
        # create masks of patterns
        self.nchannels = self.patternshape[0] * self.patternshape[1]
        n = len(bars)
        if n == 0:  # default: one pattern per vertical and horizontal bar
            n = self.patternshape[0] + self.patternshape[1]
            bars = range(n)
        self.npatterns = len(bars)
        self.masks = np.zeros((n, self.nchannels, self.lengthTS), dtype='byte')
        for ind, i in enumerate(bars):
            pattern = np.zeros(self.patternshape)
            # vertical bars come first
            if i >= 0 and i < self.patternshape[1]:
                pattern[:, i] = 1
            elif i >= self.patternshape[1] and i < self.patternshape[1] + self.patternshape[0]:
                pattern[i - self.patternshape[1], :] = 1
            newpattern = pattern.ravel().reshape((self.nchannels, 1))
            self.masks[ind, :, :] = newpattern.repeat(self.lengthTS, 1)
        # scale each binary mask into [low, high] rates
        HR = rates['high']
        LR = rates['low']
        self.patterns = np.array(self.masks * (HR - LR) + LR, dtype='float')

    def info(self):
        """Print base info plus bar-pattern specifics."""
        # direct base-class call: super(BarRatePatterns, self) raises a
        # TypeError under Python 2 when TPattern is an old-style class
        TPattern.info(self)
        print("Name : Bar rate pattern")
        print("Shape of pattern: %s" % (self.patternshape,))
class OrientedBarRatePatterns(TPattern):
    """
    Rate pattern generator for oriented (rotated) bars.

    Internally builds a set of patterns defined by rates (self.patterns)
    and binary masks marking which channels are high-rate (self.masks).
    Note:
        Can be seen as a special case of random rate patterns with
        constraints 2,1 (even weaker, since the two groups -- vertical
        and horizontal bars -- have no overlap).
    """

    def __init__(self, patternshape, barwidth, angles, rates, length, dt):
        """
        Inits class:
        -> patternshape : shape of pattern [height, width]
        -> barwidth : bar width in channels
        -> angles : rotation angles in degrees (0 = vertical bar,
           clockwise) to create as patterns
        -> rates : dictionary with 'low' and 'high' rates
        -> length : length of pattern in sec
        -> dt : simulation timestep
        """
        self.patternshape = patternshape
        self.barwidth = barwidth
        self.rates = rates
        self.length = length
        self.dt = dt
        # cast to int: np.ceil returns a float, which is not a valid
        # array dimension / repeat count
        self.lengthTS = int(np.ceil(self.length / dt))
        # create masks of patterns
        self.nchannels = self.patternshape[0] * self.patternshape[1]
        n = len(angles)
        self.npatterns = n
        self.masks = np.zeros((n, self.nchannels, self.lengthTS), dtype='byte')
        # base pattern: centered vertical bar of width barwidth
        # (// keeps the indices integral under Python 3 as well)
        pattern0 = np.zeros(self.patternshape)
        left = patternshape[1] // 2 - barwidth // 2
        pattern0[:, left: left + barwidth] = 1
        from scipy import ndimage
        for ind, angle in enumerate(angles):
            # ensure angle is in range
            assert angle >= 0 and angle <= 360
            # rotate and re-binarize (interpolation smears the edges)
            pattern = (ndimage.rotate(pattern0, angle, reshape=False) > 0.1) * 1.
            newpattern = pattern.ravel().reshape((self.nchannels, 1))
            self.masks[ind, :, :] = newpattern.repeat(self.lengthTS, 1)
        # scale each binary mask into [low, high] rates
        HR = rates['high']
        LR = rates['low']
        self.patterns = np.array(self.masks * (HR - LR) + LR, dtype='float')

    def info(self):
        """Print rotated-bar pattern specifics."""
        # NOTE(review): unlike BarRatePatterns.info this never printed
        # the base-class info; behavior kept as-is.
        print("Name : Rotated bar rate pattern")
        print("Shape of pattern: %s" % (self.patternshape,))
class SpatioTemporalPatterns(TPattern):
    """
    Variable-rate random pattern generator.

    Internally builds a set of patterns defined by rates over time; the
    binary mask is created only for compatibility and is all zeros.
    """

    def __init__(self, nchannels, npatterns, rates, length, dt, process=None, patternsrates=None):
        """
        Init class
        -> nchannels : (int) number of channels
        -> npatterns : (int) number of different patterns
        -> rates : dictionary defining max and min rates {'low', 'high'}
        -> length : (float) length of pattern in sec
        -> dt : simulation timestep
        -> process : optional generator with a .create(length) method,
           applied per pattern and channel for the whole duration;
           default is i.i.d. uniform integer rates
        -> patternsrates : optional external rate matrix,
           array(npatterns, nchannels, lengthTS); overrides everything else
        """
        self.rates = rates
        self.length = length
        self.nchannels = nchannels
        self.npatterns = npatterns
        self.dt = dt
        self.lengthTS = int(np.ceil(self.length / dt))  # length in timesteps
        print(self.lengthTS)  # NOTE(review): debug output, kept as-is
        # masks kept only for compatibility with the other pattern classes
        self.masks = np.zeros((self.npatterns, self.nchannels, self.lengthTS))
        self.patterns = np.zeros((self.npatterns, self.nchannels, self.lengthTS))
        # "is None" instead of "== None": comparing a numpy array with ==
        # yields an elementwise array, which is invalid in a condition
        if patternsrates is None:  # no external rates description
            if process is None:  # no external process either
                # default process: uniform random integer rates;
                # randint's high bound is exclusive, hence +1
                # (np.random.random_integers was removed from numpy)
                for n in range(self.npatterns):
                    for ch in range(self.nchannels):
                        rrs = np.random.randint(rates['low'], rates['high'] + 1, self.lengthTS)
                        self.patterns[n, ch, :] = rrs
            else:  # process is defined so use it
                for n in range(self.npatterns):
                    for ch in range(self.nchannels):
                        self.patterns[n, ch, :] = process.create(self.lengthTS)
        else:  # external description is provided
            self.patterns = patternsrates

    def info(self):
        """Print base info plus variable-rate specifics."""
        # Direct base call. The original said
        # super(VariableRatePatterns, self).info() -- a NameError left
        # over from a class rename -- and super() would also fail on an
        # old-style base under Python 2.
        TPattern.info(self)
        print("Name : Variable rate pattern")
| |
# Copyright 2017 Jan Buys.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import re
import codecs
import string
import sys
import sentence as asent
import util as mrs_util
def convert_number(num_str, has_unit):
    """
    Normalize a Stanford-normalized numeric string to a plain number string.

    Strips a leading comparison marker (>, <, ~, possibly followed by =),
    optionally a one-character unit (e.g. '$') when has_unit is set, and
    keeps only the upper value of a range such as '10-20'. Whole numbers
    are rendered without a decimal point. Returns the input unchanged
    (after printing a warning) when it cannot be parsed.
    """
    try:
        # guard num_str[0] so an empty string falls through gracefully
        unit_ind = 1 if num_str and num_str[0] in ['>', '<', '~'] else 0
        if len(num_str) > 1 and num_str[1] == '=':
            unit_ind = 2
        if has_unit:
            num_str = num_str[unit_ind + 1:]
        else:
            num_str = num_str[unit_ind:]
        # For now, ignore first value in ranges.
        if '-' in num_str:
            num_str = num_str[(num_str.index('-') + 1):]
        value = float(num_str)
        # BUG FIX: the original did int(value) unconditionally (its
        # ValueError guard can never fire for a float), truncating e.g.
        # '3.5' to '3'. Only collapse to int when the value is whole.
        if value.is_integer():
            value = int(value)
        return str(value)
    except ValueError:
        print('Cannot parse: %s' % (num_str,))
        return num_str
def convert_period(period_str):
    """
    Decode an ISO-8601-style period string into (amount, unit-name).

    'P3Y' -> (3, 'year'); 'PT10M' -> (10, 'minute'). An unrecognized
    unit letter is returned as-is; a non-matching string yields (0, '').
    """
    date_units = {'Y': 'year', 'M': 'month', 'W': 'week', 'D': 'day'}
    time_units = {'H': 'hour', 'M': 'minute', 'S': 'second'}
    # PT... (time) and P<digit>... (date) cannot both match, so the
    # check order does not matter.
    time_m = re.match(r'PT(\d\d*)([A-Z])', period_str)
    if time_m:
        amount = int(time_m.group(1))
        letter = time_m.group(2)
        return amount, time_units.get(letter, letter)
    date_m = re.match(r'P(\d\d*)([A-Z])', period_str)
    if date_m:
        amount = int(date_m.group(1))
        letter = date_m.group(2)
        return amount, date_units.get(letter, letter)
    return 0, ''
def read_sentences_normalize_ne(stanford_file_name):
    """
    Read Stanford CoreNLP output and build Sentence objects while
    normalizing named entities, money, percentages, numbers, ordinals,
    dates, durations and recurring sets.

    -> stanford_file_name : path to the CoreNLP text output
    <- list of asent.Sentence objects

    A small state machine per entity type tracks whether the previous
    token belonged to the same entity, so only the first token of a
    multi-token entity is normalized.
    """
    stanford_file = codecs.open(stanford_file_name, 'r', 'utf-8')
    sentences = []
    tokens = []
    token_alignments = []
    text_line = ''
    state = False          # inside a sentence's token listing
    ne_state = False
    money_state = False
    percent_state = False
    number_state = False
    ordinal_state = False
    time_state = False
    date_state = False
    duration_state = False
    set_state = False
    last_ne_tag = ''
    token_counter = 0
    # Normalized date forms: YYYY-MM-DD, YYYY-MM, YYYY ('X' = unknown digit).
    date_re = re.compile(r'^(\d\d\d\d|XXXX)-(\d\d|XX)-(\d\d|XX)$')
    date2_re = re.compile(r'^(\d\d\d\d|XXXX)-(\d\d|XX)$')
    date3_re = re.compile(r'^(\d\d\d\d|XXXX)$')
    for line in stanford_file:
        if line.startswith('Sentence #'):
            if state:
                sentences.append(asent.Sentence(tokens, token_alignments))
            # Reset all per-sentence state.
            tokens = []
            token_alignments = []
            state = False
            ne_state = False
            money_state = False
            percent_state = False
            number_state = False
            ordinal_state = False
            time_state = False
            date_state = False
            duration_state = False
            set_state = False
            last_ne_tag = ''
            token_counter = 0
        elif line.startswith('[Text=') and line[-2] == ']':
            token = asent.Token.parse_stanford_line(line[1:-2], {})
            # For LOCATION, PERSON, ORGANIZATION, MISC.
            if ne_state and not (token.is_ne and token.ne_tag == last_ne_tag):
                ne_state = False
            if not ne_state and token.is_ne and token.ne_tag in \
                    ['LOCATION', 'PERSON', 'ORGANIZATION', 'MISC']:
                ne_state = True
                # Appends to the front.
                last_ne_tag = token.ne_tag
                token.constant_label = 'name'
                token.const_lexeme = token.word
            # For MONEY:
            if money_state and not (token.is_ne and token.ne_tag == 'MONEY'):
                money_state = False
            elif not money_state and token.is_ne and token.ne_tag == 'MONEY':
                money_state = True
                money_str = token.normalized_ne_tag
                if len(money_str) == 0:
                    # Not treated as money.
                    token.is_ne = False
                    token.ne_tag = ''
                    money_state = False
                elif len(money_str) > 1:  # length 1 is for units
                    # (a dead unit_ind computation was removed here)
                    token.const_lexeme = convert_number(money_str, True)
            # Percentage.
            if percent_state and not (token.is_ne and token.ne_tag == 'PERCENT'):
                percent_state = False
            elif not percent_state and token.is_ne and token.ne_tag == 'PERCENT':
                percent_state = True
                percent_str = token.normalized_ne_tag
                if len(percent_str) > 1:
                    token.normalized_ne_tag = convert_number(percent_str, True)
            # Plain numbers.
            if number_state and not (token.is_ne and token.ne_tag == 'NUMBER'):
                number_state = False
            elif not number_state and token.is_ne and token.ne_tag == 'NUMBER':
                number_state = True
                number_str = token.normalized_ne_tag
                if len(number_str) == 0:
                    number_state = False
                    token.is_ne = False
                    token.ne_tag = ''
                else:
                    token.const_lexeme = convert_number(number_str, False)
            # Ordinals.
            if ordinal_state and not (token.is_ne and token.ne_tag == 'ORDINAL'):
                ordinal_state = False
            elif not ordinal_state and token.is_ne and token.ne_tag == 'ORDINAL':
                ordinal_state = True
                number_str = token.normalized_ne_tag
                if len(number_str) == 0:
                    # BUG FIX: reset ordinal_state here; the original
                    # cleared number_state, leaving ordinal_state stuck.
                    ordinal_state = False
                    token.is_ne = False
                    token.ne_tag = ''
                else:
                    token.const_lexeme = convert_number(number_str, False)
            # Date/time expressions.
            if time_state and not (token.is_timex
                                   and token.ne_tag in ['DATE', 'TIME']):
                time_state = False
            elif not time_state and (token.is_timex
                                     and token.ne_tag in ['DATE', 'TIME']):
                # The same date and time expression can contain both DATE
                # and TIME.
                time_state = True
            if time_state and not date_state and token.ne_tag == 'DATE':
                # Only match pure date expressions
                # - cannot convert compound expressions cleanly enough.
                date_str = token.normalized_ne_tag
                if len(date_str.split()) == 1:
                    # Strip time from string.
                    if 'T' in date_str:
                        date_str = date_str[:date_str.index('T')]
                    # Round partially-unknown years down to decade/century.
                    if re.match(r'^\d\d\dX$', date_str):
                        date_str = date_str[:3] + '0'
                    if re.match(r'^\d\dXX$', date_str):
                        date_str = date_str[:2] + '00'
                    m = date_re.match(date_str)
                    m2 = date2_re.match(date_str)
                    m3 = date3_re.match(date_str)
                    if m or m2 or m3:
                        date_state = True
                        if m:
                            date_list = list(m.groups())
                        elif m2:
                            date_list = list(m2.groups())
                        elif m3:
                            date_list = list(m3.groups())
                        # List comprehension instead of filter(): under
                        # Python 3 a filter object is always truthy, which
                        # would break the "if date_list" test below.
                        date_list = [d for d in date_list if 'X' not in d]
                        date_list = [convert_number(date, False)
                                     for date in date_list]
                        if date_list:
                            token.const_lexeme = date_list[0]
                        # else don't handle as a date.
            if date_state and token.ne_tag != 'DATE':  # was Python-2-only <>
                date_state = False
            # For Duration:
            if duration_state and not (token.is_timex and token.ne_tag == 'DURATION'):
                duration_state = False
            elif not duration_state and token.is_timex and token.ne_tag == 'DURATION':
                duration_state = True
                time_str = token.normalized_ne_tag
                period, unit = convert_period(time_str)
                if period == 0:
                    duration_state = False
                else:
                    token.const_lexeme = str(period)
                    token.ne_tag += '_' + unit
            # For SET:
            if set_state and not (token.is_timex and token.ne_tag == 'SET'):
                set_state = False
            elif not set_state and token.is_timex and token.ne_tag == 'SET':
                set_state = True
                freq = 1
                period = 0
                unit = ''
                if 'freq' in token.timex_attr:  # was has_key (Python 2 only)
                    rate_re = re.compile(r'P(\d\d*)([A-Z])')
                    # NOTE(review): freq_m may be None for a malformed
                    # freq attribute and would raise here -- confirm.
                    freq_m = rate_re.match(token.timex_attr['freq'])
                    freq = int(freq_m.group(1))
                if 'periodicity' in token.timex_attr:
                    period, unit = convert_period(token.timex_attr['periodicity'])
                if period == 0:
                    set_state = False
                    token.ne_tag = ''
                else:
                    if freq > 1:
                        # BUG FIX: the original said "token_ne_tag",
                        # a NameError at runtime.
                        token.ne_tag += '_rate'
                    token.const_lexeme = str(period)
                    token.ne_tag += '_temporal_' + unit
            # Identify numbers:
            if re.match(r'^[+-]?\d+(\.\d+)?$', token.word):
                if token.const_lexeme == '':
                    token.const_lexeme = convert_number(token.word, False)
                token.constant_label = 'number'
                token.pred_lexeme = token.word
            tokens.append(token)
            state = True
    if state:
        # NOTE(review): unlike above, no token_alignments are passed for
        # the final sentence; presumably Sentence defaults them -- confirm.
        sentences.append(asent.Sentence(tokens))
    return sentences
def read_sentences(stanford_file_name, file_id):
    """
    Read Stanford CoreNLP output into Sentence objects, splitting tokens
    that contain underscores (from non-breaking spaces) into multiple
    tokens with adjusted, sentence-relative character spans.

    -> stanford_file_name : path to the CoreNLP text output
    -> file_id : identifier stored on every resulting sentence
    <- list of asent.Sentence objects

    NOTE(review): relies on a module-level "normalize_ne" global (set in
    the __main__ block); calling this function from an importing module
    would raise NameError on the underscore path -- confirm.
    """
    stanford_file = codecs.open(stanford_file_name, 'r', 'utf-8')
    sentences = []
    tokens = []
    text_line = ''      # raw text seen before the token listing
    state_line = ''     # partial token line carried over line breaks
    sent_offset = 0
    state = False       # saw at least one token line of this sentence
    state1 = False      # sentence offset already taken from first token
    for line in stanford_file:
        if line.startswith('Sentence #'):
            if state:
                sentences.append(asent.Sentence(tokens))
                sentences[-1].offset = sent_offset
                sentences[-1].raw_txt = text_line
                sentences[-1].file_id = file_id
            text_line = ''
            state_line = ''
            tokens = []
            state = False
            state1 = False
        elif len(line) > 1 and line[-2] == ']' and (state or line.startswith('[Text=')):
            if state_line:
                # Completion of a token line started on an earlier line.
                # NOTE(review): state_line is only reset at sentence
                # boundaries, not after a successful parse -- confirm.
                token = asent.Token.parse_stanford_line(state_line + ' ' + line[:-2], {})
            else:
                token = asent.Token.parse_stanford_line(line[1:-2], {})
            if not state1:
                sent_offset = token.char_start
            # Make the character spans sentence-relative.
            ind_start = token.char_start - sent_offset
            ind_end = token.char_end - sent_offset
            token.reset_char_spans(ind_start, ind_end)
            word = token.original_word
            word = word.replace(u"\u00A0", "_")
            if '_' in word:
                # Split a fused token into one token per '_'-separated part.
                split_word = word.split('_')
                # List comprehension instead of filter(): a filter object
                # cannot be indexed under Python 3.
                split_inds = [x for x in range(len(word)) if word[x] == '_']
                first_word = word[:split_inds[0]]
                token.original_word = first_word
                token.word = first_word
                if normalize_ne:
                    token.pred_lexeme = first_word.lower()
                else:
                    token.pred_lexeme = first_word.lower() + u'/' + token.pos.lower()
                token.const_lexeme = first_word
                token.char_end = token.char_start + split_inds[0]
                tokens.append(token)
                for j, w in enumerate(split_word[1:]):
                    char_start = token.char_start + split_inds[j] + 1
                    if j + 1 < len(split_inds):
                        char_end = token.char_start + split_inds[j + 1]
                    else:
                        char_end = token.char_start + len(word)
                    new_token = asent.Token(w, w, token.pos, token.constant_label,
                                            token.is_ne, token.is_timex, token.ne_tag,
                                            token.normalized_ne_tag,
                                            char_start=char_start, char_end=char_end)
                    tokens.append(new_token)
            else:
                tokens.append(token)
            state = True
            state1 = True
        elif line.startswith('[Text='):
            # Token line without the closing ']': keep and continue.
            state_line = line[1:].strip()
            state = True
        else:  # if line.strip():
            if state:
                state_line += ' ' + line.strip()
            else:
                text_line += line.replace('\n', ' ')
    if state:
        # Flush the final sentence.
        sentences.append(asent.Sentence(tokens))
        sentences[-1].offset = sent_offset
        sentences[-1].raw_txt = text_line
        sentences[-1].file_id = file_id
    return sentences
def process_stanford(input_dir, working_dir, erg_dir, set_name,
                     use_pred_lexicon=True, use_const_lexicon=True, normalize_ne=False,
                     read_epe=False):
    """
    Read parsed Stanford output for one data set and attach lexicon
    matches (predicate, constant, and -- for AMR -- nominal and wiki
    lexemes) to every token.

    -> input_dir : input directory (only consulted for EPE file lists)
    -> working_dir : directory containing the CoreNLP .out files
    -> erg_dir : directory containing the *.lexicon files
    -> set_name : 'train', 'dev' or 'test'
    -> use_pred_lexicon / use_const_lexicon : enable lexicon lookups
    -> normalize_ne : AMR-style named-entity normalization
    -> read_epe : read one file per document listed in an EPE file list
    <- list of asent.Sentence objects with annotated tokens
    """
    nom_map = {}
    wiki_map = {}
    if use_pred_lexicon:
        pred_map = mrs_util.read_lexicon(erg_dir + 'predicates.lexicon')
        if normalize_ne:
            nom_map = mrs_util.read_lexicon(erg_dir + 'nominals.lexicon')
    else:
        pred_map = {}
    if use_const_lexicon:
        const_map = mrs_util.read_lexicon(erg_dir + 'constants.lexicon')
        if normalize_ne:
            wiki_map = mrs_util.read_lexicon(erg_dir + 'wiki.lexicon')
    else:
        const_map = {}
    if read_epe:
        # One CoreNLP output file per document, named by the EPE list.
        in_type = input_dir[4:-1]
        file_list = open(in_type + '.' + set_name + '.list', 'r').read().split('\n')[:-1]
        file_ids = [name[name.rindex('/') + 1:] for name in file_list]
        sentences = []
        for file_id in file_ids:
            sentences.extend(read_sentences(
                (working_dir + '/raw-' + set_name + '/' + file_id + '.out'),
                file_id))
    else:
        suffix = '.raw'
        if normalize_ne:
            sentences = read_sentences_normalize_ne((working_dir + set_name + suffix + '.out'))
        else:
            sentences = read_sentences((working_dir + set_name + suffix + '.out'), '0')
    max_token_span_length = 5  # longest multi-token lexicon entry + 1
    for i, sent in enumerate(sentences):
        for j, token in enumerate(sent.sentence):
            if normalize_ne:
                sentences[i].sentence[j].find_wordnet_lemmas()
            # Single-token constant lexemes: surface form first, then
            # lower-cased surface form, then lemma.
            lexeme = ''
            if token.original_word in const_map:
                lexeme = const_map[token.original_word]
            elif token.original_word.lower() in const_map:
                lexeme = const_map[token.original_word.lower()]
            elif token.word in const_map:
                lexeme = const_map[token.word]
            if lexeme != '':  # was Python-2-only <>
                sentences[i].sentence[j].const_lexeme = lexeme
                sentences[i].sentence[j].is_const = True
            # Single-token predicate lexemes, same priority order.
            lexeme = ''
            if token.original_word in pred_map:
                lexeme = pred_map[token.original_word]
            elif token.original_word.lower() in pred_map:
                lexeme = pred_map[token.original_word.lower()]
            elif token.word in pred_map:  # lemma
                lexeme = pred_map[token.word]
            if normalize_ne:
                nom_lexeme = ''
                if token.original_word in nom_map:
                    nom_lexeme = nom_map[token.original_word]
                elif token.original_word.lower() in nom_map:
                    nom_lexeme = nom_map[token.original_word.lower()]
                elif token.word in nom_map:  # lemma
                    nom_lexeme = nom_map[token.word]
                if nom_lexeme == '':
                    sentences[i].sentence[j].nom_lexeme = '_' + token.word
                else:
                    sentences[i].sentence[j].nom_lexeme = nom_lexeme
                    sentences[i].sentence[j].is_nom = True
            if not normalize_ne:
                # Keep only the first alternative of a '+'/'-' separated
                # predicate list.
                if len(lexeme) > 2 and '+' in lexeme[:-1]:
                    lexeme = lexeme[:lexeme.index('+')]
                elif len(lexeme) > 2 and '-' in lexeme[:-1]:
                    lexeme = lexeme[:lexeme.index('-')]
                if lexeme != '':
                    sentences[i].sentence[j].is_pred = True
            if normalize_ne and lexeme == '':  # for AMR
                lexeme = '_' + token.word  # lemma
            if lexeme != '':
                sentences[i].sentence[j].pred_lexeme = lexeme
            # Matches multi-token expressions.
            orth = token.original_word
            for k in range(j + 1, min(j + max_token_span_length - 1, len(sent.sentence))):
                orth += ' ' + sent.sentence[k].original_word
                if orth in const_map:
                    sentences[i].sentence[j].const_lexeme = const_map[orth]
                    sentences[i].sentence[j].const_char_end = sentences[i].sentence[k].char_end
                    sentences[i].sentence[j].is_const = True
                if orth in pred_map:
                    if normalize_ne:
                        first_pred = pred_map[orth]
                    elif len(pred_map[orth]) > 2 and '+' in pred_map[orth][:-1]:
                        first_pred = pred_map[orth][:pred_map[orth].index('+')]
                    elif len(pred_map[orth]) > 2 and '-' in pred_map[orth][:-1]:
                        first_pred = pred_map[orth][:pred_map[orth].index('-')]
                    else:
                        first_pred = pred_map[orth]
                    sentences[i].sentence[j].pred_lexeme = first_pred
                    # NOTE(review): const spans copy char_end but pred
                    # spans copy pred_char_end -- looks inconsistent;
                    # kept as-is, confirm against Token's attributes.
                    sentences[i].sentence[j].pred_char_end = sentences[i].sentence[k].pred_char_end
                    sentences[i].sentence[j].is_pred = True
            if normalize_ne:
                # Wikification lexemes (AMR only).
                wiki_lexeme = ''
                if token.original_word in wiki_map:
                    wiki_lexeme = wiki_map[token.original_word]
                elif token.original_word.lower() in wiki_map:
                    wiki_lexeme = wiki_map[token.original_word.lower()]
                elif token.word in wiki_map:  # lemma
                    wiki_lexeme = wiki_map[token.word]
                elif token.word.lower() in wiki_map:
                    wiki_lexeme = wiki_map[token.word.lower()]
                if wiki_lexeme == '':
                    sentences[i].sentence[j].wiki_lexeme = token.const_lexeme
                else:
                    sentences[i].sentence[j].wiki_lexeme = wiki_lexeme
                    sentences[i].sentence[j].is_wiki = True
    return sentences
'''
Preprocessing entry point: tokenize, lemmatize, normalize numbers and time
expressions, insert variable tokens for named entities etc., then write
one output file per annotation layer for each data set.
'''
if __name__=='__main__':
    # Usage: <script> <input_dir> <working_dir> <erg_dir> [-epe] [-n]
    assert len(sys.argv) >= 4
    input_dir = sys.argv[1] + '/'
    working_dir = sys.argv[2] + '/'
    erg_dir = sys.argv[3] + '/'
    # Optional flags may appear anywhere after the three directories.
    read_epe = len(sys.argv) > 4 and '-epe' in sys.argv[4:]
    set_list = ['train', 'dev', 'test']
    normalize_ne = len(sys.argv) > 4 and '-n' in sys.argv[4:]
    use_pred_lexicon = True
    use_const_lexicon = True
    for set_name in set_list:
        sentences = process_stanford(input_dir, working_dir, erg_dir, set_name,
            use_pred_lexicon, use_const_lexicon, normalize_ne, read_epe)
        # One output file per annotation layer.
        # NOTE(review): files are never closed explicitly; acceptable for
        # a short-lived script, but "with" blocks would be safer.
        sent_output_file = open(working_dir + set_name + '.en', 'w')
        sent_offsets_file = open(working_dir + set_name + '.off', 'w')
        sent_ids_file = open(working_dir + set_name + '.ids', 'w')
        sent_txt_file = open(working_dir + set_name + '.txt', 'w')
        pred_output_file = open(working_dir + set_name + '.lex.pred', 'w')
        const_output_file = open(working_dir + set_name + '.lex.const', 'w')
        wiki_output_file = open(working_dir + set_name + '.lex.wiki', 'w')
        pos_output_file = open(working_dir + set_name + '.pos', 'w')
        ne_output_file = open(working_dir + set_name + '.ne', 'w')
        span_output_file = open(working_dir + set_name + '.span', 'w')
        pred_span_output_file = open(working_dir + set_name + '.span.pred', 'w')
        const_span_output_file = open(working_dir + set_name + '.span.const', 'w')
        if normalize_ne:
            nom_output_file = open(working_dir + set_name + '.lex.nom', 'w')
        for sent in sentences:
            # NOTE(review): .encode() writes byte strings -- Python 2
            # style; under Python 3 these writes would need text files
            # opened in binary mode or no encode at all.
            out_str = sent.original_sentence_str()
            sent_output_file.write(out_str.encode('utf-8', 'replace'))
            if normalize_ne:
                # AMR mode: verb predicates and nominals separately.
                lex_str = sent.pred_verb_lexeme_str()
                pred_output_file.write(lex_str.encode('utf-8', 'replace'))
                lex_str = sent.nom_lexeme_str()
                nom_output_file.write(lex_str.encode('utf-8', 'replace'))
            else:
                lex_str = sent.pred_lexeme_str()
                pred_output_file.write(lex_str.encode('utf-8', 'replace'))
            lex_str = sent.const_lexeme_str()
            lex_enc = lex_str.encode('utf-8', 'replace')
            const_output_file.write(lex_enc)
            # lex_enc is re-bound to the wiki lexemes and written a few
            # lines further down (wiki_output_file.write).
            lex_str = sent.wiki_lexeme_str()
            lex_enc = lex_str.encode('utf-8', 'replace')
            sent_offsets_file.write(str(sent.offset) + '\n')
            sent_ids_file.write(str(sent.file_id) + '\n')
            txt_enc = sent.raw_txt.encode('utf-8', 'replace')
            sent_txt_file.write(txt_enc + '\n')
            wiki_output_file.write(lex_enc)
            pos_output_file.write(sent.pos_str())
            ne_output_file.write(sent.ne_tag_str())
            span_output_file.write(sent.ch_span_str())
            const_span_output_file.write(sent.const_ch_span_str())
            pred_span_output_file.write(sent.pred_ch_span_str())
| |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the pxeswitch command.
This may have issues being tested somewhere that the command actually works...
"""
import os.path
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestPxeswitch(TestBrokerCommand):
"""Simplified tests for the pxeswitch command.
Since we can't actually run aii-installfe against imaginary hosts, the
unittest.conf file specifies /bin/echo as the command to use. These
tests just check that the available parameters are passed through
correctly.
"""
def testinstallunittest00(self):
    """--install should produce --installlist/--configurelist arguments."""
    command = "pxeswitch --hostname unittest00.one-nyp.ms.com --install"
    # The tests are configured to run /bin/echo instead of the real
    # aii-installfe, so the status output echoes the generated arguments.
    # A fake aii-installfe with more realistic output would be better.
    err = self.statustest(command.split(" "))
    for present in ("--installlist", "--configurelist"):
        self.matchoutput(err, present, command)
    for absent in ("--status", "--rescue", "--boot", "--firmware", "--livecd"):
        self.matchclean(err, absent, command)
    ssh = self.config.lookup_tool("ssh")
    if ssh[0] == '/':
        self.matchoutput(err, "--sshdir %s" % os.path.dirname(ssh), command)
    user = self.config.get("broker", "installfe_user")
    self.matchoutput(err,
                     "--servers %s@infra1.aqd-unittest.ms.com" % user,
                     command)
def testinstallunittest00noconf(self):
    """--noconfigure must suppress every --configure* argument."""
    command = ["pxeswitch", "--hostname", "unittest00.one-nyp.ms.com",
               "--install", "--noconfigure"]
    err = self.statustest(command)
    self.matchoutput(err, "--installlist", command)
    for absent in ("--configure", "--status", "--rescue", "--boot",
                   "--firmware", "--livecd"):
        self.matchclean(err, absent, command)
    ssh = self.config.lookup_tool("ssh")
    if ssh[0] == '/':
        self.matchoutput(err, "--sshdir %s" % os.path.dirname(ssh), command)
    user = self.config.get("broker", "installfe_user")
    self.matchoutput(err,
                     "--servers %s@infra1.aqd-unittest.ms.com" % user,
                     command)
def testinstallunittest02(self):
    """--install is refused while the host's build status is unsuitable."""
    cmd = ["pxeswitch", "--hostname", "unittest02.one-nyp.ms.com",
           "--install"]
    out = self.badrequesttest(cmd)
    self.matchoutput(out, "You should change the build status before "
                     "switching the PXE link to install.", cmd)
def testlocalbootunittest02(self):
    """--localboot maps to --configurelist plus --bootlist."""
    cmd = "pxeswitch --hostname unittest02.one-nyp.ms.com --localboot"
    output = self.statustest(cmd.split(" "))
    for expected in ("--configurelist", "--bootlist"):
        self.matchoutput(output, expected, cmd)
def teststatusunittest02(self):
    """--status must emit --statuslist and skip configuration."""
    cmd = "pxeswitch --hostname unittest02.one-nyp.ms.com --status"
    output = self.statustest(cmd.split(" "))
    self.matchclean(output, "--configure", cmd)
    self.matchoutput(output, "--statuslist", cmd)
def testfirmwareunittest02(self):
    """--firmware maps to --configurelist plus --firmwarelist."""
    cmd = "pxeswitch --hostname unittest02.one-nyp.ms.com --firmware"
    output = self.statustest(cmd.split(" "))
    for expected in ("--configurelist", "--firmwarelist"):
        self.matchoutput(output, expected, cmd)
def testconfigureunittest02(self):
    """With no action flag, the default action is --configurelist."""
    cmd = "pxeswitch --hostname unittest02.one-nyp.ms.com"
    output = self.statustest(cmd.split(" "))
    self.matchoutput(output, "--configurelist", cmd)
def testblindbuildunittest02(self):
    """--blindbuild maps to --configurelist plus --livecdlist."""
    cmd = "pxeswitch --hostname unittest02.one-nyp.ms.com --blindbuild"
    output = self.statustest(cmd.split(" "))
    for expected in ("--configurelist", "--livecdlist"):
        self.matchoutput(output, expected, cmd)
def testrescueunittest02(self):
    """--rescue maps to --configurelist plus --rescuelist."""
    cmd = "pxeswitch --hostname unittest02.one-nyp.ms.com --rescue"
    output = self.statustest(cmd.split(" "))
    for expected in ("--configurelist", "--rescuelist"):
        self.matchoutput(output, expected, cmd)
def testconfigurelist(self):
    """--list with mixed-case FQDNs still yields --configurelist."""
    hosts = ["uNitTest02.one-nyp.ms.com",
             "unittest00.One-Nyp.ms.com"]
    scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
    cmd = "pxeswitch --list %s" % scratchfile
    output = self.statustest(cmd.split(" "))
    self.matchoutput(output, "--configurelist", cmd)
    # We would like to test more of the output... we need something
    # special for aii-shellfe however...
def testconfigurelisterror1(self):
    """Bad FQDNs, unknown hosts and unknown DNS domains are all reported."""
    hosts = ["not-an-fqdn",
             "host-does-not-exist.ms.com",
             "host.domain-does-not-exist.ms.com"]
    scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
    cmd = "pxeswitch --list %s --configure" % scratchfile
    out = self.badrequesttest(cmd.split(" "))
    self.matchoutput(out, "Invalid hosts in list:", cmd)
    self.matchoutput(out, "not-an-fqdn: Not an FQDN.", cmd)
    self.matchoutput(out,
                     "Host host-does-not-exist.ms.com not found.",
                     cmd)
    self.matchoutput(out,
                     "Host host.domain-does-not-exist.ms.com not found.",
                     cmd)
    self.matchoutput(out,
                     "DNS Domain domain-does-not-exist.ms.com not found.",
                     cmd)
def testconfigurelisterror2(self):
    """A host without a boot server cannot be PXE-switched."""
    hosts = [self.aurora_without_node + ".ms.com"]
    scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
    cmd = "pxeswitch --list %s --configure" % scratchfile
    out = self.badrequesttest(cmd.split(" "))
    self.matchoutput(out, "Invalid hosts in list:", cmd)
    self.matchoutput(out, "Host %s.ms.com has no bootserver." %
                     self.aurora_without_node, cmd)
def testinstallisterror(self):
    """--install --list fails per-host when the build status is wrong."""
    # NOTE(review): the method name looks like it is missing an 'l'
    # ("installist"); kept as-is since the name is the test identifier.
    hosts = ["unittest02.one-nyp.ms.com",
             "unittest00.one-nyp.ms.com"]
    scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
    cmd = "pxeswitch --install --list %s" % scratchfile
    out = self.badrequesttest(cmd.split(" "))
    self.matchoutput(out, "unittest02.one-nyp.ms.com: You should "
                     "change the build status before switching the "
                     "PXE link to install.", cmd)
    self.matchclean(out, "unittest00.one-nyp.ms.com", cmd)
def testblindbuildlist(self):
    """--blindbuild works with --list as well."""
    hosts = ["unittest02.one-nyp.ms.com",
             "unittest00.one-nyp.ms.com"]
    scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
    cmd = "pxeswitch --list %s --blindbuild" % scratchfile
    output = self.statustest(cmd.split(" "))
    for expected in ("--configurelist", "--livecdlist"):
        self.matchoutput(output, expected, cmd)
def testrescuelist(self):
hosts = ["unittest02.one-nyp.ms.com",
"unittest00.one-nyp.ms.com"]
scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
command = "pxeswitch --list %s --rescue" % scratchfile
err = self.statustest(command.split(" "))
self.matchoutput(err, "--configurelist", command)
self.matchoutput(err, "--rescuelist", command)
def testrescuelistnoconf(self):
hosts = ["unittest02.one-nyp.ms.com",
"unittest00.one-nyp.ms.com"]
scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
command = "pxeswitch --list %s --rescue --noconfigure" % scratchfile
err = self.statustest(command.split(" "))
self.matchclean(err, "--configurelist", command)
self.matchoutput(err, "--rescuelist", command)
# --configure is the default now, so this is no longer a conflict
# def teststatusconflictconfigure(self):
# command = ["pxeswitch", "--hostname=unittest02.one-nyp.ms.com",
# "--status", "--configure"]
# self.badoptiontest(command)
def teststatusconflictinstall(self):
command = ["pxeswitch", "--hostname=unittest02.one-nyp.ms.com",
"--status", "--install"]
self.badoptiontest(command)
def teststatusconflictinstalllist(self):
command = ["pxeswitch", "--list=does-not-actually-exist",
"--status", "--install"]
self.badoptiontest(command)
def testinstallconflictfirmware(self):
command = ["pxeswitch", "--hostname=unittest02.one-nyp.ms.com",
"--firmware", "--install"]
self.badoptiontest(command)
def testallowconfigureinstall(self):
command = ["pxeswitch", "--hostname=unittest00.one-nyp.ms.com",
"--configure", "--install"]
err = self.statustest(command)
self.matchoutput(err, "--configurelist", command)
self.matchoutput(err, "--installlist", command)
self.matchclean(err, "--firmware", command)
def testallowconfigureblindbuildlist(self):
hosts = ["unittest02.one-nyp.ms.com",
"unittest00.one-nyp.ms.com"]
scratchfile = self.writescratch("pxeswitchlist", "\n".join(hosts))
command = ["pxeswitch", "--list", scratchfile,
"--configure", "--blindbuild"]
err = self.statustest(command)
self.matchoutput(err, "--configurelist", command)
self.matchoutput(err, "--livecdlist", command)
self.matchclean(err, "--firmware", command)
def testfailoverpxeswitchlimitlist(self):
hostlimit = self.config.getint("broker", "pxeswitch_max_list_size")
hosts = []
for i in range(1, 20):
hosts.append("thishostdoesnotexist%d.aqd-unittest.ms.com" % i)
scratchfile = self.writescratch("pxeswitchlistlimit", "\n".join(hosts))
command = ["pxeswitch", "--list", scratchfile,
"--configure", "--blindbuild"]
out = self.badrequesttest(command)
self.matchoutput(out, "The number of hosts in list {0:d} can not be more "
"than {1:d}".format(len(hosts), hostlimit), command)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestPxeswitch))
| |
#!/usr/bin/env python
# coding: UTF-8
from unittest import TestCase
from nose.tools import raises
from hyuki_graph.hyuki_graph import(
DEFAULT_MEDIUM_SEP,
DEAD, ALIVE, MEDIUM, LARGE,
StatusCell,
get_dead_or_alive_number, get_dead_medium_or_large,
is_correct_as_date, is_correct_as_inputfile_data,
get_dates, get_str_projname, get_commits_from_text,
get_ext,
get_digits,
get_date_from_text, fill_commits_by_zero, update_as_commits
)
from datetime import date, timedelta
import os.path
def get_dead_medium_or_large_test0():
    # A count of zero maps to DEAD.
    assert get_dead_medium_or_large(0) == DEAD


def get_dead_medium_or_large_test2():
    # Any positive count below the separator is MEDIUM.
    assert get_dead_medium_or_large(1) == MEDIUM


def get_dead_medium_or_large_test3():
    # Just below the separator threshold is still MEDIUM.
    assert get_dead_medium_or_large(DEFAULT_MEDIUM_SEP - 1) == MEDIUM


def get_dead_medium_or_large_test1():
    # At the separator threshold the status becomes LARGE.
    assert get_dead_medium_or_large(DEFAULT_MEDIUM_SEP) == LARGE
def get_dead_or_alive_number_test0():
    # Zero commits means DEAD.
    assert get_dead_or_alive_number(0) == DEAD


def get_dead_or_alive_number_test1():
    assert get_dead_or_alive_number(1) == ALIVE


def get_dead_or_alive_number_test2():
    # Any positive count is ALIVE.
    assert get_dead_or_alive_number(10) == ALIVE
class StatusCellTestCase(TestCase):
    """Tests for `StatusCell` ANSI colouring."""

    def color_test(self):
        # set_color stores the code as a string.
        cell = StatusCell('D')
        cell.set_color(91)
        assert cell.color == '91'

    def color_text_test(self):
        # colored_text wraps the text in ANSI escape sequences.
        cell = StatusCell('D')
        cell.set_color(91)
        assert cell.colored_text == '\033[91mD\033[0m'
def is_correct_as_date_test0():
    # Non-string input is not a valid date.
    assert is_correct_as_date(set()) is False


def is_correct_as_date_test1():
    assert is_correct_as_date("2015/03/21") is True


def is_correct_as_date_test2():
    # Dash separators are accepted too.
    assert is_correct_as_date("2015-03-21") is True


def is_correct_as_date_test3():
    # Mixed separators are rejected.
    assert is_correct_as_date("2015/03-21") is False


def is_correct_as_date_test4():
    # Day out of range is rejected.
    assert is_correct_as_date("2015/03/99") is False
def is_correct_as_inputfile_data_test1():
    # Top-level value must be a mapping, not a number.
    assert is_correct_as_inputfile_data(1) is False


def is_correct_as_inputfile_data_test2():
    # Project values must themselves be mappings.
    assert is_correct_as_inputfile_data({"proj": 2}) is False


def is_correct_as_inputfile_data_test3():
    assert is_correct_as_inputfile_data({"proj": "2015/03/20"}) is False


def is_correct_as_inputfile_data_test4():
    # Project -> {date-string: count} is the valid shape.
    assert is_correct_as_inputfile_data({"proj": {"2015/03/20": 2}}) is True
def get_date_from_text_test1():
    # Slash-separated dates parse to datetime.date.
    assert get_date_from_text('2015/03/21') == date(2015, 3, 21)


@raises(TypeError)
def get_date_from_text_test2():
    # Arbitrary text is rejected.
    get_date_from_text('bear')


@raises(TypeError)
def get_date_from_text_test3():
    # Non-numeric year is rejected.
    get_date_from_text('year/02/03')


@raises(TypeError)
def get_date_from_text_test4():
    # Non-numeric month is rejected.
    get_date_from_text('2015/month/03')


@raises(TypeError)
def get_date_from_text_test5():
    # Non-numeric day is rejected.
    get_date_from_text('2015/02/day')


@raises(TypeError)
def get_date_from_text_test6():
    # Day out of range is rejected.
    get_date_from_text('2015/02/99')


def get_date_from_text_test7():
    # Dash-separated dates parse too.
    assert get_date_from_text('2015-03-21') == date(2015, 3, 21)


@raises(TypeError)
def get_date_from_text_test8():
    # Mixed separators are rejected.
    get_date_from_text('2015/03-21')
def get_ext_test1():
    # The extension wins even if the basename looks like one.
    assert get_ext('json.json') == 'json'


def get_ext_test2():
    assert get_ext('test.yaml') == 'yaml'


def get_ext_test3():
    # An explicit format argument overrides the filename extension.
    assert get_ext('test.yaml', 'json') == 'json'


def get_ext_test4():
    # A directory path has no extension.
    assert get_ext('~/dev/') is None
def get_commits_from_text_test0():
    # An empty JSON object yields an empty commits mapping.
    assert get_commits_from_text('{}', 'json') == {}


def get_commits_from_text_test1():
    # Date-string keys are converted into datetime.date keys.
    expected = {'proj1': {date(2016, 1, 1): 3}}
    assert get_commits_from_text('{"proj1": {"2016/01/01": 3}}',
                                 'json') == expected


def get_commits_from_text_test2():
    text = '{"proj1": {"2016/01/01": 3,"2016/01/03": 5}}'
    expected = {'proj1': {date(2016, 1, 1): 3,
                          date(2016, 1, 3): 5}}
    assert get_commits_from_text(text, 'json') == expected


def get_commits_from_text_test3():
    # Multiple projects are parsed independently.
    text = ('{"proj1": {"2016/01/01": 3,"2016/01/03": 5},'
            ' "proj2": {"2016/01/02": 4,"2016/01/04": 6}}')
    expected = {
        'proj1': {date(2016, 1, 1): 3,
                  date(2016, 1, 3): 5},
        'proj2': {date(2016, 1, 2): 4,
                  date(2016, 1, 4): 6}}
    assert get_commits_from_text(text, 'json') == expected


@raises(TypeError)
def get_commits_from_text_test4():
    # An unknown format name is rejected.
    text = ('{"proj1": {"2016/01/01": 3,"2016/01/03": 5},'
            ' "proj2": {"2016/01/02": 4,"2016/01/04": 6}}')
    get_commits_from_text(text, 'sjon')
def get_dates_test1():
    # Zero days back yields just today.
    assert list(get_dates(0)) == [date.today()]


def get_dates_test2():
    # Dates run from the oldest day up to today, inclusive.
    today = date.today()
    expected = [today - timedelta(days=back) for back in (2, 1, 0)]
    assert list(get_dates(2)) == expected
def get_str_projname_test1():
    # Trailing separator: last non-empty component is the project name.
    path = '{sep}usr{sep}bin{sep}dev{sep}'.format(sep=os.path.sep)
    assert get_str_projname(path) == 'dev'


def get_str_projname_test2():
    # No trailing separator: same result.
    path = '{sep}usr{sep}bin{sep}dev'.format(sep=os.path.sep)
    assert get_str_projname(path) == 'dev'
def fill_commits_by_zero_test1():
    # Every project gets an explicit zero for each previously-missing day
    # in the [start_day - days + 1, start_day] window.
    commits = {
        'proj1': {date(2016, 1, 1): 3,
                  date(2016, 1, 3): 5},
        'proj2': {date(2016, 1, 2): 4,
                  date(2016, 1, 4): 6}}
    filled = fill_commits_by_zero(commits, start_day=date(2016, 1, 4), days=3)
    assert filled == {
        'proj1': {date(2016, 1, 1): 3,
                  date(2016, 1, 2): 0,
                  date(2016, 1, 3): 5,
                  date(2016, 1, 4): 0},
        'proj2': {date(2016, 1, 1): 0,
                  date(2016, 1, 2): 4,
                  date(2016, 1, 3): 0,
                  date(2016, 1, 4): 6}}
def update_as_commits_test():
    # Merging sums per-date counts for shared projects and keeps the
    # union of projects; the result is the same in either argument order.
    commits1 = {
        'proj1': {date(2016, 1, 1): 3,
                  date(2016, 1, 3): 5},
        'proj2': {date(2016, 1, 2): 4,
                  date(2016, 1, 4): 6}}
    commits2 = {
        'proj3': {date(2016, 1, 1): 2,
                  date(2016, 1, 3): 3},
        'proj2': {date(2016, 1, 2): 1,
                  date(2016, 1, 4): 9}}
    merged = {
        'proj1': {date(2016, 1, 1): 3,
                  date(2016, 1, 3): 5},
        'proj2': {date(2016, 1, 2): 5,
                  date(2016, 1, 4): 15},
        'proj3': {date(2016, 1, 1): 2,
                  date(2016, 1, 3): 3}}
    assert update_as_commits(commits1, commits2) == merged
    assert update_as_commits(commits2, commits1) == merged
| |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A minimal interface convolutional networks module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import batch_norm
from sonnet.python.modules import conv
from sonnet.python.modules import util
import tensorflow as tf
def _replicate_elements(input_iterable, num_times):
"""Replicates entry in `input_iterable` if `input_iterable` is of length 1."""
if len(input_iterable) == 1:
return (input_iterable[0],) * num_times
return tuple(input_iterable)
class ConvNet2D(base.AbstractModule, base.Transposable):
  """A 2D Convolutional Network module."""

  # Keys accepted in the initializers/partitioners/regularizers dicts:
  # 'w' for filter weights, 'b' for biases.
  POSSIBLE_INITIALIZER_KEYS = {"w", "b"}
  # Keep old name for backwards compatibility
  POSSIBLE_KEYS = POSSIBLE_INITIALIZER_KEYS

  def __init__(self,
               output_channels,
               kernel_shapes,
               strides,
               paddings,
               activation=tf.nn.relu,
               activate_final=False,
               initializers=None,
               partitioners=None,
               regularizers=None,
               use_batch_norm=False,
               use_bias=True,
               batch_norm_config=None,
               name="conv_net_2d"):
    """Constructs a `ConvNet2D` module.

    By default, neither batch normalization nor activation are applied to the
    output of the final layer.

    Args:
      output_channels: Iterable of output channels, as defined in
        `conv.Conv2D`. Output channels can be defined either as number or via a
        callable. In the latter case, since the function invocation is deferred
        to graph construction time, the user must only ensure that entries can
        be called when build is called. Each entry in the iterable defines
        properties in the corresponding convolutional layer.
      kernel_shapes: Iterable of kernel sizes as defined in `conv.Conv2D`; if
        the list contains one element only, the same kernel shape is used in
        each layer of the network.
      strides: Iterable of kernel strides as defined in `conv.Conv2D`; if the
        list contains one element only, the same stride is used in each layer of
        the network.
      paddings: Iterable of padding options, either `snt.SAME` or
        `snt.VALID`; if the Iterable contains one element only, the same padding
        is used in each layer of the network.
      activation: An activation op.
      activate_final: Boolean determining if the activation and batch
        normalization, if turned on, are applied to the final layer.
      initializers: Optional dict containing ops to initialize the filters of
        the whole network (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters of the
        whole network (with key 'w') or biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes a
        single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
        the L1 and L2 regularizers in `tf.contrib.layers`.
      use_batch_norm: Boolean determining if batch normalization is applied
        after convolution.
      use_bias: Boolean or iterable of booleans determining whether to include
        bias parameters in the convolutional layers. Default `True`.
      batch_norm_config: Optional mapping of additional configuration for the
        `snt.BatchNorm` modules.
      name: Name of the module.

    Raises:
      TypeError: If `output_channels` is not iterable; or if `kernel_shapes` is
        not iterable; or `strides` is not iterable; or `paddings` is not
        iterable; or if `activation` is not callable; or `batch_norm_config` is
        not a mappable (e.g. `dict`).
      ValueError: If `output_channels` is empty; or if `kernel_shapes` has not
        length 1 or `len(output_channels)`; or if `strides` has not
        length 1 or `len(output_channels)`; or if `paddings` has not
        length 1 or `len(output_channels)`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
    """
    # Materialize all per-layer iterables up front so they can be validated
    # and indexed repeatedly below.
    if not isinstance(output_channels, collections.Iterable):
      raise TypeError("output_channels must be iterable")
    output_channels = tuple(output_channels)

    if not isinstance(kernel_shapes, collections.Iterable):
      raise TypeError("kernel_shapes must be iterable")
    kernel_shapes = tuple(kernel_shapes)

    if not isinstance(strides, collections.Iterable):
      raise TypeError("strides must be iterable")
    strides = tuple(strides)

    if not isinstance(paddings, collections.Iterable):
      raise TypeError("paddings must be iterable")
    paddings = tuple(paddings)

    super(ConvNet2D, self).__init__(name=name)

    if not output_channels:
      raise ValueError("output_channels must not be empty")
    self._output_channels = tuple(output_channels)
    self._num_layers = len(self._output_channels)

    # Populated on the first call to `_build`; read by `input_shape`.
    self._input_shape = None

    self._initializers = util.check_initializers(
        initializers, self.POSSIBLE_INITIALIZER_KEYS)
    self._partitioners = util.check_partitioners(
        partitioners, self.POSSIBLE_INITIALIZER_KEYS)
    self._regularizers = util.check_regularizers(
        regularizers, self.POSSIBLE_INITIALIZER_KEYS)

    if not callable(activation):
      raise TypeError("Input 'activation' must be callable")
    self._activation = activation
    self._activate_final = activate_final

    # Length-1 iterables are broadcast to one entry per layer.
    self._kernel_shapes = _replicate_elements(kernel_shapes, self._num_layers)
    if len(self._kernel_shapes) != self._num_layers:
      raise ValueError(
          "kernel_shapes must be of length 1 or len(output_channels)")

    self._strides = _replicate_elements(strides, self._num_layers)
    if len(self._strides) != self._num_layers:
      raise ValueError(
          """strides must be of length 1 or len(output_channels)""")

    self._paddings = _replicate_elements(paddings, self._num_layers)
    if len(self._paddings) != self._num_layers:
      raise ValueError(
          """paddings must be of length 1 or len(output_channels)""")

    self._use_batch_norm = use_batch_norm

    if batch_norm_config is not None:
      if not isinstance(batch_norm_config, collections.Mapping):
        raise TypeError("`batch_norm_config` must be a mapping, e.g. `dict`.")
      self._batch_norm_config = batch_norm_config
    else:
      self._batch_norm_config = {}

    # A plain bool is normalized into a length-1 tuple and then broadcast.
    if isinstance(use_bias, bool):
      use_bias = (use_bias,)
    else:
      if not isinstance(use_bias, collections.Iterable):
        raise TypeError("use_bias must be either a bool or an iterable")
      use_bias = tuple(use_bias)
    self._use_bias = _replicate_elements(use_bias, self._num_layers)

    # Conv2D sub-modules are created eagerly, under this module's scope.
    self._instantiate_layers()

  def _instantiate_layers(self):
    """Instantiates all the convolutional modules used in the network."""
    # Entering the variable scope ensures the sub-modules' variables live
    # under this module's name.
    with self._enter_variable_scope():
      self._layers = tuple(conv.Conv2D(name="conv_2d_{}".format(i),
                                       output_channels=self._output_channels[i],
                                       kernel_shape=self._kernel_shapes[i],
                                       stride=self._strides[i],
                                       padding=self._paddings[i],
                                       use_bias=self._use_bias[i],
                                       initializers=self._initializers,
                                       partitioners=self._partitioners,
                                       regularizers=self._regularizers)
                           for i in xrange(self._num_layers))

  def _build(self, inputs, is_training=None, test_local_stats=True):
    """Assembles the `ConvNet2D` and connects it to the graph.

    Args:
      inputs: A 4D Tensor of shape `[batch_size, input_height, input_width,
        input_channels]`.
      is_training: Boolean to indicate to `snt.BatchNorm` if we are
        currently training. Must be specified explicitly if `use_batchnorm` is
        `True`.
      test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
        normalization should use local batch statistics at test time.
        By default `True`.

    Returns:
      A 4D Tensor of shape `[batch_size, output_height, output_width,
        output_channels[-1]]`.

    Raises:
      ValueError: If `is_training` is not explicitly specified when using
        batch normalization.
    """
    if self._use_batch_norm and is_training is None:
      raise ValueError("Boolean is_training flag must be explicitly specified "
                       "when using batch normalization.")

    # Remember the input shape so `transpose()` can build a matching network.
    self._input_shape = tuple(inputs.get_shape().as_list())
    net = inputs

    final_index = len(self._layers) - 1
    for i, layer in enumerate(self._layers):
      net = layer(net)

      # Batch norm (when enabled) and the activation are applied after every
      # layer except the last, unless `activate_final` is set.
      if i != final_index or self._activate_final:
        if self._use_batch_norm:
          bn = batch_norm.BatchNorm(name="batch_norm_{}".format(i),
                                    **self._batch_norm_config)
          net = bn(net,
                   is_training=is_training,
                   test_local_stats=test_local_stats)
        net = self._activation(net)

    return net

  @property
  def layers(self):
    """Returns a tuple containing the convolutional layers of the network."""
    return self._layers

  @property
  def initializers(self):
    return self._initializers

  @property
  def partitioners(self):
    return self._partitioners

  @property
  def regularizers(self):
    return self._regularizers

  @property
  def strides(self):
    return self._strides

  @property
  def paddings(self):
    return self._paddings

  @property
  def kernel_shapes(self):
    return self._kernel_shapes

  @property
  def output_channels(self):
    # Callable entries are resolved lazily, at access time.
    return tuple([l() if callable(l) else l for l in self._output_channels])

  @property
  def use_bias(self):
    return self._use_bias

  @property
  def use_batch_norm(self):
    return self._use_batch_norm

  @property
  def batch_norm_config(self):
    return self._batch_norm_config

  @property
  def activation(self):
    return self._activation

  @property
  def activate_final(self):
    return self._activate_final

  # Implements Transposable interface.
  @property
  def input_shape(self):
    """Returns shape of input `Tensor` passed at last call to `build`."""
    self._ensure_is_connected()
    return self._input_shape

  def _transpose(self,
                 transpose_constructor,
                 name=None,
                 output_channels=None,
                 kernel_shapes=None,
                 strides=None,
                 paddings=None,
                 activation=None,
                 activate_final=None,
                 initializers=None,
                 partitioners=None,
                 regularizers=None,
                 use_batch_norm=None,
                 use_bias=None,
                 batch_norm_config=None):
    """Returns transposed version of this network.

    Args:
      transpose_constructor: A method that creates an instance of the transposed
        network type. The method must accept the same kwargs as this methods
        with the exception of the `transpose_constructor` argument.
      name: Optional string specifying the name of the transposed module. The
        default name is constructed by appending "_transpose"
        to `self.module_name`.
      output_channels: Optional iterable of numbers of output channels.
      kernel_shapes: Optional iterable of kernel sizes. The default value is
        constructed by reversing `self.kernel_shapes`.
      strides: Optional iterable of kernel strides. The default value is
        constructed by reversing `self.strides`.
      paddings: Optional iterable of padding options, either `snt.SAME` or
        `snt.VALID`; The default value is constructed by reversing
        `self.paddings`.
      activation: Optional activation op. Default value is `self.activation`.
      activate_final: Optional boolean determining if the activation and batch
        normalization, if turned on, are applied to the final layer.
      initializers: Optional dict containing ops to initialize the filters of
        the whole network (with key 'w') or biases (with key 'b'). The default
        value is `self.initializers`.
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). The default value is
        `self.partitioners`.
      regularizers: Optional dict containing regularizers for the filters of the
        whole network (with key 'w') or biases (with key 'b'). The default is
        `self.regularizers`.
      use_batch_norm: Optional boolean determining if batch normalization is
        applied after convolution. The default value is `self.use_batch_norm`.
      use_bias: Optional boolean or iterable of booleans determining whether to
        include bias parameters in the convolutional layers. Default
        is constructed by reversing `self.use_bias`.
      batch_norm_config: Optional mapping of additional configuration for the
        `snt.BatchNorm` modules. Default is `self.batch_norm_config`.

    Returns:
      Matching transposed module.

    Raises:
      ValueError: If output_channels is specified and its length does not match
        the number of layers.
    """
    if output_channels is None:
      output_channels = []
      for layer in reversed(self._layers):
        # `l=layer` binds each layer now; `input_shape` itself is only read
        # when the callable is invoked, i.e. after this network is connected.
        output_channels.append(lambda l=layer: l.input_shape[-1])
    elif len(output_channels) != len(self._layers):
      # Note that we only have to do this check for the output channels. Any
      # other inconsistencies will be picked up by ConvNet2D.__init__.
      raise ValueError("Iterable output_channels length must match the "
                       "number of layers ({}), but is {} instead.".format(
                           len(self._layers), len(output_channels)))

    if kernel_shapes is None:
      kernel_shapes = reversed(self.kernel_shapes)

    if strides is None:
      strides = reversed(self.strides)

    if paddings is None:
      paddings = reversed(self.paddings)

    if activation is None:
      activation = self.activation

    if activate_final is None:
      activate_final = self.activate_final

    if initializers is None:
      initializers = self.initializers

    if partitioners is None:
      partitioners = self.partitioners

    if regularizers is None:
      regularizers = self.regularizers

    if use_batch_norm is None:
      use_batch_norm = self.use_batch_norm

    if use_bias is None:
      use_bias = reversed(self.use_bias)

    if batch_norm_config is None:
      batch_norm_config = self.batch_norm_config

    if name is None:
      name = self.module_name + "_transpose"

    return transpose_constructor(output_channels=output_channels,
                                 kernel_shapes=kernel_shapes,
                                 strides=strides,
                                 paddings=paddings,
                                 activation=activation,
                                 activate_final=activate_final,
                                 initializers=initializers,
                                 partitioners=partitioners,
                                 regularizers=regularizers,
                                 use_batch_norm=use_batch_norm,
                                 use_bias=use_bias,
                                 batch_norm_config=batch_norm_config,
                                 name=name)

  # Implements Transposable interface.
  def transpose(self,
                name=None,
                output_channels=None,
                kernel_shapes=None,
                strides=None,
                paddings=None,
                activation=None,
                activate_final=None,
                initializers=None,
                partitioners=None,
                regularizers=None,
                use_batch_norm=None,
                use_bias=None,
                batch_norm_config=None):
    """Returns transposed version of this network.

    Args:
      name: Optional string specifying the name of the transposed module. The
        default name is constructed by appending "_transpose"
        to `self.module_name`.
      output_channels: Optional iterable of numbers of output channels.
      kernel_shapes: Optional iterable of kernel sizes. The default value is
        constructed by reversing `self.kernel_shapes`.
      strides: Optional iterable of kernel strides. The default value is
        constructed by reversing `self.strides`.
      paddings: Optional iterable of padding options, either `snt.SAME` or
        `snt.VALID`; The default value is constructed by reversing
        `self.paddings`.
      activation: Optional activation op. Default value is `self.activation`.
      activate_final: Optional boolean determining if the activation and batch
        normalization, if turned on, are applied to the final layer.
      initializers: Optional dict containing ops to initialize the filters of
        the whole network (with key 'w') or biases (with key 'b'). The default
        value is `self.initializers`.
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). The default value is
        `self.partitioners`.
      regularizers: Optional dict containing regularizers for the filters of the
        whole network (with key 'w') or biases (with key 'b'). The default is
        `self.regularizers`.
      use_batch_norm: Optional boolean determining if batch normalization is
        applied after convolution. The default value is `self.use_batch_norm`.
      use_bias: Optional boolean or iterable of booleans determining whether to
        include bias parameters in the convolutional layers. Default
        is constructed by reversing `self.use_bias`.
      batch_norm_config: Optional mapping of additional configuration for the
        `snt.BatchNorm` modules. Default is `self.batch_norm_config`.

    Returns:
      Matching `ConvNet2DTranspose` module.

    Raises:
      ValueError: If output_channels is specified and its length does not match
        the number of layers.
    """
    # Each lambda defers the `input_shape` lookup until after this network
    # has been connected; `l=layer` captures the layer at loop time.
    output_shapes = []
    for layer in reversed(self._layers):
      output_shapes.append(lambda l=layer: l.input_shape[1:-1])
    transpose_constructor = functools.partial(ConvNet2DTranspose,
                                              output_shapes=output_shapes)

    return self._transpose(transpose_constructor=transpose_constructor,
                           name=name,
                           output_channels=output_channels,
                           kernel_shapes=kernel_shapes,
                           strides=strides,
                           paddings=paddings,
                           activation=activation,
                           activate_final=activate_final,
                           initializers=initializers,
                           partitioners=partitioners,
                           regularizers=regularizers,
                           use_batch_norm=use_batch_norm,
                           use_bias=use_bias,
                           batch_norm_config=batch_norm_config)
class ConvNet2DTranspose(ConvNet2D):
"""A 2D Transpose-Convolutional Network module."""
  def __init__(self,
               output_channels,
               output_shapes,
               kernel_shapes,
               strides,
               paddings,
               activation=tf.nn.relu,
               activate_final=False,
               initializers=None,
               partitioners=None,
               regularizers=None,
               use_batch_norm=False,
               use_bias=True,
               batch_norm_config=None,
               name="conv_net_2d_transpose"):
    """Constructs a `ConvNetTranspose2D` module.

    `output_{shapes,channels}` can be defined either as iterable of
    {iterables,integers} or via a callable. In the latter case, since the
    function invocation is deferred to graph construction time, the user
    must only ensure that entries can be called returning meaningful values when
    build is called. Each entry in the iterable defines properties in the
    corresponding convolutional layer.

    By default, neither batch normalization nor activation are applied to the
    output of the final layer.

    Args:
      output_channels: Iterable of numbers of output channels.
      output_shapes: Iterable of output shapes as defined in
        `conv.conv2DTranpose`; if the iterable contains one element only, the
        same shape is used in each layer of the network.
      kernel_shapes: Iterable of kernel sizes as defined in `conv.Conv2D`; if
        the list contains one element only, the same kernel shape is used in
        each layer of the network.
      strides: Iterable of kernel strides as defined in `conv.Conv2D`; if the
        list contains one element only, the same stride is used in each layer of
        the network.
      paddings: Iterable of padding options, either `snt.SAME` or
        `snt.VALID`; if the Iterable contains one element only, the same padding
        is used in each layer of the network.
      activation: An activation op.
      activate_final: Boolean determining if the activation and batch
        normalization, if turned on, are applied to the final layer.
      initializers: Optional dict containing ops to initialize the filters of
        the whole network (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters of the
        whole network (with key 'w') or biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes a
        single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
        the L1 and L2 regularizers in `tf.contrib.layers`.
      use_batch_norm: Boolean determining if batch normalization is applied
        after convolution.
      use_bias: Boolean or iterable of booleans determining whether to include
        bias parameters in the convolutional layers. Default `True`.
      batch_norm_config: Optional mapping of additional configuration for the
        `snt.BatchNorm` modules.
      name: Name of the module.

    Raises:
      TypeError: If `output_channels` is not iterable; or if `output_shapes`
        is not iterable; or if `kernel_shapes` is not iterable; or if `strides`
        is not iterable; or if `paddings` is not iterable; or if `activation` is
        not callable.
      ValueError: If `output_channels` is empty; or if `kernel_shapes` has not
        length 1 or `len(output_channels)`; or if `strides` has not
        length 1 or `len(output_channels)`; or if `paddings` has not
        length 1 or `len(output_channels)`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
    """
    if not isinstance(output_channels, collections.Iterable):
      raise TypeError("output_channels must be iterable")
    output_channels = tuple(output_channels)
    num_layers = len(output_channels)

    if not isinstance(output_shapes, collections.Iterable):
      raise TypeError("output_shapes must be iterable")
    output_shapes = tuple(output_shapes)

    # NOTE: `_output_shapes` must be assigned before calling the superclass
    # constructor, because it invokes `_instantiate_layers`, which reads it.
    self._output_shapes = _replicate_elements(output_shapes, num_layers)
    if len(self._output_shapes) != num_layers:
      raise ValueError(
          "output_shapes must be of length 1 or len(output_channels)")

    super(ConvNet2DTranspose, self).__init__(
        output_channels,
        kernel_shapes,
        strides,
        paddings,
        activation=activation,
        activate_final=activate_final,
        initializers=initializers,
        partitioners=partitioners,
        regularizers=regularizers,
        use_batch_norm=use_batch_norm,
        use_bias=use_bias,
        batch_norm_config=batch_norm_config,
        name=name)
def _instantiate_layers(self):
"""Instantiates all the convolutional modules used in the network."""
with self._enter_variable_scope():
self._layers = tuple(
conv.Conv2DTranspose(name="conv_2d_transpose_{}".format(i),
output_channels=self._output_channels[i],
output_shape=self._output_shapes[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
padding=self._paddings[i],
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
use_bias=self._use_bias[i])
for i in xrange(self._num_layers))
@property
def output_shapes(self):
return tuple([l() if callable(l) else l for l in self._output_shapes])
# Implements Transposable interface.
def transpose(self,
name=None,
output_channels=None,
kernel_shapes=None,
strides=None,
paddings=None,
activation=None,
activate_final=None,
initializers=None,
partitioners=None,
regularizers=None,
use_batch_norm=None,
use_bias=None,
batch_norm_config=None):
"""Returns transposed version of this network.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
output_channels: Optional iterable of numbers of output channels.
kernel_shapes: Optional iterable of kernel sizes. The default value is
constructed by reversing `self.kernel_shapes`.
strides: Optional iterable of kernel strides. The default value is
constructed by reversing `self.strides`.
paddings: Optional iterable of padding options, either `snt.SAME` or
`snt.VALID`; The default value is constructed by reversing
`self.paddings`.
activation: Optional activation op. Default value is `self.activation`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
initializers: Optional dict containing ops to initialize the filters of
the whole network (with key 'w') or biases (with key 'b'). The default
value is `self.initializers`.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). The default value is
`self.partitioners`.
regularizers: Optional dict containing regularizers for the filters of the
whole network (with key 'w') or biases (with key 'b'). The default is
`self.regularizers`.
use_batch_norm: Optional boolean determining if batch normalization is
applied after convolution. The default value is `self.use_batch_norm`.
use_bias: Optional boolean or iterable of booleans determining whether to
include bias parameters in the convolutional layers. Default
is constructed by reversing `self.use_bias`.
batch_norm_config: Optional mapping of additional configuration for the
`snt.BatchNorm` modules. Default is `self.batch_norm_config`.
Returns:
Matching `ConvNet2D` module.
Raises:
ValueError: If output_channels is specified and its length does not match
the number of layers.
"""
return self._transpose(transpose_constructor=ConvNet2D,
name=name,
output_channels=output_channels,
kernel_shapes=kernel_shapes,
strides=strides,
paddings=paddings,
activation=activation,
activate_final=activate_final,
initializers=initializers,
partitioners=partitioners,
regularizers=regularizers,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
batch_norm_config=batch_norm_config)
| |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the Dispatcher class."""
import logging
import warnings
import weakref
from collections import defaultdict
from functools import wraps
from queue import Empty, Queue
from threading import BoundedSemaphore, Event, Lock, Thread, current_thread
from time import sleep
from typing import (
TYPE_CHECKING,
Callable,
DefaultDict,
Dict,
List,
Optional,
Set,
Union,
Generic,
TypeVar,
overload,
cast,
)
from uuid import uuid4
from telegram import TelegramError, Update
from telegram.ext import BasePersistence, ContextTypes
from telegram.ext.callbackcontext import CallbackContext
from telegram.ext.handler import Handler
import telegram.ext.extbot
from telegram.ext.callbackdatacache import CallbackDataCache
from telegram.utils.deprecate import TelegramDeprecationWarning, set_new_attribute_deprecated
from telegram.ext.utils.promise import Promise
from telegram.utils.helpers import DefaultValue, DEFAULT_FALSE
from telegram.ext.utils.types import CCT, UD, CD, BD
if TYPE_CHECKING:
from telegram import Bot
from telegram.ext import JobQueue
# Default handler group used by Dispatcher.add_handler when none is given.
DEFAULT_GROUP: int = 0
# Generic type var for the update type accepted by a Handler.
UT = TypeVar('UT')
def run_async(
    func: Callable[[Update, CallbackContext], object]
) -> Callable[[Update, CallbackContext], object]:
    """
    Function decorator that will run the function in a new thread.
    Delegates to :attr:`telegram.ext.Dispatcher.run_async` of the singleton
    dispatcher, so it can only be used while exactly one Dispatcher exists.
    Note:
        DEPRECATED. Use :attr:`telegram.ext.Dispatcher.run_async` directly instead or the
        :attr:`Handler.run_async` parameter.
    Warning:
        If you're using ``@run_async`` you cannot rely on adding custom attributes to
        :class:`telegram.ext.CallbackContext`. See its docs for more info.
    """

    @wraps(func)
    def async_func(*args: object, **kwargs: object) -> object:
        warnings.warn(
            'The @run_async decorator is deprecated. Use the `run_async` parameter of '
            'your Handler or `Dispatcher.run_async` instead.',
            TelegramDeprecationWarning,
            stacklevel=2,
        )
        dispatcher = Dispatcher.get_instance()
        # pylint: disable=W0212
        return dispatcher._run_async(func, *args, update=None, error_handling=False, **kwargs)

    return async_func
class DispatcherHandlerStop(Exception):
"""
Raise this in handler to prevent execution of any other handler (even in different group).
In order to use this exception in a :class:`telegram.ext.ConversationHandler`, pass the
optional ``state`` parameter instead of returning the next state:
.. code-block:: python
def callback(update, context):
...
raise DispatcherHandlerStop(next_state)
Attributes:
state (:obj:`object`): Optional. The next state of the conversation.
Args:
state (:obj:`object`, optional): The next state of the conversation.
"""
__slots__ = ('state',)
def __init__(self, state: object = None) -> None:
super().__init__()
self.state = state
class Dispatcher(Generic[CCT, UD, CD, BD]):
    """This class dispatches all kinds of updates to its registered handlers.
    Args:
        bot (:class:`telegram.Bot`): The bot object that should be passed to the handlers.
        update_queue (:obj:`Queue`): The synchronized queue that will contain the updates.
        job_queue (:class:`telegram.ext.JobQueue`, optional): The :class:`telegram.ext.JobQueue`
            instance to pass onto handler callbacks.
        workers (:obj:`int`, optional): Number of maximum concurrent worker threads for the
            ``@run_async`` decorator and :meth:`run_async`. Defaults to 4.
        persistence (:class:`telegram.ext.BasePersistence`, optional): The persistence class to
            store data that should be persistent over restarts.
        use_context (:obj:`bool`, optional): If set to :obj:`True` uses the context based callback
            API (ignored if `dispatcher` argument is used). Defaults to :obj:`True`.
            **New users**: set this to :obj:`True`.
        context_types (:class:`telegram.ext.ContextTypes`, optional): Pass an instance
            of :class:`telegram.ext.ContextTypes` to customize the types used in the
            ``context`` interface. If not passed, the defaults documented in
            :class:`telegram.ext.ContextTypes` will be used.
            .. versionadded:: 13.6
    Attributes:
        bot (:class:`telegram.Bot`): The bot object that should be passed to the handlers.
        update_queue (:obj:`Queue`): The synchronized queue that will contain the updates.
        job_queue (:class:`telegram.ext.JobQueue`): Optional. The :class:`telegram.ext.JobQueue`
            instance to pass onto handler callbacks.
        workers (:obj:`int`, optional): Number of maximum concurrent worker threads for the
            ``@run_async`` decorator and :meth:`run_async`.
        user_data (:obj:`defaultdict`): A dictionary handlers can use to store data for the user.
        chat_data (:obj:`defaultdict`): A dictionary handlers can use to store data for the chat.
        bot_data (:obj:`dict`): A dictionary handlers can use to store data for the bot.
        persistence (:class:`telegram.ext.BasePersistence`): Optional. The persistence class to
            store data that should be persistent over restarts.
        context_types (:class:`telegram.ext.ContextTypes`): Container for the types used
            in the ``context`` interface.
            .. versionadded:: 13.6
    """

    # Allowing '__weakref__' creation here since we need it for the singleton
    __slots__ = (
        'workers',
        'persistence',
        'use_context',
        'update_queue',
        'job_queue',
        'user_data',
        'chat_data',
        'bot_data',
        '_update_persistence_lock',
        'handlers',
        'groups',
        'error_handlers',
        'running',
        '__stop_event',
        '__exception_event',
        '__async_queue',
        '__async_threads',
        'bot',
        '__dict__',
        '__weakref__',
        'context_types',
    )

    __singleton_lock = Lock()
    __singleton_semaphore = BoundedSemaphore()
    __singleton = None
    logger = logging.getLogger(__name__)

    @overload
    def __init__(
        self: 'Dispatcher[CallbackContext[Dict, Dict, Dict], Dict, Dict, Dict]',
        bot: 'Bot',
        update_queue: Queue,
        workers: int = 4,
        exception_event: Event = None,
        job_queue: 'JobQueue' = None,
        persistence: BasePersistence = None,
        use_context: bool = True,
    ):
        ...

    @overload
    def __init__(
        self: 'Dispatcher[CCT, UD, CD, BD]',
        bot: 'Bot',
        update_queue: Queue,
        workers: int = 4,
        exception_event: Event = None,
        job_queue: 'JobQueue' = None,
        persistence: BasePersistence = None,
        use_context: bool = True,
        context_types: ContextTypes[CCT, UD, CD, BD] = None,
    ):
        ...

    def __init__(
        self,
        bot: 'Bot',
        update_queue: Queue,
        workers: int = 4,
        exception_event: Event = None,
        job_queue: 'JobQueue' = None,
        persistence: BasePersistence = None,
        use_context: bool = True,
        context_types: ContextTypes[CCT, UD, CD, BD] = None,
    ):
        self.bot = bot
        self.update_queue = update_queue
        self.job_queue = job_queue
        self.workers = workers
        self.use_context = use_context
        self.context_types = cast(ContextTypes[CCT, UD, CD, BD], context_types or ContextTypes())
        if not use_context:
            warnings.warn(
                'Old Handler API is deprecated - see https://git.io/fxJuV for details',
                TelegramDeprecationWarning,
                stacklevel=3,
            )
        if self.workers < 1:
            warnings.warn(
                'Asynchronous callbacks can not be processed without at least one worker thread.'
            )
        self.user_data: DefaultDict[int, UD] = defaultdict(self.context_types.user_data)
        self.chat_data: DefaultDict[int, CD] = defaultdict(self.context_types.chat_data)
        self.bot_data = self.context_types.bot_data()
        self.persistence: Optional[BasePersistence] = None
        self._update_persistence_lock = Lock()
        if persistence:
            if not isinstance(persistence, BasePersistence):
                raise TypeError("persistence must be based on telegram.ext.BasePersistence")
            self.persistence = persistence
            self.persistence.set_bot(self.bot)
            if self.persistence.store_user_data:
                self.user_data = self.persistence.get_user_data()
                if not isinstance(self.user_data, defaultdict):
                    raise ValueError("user_data must be of type defaultdict")
            if self.persistence.store_chat_data:
                self.chat_data = self.persistence.get_chat_data()
                if not isinstance(self.chat_data, defaultdict):
                    raise ValueError("chat_data must be of type defaultdict")
            if self.persistence.store_bot_data:
                self.bot_data = self.persistence.get_bot_data()
                if not isinstance(self.bot_data, self.context_types.bot_data):
                    raise ValueError(
                        f"bot_data must be of type {self.context_types.bot_data.__name__}"
                    )
            if self.persistence.store_callback_data:
                self.bot = cast(telegram.ext.extbot.ExtBot, self.bot)
                persistent_data = self.persistence.get_callback_data()
                if persistent_data is not None:
                    # Bug fix: the original used `and`, which let non-tuples of
                    # length 2 and tuples of the wrong length pass validation.
                    # `or` rejects anything that is not a tuple of length 2.
                    if not isinstance(persistent_data, tuple) or len(persistent_data) != 2:
                        raise ValueError('callback_data must be a 2-tuple')
                    self.bot.callback_data_cache = CallbackDataCache(
                        self.bot,
                        self.bot.callback_data_cache.maxsize,
                        persistent_data=persistent_data,
                    )
        else:
            self.persistence = None
        self.handlers: Dict[int, List[Handler]] = {}
        """Dict[:obj:`int`, List[:class:`telegram.ext.Handler`]]: Holds the handlers per group."""
        self.groups: List[int] = []
        """List[:obj:`int`]: A list with all groups."""
        self.error_handlers: Dict[Callable, Union[bool, DefaultValue]] = {}
        """Dict[:obj:`callable`, :obj:`bool`]: A dict, where the keys are error handlers and the
        values indicate whether they are to be run asynchronously."""
        self.running = False
        """:obj:`bool`: Indicates if this dispatcher is running."""
        self.__stop_event = Event()
        self.__exception_event = exception_event or Event()
        self.__async_queue: Queue = Queue()
        self.__async_threads: Set[Thread] = set()
        # For backward compatibility, we allow a "singleton" mode for the dispatcher. When there's
        # only one instance of Dispatcher, it will be possible to use the `run_async` decorator.
        with self.__singleton_lock:
            if self.__singleton_semaphore.acquire(blocking=False):  # pylint: disable=R1732
                self._set_singleton(self)
            else:
                self._set_singleton(None)

    def __setattr__(self, key: str, value: object) -> None:
        # Mangled names don't automatically apply in __setattr__ (see
        # https://docs.python.org/3/tutorial/classes.html#private-variables), so we have to make
        # it mangled so they don't raise TelegramDeprecationWarning unnecessarily
        if key.startswith('__'):
            key = f"_{self.__class__.__name__}{key}"
        if issubclass(self.__class__, Dispatcher) and self.__class__ is not Dispatcher:
            object.__setattr__(self, key, value)
            return
        set_new_attribute_deprecated(self, key, value)

    @property
    def exception_event(self) -> Event:  # skipcq: PY-D0003
        return self.__exception_event

    def _init_async_threads(self, base_name: str, workers: int) -> None:
        # Spin up the worker threads that consume the run_async queue.
        base_name = f'{base_name}_' if base_name else ''
        for i in range(workers):
            thread = Thread(target=self._pooled, name=f'Bot:{self.bot.id}:worker:{base_name}{i}')
            self.__async_threads.add(thread)
            thread.start()

    @classmethod
    def _set_singleton(cls, val: Optional['Dispatcher']) -> None:
        cls.logger.debug('Setting singleton dispatcher as %s', val)
        cls.__singleton = weakref.ref(val) if val else None

    @classmethod
    def get_instance(cls) -> 'Dispatcher':
        """Get the singleton instance of this class.
        Returns:
            :class:`telegram.ext.Dispatcher`
        Raises:
            RuntimeError
        """
        if cls.__singleton is not None:
            return cls.__singleton()  # type: ignore[return-value] # pylint: disable=not-callable
        raise RuntimeError(f'{cls.__name__} not initialized or multiple instances exist')

    def _pooled(self) -> None:
        # Worker-thread loop: runs queued Promises until a non-Promise sentinel arrives.
        thr_name = current_thread().name
        while 1:
            promise = self.__async_queue.get()
            # If unpacking fails, the thread pool is being closed from Updater._join_async_threads
            if not isinstance(promise, Promise):
                self.logger.debug(
                    "Closing run_async thread %s/%d", thr_name, len(self.__async_threads)
                )
                break
            promise.run()
            if not promise.exception:
                self.update_persistence(update=promise.update)
                continue
            if isinstance(promise.exception, DispatcherHandlerStop):
                self.logger.warning(
                    'DispatcherHandlerStop is not supported with async functions; func: %s',
                    promise.pooled_function.__name__,
                )
                continue
            # Avoid infinite recursion of error handlers.
            if promise.pooled_function in self.error_handlers:
                self.logger.error('An uncaught error was raised while handling the error.')
                continue
            # Don't perform error handling for a `Promise` with deactivated error handling. This
            # should happen only via the deprecated `@run_async` decorator or `Promises` created
            # within error handlers
            if not promise.error_handling:
                self.logger.error('A promise with deactivated error handling raised an error.')
                continue
            # If we arrive here, an exception happened in the promise and was neither
            # DispatcherHandlerStop nor raised by an error handler. So we can and must handle it
            try:
                self.dispatch_error(promise.update, promise.exception, promise=promise)
            except Exception:
                self.logger.exception('An uncaught error was raised while handling the error.')

    def run_async(
        self, func: Callable[..., object], *args: object, update: object = None, **kwargs: object
    ) -> Promise:
        """
        Queue a function (with given args/kwargs) to be run asynchronously. Exceptions raised
        by the function will be handled by the error handlers registered with
        :meth:`add_error_handler`.
        Warning:
            * If you're using ``@run_async``/:meth:`run_async` you cannot rely on adding custom
              attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.
            * Calling a function through :meth:`run_async` from within an error handler can lead to
              an infinite error handling loop.
        Args:
            func (:obj:`callable`): The function to run in the thread.
            *args (:obj:`tuple`, optional): Arguments to ``func``.
            update (:class:`telegram.Update` | :obj:`object`, optional): The update associated with
                the functions call. If passed, it will be available in the error handlers, in case
                an exception is raised by :attr:`func`.
            **kwargs (:obj:`dict`, optional): Keyword arguments to ``func``.
        Returns:
            Promise
        """
        return self._run_async(func, *args, update=update, error_handling=True, **kwargs)

    def _run_async(
        self,
        func: Callable[..., object],
        *args: object,
        update: object = None,
        error_handling: bool = True,
        **kwargs: object,
    ) -> Promise:
        # TODO: Remove error_handling parameter once we drop the @run_async decorator
        promise = Promise(func, args, kwargs, update=update, error_handling=error_handling)
        self.__async_queue.put(promise)
        return promise

    def start(self, ready: Event = None) -> None:
        """Thread target of thread 'dispatcher'.
        Runs in background and processes the update queue.
        Args:
            ready (:obj:`threading.Event`, optional): If specified, the event will be set once the
                dispatcher is ready.
        """
        if self.running:
            self.logger.warning('already running')
            if ready is not None:
                ready.set()
            return
        if self.__exception_event.is_set():
            msg = 'reusing dispatcher after exception event is forbidden'
            self.logger.error(msg)
            raise TelegramError(msg)
        self._init_async_threads(str(uuid4()), self.workers)
        self.running = True
        self.logger.debug('Dispatcher started')
        if ready is not None:
            ready.set()
        while 1:
            try:
                # Pop update from update queue.
                update = self.update_queue.get(True, 1)
            except Empty:
                if self.__stop_event.is_set():
                    self.logger.debug('orderly stopping')
                    break
                if self.__exception_event.is_set():
                    self.logger.critical('stopping due to exception in another thread')
                    break
                continue
            self.logger.debug('Processing Update: %s', update)
            self.process_update(update)
            self.update_queue.task_done()
        self.running = False
        self.logger.debug('Dispatcher thread stopped')

    def stop(self) -> None:
        """Stops the thread."""
        if self.running:
            self.__stop_event.set()
            while self.running:
                sleep(0.1)
            self.__stop_event.clear()
        # async threads must be join()ed only after the dispatcher thread was joined,
        # otherwise we can still have new async threads dispatched
        threads = list(self.__async_threads)
        total = len(threads)
        # Stop all threads in the thread pool by put()ting one non-tuple per thread
        for _ in range(total):
            self.__async_queue.put(None)
        for i, thr in enumerate(threads):
            self.logger.debug('Waiting for async thread %s/%s to end', i + 1, total)
            thr.join()
            self.__async_threads.remove(thr)
            self.logger.debug('async thread %s/%s has ended', i + 1, total)

    @property
    def has_running_threads(self) -> bool:  # skipcq: PY-D0003
        return self.running or bool(self.__async_threads)

    def process_update(self, update: object) -> None:
        """Processes a single update and updates the persistence.
        Note:
            If the update is handled by least one synchronously running handlers (i.e.
            ``run_async=False``), :meth:`update_persistence` is called *once* after all handlers
            synchronous handlers are done. Each asynchronously running handler will trigger
            :meth:`update_persistence` on its own.
        Args:
            update (:class:`telegram.Update` | :obj:`object` | \
                :class:`telegram.error.TelegramError`):
                The update to process.
        """
        # An error happened while polling
        if isinstance(update, TelegramError):
            try:
                self.dispatch_error(None, update)
            except Exception:
                self.logger.exception('An uncaught error was raised while handling the error.')
            return
        context = None
        handled = False
        sync_modes = []
        for group in self.groups:
            try:
                for handler in self.handlers[group]:
                    check = handler.check_update(update)
                    if check is not None and check is not False:
                        if not context and self.use_context:
                            context = self.context_types.context.from_update(update, self)
                            context.refresh_data()
                        handled = True
                        sync_modes.append(handler.run_async)
                        handler.handle_update(update, self, check, context)
                        break
            # Stop processing with any other handler.
            except DispatcherHandlerStop:
                self.logger.debug('Stopping further handlers due to DispatcherHandlerStop')
                self.update_persistence(update=update)
                break
            # Dispatch any error.
            except Exception as exc:
                try:
                    self.dispatch_error(update, exc)
                except DispatcherHandlerStop:
                    self.logger.debug('Error handler stopped further handlers')
                    break
                # Errors should not stop the thread.
                except Exception:
                    self.logger.exception('An uncaught error was raised while handling the error.')
        # Update persistence, if handled
        handled_only_async = all(sync_modes)
        if handled:
            # Respect default settings
            if all(mode is DEFAULT_FALSE for mode in sync_modes) and self.bot.defaults:
                handled_only_async = self.bot.defaults.run_async
            # If update was only handled by async handlers, we don't need to update here
            if not handled_only_async:
                self.update_persistence(update=update)

    def add_handler(self, handler: Handler[UT, CCT], group: int = DEFAULT_GROUP) -> None:
        """Register a handler.
        TL;DR: Order and priority counts. 0 or 1 handlers per group will be used. End handling of
        update with :class:`telegram.ext.DispatcherHandlerStop`.
        A handler must be an instance of a subclass of :class:`telegram.ext.Handler`. All handlers
        are organized in groups with a numeric value. The default group is 0. All groups will be
        evaluated for handling an update, but only 0 or 1 handler per group will be used. If
        :class:`telegram.ext.DispatcherHandlerStop` is raised from one of the handlers, no further
        handlers (regardless of the group) will be called.
        The priority/order of handlers is determined as follows:
        * Priority of the group (lower group number == higher priority)
        * The first handler in a group which should handle an update (see
          :attr:`telegram.ext.Handler.check_update`) will be used. Other handlers from the
          group will not be used. The order in which handlers were added to the group defines the
          priority.
        Args:
            handler (:class:`telegram.ext.Handler`): A Handler instance.
            group (:obj:`int`, optional): The group identifier. Default is 0.
        """
        # Unfortunately due to circular imports this has to be here
        from .conversationhandler import ConversationHandler  # pylint: disable=C0415

        if not isinstance(handler, Handler):
            raise TypeError(f'handler is not an instance of {Handler.__name__}')
        if not isinstance(group, int):
            raise TypeError('group is not int')
        # For some reason MyPy infers the type of handler is <nothing> here,
        # so for now we just ignore all the errors
        if (
            isinstance(handler, ConversationHandler)
            and handler.persistent  # type: ignore[attr-defined]
            and handler.name  # type: ignore[attr-defined]
        ):
            if not self.persistence:
                raise ValueError(
                    f"ConversationHandler {handler.name} "  # type: ignore[attr-defined]
                    f"can not be persistent if dispatcher has no persistence"
                )
            handler.persistence = self.persistence  # type: ignore[attr-defined]
            handler.conversations = (  # type: ignore[attr-defined]
                self.persistence.get_conversations(handler.name)  # type: ignore[attr-defined]
            )
        if group not in self.handlers:
            self.handlers[group] = []
            self.groups.append(group)
            self.groups = sorted(self.groups)
        self.handlers[group].append(handler)

    def remove_handler(self, handler: Handler, group: int = DEFAULT_GROUP) -> None:
        """Remove a handler from the specified group.
        Args:
            handler (:class:`telegram.ext.Handler`): A Handler instance.
            group (:obj:`object`, optional): The group identifier. Default is 0.
        """
        if handler in self.handlers[group]:
            self.handlers[group].remove(handler)
            if not self.handlers[group]:
                del self.handlers[group]
                self.groups.remove(group)

    def update_persistence(self, update: object = None) -> None:
        """Update :attr:`user_data`, :attr:`chat_data` and :attr:`bot_data` in :attr:`persistence`.
        Args:
            update (:class:`telegram.Update`, optional): The update to process. If passed, only the
                corresponding ``user_data`` and ``chat_data`` will be updated.
        """
        with self._update_persistence_lock:
            self.__update_persistence(update)

    def __update_persistence(self, update: object = None) -> None:
        if self.persistence:
            # We use list() here in order to decouple chat_ids from self.chat_data, as dict view
            # objects will change, when the dict does and we want to loop over chat_ids
            chat_ids = list(self.chat_data.keys())
            user_ids = list(self.user_data.keys())
            if isinstance(update, Update):
                if update.effective_chat:
                    chat_ids = [update.effective_chat.id]
                else:
                    chat_ids = []
                if update.effective_user:
                    user_ids = [update.effective_user.id]
                else:
                    user_ids = []
            if self.persistence.store_callback_data:
                self.bot = cast(telegram.ext.extbot.ExtBot, self.bot)
                try:
                    self.persistence.update_callback_data(
                        self.bot.callback_data_cache.persistence_data
                    )
                except Exception as exc:
                    try:
                        self.dispatch_error(update, exc)
                    except Exception:
                        message = (
                            'Saving callback data raised an error and an '
                            'uncaught error was raised while handling '
                            'the error with an error_handler'
                        )
                        self.logger.exception(message)
            if self.persistence.store_bot_data:
                try:
                    self.persistence.update_bot_data(self.bot_data)
                except Exception as exc:
                    try:
                        self.dispatch_error(update, exc)
                    except Exception:
                        message = (
                            'Saving bot data raised an error and an '
                            'uncaught error was raised while handling '
                            'the error with an error_handler'
                        )
                        self.logger.exception(message)
            if self.persistence.store_chat_data:
                for chat_id in chat_ids:
                    try:
                        self.persistence.update_chat_data(chat_id, self.chat_data[chat_id])
                    except Exception as exc:
                        try:
                            self.dispatch_error(update, exc)
                        except Exception:
                            message = (
                                'Saving chat data raised an error and an '
                                'uncaught error was raised while handling '
                                'the error with an error_handler'
                            )
                            self.logger.exception(message)
            if self.persistence.store_user_data:
                for user_id in user_ids:
                    try:
                        self.persistence.update_user_data(user_id, self.user_data[user_id])
                    except Exception as exc:
                        try:
                            self.dispatch_error(update, exc)
                        except Exception:
                            message = (
                                'Saving user data raised an error and an '
                                'uncaught error was raised while handling '
                                'the error with an error_handler'
                            )
                            self.logger.exception(message)

    def add_error_handler(
        self,
        callback: Callable[[object, CCT], None],
        run_async: Union[bool, DefaultValue] = DEFAULT_FALSE,  # pylint: disable=W0621
    ) -> None:
        """Registers an error handler in the Dispatcher. This handler will receive every error
        which happens in your bot.
        Note:
            Attempts to add the same callback multiple times will be ignored.
        Warning:
            The errors handled within these handlers won't show up in the logger, so you
            need to make sure that you reraise the error.
        Args:
            callback (:obj:`callable`): The callback function for this error handler. Will be
                called when an error is raised. Callback signature for context based API:
                ``def callback(update: object, context: CallbackContext)``
                The error that happened will be present in context.error.
            run_async (:obj:`bool`, optional): Whether this handlers callback should be run
                asynchronously using :meth:`run_async`. Defaults to :obj:`False`.
        Note:
            See https://git.io/fxJuV for more info about switching to context based API.
        """
        if callback in self.error_handlers:
            self.logger.debug('The callback is already registered as an error handler. Ignoring.')
            return
        if run_async is DEFAULT_FALSE and self.bot.defaults and self.bot.defaults.run_async:
            run_async = True
        self.error_handlers[callback] = run_async

    def remove_error_handler(self, callback: Callable[[object, CCT], None]) -> None:
        """Removes an error handler.
        Args:
            callback (:obj:`callable`): The error handler to remove.
        """
        self.error_handlers.pop(callback, None)

    def dispatch_error(
        self, update: Optional[object], error: Exception, promise: Promise = None
    ) -> None:
        """Dispatches an error.
        Args:
            update (:obj:`object` | :class:`telegram.Update`): The update that caused the error.
            error (:obj:`Exception`): The error that was raised.
            promise (:class:`telegram.utils.Promise`, optional): The promise whose pooled function
                raised the error.
        """
        async_args = None if not promise else promise.args
        async_kwargs = None if not promise else promise.kwargs
        if self.error_handlers:
            for callback, run_async in self.error_handlers.items():  # pylint: disable=W0621
                if self.use_context:
                    context = self.context_types.context.from_error(
                        update, error, self, async_args=async_args, async_kwargs=async_kwargs
                    )
                    if run_async:
                        self.run_async(callback, update, context, update=update)
                    else:
                        callback(update, context)
                else:
                    if run_async:
                        self.run_async(callback, self.bot, update, error, update=update)
                    else:
                        callback(self.bot, update, error)
        else:
            self.logger.exception(
                'No error handlers are registered, logging exception.', exc_info=error
            )
| |
from chunk import *
from recording import *
from recording_defined_output import *
import keras
from keras import backend as K
import pprint
pp = pprint.PrettyPrinter(depth=6)  # shared, depth-limited pretty-printer for progress dumps
# Register our custom loss functions so our rod model can load
def me(y_true, y_pred):
    # Clip predictions and targets to [0, 1], take the absolute error, cap it
    # at 0.2, and scale by 0.001 so Keras' progress output effectively shows
    # the error with extra significant digits.
    clipped_pred = K.clip(y_pred, 0.0, 1.0)
    clipped_true = K.clip(y_true, 0.0, 1.0)
    capped_error = K.clip(K.abs(clipped_pred - clipped_true), 0.0, 0.2)
    return capped_error * 0.001
class Experiment(object):
'''
Simple class wraps an experiment
'''
    def __init__(self, experiment_file):
        # Parse the experiment definition XML; all relative paths in it are
        # resolved against the directory containing the file.
        self.tree = ET.parse(experiment_file)
        self.root = self.tree.getroot()
        self.base_path = os.path.dirname(experiment_file)
        # Chunk settings
        # NOTE(review): assumes PROCESSING/CHUNK_MIN_FRAMES and
        # PROCESSING/CHUNK_ROD_FAILURE_LIMIT always exist — a missing element
        # raises AttributeError here. Confirm against the XML schema.
        self.chunk_min_frames = int(self.root.find("PROCESSING").find("CHUNK_MIN_FRAMES").text)
        self.chunk_rod_failure_limit = int(self.root.find("PROCESSING").find("CHUNK_ROD_FAILURE_LIMIT").text)
        # Results
        # OUTPUTS is a comma-separated list of output column names.
        self.output_columns = self.root.find("OUTPUTS").text.split(",")
        self.output_folder = os.path.join(self.base_path, self.root.find("OUTPUT_FOLDER").text)
        # Optional crop setting; stays None when CROPTOROD is absent.
        self.crop_to_rod = None
        if self.root.find("PROCESSING").find("VIDEO").find("CROPTOROD") is not None:
            self.crop_to_rod = self.root.find("PROCESSING").find("VIDEO").find("CROPTOROD").text
        # Output video frame size in pixels.
        self.output_width = int(self.root.find("PROCESSING").find("VIDEO").find("SIZE").find("W").text)
        self.output_height = int(self.root.find("PROCESSING").find("VIDEO").find("SIZE").find("H").text)
        # Load any rod blackouts
        self.blackouts = []
        if self.root.find("BLACKOUTS") is not None:
            for blackout in self.root.find("BLACKOUTS").iter("BLACKOUT"):
                self.blackouts.append(blackout.text)
        # Load the defined ML models
        # Each MODEL element provides FILE (path), NAME, and SIZE ("w,h").
        # Loading registers the custom `me` metric so saved models deserialize.
        self.models = {}
        for model in self.root.find("MODELS").iter("MODEL"):
            model_path = os.path.join(self.base_path, model.find("FILE").text)
            model_name = model.find("NAME").text
            print("Loading model %s from %s." % (model_name, model_path))
            self.models[model_name] = {'model':keras.models.load_model(model_path, custom_objects={'me': me}), 'width':int(model.find("SIZE").text.split(",")[0]), 'height':int(model.find("SIZE").text.split(",")[1])}
            print("Done loading model %s." % model_name)
        # Recordings
        self.recordings = []
        for recording in self.root.find("RECORDINGS").iter("RECORDING"):
            recording_path = os.path.join(self.base_path, recording.text)
            self.recordings.append( Recording(recording_path, self.models, self.blackouts, self.crop_to_rod) )
        # Defined-output recordings
        self.defined_output_recordings = []
        for recording in self.root.find("RECORDINGS").iter("DEFINED_OUTPUT_RECORDING"):
            recording_path = os.path.join(self.base_path, recording.text)
            self.defined_output_recordings.append( RecordingDefinedOutput(recording_path, self.models) )
def play(self):
# Play the recordings
for recording in self.recordings:
recording.play()
def process(self):
    """Cut every recording into continuous, fully-tracked training "chunks".

    Walks each recording frame by frame; a chunk is an unbroken run of
    frames in which rod tracking succeeded.  Finished chunks are written
    via Chunk.write(), and settings.tsv accumulates one row per written
    chunk (video file, position file and, for tracked recordings, the
    per-column min/max rod-position bounds).
    """
    # Process the recordings into continuous valid chunks
    chunk_number = 0
    # Truncate settings.tsv here; it is re-opened in append mode once per
    # recording below.
    f_settings = open(os.path.join(self.output_folder, "settings.tsv"), "w")
    f_settings.close()
    for recording in self.recordings:
        # Process the chunks from this recording
        print("Processing recording %s into training chunks..." % recording.recording_file)
        chunk = None
        recording.initialize()
        frame_count = 0
        chunk_files = []
        # Per-output-column min/max rod positions seen so far (None = unseen).
        chunk_positions_max = [None]*len(self.output_columns)
        chunk_positions_min = [None]*len(self.output_columns)
        while recording.has_more:
            (frame, frame_with_markup, rod_positions, failure_count) = recording.get_next_frame()
            # TODO: improve calculation to require minimum movement of rods in chunk!
            # NOTE(review): both flags are hard-coded True, so the first two
            # branches below are currently unreachable placeholders.
            chunk_meets_movement = True
            movement_check = True
            if chunk is not None and not movement_check and not chunk_meets_movement:
                # Chunk is finished without results
                pass
            elif chunk is not None and not movement_check and chunk_meets_movement:
                # Chunk is finished with results
                pass
            #elif chunk is not None and (failure_count >= self.chunk_rod_failure_limit or frame is None):
            elif chunk is not None and frame is None:
                # We failed to find the necessary number of rods. Chunk is finished.
                if chunk_meets_movement:
                    print("%i failures at frame %i. Chunk finished." % (failure_count, frame_count))
                    (video_file, position_file) = chunk.write()
                    if video_file != None:
                        chunk_files.append([video_file, position_file])
                chunk = None
            elif failure_count >= self.chunk_rod_failure_limit:
                # TODO: FIX THIS! skip the frame for now.
                print("Skipping tracking failure frame")
                frame = None
                pass
            elif chunk is None and failure_count < self.chunk_rod_failure_limit and frame is not None:
                # Start of a new chunk
                print("Started new chunk %i at frame %i." % (chunk_number, frame_count))
                chunk = Chunk(self.output_folder, chunk_number, (self.output_width, self.output_height),self.chunk_min_frames)
                chunk_number += 1
            if chunk is not None and frame is not None:
                # Log this chunk
                positions = []
                for idx, column in enumerate(self.output_columns):
                    positions.append(str(rod_positions[column]))
                    # NOTE(review): this ratio is > 0 for every frame after the
                    # first, so the guard is effectively always true --
                    # presumably a warm-up threshold was intended; confirm.
                    if float(frame_count) / float(recording.num_frames) > 0:
                        if chunk_positions_max[idx] is None:
                            # Use current positions as starting point for max/min value
                            chunk_positions_min[idx] = rod_positions[column]
                            chunk_positions_max[idx] = rod_positions[column]
                        else:
                            # Update the min and max
                            chunk_positions_min[idx] = min(chunk_positions_min[idx], rod_positions[column])
                            chunk_positions_max[idx] = max(chunk_positions_max[idx], rod_positions[column])
                chunk.add_frame(frame, frame_with_markup, positions)
            if (frame_count % 100) == 0 and chunk is not None:
                # Periodic progress report.
                print("Processed %i of %i frames. Added %i frames to chunk so far. On chunk %i in %s." % (frame_count,recording.num_frames, chunk.get_count(), chunk_number-1,recording.recording_file))
                print("Current min bounds:")
                pp.pprint(chunk_positions_min)
                print("Current max bounds:")
                pp.pprint(chunk_positions_max)
            frame_count += 1
        # Write the final chunk
        if chunk is not None:
            (video_file, position_file) = chunk.write()
            if video_file is not None:
                chunk_files.append([video_file, position_file])
        # Write the list of chunks
        f_settings = open(os.path.join(self.output_folder, "settings.tsv"), "a")
        for chunk_file in chunk_files:
            f_settings.write("\t".join(chunk_file))
            # NOTE(review): every chunk row repeats the recording-wide bounds
            # rather than per-chunk bounds -- confirm this is intended.
            for value in chunk_positions_min:
                f_settings.write("\t%i" % value)
            for value in chunk_positions_max:
                f_settings.write("\t%i" % value)
            f_settings.write("\n")
        f_settings.close()
    # Second pass: recordings whose target outputs are pre-defined (no rod
    # tracking, so no failure counting or bounds accumulation).
    for recording in self.defined_output_recordings:
        # Process the chunks from this recording
        print("Processing recording %s into training chunks..." % recording.recording_file)
        chunk = None
        recording.initialize()
        frame_count = 0
        chunk_files = []
        while recording.has_more:
            (frame, output) = recording.get_next_frame()
            if chunk is not None and frame is None:
                # Chunk is finished.
                print("Frame %i. Chunk finished." % (frame_count))
                (video_file, position_file) = chunk.write()
                if video_file != None:
                    chunk_files.append([video_file, position_file])
                chunk = None
            elif chunk is None and frame is not None:
                # Start of a new chunk
                print("Started new chunk %i at frame %i." % (chunk_number, frame_count))
                chunk = Chunk(self.output_folder, chunk_number, (self.output_width, self.output_height),self.chunk_min_frames)
                chunk_number += 1
            if chunk is not None and frame is not None:
                # Log this chunk
                positions = []
                for idx, column in enumerate(self.output_columns):
                    positions.append(str(output[column]))
                chunk.add_frame(frame, positions)
            if (frame_count % 100) == 0 and chunk is not None:
                print("Processed %i of %i frames. Added %i frames to chunk so far. On chunk %i in %s." % (frame_count,recording.num_frames, chunk.get_count(), chunk_number-1,recording.recording_file))
            frame_count += 1
        # Write the final chunk
        if chunk is not None:
            (video_file, position_file) = chunk.write()
            if video_file is not None:
                chunk_files.append([video_file, position_file])
        # Write the list of chunks
        f_settings = open(os.path.join(self.output_folder, "settings.tsv"), "a")
        for chunk_file in chunk_files:
            f_settings.write("\t".join(chunk_file))
            f_settings.write("\n")
        f_settings.close()
| |
import itertools
import json
import mwparserfromhell
import pywikibot
import requests
from unidecode import unidecode
# Node "group" discriminator values shared by every builder in this module.
PLAYER_GROUP = 2
TEAM_GROUP = 1
def splitTeamByYears(nodes):
    """Explode each team node into one node per championship year.

    Player nodes are dropped.  Ids are assigned densely in insertion order
    and the result is sorted by year ascending.

    Fixes: compare group with == (identity `is` on ints only works by
    CPython small-int caching) and use .items() instead of the
    Python-2-only .iteritems().
    """
    perYear = []
    for node in nodes:
        if node['group'] == TEAM_GROUP:
            for year, players in node['wins'].items():
                perYear.append({
                    'id': len(perYear),
                    'name': node['name'],
                    'roster': players,
                    'year': year,
                    'group': node['group']
                })
    perYear.sort(key=lambda entry: int(entry['year']))
    return perYear
def getTeamImage( team ):
    """Look up a logo thumbnail URL for *team* via Bing image search.

    Performs a network request; returns the first result's thumbnail URL.
    Raises KeyError/IndexError if the API returns no results.
    """
    import os
    url = "https://api.cognitive.microsoft.com/bing/v5.0/images/search?q=" + \
        team.replace(' ', '+') + "+hockey+team+logo&mkt=en-us"
    # SECURITY: the subscription key used to be hard-coded here; prefer the
    # BING_API_KEY environment variable, falling back to the legacy key so
    # existing setups keep working.  The legacy key should be revoked.
    key = os.environ.get('BING_API_KEY', '44d9098965f44ac8bc101c979e16d235')
    res = requests.get( url, headers = {
        'Ocp-Apim-Subscription-Key': key
    })
    return res.json()['value'][0]['thumbnailUrl']
def buildPlayerNodes( nodes ):
    """Enrich player nodes with Wikipedia infobox data (Python 2 only).

    NOTE(review): several apparent bugs here -- `players` is created but
    nothing is ever appended to it; `nodes` is a list, so the final
    `nodes['players'] = players` raises TypeError; and the inner
    `for value in infobox.params` loop shadows the outer enumerate()
    variable `value`.  The intended data flow needs confirming before a
    behavioural fix.
    """
    players = []
    site = pywikibot.Site('en', 'wikipedia')
    for index, value in enumerate(nodes):
        if( value['group'] is PLAYER_GROUP ):
            # Carry the basic player fields over.
            p = {}
            p['name'] = value['name']
            p['winCount'] = value['winCount']
            p['wins'] = value['wins']
            p['id'] = value['id']
            try:
                # ASCII-fold the name so pywikibot can look the page up.
                name = value['name']
                print type(name)
                name = unidecode(name)
                try:
                    page = pywikibot.Page(site, name)
                except UnicodeDecodeError:
                    print "Can't get page for " + p['name'] + " as " + name
                    continue
                if( page.isRedirectPage() ):
                    page = page.getRedirectTarget()
                wikitext = page.get()
                wikicode = mwparserfromhell.parse(wikitext)
                infobox = wikicode.filter_templates(matches="infobox")
                # Disambiguation page: follow the first hockey/NHL wikilink.
                if( len(infobox) == 0 and wikicode.filter_text(matches="may refer to:") ):
                    try:
                        manualRedirect = wikicode.filter_wikilinks(matches="hockey|NHL")[0]
                        page = pywikibot.Page(site, manualRedirect.title)
                        if( page.isRedirectPage() ):
                            page = page.getRedirectTarget()
                        wikicode = mwparserfromhell.parse( page.get() )
                        infobox = wikicode.filter_templates(matches="infobox")
                    except IndexError:
                        print "Manual Redirect Failed for " + p['name']
                try:
                    # Copy every infobox parameter onto the player dict.
                    # NOTE(review): `value` here shadows the loop variable.
                    infobox = infobox[0]
                    for value in infobox.params:
                        p[value.name] = value.value
                    p['found'] = True
                except IndexError:
                    #print "Can't extract infobox for " + p['name']
                    p['found'] = False
            except pywikibot.exceptions.NoPage as e:
                p['found'] = False
    print players
    nodes['players'] = players
    return nodes
def buildArcLinks(nodes):
    """Build arc-diagram links: one link per player between each pair of
    consecutive team-year nodes that player appears on.

    Side effect (as before): re-assigns each team node's 'id' to its index
    in *nodes*.

    Fixes: == instead of int identity (`is`) and direct dict membership
    instead of the O(n) `in players.keys()` scan.
    """
    links = []
    lastSeen = {}  # player name -> id of the most recent team-year node seen
    for index, node in enumerate(nodes):
        if node['group'] == TEAM_GROUP:
            node['id'] = index
            for player in node['roster']:
                if player not in lastSeen:
                    lastSeen[player] = node['id']
                elif index > lastSeen[player]:
                    links.append({
                        'source': lastSeen[player],
                        'target': node['id'],
                        'player': player,
                    })
                    lastSeen[player] = node['id']
    return links
def buildTeamRosters(nodes):
    """Replace each team node's per-year roster with the winning players' ids.

    First indexes player ids by (year + team) key, then rewrites every team
    node's wins mapping in place and returns *nodes*.

    Fixes: == instead of int identity (`is`), .items() instead of the
    Python-2-only .iteritems(), and drops the unused `teamYears` local.
    """
    winningYears = {}
    for node in nodes:
        if node['group'] == PLAYER_GROUP:
            # Add this player's id to the roster of each winning (year, team).
            for year, team in node['wins'].items():
                winningYears.setdefault(year + team, []).append(node['id'])
    for index, node in enumerate(nodes):
        if node['group'] == TEAM_GROUP:
            for year in node['wins']:
                nodes[index]['wins'][year] = winningYears[year + node['name']]
    return nodes
def buildLinks(nodes):
    """Produce {target, source, value} link dicts from each team node (by
    list index) to every winning player id, tagged with the year.

    Fixes: == instead of int identity (`is`) and .items() instead of the
    Python-2-only .iteritems().
    """
    output = []
    for index, node in enumerate(nodes):
        if node['group'] == TEAM_GROUP:
            for year, roster in node['wins'].items():
                for player in roster:
                    output.append({
                        'target': player,
                        'source': index,
                        'value': year
                    })
    return output
def buildTeamNodes(data):
    """Create one team node per team found in the players' win data, then
    return the team nodes followed by the player nodes.

    winCount counts distinct winning years per team.  Fetches each team's
    logo via a network call to getTeamImage().

    Fixes: .items() instead of Python-2-only .iteritems() and direct dict
    membership instead of `in teams.keys()`.
    """
    teams = {}
    for playerData in data:
        for year, team in playerData['wins'].items():
            if team not in teams:
                teams[team] = {
                    'wins': {year: []},
                    'winCount': 1,
                    'name': team,
                    'group': TEAM_GROUP,
                    'logoURL': getTeamImage(team),
                }
            elif year not in teams[team]['wins']:
                # First time this winning year is seen for the team.
                # NOTE(review): winCount increments only for a new year, so
                # it counts distinct championship years -- confirm intent.
                teams[team]['wins'][year] = []
                teams[team]['winCount'] += 1
    nodes = list(teams.values())
    nodes.extend(data)
    return nodes
def parse_to_json(csv, outFile):
    """Parse the CSV file *csv* and dump the node list to *outFile* as JSON.

    Returns the parsed list as well.
    """
    records = []
    parse(csv, records)
    with open(outFile, "w+") as out:
        out.write(json.dumps(records, indent=4))
    return records
def parse(fn, df):
    """Read the file *fn* and append one parsed player dict per line to *df*."""
    with open(fn) as handle:
        parser(handle.readlines(), df)
def parser(content, data):
    """Turn CSV lines into player dicts and append them to *data*.

    Line layout: last,first,team1,year1,team2,year2,...,winCount.
    Python 2 only (uses str.decode on byte strings).
    """
    playerCount = 0
    for line in content:
        player = {}
        # Alternates 'T' (expect a team name) / 'Y' (expect a year field).
        alternator = 'T'
        pd = line.split(',')
        player['name'] = pd[1].strip() + ' ' + pd[0].strip()
        winningTeams = {}
        teamName = ''
        # Columns 2..n-2 are alternating team/year pairs.
        for i in itertools.islice(pd, 2, len(pd) - 1):
            if alternator == 'T':
                alternator = 'Y'
                teamName = i.strip()
            else:
                year = i.strip()
                # Normalise odd encodings; unmappable chars become '?'.
                year = year.decode('unicode_escape').encode('ascii', 'replace')
                if( '???' not in year and '-' not in year):
                    winningTeams[year] = teamName
                if '???' in year or '-' in year :
                    # Range/garbled field like "1994-95": expand to full years.
                    years = parseYears(year)
                    prefix = ''
                    y2=[]
                    for y in years:
                        if( len(y) == 4 ):
                            # Remember the century of the last 4-digit year.
                            prefix = y[:2]
                        if( len(y) == 2 and prefix != '' ):
                            # Expand a 2-digit year using that century.
                            y = str( int(prefix)*100 + int(y) )
                        y2.append(y)
                        winningTeams[y] = teamName
                alternator = 'T'
        player['wins'] = winningTeams
        # Final column is the win count, possibly wrapped in braces.
        player['winCount'] = pd[len(pd) - 1].strip().replace('{','').replace('}', '')
        player['group'] = PLAYER_GROUP
        player['id'] = playerCount
        playerCount+=1
        data.extend([player])
def parseYears(year):
    """Split a year field like '1994-95' or '1994???96' into its parts.

    Splits repeatedly on '-' and '???' until no separator remains.

    Fixes: the original compared strings with `is not ''` (identity, not
    equality) and returned an undefined variable when *year* contained no
    separator; that case now returns [year].
    """
    if '-' in year:
        splitter = '-'
    elif '???' in year:
        splitter = '???'
    else:
        # Defensive: nothing to split.
        return [year]
    cleanerList = year.split(splitter)
    clean = False
    while not clean:
        output = []
        clean = True
        for part in cleanerList:
            if '-' in part:
                clean = False
                output.extend(part.split('-'))
            elif '???' in part:
                clean = False
                output.extend(part.split('???'))
            else:
                output.append(part)
        cleanerList = output
    return output
| |
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import time
import urllib
from common import util
from common import pb2dict
from bootstrap import settings
from trace import dao
from common import http
from galaxy import sdk
from ftrace import sdk as fsdk
from galaxy import galaxy_pb2
from galaxy import agent_pb2
from galaxy import log_pb2
from galaxy import initd_pb2
from galaxy import master_pb2
from sql import sql_parser
from ftrace import query_pb2
import logging
logger = logging.getLogger("console")
def sql_decorator(func):
    """Validate the "db" and "sql" GET parameters before calling *func*.

    On a missing parameter, sets request.has_err/request.err and invokes
    *func* immediately; otherwise attaches db, the URL-unquoted sql and
    the integer limit (default 10000) to the request.
    """
    def sql_wrapper(request, *args, **kwds):
        request.has_err = False
        database = request.GET.get("db", None)
        if not database:
            request.has_err = True
            request.err = "db is required"
            return func(request, *args, **kwds)
        request.db = database
        raw_sql = request.GET.get("sql", None)
        if not raw_sql:
            request.has_err = True
            request.err = "sql is required"
            return func(request, *args, **kwds)
        request.sql = urllib.unquote(raw_sql)
        request.limit = int(request.GET.get("limit", "10000"))
        return func(request, *args, **kwds)
    return sql_wrapper
def query_decorator(func):
    """Parse the time window and db/table/fields GET parameters, attaching
    them to the request before calling *func*.

    Missing start/end default to the last hour; timestamps are stored in
    microseconds.  Missing db/table/fields set has_err/err instead.
    """
    def query_wrapper(request, *args, **kwds):
        request.has_err = False
        start = request.GET.get("start", None)
        end = request.GET.get("end", None)
        if not end or not start:
            # Default window: the last hour, in microseconds.
            now = datetime.datetime.now()
            an_hour_ago = now - datetime.timedelta(hours = 1)
            request.start_time = long(time.mktime(an_hour_ago.timetuple())) * 1000000
            request.end_time = long(time.mktime(now.timetuple())) * 1000000
        else:
            request.start_time = long(start)
            request.end_time = long(end)
        db = request.GET.get("db", None)
        if not db:
            request.has_err = True
            request.err = "db is required"
            return func(request, *args, **kwds)
        request.db = db
        table = request.GET.get("table", None)
        if not table:
            request.has_err = True
            request.err = "table is required"
            return func(request, *args, **kwds)
        request.table = table
        field_spec = request.GET.get("fields", None)
        if not field_spec:
            request.has_err = True
            request.err = "fields is required"
            return func(request, *args, **kwds)
        request.fields = field_spec.split(",")
        request.reverse = request.GET.get("reverse", None)
        request.limit = int(request.GET.get("limit", "100"))
        return func(request, *args, **kwds)
    return query_wrapper
def data_filter(data, fields=None):
    """Project the dict *data* onto the requested *fields*.

    A field list of ["*"] returns *data* unfiltered; otherwise only the
    keys named in *fields* are kept.

    Fixes: an empty/None field list used to raise IndexError on fields[0]
    (and [] was a mutable default) -- it now returns *data* unchanged.
    """
    if not fields or fields[0] == "*":
        return data
    return {key: value for key, value in data.items() if key in fields}
def job_event_processor(resultset, fields=[], limit=100):
    """Decode JobEvent records from *resultset* and project them onto *fields*.

    Enum fields are replaced by their protobuf names.  Returns [] when no
    fields were requested.  NOTE: limit is not applied here.
    """
    if not fields:
        return []
    event_pb = log_pb2.JobEvent()
    decoded = []
    for result in resultset:
        for raw in result.data_list:
            event_pb.ParseFromString(raw)
            entry = pb2dict.protobuf_to_dict(event_pb)
            entry["state"] = master_pb2.JobState.Name(entry["state"])
            entry["level"] = log_pb2.TraceLevel.Name(event_pb.level)
            entry["update_state"] = master_pb2.JobUpdateState.Name(entry["update_state"])
            decoded.append(data_filter(entry, fields))
    return decoded
def job_stat_processor(resultset, fields=[], limit=100):
    """Decode JobStat records, project onto *fields*, sorted by time.

    Returns [] when no fields were requested.  NOTE: unlike most other
    processors, limit is not applied here.
    """
    if not fields:
        return []
    stat_pb = log_pb2.JobStat()
    decoded = []
    for result in resultset:
        for raw in result.data_list:
            stat_pb.ParseFromString(raw)
            decoded.append(data_filter(util.pb2dict(stat_pb), fields))
    return sorted(decoded, key=lambda item: item["time"])
def pod_event_processor(resultset, fields=[], limit=100):
    """Decode PodEvent records, keeping only TERROR/TWARNING level events,
    sorted by time and truncated to *limit*.
    """
    if not fields:
        return []
    event_pb = log_pb2.PodEvent()
    kept = []
    for result in resultset:
        for raw in result.data_list:
            event_pb.ParseFromString(raw)
            entry = util.pb2dict(event_pb)
            entry["stage"] = galaxy_pb2.PodStage.Name(event_pb.stage)
            entry["level"] = log_pb2.TraceLevel.Name(event_pb.level)
            if entry["level"] not in ["TERROR", "TWARNING"]:
                continue
            entry["state"] = galaxy_pb2.PodState.Name(event_pb.state)
            kept.append(data_filter(entry, fields))
    kept = sorted(kept, key=lambda item: item["time"])
    return kept[0:limit]
def task_event_processor(resultset, fields=[], limit=100):
    """Decode TaskEvent records with derived fields (initd_port, ftime and
    enum names), newest first, truncated to *limit*.
    """
    event_pb = log_pb2.TaskEvent()
    decoded = []
    for result in resultset:
        for raw in result.data_list:
            event_pb.ParseFromString(raw)
            entry = util.pb2dict(event_pb)
            entry["initd_port"] = entry["initd_addr"].split(":")[-1]
            entry["stage"] = agent_pb2.TaskStage.Name(event_pb.stage)
            entry["level"] = log_pb2.TraceLevel.Name(event_pb.level)
            entry["state"] = galaxy_pb2.TaskState.Name(event_pb.state)
            entry["main"] = initd_pb2.ProcessStatus.Name(event_pb.main)
            entry["ftime"] = datetime.datetime.fromtimestamp(entry['ttime']/1000000).strftime("%Y-%m-%d %H:%M:%S")
            entry["deploy"] = initd_pb2.ProcessStatus.Name(event_pb.deploy)
            decoded.append(data_filter(entry, fields))
    decoded = sorted(decoded, key=lambda item: item["ttime"], reverse = True)
    return decoded[0:limit]
def cluster_stat_processor(resultset, fields=[], limit=100):
    """Decode ClusterStat records, sorted by time, truncated to *limit*."""
    stat_pb = log_pb2.ClusterStat()
    decoded = []
    for result in resultset:
        for raw in result.data_list:
            stat_pb.ParseFromString(raw)
            decoded.append(data_filter(util.pb2dict(stat_pb), fields))
    decoded = sorted(decoded, key=lambda item: item["time"])
    return decoded[0:limit]
def agent_event_processor(resultset, fields=[], limit=100):
    """Decode AgentEvent records, project onto *fields*, truncated to *limit*.

    Records keep their arrival order (no time sort here).
    """
    event_pb = log_pb2.AgentEvent()
    decoded = []
    for result in resultset:
        for raw in result.data_list:
            event_pb.ParseFromString(raw)
            decoded.append(data_filter(util.pb2dict(event_pb), fields))
    return decoded[0:limit]
# Dispatch table: database name -> trace table name -> decoder function.
# query() and squery() use this to turn raw protobuf records into
# JSON-ready dicts.
PROCESSOR_MAP={
    "baidu.galaxy":{
        "JobEvent":job_event_processor,
        "JobStat":job_stat_processor,
        "PodEvent":pod_event_processor,
        "TaskEvent":task_event_processor,
        "ClusterStat":cluster_stat_processor,
        "AgentEvent":agent_event_processor
    }
}
@query_decorator
def query(request):
    """Query trace records by exactly one of ?id, ?jobid or ?podid.

    query_decorator has already attached db/table/fields and the time
    window to the request.  Results are decoded by the matching
    PROCESSOR_MAP entry and returned as JSON.
    """
    builder = http.ResponseBuilder()
    if request.has_err:
        return builder.error(request.err).build_json()
    ftrace = fsdk.FtraceSDK(settings.TRACE_QUERY_ENGINE)
    id = request.GET.get("id", None)
    jobid = request.GET.get("jobid", None)
    podid = request.GET.get("podid", None)
    resultset = []
    status = False
    if id :
        # Primary-key lookup.
        resultset, status = ftrace.simple_query(request.db,
            request.table,
            id,
            request.start_time,
            request.end_time,
            request.limit)
    elif jobid:
        # Secondary-index lookup on "jobid".
        resultset, status = ftrace.index_query(request.db,
            request.table,
            "jobid",
            jobid,
            request.start_time,
            request.end_time,
            request.limit)
    elif podid:
        # Secondary-index lookup on "pod_id".
        resultset, status = ftrace.index_query(request.db,
            request.table,
            "pod_id",
            podid,
            request.start_time,
            request.end_time,
            request.limit)
    # If none of the three parameters was supplied, status stays False and
    # the request is reported as a failed query.
    if not status:
        return builder.error("fail to make a query").build_json()
    proc_func = PROCESSOR_MAP[request.db][request.table]
    datas= proc_func(resultset, request.fields, request.limit)
    return builder.ok(data = {"datas":datas}).build_json()
def index(request):
    """Render the trace console landing page."""
    return util.render_tpl(request, {}, "index.html")
def sql_to_mdt(db, sql, limit):
    """Translate an SQL string into an mdt RpcSearchRequest.

    Returns (context, request, ok).  Time conditions accept either a
    "%Y-%m-%d %H:%M" string or a raw microsecond timestamp; missing
    bounds default to the last 24 hours.  Python 2 only (long/unicode).
    """
    # Map SQL comparison operators to their mdt RPC enum values.
    operator_dict = {
        "=":query_pb2.RpcEqualTo,
        "<":query_pb2.RpcLess,
        "<=":query_pb2.RpcLessEqual,
        ">":query_pb2.RpcGreater,
    }
    context, status = sql_parser.SimpleSqlParser().parse(sql)
    if not status:
        return None, None, False
    logger.info(context)
    request = query_pb2.RpcSearchRequest()
    request.db_name = db
    request.table_name = context["table"]
    request.limit = limit
    conds = []
    has_start_time = False
    has_end_time = False
    # Each condition is a (field, operator, value) triple.
    for cond in context["conditions"]:
        if cond[0] == "id":
            request.primary_key = cond[2]
        elif cond[0] == "time" and cond[1].startswith(">"):
            if isinstance(cond[2], unicode) or isinstance(cond[2], str):
                request.start_timestamp = long(time.mktime(time.strptime(cond[2], "%Y-%m-%d %H:%M"))) * 1000000
            else:
                request.start_timestamp = cond[2]
            has_start_time = True
        elif cond[0] == "time" and cond[1].startswith("<"):
            if isinstance(cond[2], unicode) or isinstance(cond[2], str):
                request.end_timestamp = long(time.mktime(time.strptime(cond[2], "%Y-%m-%d %H:%M"))) * 1000000
            else:
                request.end_timestamp = cond[2]
            has_end_time = True
        else:
            # Everything else becomes an index-table comparison.
            # NOTE(review): cmp_key is set from the value (cond[2]) and
            # index_table_name from the field (cond[0]) -- confirm this
            # mapping against the mdt API.
            condition = request.condition.add()
            condition.cmp_key = cond[2]
            condition.cmp = operator_dict[cond[1]]
            condition.index_table_name = cond[0]
    # Fill in any missing time bound with a default 24h window.
    end_time = datetime.datetime.now()
    start_time = end_time - datetime.timedelta(hours = 24)
    if not has_start_time:
        request.start_timestamp = long(time.mktime(start_time.timetuple())) * 1000000
    if not has_end_time:
        request.end_timestamp = long(time.mktime(end_time.timetuple())) * 1000000
    return context, request, True
def gen_tpl(fields):
    """Build a mustache table template with one <th>/<td> column per field."""
    # Assemble the header and row cells with join instead of += in a loop.
    head = "".join("<th>%s</th>" % field for field in fields)
    body = "".join("<td>{{%s}}</td>" % field for field in fields)
    tpl="""
    <table class="table">
    <thead>
    <tr>
    %(head)s
    </tr>
    </thead>
    <tbody>
    {{#datas}}
    <tr>
    %(body)s
    </tr>
    {{/datas}}
    </tbody>
    </table>
    """
    return tpl%{"head":head, "body":body}
@sql_decorator
def squery(request):
    """Run an SQL-style trace query, returning rows plus a render template."""
    builder = http.ResponseBuilder()
    if request.has_err:
        return builder.error(request.err).build_json()
    context, pb_req, ok = sql_to_mdt(request.db, request.sql, request.limit)
    if not ok:
        return builder.error("fail to parse sql").build_json()
    client = fsdk.FtraceSDK(settings.TRACE_QUERY_ENGINE)
    resultset, ok = client.make_req(pb_req)
    if not ok:
        return builder.error("fail to parse sql").build_json()
    decode = PROCESSOR_MAP[request.db][context["table"]]
    rows = decode(resultset, context["fields"], request.limit)
    return builder.ok(data = {"datas": rows, "tpl": gen_tpl(context["fields"])}).build_json()
def sql(request):
    """Render the interactive SQL query page."""
    return util.render_tpl(request, {},"sql.html")
def cluster(request):
    """Render the cluster overview page."""
    return util.render_tpl(request, {},"cluster.html")
def job_stat(request):
    """Return the last hour of JobStat samples for ?jobid as JSON.

    ?reverse re-sorts the samples newest first.
    """
    jobid = request.GET.get("jobid", None)
    reverse = request.GET.get("reverse", None)
    builder = http.ResponseBuilder()
    if not jobid:
        return builder.error("jobid is required").build_json()
    now = datetime.datetime.now()
    hour_ago = now - datetime.timedelta(hours = 1)
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    stats, ok = trace_dao.get_job_stat(jobid,
                                       time.mktime(hour_ago.timetuple()),
                                       time.mktime(now.timetuple()))
    if not ok:
        return builder.error("fail to get job stat").build_json()
    if reverse:
        stats = sorted(stats, key=lambda s: s["time"], reverse = True)
    return builder.ok(data = {"stats": stats}).build_json()
def get_pod_event_by_jobid(request):
    """Return the last 24h of non-TINFO pod events for ?jobid, newest first."""
    jobid = request.GET.get("jobid", None)
    reverse = request.GET.get("reverse", None)
    builder = http.ResponseBuilder()
    if not jobid:
        return builder.error("jobid is required").build_json()
    now = datetime.datetime.now()
    day_ago = now - datetime.timedelta(hours = 24)
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    events, ok = trace_dao.get_pod_event_by_jobid(jobid,
                                                  time.mktime(day_ago.timetuple()),
                                                  time.mktime(now.timetuple()),
                                                  limit=50)
    if not ok:
        return builder.error("fail to get pod event").build_json()
    interesting = []
    for event in events:
        if event["level"] == "TINFO":
            continue
        # Attach a human-readable timestamp derived from microseconds.
        event["ftime"] = datetime.datetime.fromtimestamp(event['time']/1000000).strftime("%Y-%m-%d %H:%M:%S")
        interesting.append(event)
    interesting = sorted(interesting, key=lambda e: e["time"], reverse = True)
    return builder.ok(data = {"events": interesting}).build_json()
def job_event(request):
    """Return the last hour of JobEvent records for ?jobid, newest first.

    Fixes the user-facing error message typo ("evnets" -> "events").
    """
    jobid = request.GET.get("jobid", None)
    builder = http.ResponseBuilder()
    if not jobid:
        return builder.error("jobid is required").build_json()
    end_time = datetime.datetime.now()
    start_time = end_time - datetime.timedelta(hours = 1)
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    events, status = trace_dao.get_job_event(jobid,
                                             time.mktime(start_time.timetuple()),
                                             time.mktime(end_time.timetuple()))
    if not status:
        return builder.error("fail to get job events").build_json()
    events = sorted(events, key=lambda x: x["time"], reverse = True)
    return builder.ok(data = {"events": events}).build_json()
def get_pod(request):
    """Render the pod trace page with the last hour of pod and task events."""
    podid = request.GET.get("podid", None)
    if not podid:
        return util.render_tpl(request, {}, "404.html")
    now = datetime.datetime.now()
    hour_ago = now - datetime.timedelta(hours = 1)
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    pod_events, ok = trace_dao.get_pod_event(podid,
                                             time.mktime(hour_ago.timetuple()),
                                             time.mktime(now.timetuple()))
    if not ok:
        return util.render_tpl(request, {"err":"fail to get trace"}, "500.html")
    # NOTE: the status of the task-event query is ignored, as before.
    task_events, ok = trace_dao.get_task_event(podid,
                                               time.mktime(hour_ago.timetuple()),
                                               time.mktime(now.timetuple()))
    return util.render_tpl(request, {"podid": podid,
                                     "pod_events": pod_events,
                                     "task_events": task_events},
                           "pod_trace.html")
def pod_event(request):
    """Return the last hour of pod events for ?podid, newest first."""
    podid = request.GET.get("podid", None)
    builder = http.ResponseBuilder()
    if not podid:
        return builder.error("podid is required").build_json()
    now = datetime.datetime.now()
    hour_ago = now - datetime.timedelta(hours = 1)
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    events, ok = trace_dao.get_pod_event(podid,
                                         time.mktime(hour_ago.timetuple()),
                                         time.mktime(now.timetuple()))
    if not ok:
        return builder.error("fail to get pod event").build_json()
    events = sorted(events, key=lambda e: e["time"], reverse = True)
    return builder.ok(data = {"events": events}).build_json()
def task_event(request):
    """Return the last hour of non-TINFO task events for ?podid, newest first."""
    podid = request.GET.get("podid", None)
    builder = http.ResponseBuilder()
    if not podid:
        return builder.error("podid is required").build_json()
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    now = datetime.datetime.now()
    hour_ago = now - datetime.timedelta(hours = 1)
    task_events, ok = trace_dao.get_task_event(podid,
                                               time.mktime(hour_ago.timetuple()),
                                               time.mktime(now.timetuple()),
                                               limit=20)
    if not ok:
        return builder.error("fail to get task event").build_json()
    interesting = []
    for event in task_events:
        if event["level"] == "TINFO":
            continue
        # Attach a human-readable timestamp derived from microseconds.
        event["ftime"] = datetime.datetime.fromtimestamp(event['ttime']/1000000).strftime("%Y-%m-%d %H:%M:%S")
        interesting.append(event)
    interesting = sorted(interesting, key=lambda e: e["ttime"], reverse = True)
    return builder.ok(data = {"events": interesting}).build_json()
def pod_stat(request):
    """Return the last hour of pod resource stats for ?podid, newest first."""
    podid = request.GET.get("podid", None)
    builder = http.ResponseBuilder()
    if not podid:
        return builder.error("podid is required").build_json()
    now = datetime.datetime.now()
    hour_ago = now - datetime.timedelta(hours = 1)
    trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
    stats, ok = trace_dao.get_pod_stat(podid,
                                       time.mktime(hour_ago.timetuple()),
                                       time.mktime(now.timetuple()))
    if not ok:
        return builder.error("fail to get pod stat").build_json()
    stats = sorted(stats, key=lambda s: s["time"], reverse = True)
    return builder.ok(data = {"stats": stats}).build_json()
def job_all(request):
    """Render the index page with every job known to the Galaxy master."""
    galaxy = sdk.GalaxySDK(settings.GALAXY_MASTER)
    jobs, status = galaxy.get_all_job()
    # NOTE: `status` is ignored, matching the original behaviour.
    job_dicts = []
    for job in jobs:
        as_dict = pb2dict.protobuf_to_dict(job)
        as_dict['state'] = master_pb2.JobState.Name(as_dict['state'])
        job_dicts.append(as_dict)
    return util.render_tpl(request, {"jobs": job_dicts}, "index.html")
def job_detail(request):
    """Render the job detail page for ?jobid."""
    return util.render_tpl(request, {"jobid":request.GET.get("jobid", None)},
                           "job.html")
def pod_detail(request):
    """Render the pod detail page for ?podid (optionally pinned to ?time)."""
    return util.render_tpl(request, {"podid":request.GET.get("podid", None),
                                     "time":request.GET.get("time",None)},
                           "pod_detail.html")
def get_real_time_status(request):
    """Return the Galaxy master's real-time cluster status as JSON."""
    galaxy = sdk.GalaxySDK(settings.GALAXY_MASTER)
    response = galaxy.get_real_time_status()
    builder = http.ResponseBuilder()
    as_dict = pb2dict.protobuf_to_dict(response)
    return builder.ok(data = {"status": as_dict}).build_json()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import datetime
import functools
import inspect
import json
import lockfile
import netaddr
import os
import random
import re
import shlex
import socket
import struct
import sys
import time
import types
import uuid
import pyclbr
from xml.sax import saxutils
from eventlet import event
from eventlet import greenthread
from eventlet import semaphore
from eventlet.green import subprocess
from nova import exception
from nova import flags
from nova import log as logging
from nova import version
# Module-level logger and shared timestamp formats for nova.utils.
LOG = logging.getLogger("nova.utils")
# ISO timestamp without microseconds (trailing "Z" marks UTC).
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
# ISO timestamp including microseconds.
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
FLAGS = flags.FLAGS
def import_class(import_str):
    """Return the class named by *import_str* ("package.module.Class").

    :raises exception.ClassNotFound: if the module cannot be imported or
        the class attribute is missing.

    Fixes: the Python-2-only ``except ..., exc`` form is replaced by
    ``except ... as exc`` (valid on 2.6+ and required on 3.x).
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ImportError, ValueError, AttributeError) as exc:
        LOG.debug(_('Inner Exception: %s'), exc)
        raise exception.ClassNotFound(class_name=class_str)
def import_object(import_str):
    """Import *import_str* as a module, or instantiate it as a class.

    A plain module path returns the module object; anything that fails to
    import as a module is treated as a class path and instantiated.
    """
    try:
        __import__(import_str)
        return sys.modules[import_str]
    except ImportError:
        # Not a module -- resolve as a class and return a new instance.
        klass = import_class(import_str)
        return klass()
def vpn_ping(address, port, timeout=0.05, session_id=None):
    """Sends a vpn negotiation packet and returns the server session.

    Returns False on a failure. Basic packet structure is below.

    Client packet (14 bytes)::

        0 1      8 9  13
        +-+--------+-----+
        |x| cli_id |?????|
        +-+--------+-----+
        x = packet identifier 0x38
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding

    Server packet (26 bytes)::

        0 1      8 9  13 14    21 2225
        +-+--------+-----+--------+----+
        |x| srv_id |?????| cli_id |????|
        +-+--------+-----+--------+----+
        x = packet identifier 0x40
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding
        bit 9 was 1 and the rest were 0 in testing
    """
    if session_id is None:
        session_id = random.randint(0, 0xffffffffffffffff)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = struct.pack('!BQxxxxxx', 0x38, session_id)
    sock.sendto(data, (address, port))
    sock.settimeout(timeout)
    try:
        received = sock.recv(2048)
    except socket.timeout:
        return False
    finally:
        sock.close()
    fmt = '!BQxxxxxQxxxx'
    if len(received) != struct.calcsize(fmt):
        # Fixed: print() call instead of the Python-2-only print statement.
        print(struct.calcsize(fmt))
        return False
    (identifier, server_sess, client_sess) = struct.unpack(fmt, received)
    if identifier == 0x40 and client_sess == session_id:
        return server_sess
    # NOTE(review): falls through returning None (not False) on an
    # unexpected identifier/session -- callers treat both as falsy.
def fetchfile(url, target):
    """Download *url* to the local path *target* by shelling out to curl."""
    LOG.debug(_('Fetching %s') % url)
    execute('curl', '--fail', url, '-o', target)
def execute(*cmd, **kwargs):
    """
    Helper method to execute command with optional retry.

    :cmd                Passed to subprocess.Popen.
    :process_input      Send to opened process.
    :check_exit_code    Defaults to 0. Raise exception.ProcessExecutionError
                        unless program exits with this code.
    :delay_on_retry     True | False. Defaults to True. If set to True, wait a
                        short amount of time before retrying.
    :attempts           How many times to retry cmd.
    :run_as_root        True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper FLAG.

    :raises exception.Error on receiving unknown arguments
    :raises exception.ProcessExecutionError

    NOTE: Python 2 only as written (types.IntType; map() used as a list).
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', 0)
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    # Anything left over is an unsupported keyword argument.
    if len(kwargs):
        raise exception.Error(_('Got unknown keyword args '
                                'to utils.execute: %r') % kwargs)
    if run_as_root:
        cmd = shlex.split(FLAGS.root_helper) + list(cmd)
    # Stringify every argument so Popen accepts ints/paths alike.
    cmd = map(str, cmd)
    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                # Only enforce the exit code when an int was requested.
                if type(check_exit_code) == types.IntType \
                        and _returncode != check_exit_code:
                    (stdout, stderr) = result
                    raise exception.ProcessExecutionError(
                            exit_code=_returncode,
                            stdout=stdout,
                            stderr=stderr,
                            cmd=' '.join(cmd))
            return result
        except exception.ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* over an existing paramiko-style SSH connection.

    Returns (stdout, stderr).  addl_env and process_input are not
    supported and raise exception.Error if supplied.  When
    check_exit_code is true, a non-zero remote exit status raises
    exception.ProcessExecutionError.
    """
    LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd))
    if addl_env:
        raise exception.Error(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise exception.Error(_('process_input not supported over SSH'))
    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel
    # NOTE(justinsb): reading everything up front is suspicious -- other
    # SSH clients have buffering issues with this approach -- but kept.
    out = stdout_stream.read()
    err = stderr_stream.read()
    stdin_stream.close()
    exit_status = channel.recv_exit_status()
    # exit_status == -1 means no exit code was returned by the channel.
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=out,
                                                  stderr=err,
                                                  cmd=' '.join(cmd))
    return (out, err)
def abspath(s):
    """Return *s* resolved relative to this module's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, s)
def novadir():
    """Return the filesystem prefix that contains the nova package."""
    import nova
    nova_init = os.path.abspath(nova.__file__)
    return nova_init.split('nova/__init__.py')[0]
def default_flagfile(filename='nova.conf', args=None):
    """Insert a --flagfile option into *args* unless one is already present.

    Searches, in order: *filename* resolved against the calling script's
    directory, ./nova.conf, then /etc/nova/nova.conf.  Mutates *args*
    (defaults to sys.argv) in place.
    """
    if args is None:
        args = sys.argv
    if not any('flagfile' in arg for arg in args):
        if not os.path.isabs(filename):
            # turn relative filename into an absolute path relative to the
            # outermost caller's script directory
            script_dir = os.path.dirname(inspect.stack()[-1][1])
            filename = os.path.abspath(os.path.join(script_dir, filename))
        if not os.path.exists(filename):
            filename = "./nova.conf"
        if not os.path.exists(filename):
            filename = '/etc/nova/nova.conf'
        if os.path.exists(filename):
            args.insert(1, '--flagfile=%s' % filename)
def debug(arg):
    """Log *arg* at debug level and return it unchanged.

    Handy as a transparent tap in a callback chain.
    """
    LOG.debug(_('debug in callback: %s'), arg)
    return arg
def runthis(prompt, *cmd, **kwargs):
    """Log and execute *cmd* via execute(), discarding its output.

    NOTE(review): the 'prompt' parameter and the captured (rv, err)
    result are unused -- confirm with callers before cleaning up.
    """
    LOG.debug(_('Running %s'), (' '.join(cmd)))
    rv, err = execute(*cmd, **kwargs)
def generate_uid(topic, size=8):
    """Return an id of the form '<topic>-<random suffix>'.

    :param topic: prefix for the generated id.
    :param size: number of random suffix characters.
    """
    # BUG FIX: the original set '01234567890abc...' listed '0' twice,
    # skewing the distribution toward zero.  Each character now appears
    # once.  range() replaces the Python-2-only xrange() (identical
    # behaviour for iteration).
    characters = '0123456789abcdefghijklmnopqrstuvwxyz'
    choices = [random.choice(characters) for _x in range(size)]
    return '%s-%s' % (topic, ''.join(choices))
# Character sets used by generate_password().  Glyphs that are easy to
# misread when transcribed by hand are deliberately excluded.

# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789'  # Removed: 0,1
                            'ABCDEFGHJKLMNPQRSTUVWXYZ'  # Removed: I, O
                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l

# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789'  # Removed: 0, 1
                           'ABCDEFGHJKLMNPQRSTUVWXYZ')  # Removed: I, O
def usage_from_instance(instance_ref, **kw):
    """Build the usage-notification payload for an instance record.

    Extra keyword arguments are merged into (and may override) the
    generated dictionary.
    """
    launched = instance_ref['launched_at']
    usage_info = {
        'project_id': instance_ref['project_id'],
        'user_id': instance_ref['user_id'],
        'instance_id': instance_ref['id'],
        'instance_type': instance_ref['instance_type']['name'],
        'instance_type_id': instance_ref['instance_type_id'],
        'display_name': instance_ref['display_name'],
        'created_at': str(instance_ref['created_at']),
        # An instance that never launched gets an empty string, not 'None'.
        'launched_at': str(launched) if launched else '',
        'image_ref': instance_ref['image_ref'],
    }
    usage_info.update(kw)
    return usage_info
def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
    """Generate a random password from the supplied symbols.

    Believed to be reasonably secure (with a reasonable password length!)

    :param length: number of characters in the password.
    :param symbols: candidate character set.
    """
    # SystemRandom draws from os.urandom, so this is suitable for
    # security-sensitive use, unlike the module-level PRNG.  range()
    # replaces the Python-2-only xrange() (identical iteration).
    r = random.SystemRandom()
    return ''.join([r.choice(symbols) for _i in range(length)])
def last_octet(address):
    """Return the final dotted component of *address* as an int."""
    _head, _sep, tail = address.rpartition('.')
    return int(tail)
def get_my_linklocal(interface):
    """Return the IPv6 link-local address configured on *interface*.

    Shells out to 'ip -f inet6 -o addr show <interface>' and scrapes
    the first scope-link inet6 address from the output.

    :raises exception.Error: when the command fails or no link-local
        address is present.
    """
    try:
        if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        # NOTE(review): non-raw string literal -- '\s'/'\d' only work
        # because unknown escapes pass through; should be r'...'.
        condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
        # An empty match list raises IndexError here, which the broad
        # except below converts into exception.Error.
        if address[0] is not None:
            return address[0]
        else:
            raise exception.Error(_('Link Local address is not found.:%s')
                                  % if_str)
    except Exception as ex:
        raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
                                " :%(ex)s") % locals())
def utcnow():
    """Overridable version of utils.utcnow.

    Tests may freeze time via set_time_override(); any truthy override
    stored on the function object is returned instead of the real time.
    """
    override = utcnow.override_time
    if override:
        return override
    return datetime.datetime.utcnow()


# No override by default: return the real current UTC time.
utcnow.override_time = None
def is_older_than(before, seconds):
    """Return True if *before* is more than *seconds* in the past."""
    age = utcnow() - before
    return age > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Timestamp version of our utcnow function.

    NOTE(review): time.mktime interprets its argument in *local* time,
    so this is only a correct UTC epoch value when the host timezone is
    UTC -- confirm before relying on it across timezones.
    """
    return time.mktime(utcnow().timetuple())
def set_time_override(override_time=None):
    """Override utils.utcnow to return a constant time.

    :param override_time: datetime to freeze time at; defaults to the
        current UTC time.

    BUG FIX: the original default ``datetime.datetime.utcnow()`` was
    evaluated once at import time, so every default call froze time at
    module-load time.  Computing it inside the call fixes that.
    """
    if override_time is None:
        override_time = datetime.datetime.utcnow()
    utcnow.override_time = override_time
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    :raises AssertionError: if time has not been overridden via
        set_time_override().
    """
    # 'is not None' replaces the readability trap 'not x is None'
    # (same parse, clearer intent).
    assert utcnow.override_time is not None
    utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by the given number of seconds."""
    delta = datetime.timedelta(seconds=seconds)
    advance_time_delta(delta)
def clear_time_override():
    """Remove the overridden time; utcnow() returns real time again."""
    utcnow.override_time = None
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow.

    :param at: datetime to format; falsy values default to utcnow().
    :param fmt: strftime() format string.
    """
    if not at:
        at = utcnow()
    return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime.

    Inverse of strtime() for the same *fmt*.
    """
    return datetime.datetime.strptime(timestr, fmt)
def isotime(at=None):
    """Returns ISO-formatted utcnow (or *at* if given)."""
    return strtime(at, ISO_TIME_FORMAT)
def parse_isotime(timestr):
    """Turn an ISO-formatted time back into a datetime."""
    return parse_strtime(timestr, ISO_TIME_FORMAT)
def parse_mailmap(mailmap='.mailmap'):
    """Parse a git-style .mailmap file into {alias: canonical_email}.

    Lines are expected as 'canonical alias'; comment lines starting
    with '#' are skipped.  Returns an empty dict when the file does
    not exist.
    """
    mapping = {}
    if os.path.exists(mailmap):
        # BUG FIX: the file handle was previously never closed; 'with'
        # guarantees cleanup even if parsing raises.
        with open(mailmap, 'r') as fp:
            for line in fp:
                line = line.strip()
                if not line.startswith('#') and ' ' in line:
                    canonical_email, alias = line.split(' ')
                    mapping[alias] = canonical_email
    return mapping
def str_dict_replace(s, mapping):
    """Return *s* with every occurrence of each mapping key replaced.

    :param s: input string.
    :param mapping: dict of {old_substring: new_substring}.
    """
    # COMPAT FIX: dict.items() behaves identically to the Python-2-only
    # iteritems() here and keeps the function working on Python 3.
    for old, new in mapping.items():
        s = s.replace(old, new)
    return s
class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value.

    The backend module is selected by the runtime value of *pivot*
    (a flag-like object exposing ``.value``) and imported only on the
    first attribute access, then cached.
    """

    def __init__(self, pivot, **backends):
        """:param pivot: object whose .value names the backend.
        :param backends: name -> module-name (or (module, fromlist) tuple).
        """
        # Double-underscore names are name-mangled on purpose so they
        # can never collide with attributes proxied via __getattr__.
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None

    def __get_backend(self):
        """Import (once) and return the selected backend module."""
        if not self.__backend:
            backend_name = self.__pivot.value
            if backend_name not in self.__backends:
                raise exception.Error(_('Invalid backend: %s') % backend_name)

            backend = self.__backends[backend_name]
            # A tuple entry is (module_name, fromlist); a bare string
            # serves as both.  IDIOM FIX: isinstance() replaces the
            # unidiomatic type(backend) == type(tuple()) check.
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name, None, None, fromlist)
            LOG.debug(_('backend %s'), self.__backend)
        return self.__backend

    def __getattr__(self, key):
        backend = self.__get_backend()
        return getattr(backend, key)
class LoopingCallDone(Exception):
    """Raised by a LoopingCall poll function to stop the loop cleanly.

    Analogous to StopIteration: raising it inside the polled function
    ends the loop normally.  The optional argument becomes the value
    returned by LoopingCall.wait().
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCall.wait() should return."""
        self.retvalue = retvalue
class LoopingCall(object):
    """Repeatedly invoke a function on a greenthread at a fixed interval.

    The wrapped function may raise LoopingCallDone to stop the loop and
    hand a value back to wait(); any other exception stops the loop and
    is re-raised from wait().
    """

    def __init__(self, f=None, *args, **kw):
        """:param f: callable to invoke; args/kw are passed through."""
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False

    def start(self, interval, now=True):
        """Start looping; returns the Event that wait() blocks on.

        :param interval: seconds to sleep between invocations.
        :param now: if False, sleep one interval before the first call.
        """
        self._running = True
        done = event.Event()

        def _inner():
            if not now:
                greenthread.sleep(interval)
            try:
                while self._running:
                    self.f(*self.args, **self.kw)
                    if not self._running:
                        break
                    greenthread.sleep(interval)
            # SYNTAX FIX: 'except E, e' is Python-2-only; 'as' works on
            # Python 2.6+ and Python 3 with identical behaviour.
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                logging.exception('in looping call')
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done

    def stop(self):
        """Ask the loop to stop after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop finishes; returns its final value."""
        return self.done.wait()
def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py
    """
    # saxutils.escape handles &, < and > itself; the extra entity map
    # covers double quotes.  BUG FIX: the original mapped '"' to itself
    # (a no-op), leaving attribute-breaking quotes unescaped.
    return saxutils.escape(value, {'"': '&quot;'})
def utf8(value):
    """Try to turn a string into utf-8 if possible.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py
    """
    # Python-2-only: 'unicode' objects are encoded down to UTF-8 byte
    # strings; anything else must already be a plain 'str'.
    if isinstance(value, unicode):
        return value.encode('utf-8')
    assert isinstance(value, str)
    return value
def to_primitive(value, convert_instances=False, level=0):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.
    """
    # Values that can never be serialized meaningfully are stringified.
    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
             inspect.isfunction, inspect.isgeneratorfunction,
             inspect.isgenerator, inspect.istraceback, inspect.isframe,
             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
             inspect.isabstract]
    for test in nasty:
        if test(value):
            return unicode(value)

    # Depth guard against cyclical structures (see docstring).
    if level > 3:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        if type(value) is type([]) or type(value) is type((None,)):
            o = []
            for v in value:
                o.append(to_primitive(v, convert_instances=convert_instances,
                                      level=level))
            return o
        elif type(value) is type({}):
            o = {}
            for k, v in value.iteritems():
                o[k] = to_primitive(v, convert_instances=convert_instances,
                                    level=level)
            return o
        elif isinstance(value, datetime.datetime):
            return str(value)
        elif hasattr(value, 'iteritems'):
            return to_primitive(dict(value.iteritems()),
                                convert_instances=convert_instances,
                                level=level)
        elif hasattr(value, '__iter__'):
            # BUG FIX: 'level' was previously passed positionally here,
            # landing in the convert_instances parameter and defeating
            # the depth guard.
            return to_primitive(list(value),
                                convert_instances=convert_instances,
                                level=level)
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return to_primitive(value.__dict__,
                                convert_instances=convert_instances,
                                level=level + 1)
        else:
            return value
    # SYNTAX FIX: 'except E, e' is Python-2-only; the bound name was
    # unused anyway.
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return unicode(value)
def dumps(value):
    """Serialize *value* to JSON, degrading via to_primitive() when the
    value is not natively serializable."""
    try:
        return json.dumps(value)
    except TypeError:
        pass
    # Lossy fallback: reduce the object to JSON-friendly primitives.
    return json.dumps(to_primitive(value))
def loads(s):
    """Deserialize a JSON string (thin wrapper over json.loads)."""
    return json.loads(s)
# Register this module's dumps/loads with anyjson (if installed) so
# libraries that use anyjson pick up our primitive-converting
# serializer.
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append(("nova.utils", "dumps", TypeError,
                             "loads", ValueError))
    anyjson.force_implementation("nova.utils")


# Per-name semaphores shared by the synchronized() decorator below.
_semaphores = {}
class _NoopContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def synchronized(name, external=False):
    """Synchronization decorator.

    Decorating a method like so:
    @synchronized('mylock')
    def foo(self, *args):
       ...

    ensures that only one thread will execute the bar method at a time.

    Different methods can share the same lock:
    @synchronized('mylock')
    def foo(self, *args):
       ...

    @synchronized('mylock')
    def bar(self, *args):
       ...

    This way only one of either foo or bar can be executing at a time.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    a method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.
    """
    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn\
            #              amically-allocating-and-destroying-mutexes
            # Lazily create one semaphore per lock name.
            if name not in _semaphores:
                _semaphores[name] = semaphore.Semaphore()
            sem = _semaphores[name]
            LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
                        '"%(method)s"...' % {'lock': name,
                                             'method': f.__name__}))
            with sem:
                if external:
                    # Cross-process exclusion via a file lock in
                    # FLAGS.lock_path; the lock file itself is never
                    # removed here.
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...' %
                                {'lock': name, 'method': f.__name__}))
                    lock_file_path = os.path.join(FLAGS.lock_path,
                                                  'nova-%s.lock' % name)
                    lock = lockfile.FileLock(lock_file_path)
                else:
                    lock = _NoopContextManager()

                with lock:
                    retval = f(*args, **kwargs)

            # If no-one else is waiting for it, delete it.
            # See note about possible raciness above.
            if not sem.balance < 1:
                del _semaphores[name]

            return retval
        return inner
    return wrap
def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored, this function
    will not throw because of None (anywhere) in items. The returned list
    will contain no None values.
    """
    if path is None:
        raise exception.Error('Invalid mini_xpath')

    (first_token, sep, remainder) = path.partition('/')

    if first_token == '':
        raise exception.Error('Invalid mini_xpath')

    results = []

    if items is None:
        return results

    # COMPAT FIX: 'list' replaces the deprecated Python-2-only
    # types.ListType alias (the very same object under Python 2).
    if not isinstance(items, list):
        # Wrap single objects in a list
        items = [items]

    for item in items:
        if item is None:
            continue
        # Only dict-like objects (anything with .get) participate.
        get_method = getattr(item, 'get', None)
        if get_method is None:
            continue
        child = get_method(first_token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten intermediate lists
            for x in child:
                results.append(x)
        else:
            results.append(child)

    if not sep:
        # No more tokens
        return results
    else:
        # Recurse on the remaining path with the gathered children.
        return get_from_path(results, remainder)
def flatten_dict(dict_, flattened=None):
    """Recursively flatten a nested dictionary into a single level.

    On key collisions, values encountered later overwrite earlier ones.
    """
    flattened = flattened or {}
    # COMPAT FIX: .items()/hasattr('items') replace the Python-2-only
    # iteritems(); behaviour is identical for dicts on both versions.
    for key, value in dict_.items():
        if hasattr(value, 'items'):
            flatten_dict(value, flattened)
        else:
            flattened[key] = value
    return flattened
def partition_dict(dict_, keys):
    """Return two dicts, one with `keys` the other with everything else."""
    intersection = {}
    difference = {}
    # COMPAT FIX: .items() replaces the Python-2-only iteritems().
    for key, value in dict_.items():
        if key in keys:
            intersection[key] = value
        else:
            difference[key] = value
    return intersection, difference
def map_dict_keys(dict_, key_map):
    """Return a dict in which the dictionary's keys are mapped to new keys.

    Keys absent from *key_map* are carried over unchanged.
    """
    mapped = {}
    # COMPAT FIX: .items() replaces the Python-2-only iteritems().
    for key, value in dict_.items():
        mapped_key = key_map[key] if key in key_map else key
        mapped[mapped_key] = value
    return mapped
def subset_dict(dict_, keys):
    """Return a dict that only contains a subset of keys."""
    subset, _rest = partition_dict(dict_, keys)
    return subset
def check_isinstance(obj, cls):
    """Checks that obj is of type cls, and lets PyLint infer types.

    :returns: obj unchanged when the check passes.
    :raises Exception: when obj is not an instance of cls.
    """
    if isinstance(obj, cls):
        return obj
    raise Exception(_('Expected object of type: %s') % (str(cls)))
    # TODO(justinsb): Can we make this better??
    # NOTE: unreachable on purpose -- it exists only so static
    # analysis infers the return type.
    return cls()  # Ugly PyLint hack
def parse_server_string(server_str):
    """
    Parses the given server_string and returns a tuple of host and port.
    If it's not a combination of host part and port, the port element
    is a null string. If the input is invalid expression, ('', '') is
    returned.
    """
    try:
        # First of all, exclude pure IPv6 address (w/o port).
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')

        # Next, check if this is IPv6 address with a port number combination.
        if server_str.find("]:") != -1:
            # Strip the leading '[' of the bracketed IPv6 literal.
            (address, port) = server_str.replace('[', '', 1).split(']:')
            return (address, port)

        # Third, check if this is a combination of an address and a port
        if server_str.find(':') == -1:
            return (server_str, '')

        # This must be a combination of an address and a port
        (address, port) = server_str.split(':')
        return (address, port)

    except Exception:
        # e.g. more than one ':' in a non-bracketed string makes the
        # split unpack fail; treat any parse failure as invalid input.
        LOG.debug(_('Invalid server_string: %s' % server_str))
        return ('', '')
def gen_uuid():
    """Return a freshly generated random (version 4) UUID object."""
    new_id = uuid.uuid4()
    return new_id
def is_uuid_like(val):
    """For our purposes, a UUID is a string in canonical form:

        aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    # NOTE: purely syntactic -- length 36 with exactly four dashes --
    # so non-hex strings of the right shape also pass.  basestring is
    # Python-2-only; under Python 3 this needs str.
    if not isinstance(val, basestring):
        return False
    return (len(val) == 36) and (val.count('-') == 4)
def bool_from_str(val):
    """Convert a string representation of a bool into a bool value.

    Numeric strings use C-style truthiness; otherwise only the
    case-insensitive word 'true' is True.
    """
    if not val:
        return False
    try:
        return bool(int(val))
    except ValueError:
        return val.lower() == 'true'
def is_valid_ipv4(address):
    """Strictly validate *address* as dotted-quad IPv4
    (four components, each an integer between 0 and 255)."""
    octets = address.split(".")
    if len(octets) != 4:
        return False
    try:
        return all(0 <= int(octet) <= 255 for octet in octets)
    except ValueError:
        # A non-numeric component fails validation.
        return False
def monkey_patch():
    """ If the Flags.monkey_patch set as True,
    this functuion patches a decorator
    for all functions in specified modules.

    You can set decorators for each modules
    using FLAGS.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'

    Parameters of the decorator is as follows.
    (See nova.notifier.api.notify_decorator)

    name - name of the function
    function - object of the function
    """
    # If FLAGS.monkey_patch is not True, this function does nothing.
    if not FLAGS.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in FLAGS.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr; this parses the
        # source rather than importing it, hence the __import__ above.
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz, inspect.ismethod):
                    setattr(clz, method,\
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,\
                    decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
    """Convert a value or list into a list of single-key dicts.

    Falsy input yields None.
    """
    if not lst:
        return None
    items = lst if isinstance(lst, list) else [lst]
    return [{label: item} for item in items]
| |
'''
Created on 29.11.2010
@author: The Zero
'''
class Php():
    """Emits PHP source for a compiled Brainfuck program.

    All output goes through self.code, a writer object exposing
    write(), an 'indent' attribute, a 'features' dict (which helpers
    the runtime class must include), and 'bits' (cell width for the
    wrap-around limit).
    """

    filename = 'bfphp.php'

    def __init__(self, code):
        self.code = code

    def getPtr(self, ptr):
        """Return the PHP expression for a cell index.

        None means the runtime data pointer ($this->ptr) and marks the
        'ptr' feature as used.
        """
        if None == ptr:
            self.code.features['ptr'] = True
        return '$this->ptr' if None == ptr else str(ptr)

    def initMem(self, size):
        """Pre-allocate the tape with `size` zeroed cells."""
        self.code.write('$this->data = array_fill(0, ' + str(size) + ', 0);')

    def find(self):
        """Emit a call to the runtime zero-cell scan helper."""
        self.code.features['ptr'] = True
        self.code.features['find'] = True
        self.code.write('$this->find();')

    def data(self, value):
        """Emit an inc/dec of the current cell.

        `value` may be an int or a string expression mentioning 'tmp'
        (rewritten to the PHP $tmp variable).
        """
        self.code.features['dataop'] = True
        if isinstance(value, str):
            value = value.replace('tmp', '$tmp')
            if '-' == value[0]:
                self.code.write('$this->dec(' + value[1:] + ');')
            else:
                self.code.write('$this->inc(' + value + ');')
        elif value == 1:
            self.code.write('$this->inc();')
        elif value == -1:
            self.code.write('$this->dec();')
        elif value > 0:
            self.code.write('$this->inc(' + str(value) + ');')
        else:
            self.code.write('$this->dec(' + str(-value) + ');')

    def staticData(self, value, ptr=None):
        """Emit a direct assignment of `value` into a cell."""
        self.code.write('$this->data[' + self.getPtr(ptr) + '] = ' + str(value) + ';')

    def tmpData(self, mul, ptr=None):
        """Emit an update of a cell from $tmp, optionally scaled by `mul`."""
        num = '$tmp'
        if (None != mul) and (1 != mul) and (-1 != mul):
            if mul > 0:
                num += ' * ' + str(mul)
            else:
                num += ' * ' + str(-mul)
        if None == ptr:
            if mul > 0:
                self.code.write('$this->inc(' + num + ');')
            else:
                self.code.write('$this->dec(' + num + ');')
        else:
            strPos = '$this->data[' + self.getPtr(ptr) + ']'
            if mul == None:
                # BUG FIX: the emitted statement was missing its
                # terminating semicolon.
                self.code.write(strPos + ' = $tmp;')
            elif mul > 0:
                self.code.write(strPos + ' = (' + strPos + ' + ' + num + ') % $this->limit;')
            else:
                self.code.write(strPos + ' = (' + strPos + ' - ' + num + ') % $this->limit;')

    def move(self, offset):
        """Emit a relative data-pointer move."""
        self.code.features['move'] = True
        if offset > 0:
            self.code.write('$this->rgt(' + str(offset) + ');')
        else:
            self.code.write('$this->lft(' + str(-offset) + ');')

    def staticMove(self, ptr):
        """Emit an absolute data-pointer assignment."""
        self.code.features['ptr'] = True
        self.code.write('$this->ptr = ' + str(ptr) + ';')

    def cin(self):
        """Emit a read of one input character into the current cell."""
        self.code.features['cin'] = True
        self.code.write('$this->cin();')

    def tmp(self, change, ptr=None):
        """Emit a load of a cell (plus optional offset) into $tmp."""
        self.code.features['data'] = True
        ptr = self.getPtr(ptr)
        if change < 0:
            self.code.write('$tmp = $this->data[' + ptr + '] - ' + str(-change) + ';')
        elif change > 0:
            self.code.write('$tmp = $this->data[' + ptr + '] + ' + str(change) + ';')
        else:
            self.code.write('$tmp = $this->data[' + ptr + '];')

    def out(self, value=None, pos=None):
        """Emit output of a literal value, a fixed cell, or the current cell."""
        self.code.features['out'] = True
        if None != value:
            self.code.write('$this->out(' + str(value) + ');')
        elif None != pos:
            self.code.features['ptr'] = True
            self.code.features['data'] = True
            # BUG FIX: previously emitted Python-style 'self.data[...]'
            # into the generated PHP; must be '$this->data[...]'.
            self.code.write('$this->out($this->data[' + str(pos) + ']);')
        else:
            self.code.features['data'] = True
            self.code.features['ptr'] = True
            self.code.write('$this->out();')

    def multiOut(self, chars):
        """Emit a single echo statement for a run of known characters."""
        out = '"'
        for char in chars:
            if 10 == char or 13 == char:
                out += '\\n'
            elif char < 32:
                # BUG FIX: PHP concatenates strings with '.', not '+';
                # the original emitted invalid-arithmetic PHP here.
                out += '" . chr(' + str(char) + ') . "'
            elif 92 == char:
                out += '\\\\'
            else:
                printed = chr(char)
                if '"' == printed:
                    printed = '\\"'
                out += printed
        out += '"'
        self.code.write('echo ' + out + ';')

    def startProgram(self):
        """Open the run() entry-point method."""
        self.code.indent = 1
        self.code.write(' public function run() {')
        self.code.indent = 2
        return None

    def endProgram(self, context):
        """Close the run() entry-point method."""
        self.code.indent = 1
        self.code.write(' }')
        self.code.indent = 0

    def callLoop(self, pos):
        """Emit a call to the loop method generated for `pos`."""
        self.code.write('$this->loop' + str(pos) + '();')

    def startLoop(self, pos, ptr):
        """Open a private loopN() method with its while guard.

        Returns the caller's indent level for endLoop() to restore.
        """
        indent = self.code.indent
        ptr = self.getPtr(ptr)
        self.code.indent = 1
        self.code.write(' private function loop' + str(pos) + '() {')
        self.code.indent = 2
        self.code.write(' while ($this->data[' + ptr + ']) {')
        self.code.indent = 3
        return indent

    def endLoop(self, context):
        """Close the while body and method opened by startLoop()."""
        self.code.indent -= 1
        self.code.write(' }')
        self.code.indent -= 1
        self.code.write(' }')
        self.code.indent = context

    def startInlineIf(self, ptr):
        """Open an inline 'if (cell)' guard at the current indent."""
        ptr = self.getPtr(ptr)
        self.code.write('if ($this->data[' + ptr + ']) {')
        self.code.indent += 1
        return None

    def endInlineIf(self, context):
        """Close an inline if guard."""
        self.code.indent -= 1
        self.code.write('}')

    def callIf(self, pos):
        """Emit a call to the if method generated for `pos`."""
        self.code.write('$this->if' + str(pos) + '();')

    def startIf(self, pos, ptr):
        """Open a private ifN() method wrapping an inline guard.

        Returns the caller's indent level for endIf() to restore.
        """
        indent = self.code.indent
        self.code.indent = 1
        self.code.write(' private function if' + str(pos) + '() {')
        self.code.indent = 2
        self.startInlineIf(ptr)
        return indent

    def endIf(self, context):
        """Close the guard and method opened by startIf()."""
        self.endInlineIf(None)
        self.code.indent -= 1
        self.code.write(' }')
        self.code.indent = context

    def debugLoop(self, loop):
        """Emit a PHP comment describing a compiled loop's properties."""
        self.code.write('// loop is stable: ' + str(loop.stable) + ', simple: ' + str(loop.simple))

    def debugToken(self, token):
        """Emit a PHP comment describing a compiler token."""
        self.code.write('// token ' + str(token.__class__.__name__) + ' static pos:' + str(token.staticPos) + ', value: ' + str(token.staticValue) + ', pos: ' + str(token.pos))

    def header(self):
        """Generate the file header, class opening and the runtime
        helper methods required by the recorded features."""
        self.code.write('#!/usr/bin/php')
        self.code.write('<?php')
        self.code.write('')
        self.code.write('class Bfphp')
        self.code.write('{')
        self.code.indent += 1
        # With no features used, the class body stays empty.
        if True not in self.code.features.values():
            return
        if self.code.features['data'] or self.code.features['dataop'] or self.code.features['move'] or self.code.features['cin']:
            self.code.write(' private $data = array(0);')
        if self.code.features['ptr'] or self.code.features['dataop'] or self.code.features['move']:
            self.code.write(' private $ptr = 0;')
        if self.code.features['out']:
            self.code.write(' private $lastout = 0;')
        if self.code.features['data'] or self.code.features['dataop']:
            self.code.write(' private $limit;')
        self.code.write(' public function __construct($bits = ' + str(self.code.bits) + ') {')
        self.code.indent += 1
        if self.code.features['data'] or self.code.features['dataop']:
            self.code.write(' $this->limit = pow(2, $bits);')
        self.code.indent -= 1
        self.code.write(' }')
        if self.code.features['dataop']:
            self.code.write('')
            self.code.write(' private function inc($num=1) {')
            self.code.indent += 1
            self.code.write(' $this->data[$this->ptr] += $num;')
            self.code.write(' if ($this->data[$this->ptr] >= $this->limit) {')
            self.code.indent += 1
            self.code.write(' $this->data[$this->ptr] = $this->data[$this->ptr] % $this->limit;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.write('')
            self.code.write(' private function dec($num=1) {')
            self.code.indent += 1
            self.code.write(' $this->data[$this->ptr] -= $num;')
            self.code.write(' if ($this->data[$this->ptr] < 0) {')
            self.code.indent += 1
            self.code.write(' $this->data[$this->ptr] = $this->data[$this->ptr] % $this->limit;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.write('')
        if self.code.features['move']:
            self.code.write('')
            self.code.write(' private function rgt($num=1) {')
            self.code.indent += 1
            self.code.write(' $this->ptr += $num;')
            self.code.write(' while ($this->ptr >= count($this->data)) {')
            self.code.indent += 1
            self.code.write(' $this->data[] = 0;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.write('')
            self.code.write('')
            self.code.write(' private function lft($num=1) {')
            self.code.indent += 1
            self.code.write(' $this->ptr -= $num;')
            self.code.write(' if ($this->ptr < 0) {')
            self.code.indent += 1
            self.code.write(' $this->ptr = 0;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.write('')
        if self.code.features['out'] or self.code.features['cin']:
            self.code.write('')
            self.code.write(' private function out($char = null) {')
            self.code.indent += 1
            self.code.write(' if (null === $char) { $char = $this->data[$this->ptr]; }')
            # CR/LF pairs collapse into a single newline via $lastout.
            self.code.write(' if (13 == $char) {')
            self.code.indent += 1
            self.code.write(' if (10 != $this->lastout) {')
            self.code.indent += 1
            self.code.write(' echo "\\n";')
            self.code.write(' $this->lastout = $char;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' } elseif (10 == $char) {')
            self.code.indent += 1
            self.code.write(' if (13 != $this->lastout) {')
            self.code.indent += 1
            self.code.write(' echo "\\n";')
            self.code.write(' $this->lastout = $char;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' } else {')
            self.code.indent += 1
            self.code.write(' echo chr($char);')
            self.code.write(' $this->lastout = $char;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' }')
        if self.code.features['cin']:
            raise NotImplementedError()
        if self.code.features['find']:
            self.code.write('')
            self.code.write(' private function find() {')
            self.code.indent += 1
            self.code.write(' $ptr = $this->ptr;')
            self.code.write(' $keys = array_keys($this->data, 0);')
            self.code.write(' $key = array_shift($keys);')
            self.code.write(' while ($key <= $this->ptr && $key !== null) {')
            self.code.indent += 1
            self.code.write(' $key = array_shift($keys);')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.write(' if ($key === null) {')
            self.code.indent += 1
            self.code.write(' $this->ptr = count($this->data);')
            self.code.write(' $this->data[] = 0;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.write(' } else {')
            self.code.indent += 1
            self.code.write(' $this->ptr = $key;')
            self.code.indent -= 1
            self.code.write(' }')
            self.code.indent -= 1
            self.code.write(' }')

    def footer(self):
        """Generate the file footer that runs the script directly."""
        self.code.indent = 0
        self.code.write('}')
        self.code.write('')
        self.code.write('$bf = new Bfphp();')
        self.code.write('$bf->run();')
| |
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Console output utilities for OpenHTF.
This module provides convenience methods to format output for the CLI, along
with the ability to fork output to both a logger (e.g. a test record logger)
and to the CLI directly (e.g. sys.stdout).
Under the default configuration, messages printed with these utilities will be
saved to the test record logs of all running tests. To change this behavior,
the `logger` parameter should be overridden, e.g. by passing in the test record
logger for the current test.
"""
import logging
import math
import os
import re
import string
import sys
import textwrap
import time
import colorama
import contextlib2 as contextlib
from openhtf.util import argv
# Colorama module has to be initialized before use.
colorama.init()
_LOG = logging.getLogger(__name__)
# If True, all CLI output through this module will be suppressed, as well as any
# logging that uses a CliQuietFilter.
CLI_QUIET = False
ARG_PARSER = argv.ModuleParser()
ARG_PARSER.add_argument(
'--quiet', action=argv.StoreTrueInModule, target='%s.CLI_QUIET' % __name__,
help=textwrap.dedent('''\
Suppress all CLI output from OpenHTF's printing functions and logging.
This flag will override any verbosity levels set with -v.'''))
ANSI_ESC_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
class ActionFailedError(Exception):
  """Signals a failed action; used internally by action_result_context()."""
def _printed_len(some_string):
  """Compute the visible length of the string when printed."""
  # Strip ANSI color/style sequences, then count printable characters.
  stripped = ANSI_ESC_RE.sub('', some_string)
  return sum(1 for ch in stripped if ch in string.printable)
def _linesep_for_file(file):
"""Determine which line separator to use based on the file's mode."""
if 'b' in file.mode:
return os.linesep
return '\n'
def banner_print(msg, color='', width=60, file=sys.stdout, logger=_LOG):
  """Print the message as a banner with a fixed width.

  Also logs the message (un-bannered) to the given logger at the debug level.

  Args:
    msg: The message to print.
    color: Optional colorama color string to be applied to the message. You can
      concatenate colorama color strings together in order to get any set of
      effects you want.
    width: Total width for the resulting banner.
    file: A file object to which the banner text will be written. Intended for
      use with CLI output file objects like sys.stdout.
    logger: A logger to use, or None to disable logging.

  Example:
    >>> banner_print('Foo Bar Baz')

    ======================== Foo Bar Baz =======================

  """
  # NOTE(review): .encode('utf-8') yields bytes; the later str.format
  # and regex use assume Python 2 semantics -- confirm before Python 3.
  msg = msg.encode('utf-8')
  if logger:
    logger.debug(ANSI_ESC_RE.sub('', msg))
  if CLI_QUIET:
    return
  # Split '=' padding with ceil/floor so odd-length messages still
  # produce a banner exactly `width` columns wide.
  lpad = int(math.ceil((width - _printed_len(msg) - 2) / 2.0)) * '='
  rpad = int(math.floor((width - _printed_len(msg) - 2) / 2.0)) * '='
  file.write('{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'.format(
      sep=_linesep_for_file(file), color=color, lpad=lpad, msg=msg, rpad=rpad,
      reset=colorama.Style.RESET_ALL))
  file.flush()
def bracket_print(msg, color='', width=8, file=sys.stdout):
  """Prints the message in brackets in the specified color and end the line.

  Args:
    msg: The message to put inside the brackets (a brief status message).
    color: Optional colorama color string to be applied to the message. You can
      concatenate colorama color strings together in order to get any set of
      effects you want.
    width: Total desired width of the bracketed message.
    file: A file object to which the bracketed text will be written. Intended
      for use with CLI output file objects like sys.stdout.
  """
  # NOTE(review): bytes under Python 3; see banner_print.
  msg = msg.encode('utf-8')
  if CLI_QUIET:
    return
  # Center the message between the brackets with ceil/floor padding.
  lpad = int(math.ceil((width - 2 - _printed_len(msg)) / 2.0)) * ' '
  rpad = int(math.floor((width - 2 - _printed_len(msg)) / 2.0)) * ' '
  file.write('[{lpad}{bright}{color}{msg}{reset}{rpad}]'.format(
      lpad=lpad, bright=colorama.Style.BRIGHT, color=color, msg=msg,
      reset=colorama.Style.RESET_ALL, rpad=rpad))
  file.write(colorama.Style.RESET_ALL)
  file.write(_linesep_for_file(file))
  file.flush()
def cli_print(msg, color='', end=None, file=sys.stdout, logger=_LOG):
  """Print the message to file and also log it.

  This function is intended as a 'tee' mechanism to enable the CLI interface as
  a first-class citizen, while ensuring that everything the operator sees also
  has an analogous logging entry in the test record for later inspection.

  Args:
    msg: The message to print/log.
    color: Optional colorama color string to be applied to the message. You can
      concatenate colorama color strings together in order to get any set of
      effects you want.
    end: A custom line-ending string to print instead of newline.
    file: A file object to which the bracketed text will be written. Intended
      for use with CLI output file objects like sys.stdout.
    logger: A logger to use, or None to disable logging.
  """
  # NOTE(review): bytes under Python 3; see banner_print.
  msg = msg.encode('utf-8')
  if logger:
    logger.debug('-> {}'.format(msg))
  if CLI_QUIET:
    return
  if end is None:
    end = _linesep_for_file(file)
  file.write('{color}{msg}{reset}{end}'.format(
      color=color, msg=msg, reset=colorama.Style.RESET_ALL, end=end))
def error_print(msg, color=colorama.Fore.RED, file=sys.stderr):
  """Print the error message to the file in the specified color.

  Args:
    msg: The error message to be printed.
    color: Optional colorama color string to be applied to the message. You can
      concatenate colorama color strings together here, but note that style
      strings will not be applied.
    file: A file object to which the bracketed text will be written. Intended
      for use with CLI output file objects, specifically sys.stderr.
  """
  # NOTE(review): bytes under Python 3; see banner_print.
  msg = msg.encode('utf-8')
  if CLI_QUIET:
    return
  file.write('{sep}{bright}{color}Error: {normal}{msg}{sep}{reset}'.format(
      sep=_linesep_for_file(file), bright=colorama.Style.BRIGHT, color=color,
      normal=colorama.Style.NORMAL, msg=msg, reset=colorama.Style.RESET_ALL))
  file.flush()
class ActionResult(object):
  """Tracks the outcome of an action inside an action_result_context.

  The 'success' attribute is tri-state: None (unset), True, or False.
  """

  def __init__(self):
    # Unset until succeed()/fail() is called.
    self.success = None

  def succeed(self):
    """Mark the action as having succeeded."""
    self.success = True

  def fail(self):
    """Mark the action as having failed.

    Raises:
      ActionFailedError: Raised unconditionally; the enclosing
        contextmanager is expected to catch it.
    """
    self.success = False
    raise ActionFailedError()
@contextlib.contextmanager
def action_result_context(action_text,
                          width=60,
                          status_width=8,
                          succeed_text='OK',
                          fail_text='FAIL',
                          unknown_text='????',
                          file=sys.stdout,
                          logger=_LOG):
    """A contextmanager that prints actions and results to the CLI.

    When entering the context, the action will be printed, and when the context
    is exited, the result will be printed. The object yielded by the context is
    used to mark the action as a success or failure, and a raise from inside the
    context will also result in the action being marked fail. If the result is
    left unset, then indicative text ("????") will be printed as the result.

    Args:
        action_text: Text to be displayed that describes the action being taken.
        width: Total width for each line of output.
        status_width: Width of just the status message portion of each line.
        succeed_text: Status message displayed when the action succeeds.
        fail_text: Status message displayed when the action fails.
        unknown_text: Status message displayed when the result is left unset.
        file: Specific file object to write CLI output to.
        logger: A logger to use, or None to disable logging.

    Example usage:
        with action_result_context('Doing an action that will succeed...') as act:
            time.sleep(2)
            act.succeed()
        with action_result_context('Doing an action with unset result...') as act:
            time.sleep(2)
        with action_result_context('Doing an action that will fail...') as act:
            time.sleep(2)
            act.fail()
        with action_result_context('Doing an action that will raise...') as act:
            time.sleep(2)
            import textwrap
            raise RuntimeError(textwrap.dedent('''\
                Uh oh, looks like there was a raise in the mix.
                If you see this message, it means you are running the console_output
                module directly rather than using it as a library. Things to try:
                  * Not running it as a module.
                  * Running it as a module and enjoying the preview text.
                  * Getting another coffee.'''))

    Example output:
        Doing an action that will succeed...                [  OK  ]
        Doing an action with unset result...                [ ???? ]
        Doing an action that will fail...                   [ FAIL ]
        Doing an action that will raise...                  [ FAIL ]
        ...
    """
    if logger:
        logger.debug('Action - %s', action_text)
    if not CLI_QUIET:
        # Write the action text terminated by a bare carriage return so the
        # final "action + status" line printed below overwrites this one.
        file.write(''.join((action_text, '\r')))
        file.flush()
    # Padding that right-aligns the bracketed status into the last
    # `status_width` columns of a `width`-column line.
    spacing = (width - status_width - _printed_len(action_text)) * ' '
    result = ActionResult()
    try:
        yield result
    except Exception as err:
        # Any raise inside the context (including the ActionFailedError raised
        # by result.fail()) is reported as a failure.
        if logger:
            logger.debug('Result - %s [ %s ]', action_text, fail_text)
        if not CLI_QUIET:
            file.write(''.join((action_text, spacing)))
            bracket_print(fail_text, width=status_width, color=colorama.Fore.RED,
                          file=file)
        # ActionFailedError is swallowed (it only signals "fail"); anything
        # else propagates to the caller.
        if not isinstance(err, ActionFailedError):
            raise
        return
    # No exception: print the success marker if marked, otherwise the
    # "unknown" marker for results that were left unset.
    result_text = succeed_text if result.success else unknown_text
    result_color = colorama.Fore.GREEN if result.success else colorama.Fore.YELLOW
    if logger:
        logger.debug('Result - %s [ %s ]', action_text, result_text)
    if not CLI_QUIET:
        file.write(''.join((action_text, spacing)))
        bracket_print(result_text, width=status_width, color=result_color,
                      file=file)
# If invoked as a runnable module, this module will invoke its action result
# context in order to print colorized example output.
if __name__ == '__main__':
    banner_print('Running pre-flight checks.')
    # Demonstrate each possible outcome of action_result_context in turn:
    # explicit success, unset ("????"), explicit failure, and a raise.
    with action_result_context('Doing an action that will succeed...') as act:
        time.sleep(2)
        act.succeed()
    with action_result_context('Doing an action with unset result...') as act:
        time.sleep(2)
    with action_result_context('Doing an action that will fail...') as act:
        time.sleep(2)
        act.fail()
    # The final raise deliberately propagates out of the context (only
    # ActionFailedError is swallowed), terminating the demo with a traceback.
    with action_result_context('Doing an action that will raise...') as act:
        time.sleep(2)
        raise RuntimeError(textwrap.dedent('''\
            Uh oh, looks like there was a raise in the mix.
            If you see this message, it means you are running the console_output
            module directly rather than using it as a library. Things to try:
            * Not running it as a module.
            * Running it as a module and enjoying the preview text.
            * Getting another coffee.'''))
class CliQuietFilter(logging.Filter):
    """Logging filter that honours the --quiet CLI option.

    The --quiet option's value is stored in this module's CLI_QUIET attribute,
    so test scripts may override it directly. Attach this filter only to
    loggers that write to the CLI.
    """

    def filter(self, record):
        """Suppress every record while CLI_QUIET is set."""
        del record  # The decision is global; the record itself is irrelevant.
        return not CLI_QUIET
| |
import unittest
import numpy as np
from ..stat import *
class TestLedoitWolfCov(unittest.TestCase):
    """Tests for the Ledoit-Wolf shrinkage covariance estimator."""

    def setUp(self):
        """Draw centered samples X0 from a random covariance Sigma = A A^T."""
        np.random.seed(0)
        p, n = 40, 50
        self.A = A = np.random.randn(p, p)
        self.Sigma = np.dot(A, A.T)
        X = np.random.randn(p, n)
        X -= np.atleast_2d(np.mean(X, 1)).T
        X = np.dot(A, X)
        self.X0 = X0 = X - np.atleast_2d(np.mean(X, 1)).T
        self.S = np.dot(X0, X0.T) / n

    def test_var_of_cov(self):
        """The b2 statistic matches a direct per-sample variance computation."""
        X0, S = self.X0, self.S
        p, n = X0.shape
        V = np.mean(
            [(np.dot(np.atleast_2d(o).T, np.atleast_2d(o)) - S) ** 2 for o in X0.T],
            axis=0)
        b2, _d2, _lamb = lw_cov_base(X0, S, np.eye(p))
        self.assertAlmostEqual(np.sum(V) / n, b2)

    def test_condition_number(self):
        """Shrinkage should improve the condition number."""
        S_star = lw_cov(self.X0)
        # NOTE(review): rowvar=False treats *columns* as variables, whereas
        # self.S above treats rows as variables -- confirm this is intended.
        S = np.cov(self.X0, rowvar=False)
        # self.assert_ was removed in Python 3.12; use assertTrue instead.
        self.assertTrue(np.linalg.cond(S_star) < np.linalg.cond(S))

    def test_accuracy(self):
        """The shrunk estimate should be closer to the true covariance."""
        X, S, Sigma = self.X0, self.S, self.Sigma
        # NOTE(review): this overwrites Sigma (= A A^T) with A^T A -- confirm
        # which target the accuracy should be measured against.
        Sigma = np.dot(self.A.T, self.A)
        self.assertTrue(np.linalg.norm(lw_cov(X) - Sigma)
                        < np.linalg.norm(S - Sigma))

    def test_inv_accuracy(self):
        """The inverse of the shrunk estimate should also be more accurate."""
        X, S, Sigma = self.X0, self.S, self.Sigma
        S_star = lw_cov(X)
        invSigma, invS, invS_star = [np.linalg.inv(Y) for Y in [Sigma, S, S_star]]
        self.assertTrue(np.linalg.norm(invS_star - invSigma)
                        < np.linalg.norm(invS - invSigma))
class TestKullbackLeibler(unittest.TestCase):
    """Tests for the KL divergence between multivariate Gaussians."""

    def setUp(self):
        """Create two random full-rank Gaussians (mean, covariance, inverse)."""
        A = np.random.randn(4, 4)
        self.Sig1 = np.dot(A, A.T)
        self.inv_Sig1 = np.linalg.inv(self.Sig1)
        self.mu1 = np.random.randn(4)

        B = np.random.randn(4, 4)
        self.Sig2 = np.dot(B, B.T)
        self.inv_Sig2 = np.linalg.inv(self.Sig2)
        self.mu2 = np.random.randn(4)

    def test_equal_dist(self):
        """KL divergence between identical distributions is zero."""
        Sig_p, inv_Sig_p, mu = self.Sig1, self.inv_Sig1, self.mu1
        self.assertAlmostEqual(norm_kl_divergence(inv_Sig_p, mu, Sig_p, mu), 0)

    def test_mean_divergence(self):
        """KL divergence grows monotonically as one mean is shifted away."""
        Sig_q, inv_Sig_p, mu = self.Sig1, self.inv_Sig1, self.mu1
        for _ in range(4):
            # generate random direction
            rd = np.random.randn(Sig_q.shape[0])
            # shift one mean in this direction
            kld = np.asarray([norm_kl_divergence(inv_Sig_p, mu, Sig_q, mu + rd * d)
                              for d in np.linspace(0, 10, 50)])
            # check that the KLD is monotonically increasing.
            # self.assert_ was removed in Python 3.12; use assertTrue instead.
            self.assertTrue(np.all(np.diff(kld) > 0))

    def test_cov_divergence(self):
        """KL divergence grows as the covariance is interpolated away from q."""
        Sig_q, inv_Sig_p, mu = self.Sig1, self.inv_Sig1, self.mu1
        Sig_p = self.Sig2
        kl = []
        for alpha in np.linspace(0, 1, 10):
            # create diverging covariance matrix
            S = alpha * Sig_p + (1. - alpha) * Sig_q
            kl.append(norm_kl_divergence(inv_Sig_p, mu, S, mu))
        self.assertTrue(np.all(np.diff(kl) > 0))

    def test_numerical(self):
        """Compare the analytic KL divergence with numerical integration."""
        mu_p, mu_q, sig_p, sig_q = -1, 0, 1, .5
        kld_an = norm_kl_divergence(sig_p, mu_p, 1. / sig_q, mu_q)

        def norm_pdf(x, mu, sig):
            return 1. / np.sqrt(np.pi * 2. * sig ** 2) * np.exp(
                -(x - mu) ** 2. / (2. * sig ** 2))

        xs = np.linspace(-10, 10, 5000)
        px = norm_pdf(xs, mu_p, sig_p ** .5)
        qx = norm_pdf(xs, mu_q, sig_q ** .5)
        div = px * np.log(px / qx)
        kld_num = np.trapz(div, xs)
        np.testing.assert_almost_equal(kld_num, kld_an)

    def test_convenience_fun(self):
        """kl(P, Q) matches an explicit norm_kl_divergence computation."""
        P = np.dot(np.random.randn(4, 4), np.random.rand(4, 10))
        Q = np.dot(np.random.randn(4, 4), np.random.rand(4, 100))
        self.assertAlmostEqual(
            kl(P, Q),
            norm_kl_divergence(lw_cov(P), np.mean(P, 1),
                               np.linalg.pinv(lw_cov(Q)), np.mean(Q, 1)))
class TestROC(unittest.TestCase):
    """Tests for the ROC-curve helper."""

    def test_roc(self):
        '''Test bounds and ordering of ROC'''
        TPs, FPs = roc(np.random.rand(100), np.random.rand(100).round())
        # test monotonically increasing TPs and FPs
        np.testing.assert_equal(np.sort(TPs), TPs)
        np.testing.assert_equal(np.sort(FPs), FPs)
        self.assertEqual(TPs.min(), 0)
        self.assertEqual(TPs.max(), 1)
        self.assertEqual(FPs.min(), 0)
        self.assertEqual(FPs.max(), 1)

    def test_reverse(self):
        '''Test that the ROC is invariant for reversions'''
        scores = np.array([-1, 0, 0, 0, 0, 0, 0, 1])
        labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])
        t0, f0 = roc(scores, labels)
        t1, f1 = roc(scores[::-1], labels[::-1])  # reversed ROC
        np.testing.assert_equal(t0, t1)
        np.testing.assert_equal(f0, f1)

    def test_known(self):
        '''Test ROC for known input'''
        scores = np.array([-1, 0, 0, 0, 0, 0, 0, 1])
        labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])
        t0, f0 = roc(scores, labels)
        # self.assert_ was removed in Python 3.12; use assertTrue instead.
        self.assertTrue((t0 == [0, .25, 1, 1]).all())
        self.assertTrue((f0 == [0, 0, .75, 1]).all())
class TestAUC(unittest.TestCase):
    """Tests for the area-under-the-ROC-curve helpers."""

    def test_AUC_extrema(self):
        '''Test AUC for extrema'''
        self.assertEqual(auc([0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1]), 1)
        self.assertEqual(auc([1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1]), 0)
        self.assertEqual(auc([1, 0, 1, 0, 1, 0], [1, 1, 1, 1, 0, 0]), .5)

    def test_AUC_symbounds(self):
        '''Test AUC for symmetry and bounds'''
        N = 100
        for rho in [.1, .3, .5]:
            for _ in range(20):
                xs = np.random.random(N)
                ys = (np.linspace(0, 1, N) <= rho).round()
                # AUC is symmetric under label inversion.
                self.assertAlmostEqual(auc(xs, ys), 1 - auc(xs, np.abs(ys - 1)))
                # self.assert_ was removed in Python 3.12; use assertTrue.
                self.assertTrue(0 <= auc(xs, ys) <= 1)

    def test_AUC_confidence(self):
        '''Test AUC confidence interval for trends'''
        # we do not know much, but we can test for trends
        self.assertTrue(auc_confidence(1) > auc_confidence(100))
        self.assertTrue(auc_confidence(100, rho=.1) > auc_confidence(100))
        self.assertTrue(auc_confidence(100, delta=1e-8) > auc_confidence(100))

        # and symmetry
        for rho in [.01, .1, .5]:
            self.assertAlmostEqual(auc_confidence(100, rho=rho),
                                   auc_confidence(100, rho=1 - rho))

    def test_monte_carlo(self):
        '''Monte Carlo test for AUC confidence intervals'''
        SAMPLES = 100
        for N in [10, 100, 1000]:
            for rho in [0.1, .5, .9]:
                xs = np.random.random(N)
                ys = (np.linspace(0, 1, N) <= rho).round()
                self.assertEqual(ys.mean(), rho)
                aucs = []
                # create random AUCs
                for _ in range(SAMPLES):
                    np.random.shuffle(ys)
                    aucs.append(auc(xs, ys))
                # test conservativeness
                for delta in [.05, .001, .0001]:
                    epsilon = auc_confidence(N, rho, delta)
                    dev = np.abs(np.array(aucs) - 0.5)
                    e_p = np.mean(dev > epsilon)
                    self.assertTrue(e_p <= delta,
                                    'empirical p (=%f) > delta (=%f)' % (e_p, delta))
class TestMutualInformation(unittest.TestCase):
    """Tests for the mutual-information-of-a-confusion-matrix helper."""

    def test_max_bits(self):
        """A diagonal confusion matrix of size 2**i carries i bits."""
        for i in range(4):
            conf = np.eye(2 ** i)
            self.assertAlmostEqual(mut_inf(conf), i)

    def test_uniform(self):
        """A uniform confusion matrix carries no information."""
        for i in range(4):
            conf = np.ones((i, i + 1))
            self.assertAlmostEqual(mut_inf(conf), 0)

    def test_zero(self):
        """An all-zero matrix yields NaN."""
        # self.assert_ was removed in Python 3.12; use assertTrue instead.
        self.assertTrue(np.isnan(mut_inf(np.zeros((5, 3)))))

    def test_no_modification(self):
        """mut_inf must not modify its argument in place."""
        conf = np.ones((4, 3))
        mut_inf(conf)
        np.testing.assert_equal(conf, np.ones((4, 3)))

    def test_symmetrical(self):
        """Mutual information is symmetric under transposition."""
        for _ in range(4):
            conf = np.random.rand(3, 8)
            self.assertAlmostEqual(mut_inf(conf), mut_inf(conf.T))

    def test_malformed(self):
        """Negative entries are rejected."""
        self.assertRaises(AssertionError, mut_inf, -np.ones((3, 3)))
| |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects for extracting timing data from the ABINIT output files
It also provides tools to analye and to visualize the parallel efficiency.
"""
import collections
import logging
import os
import sys
import numpy as np
from monty.string import is_string, list_strings
from pymatgen.util.num import minloc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
logger = logging.getLogger(__name__)
def alternate(*iterables):
    """
    Interleave several iterables element by element:
    [a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]

    >>> alternate([1,4], [2,5], [3,6])
    [1, 2, 3, 4, 5, 6]
    """
    # zip(*iterables) yields one "column" at a time; flattening the columns
    # produces the interleaved sequence (truncated at the shortest iterable).
    return [item for column in zip(*iterables) for item in column]
class AbinitTimerParserError(Exception):
    """Errors raised by AbinitTimerParser."""
class AbinitTimerParser(collections.abc.Iterable):
    """
    Responsible for parsing a list of output files, extracting the timing results
    and analyzing the results.

    Assume the Abinit output files have been produced with `timopt -1`.

    Example:

        parser = AbinitTimerParser()
        parser.parse(list_of_files)

    To analyze all *.abo files within top, use:

        parser, paths, okfiles = AbinitTimerParser.walk(top=".", ext=".abo")
    """

    # The markers enclosing the data.
    BEGIN_TAG = "-<BEGIN_TIMER"
    END_TAG = "-<END_TIMER>"

    Error = AbinitTimerParserError

    # DEFAULT_MPI_RANK = "0"

    @classmethod
    def walk(cls, top=".", ext=".abo"):
        """
        Scan directory tree starting from top, look for files with extension `ext` and
        parse timing data.

        Return: (parser, paths, okfiles)
            where `parser` is the new object, `paths` is the list of files found and `okfiles`
            is the list of files that have been parsed successfully.
            (okfiles == paths) if all files have been parsed.
        """
        paths = []
        for root, _dirs, files in os.walk(top):
            for fname in files:
                if fname.endswith(ext):
                    paths.append(os.path.join(root, fname))

        parser = cls()
        okfiles = parser.parse(paths)
        return parser, paths, okfiles

    def __init__(self):
        """Initialize object."""
        # List of files that have been parsed.
        self._filenames = []

        # timers[filename][mpi_rank]
        # contains the timer extracted from the file filename associated to the MPI rank mpi_rank.
        self._timers = collections.OrderedDict()

    def __iter__(self):
        return self._timers.__iter__()

    def __len__(self):
        return len(self._timers)

    @property
    def filenames(self):
        """List of files that have been parsed successfully."""
        return self._filenames

    def parse(self, filenames):
        """
        Read and parse a filename or a list of filenames.
        Files that cannot be opened are ignored. A single filename may also be given.

        Return: list of successfully read files.
        """
        filenames = list_strings(filenames)

        read_ok = []
        for fname in filenames:
            try:
                fh = open(fname)  # pylint: disable=R1732
            except OSError:
                logger.warning("Cannot open file %s", fname)
                continue

            try:
                self._read(fh, fname)
                read_ok.append(fname)
            except self.Error as e:
                logger.warning(f"exception while parsing file {fname}:\n{str(e)}")
                continue
            finally:
                fh.close()

        # Add read_ok to the list of files that have been parsed.
        self._filenames.extend(read_ok)
        return read_ok

    def _read(self, fh, fname):
        """Parse the TIMER section of the open file `fh` with path `fname`."""
        if fname in self._timers:
            raise self.Error("Cannot overwrite timer associated to: %s " % fname)

        def parse_line(line):
            """Parse single line."""
            name, vals = line[:25], line[25:].split()
            try:
                ctime, cfract, wtime, wfract, ncalls, gflops = vals
            except ValueError:
                # v8.3 Added two columns at the end [Speedup, Efficacity]
                ctime, cfract, wtime, wfract, ncalls, gflops, speedup, eff = vals

            return AbinitTimerSection(name, ctime, cfract, wtime, wfract, ncalls, gflops)

        sections, info, cpu_time, wall_time = None, None, None, None
        data = {}
        inside, has_timer = 0, False
        for line in fh:
            if line.startswith(self.BEGIN_TAG):
                has_timer = True
                sections = []
                info = {}
                inside = 1
                line = line[len(self.BEGIN_TAG):].strip()[:-1]

                info["fname"] = fname
                for tok in line.split(","):
                    key, val = (s.strip() for s in tok.split("="))
                    info[key] = val

            elif line.startswith(self.END_TAG):
                inside = 0
                timer = AbinitTimer(sections, info, cpu_time, wall_time)
                mpi_rank = info["mpi_rank"]
                data[mpi_rank] = timer

            elif inside:
                inside += 1
                line = line[1:].strip()

                if inside == 2:
                    # Second line of the section carries the global timings.
                    d = {}
                    for tok in line.split(","):
                        key, val = (s.strip() for s in tok.split("="))
                        d[key] = float(val)
                    cpu_time, wall_time = d["cpu_time"], d["wall_time"]

                elif inside > 5:
                    sections.append(parse_line(line))

                else:
                    # Lines 3-5 are expected to be header/empty lines; if one of
                    # them unexpectedly parses as a data line, the layout is not
                    # what we assume.
                    # BUG FIX: parser_failed must be initialized here, otherwise
                    # a successful parse_line raised NameError below instead of
                    # the intended self.Error.
                    parser_failed = False
                    try:
                        parse_line(line)
                    except Exception:
                        parser_failed = True

                    if not parser_failed:
                        raise self.Error("line should be empty: " + str(inside) + line)

        if not has_timer:
            raise self.Error("%s: No timer section found" % fname)

        # Add it to the dict
        self._timers[fname] = data

    def timers(self, filename=None, mpi_rank="0"):
        """
        Return the list of timers associated to the given `filename` and MPI rank mpi_rank.
        """
        if filename is not None:
            return [self._timers[filename][mpi_rank]]
        # Note: iterate over parsed files in parse order.
        return [self._timers[fname][mpi_rank] for fname in self._filenames]

    def section_names(self, ordkey="wall_time"):
        """
        Return the names of sections ordered by ordkey.
        For the time being, the values are taken from the first timer.
        """
        section_names = []

        # FIXME this is not trivial: sections are taken from the first timer
        # only; other timers could in principle contain a different set.
        for idx, timer in enumerate(self.timers()):
            if idx == 0:
                section_names = [s.name for s in timer.order_sections(ordkey)]

        return section_names

    def get_sections(self, section_name):
        """
        Return the list of sections stored in self.timers() given `section_name`
        A fake section is returned if the timer does not have section_name.
        """
        sections = []
        for timer in self.timers():
            for sect in timer.sections:
                if sect.name == section_name:
                    sections.append(sect)
                    break
            else:
                sections.append(AbinitTimerSection.fake())

        return sections

    def pefficiency(self):
        """
        Analyze the parallel efficiency.

        Return: :class:`ParallelEfficiency` object.
        """
        timers = self.timers()

        # Number of CPUs employed in each calculation.
        ncpus = [timer.ncpus for timer in timers]

        # Find the minimum number of cpus used and its index in timers.
        min_idx = minloc(ncpus)
        min_ncpus = ncpus[min_idx]

        # Reference timer
        ref_t = timers[min_idx]

        # Compute the parallel efficiency (total and section efficiency)
        peff = {}
        ctime_peff = [(min_ncpus * ref_t.wall_time) / (t.wall_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        wtime_peff = [(min_ncpus * ref_t.cpu_time) / (t.cpu_time * ncp) for (t, ncp) in zip(timers, ncpus)]
        n = len(timers)

        peff["total"] = {}
        peff["total"]["cpu_time"] = ctime_peff
        peff["total"]["wall_time"] = wtime_peff
        peff["total"]["cpu_fract"] = n * [100]
        peff["total"]["wall_fract"] = n * [100]

        for sect_name in self.section_names():
            ref_sect = ref_t.get_section(sect_name)
            sects = [t.get_section(sect_name) for t in timers]
            try:
                ctime_peff = [(min_ncpus * ref_sect.cpu_time) / (s.cpu_time * ncp) for (s, ncp) in zip(sects, ncpus)]
                wtime_peff = [(min_ncpus * ref_sect.wall_time) / (s.wall_time * ncp) for (s, ncp) in zip(sects, ncpus)]
            except ZeroDivisionError:
                # Flag sections with zero timings; consumers skip -1 entries.
                ctime_peff = n * [-1]
                wtime_peff = n * [-1]

            assert sect_name not in peff
            peff[sect_name] = {}
            peff[sect_name]["cpu_time"] = ctime_peff
            peff[sect_name]["wall_time"] = wtime_peff
            peff[sect_name]["cpu_fract"] = [s.cpu_fract for s in sects]
            peff[sect_name]["wall_fract"] = [s.wall_fract for s in sects]

        return ParallelEfficiency(self._filenames, min_idx, peff)

    def summarize(self, **kwargs):
        """
        Return pandas DataFrame with the most important results stored in the timers.
        """
        import pandas as pd

        colnames = [
            "fname",
            "wall_time",
            "cpu_time",
            "mpi_nprocs",
            "omp_nthreads",
            "mpi_rank",
        ]

        # DataFrame.append was deprecated and removed in pandas >= 2.0:
        # build the frame from a list of row dicts instead.
        rows = [{k: getattr(timer, k) for k in colnames} for timer in self.timers()]
        frame = pd.DataFrame(rows, columns=colnames)
        frame["tot_ncpus"] = frame["mpi_nprocs"] * frame["omp_nthreads"]

        # Compute parallel efficiency (use the run with min number of cpus to normalize).
        i = frame["tot_ncpus"].values.argmin()
        ref_wtime = frame.iloc[i]["wall_time"]
        ref_ncpus = frame.iloc[i]["tot_ncpus"]
        frame["peff"] = (ref_ncpus * ref_wtime) / (frame["wall_time"] * frame["tot_ncpus"])

        return frame

    @add_fig_kwargs
    def plot_efficiency(self, key="wall_time", what="good+bad", nmax=5, ax=None, **kwargs):
        """
        Plot the parallel efficiency.

        Args:
            key: Parallel efficiency is computed using the wall_time.
            what: Specifies what to plot: `good` for sections with good parallel efficiency.
                `bad` for sections with bad efficiency. Options can be concatenated with `+`.
            nmax: Maximum number of entries in plot
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

            ================  ====================================================
            kwargs            Meaning
            ================  ====================================================
            linewidth         matplotlib linewidth. Default: 2.0
            markersize        matplotlib markersize. Default: 10
            ================  ====================================================

        Returns:
            `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax=ax)
        lw = kwargs.pop("linewidth", 2.0)
        msize = kwargs.pop("markersize", 10)
        what = what.split("+")

        timers = self.timers()
        peff = self.pefficiency()
        n = len(timers)
        xx = np.arange(n)

        ax.set_prop_cycle(color=["g", "b", "c", "m", "y", "k"])

        lines, legend_entries = [], []
        # Plot sections with good efficiency.
        if "good" in what:
            good = peff.good_sections(key=key, nmax=nmax)
            for g in good:
                yy = peff[g][key]
                (line,) = ax.plot(xx, yy, "-->", linewidth=lw, markersize=msize)
                lines.append(line)
                legend_entries.append(g)

        # Plot sections with bad efficiency.
        if "bad" in what:
            bad = peff.bad_sections(key=key, nmax=nmax)
            for b in bad:
                yy = peff[b][key]
                (line,) = ax.plot(xx, yy, "-.<", linewidth=lw, markersize=msize)
                lines.append(line)
                legend_entries.append(b)

        # Add total if not already done
        if "total" not in legend_entries:
            yy = peff["total"][key]
            (total_line,) = ax.plot(xx, yy, "r", linewidth=lw, markersize=msize)
            lines.append(total_line)
            legend_entries.append("total")

        ax.legend(lines, legend_entries, loc="best", shadow=True)

        ax.set_xlabel("Total_NCPUs")
        ax.set_ylabel("Efficiency")
        ax.grid(True)

        # Set xticks and labels.
        labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
        ax.set_xticks(xx)
        ax.set_xticklabels(labels, fontdict=None, minor=False, rotation=15)

        return fig

    @add_fig_kwargs
    def plot_pie(self, key="wall_time", minfract=0.05, **kwargs):
        """
        Plot pie charts of the different timers.

        Args:
            key: Keyword used to extract data from timers.
            minfract: Don't show sections whose relative weight is less that minfract.

        Returns:
            `matplotlib` figure
        """
        timers = self.timers()
        n = len(timers)

        # Make square figures and axes
        import matplotlib.pyplot as plt
        from matplotlib.gridspec import GridSpec

        fig = plt.gcf()
        gspec = GridSpec(n, 1)
        for idx, timer in enumerate(timers):
            ax = plt.subplot(gspec[idx, 0])
            ax.set_title(str(timer))
            timer.pie(ax=ax, key=key, minfract=minfract, show=False)

        return fig

    @add_fig_kwargs
    def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
        """
        Plot stacked histogram of the different timers.

        Args:
            key: Keyword used to extract data from the timers. Only the first `nmax`
                sections with largest value are shown.
            nmax: Maximum number of sections to show. Other entries are grouped together
                in the `others` section.
            ax: matplotlib :class:`Axes` or None if a new figure should be created.

        Returns:
            `matplotlib` figure
        """
        ax, fig, plt = get_ax_fig_plt(ax=ax)

        mpi_rank = "0"
        timers = self.timers(mpi_rank=mpi_rank)
        n = len(timers)

        names, values = [], []
        rest = np.zeros(n)

        for idx, sname in enumerate(self.section_names(ordkey=key)):
            sections = self.get_sections(sname)
            svals = np.asarray([s.__dict__[key] for s in sections])
            if idx < nmax:
                names.append(sname)
                values.append(svals)
            else:
                rest += svals

        names.append("others (nmax=%d)" % nmax)
        values.append(rest)

        # The dataset is stored in values. Now create the stacked histogram.
        ind = np.arange(n)  # the locations for the groups
        width = 0.35  # the width of the bars
        colors = nmax * ["r", "g", "b", "c", "k", "y", "m"]

        bars = []
        bottom = np.zeros(n)
        for idx, vals in enumerate(values):
            color = colors[idx]
            bar_ = ax.bar(ind, vals, width, color=color, bottom=bottom)
            bars.append(bar_)
            bottom += vals

        ax.set_ylabel(key)
        ax.set_title("Stacked histogram with the %d most important sections" % nmax)

        ticks = ind + width / 2.0
        labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
        ax.set_xticks(ticks)
        ax.set_xticklabels(labels, rotation=15)

        # Add legend.
        ax.legend([bar_[0] for bar_ in bars], names, loc="best")

        return fig

    def plot_all(self, show=True, **kwargs):
        """
        Call all plot methods provided by the parser.
        """
        figs = []
        app = figs.append
        app(self.plot_stacked_hist(show=show))
        app(self.plot_efficiency(show=show))
        app(self.plot_pie(show=show))
        return figs
class ParallelEfficiency(dict):
    """
    Dict-like container with the parallel efficiency of a set of runs.
    Maps section names to dicts of per-run metrics (e.g. "wall_time").
    """

    def __init__(self, filenames, ref_idx, *args, **kwargs):
        """
        Args:
            filenames: List of filenames.
            ref_idx: Index of the reference timer (the calculation performed
                with the smallest number of cpus).
        """
        self.update(*args, **kwargs)
        self.filenames = filenames
        self._ref_idx = ref_idx

    def _order_by_peff(self, key, criterion, reverse=True):
        """Return section names sorted by their efficiency under `criterion`."""
        self.estimator = {
            "min": min,
            "max": max,
            "mean": lambda items: sum(items) / len(items),
        }[criterion]

        ranked = []
        for name, section_peff in self.items():
            series = section_peff[key]
            # Skip sections that hit a division by zero (flagged with -1).
            if any(value == -1 for value in series):
                continue
            series = series[:]
            if len(series) > 1:
                # Drop the reference run; its efficiency is 1 by construction.
                reference = series.pop(self._ref_idx)
                assert reference == 1.0
            ranked.append((name, self.estimator(series)))

        ranked.sort(key=lambda pair: pair[1], reverse=reverse)
        return tuple(name for name, _score in ranked)

    def totable(self, stop=None, reverse=True):
        """
        Return table (list of lists) with timing results.

        Args:
            stop: Include results up to stop. None for all.
            reverse: Put items with highest wall_time in first positions if True.
        """
        osects = self._order_by_peff("wall_time", criterion="mean", reverse=reverse)
        if stop is not None:
            osects = osects[:stop]

        n = len(self.filenames)
        rows = [["AbinitTimerSection"] + alternate(self.filenames, n * ["%"])]
        for name in osects:
            entry = self[name]
            cells = alternate(entry["wall_time"], entry["wall_fract"])
            rows.append([name] + ["%.2f" % cell for cell in cells])

        return rows

    def good_sections(self, key="wall_time", criterion="mean", nmax=5):
        """
        Return first `nmax` sections with best value of key `key` using criterion `criterion`.
        """
        return self._order_by_peff(key, criterion=criterion)[:nmax]

    def bad_sections(self, key="wall_time", criterion="mean", nmax=5):
        """
        Return first `nmax` sections with worst value of key `key` using criterion `criterion`.
        """
        return self._order_by_peff(key, criterion=criterion, reverse=False)[:nmax]
class AbinitTimerSection:
    """Record with the timing results associated to a section of code."""

    STR_FIELDS = ["name"]

    NUMERIC_FIELDS = [
        "wall_time",
        "wall_fract",
        "cpu_time",
        "cpu_fract",
        "ncalls",
        "gflops",
    ]

    FIELDS = tuple(STR_FIELDS + NUMERIC_FIELDS)

    @classmethod
    def fake(cls):
        """Return a fake section. Mainly used to fill missing entries if needed."""
        return cls("fake", 0.0, 0.0, 0.0, 0.0, -1, 0.0)

    def __init__(self, name, cpu_time, cpu_fract, wall_time, wall_fract, ncalls, gflops):
        """
        Args:
            name: Name of the section.
            cpu_time: CPU time in seconds.
            cpu_fract: Percentage of CPU time.
            wall_time: Wall-time in seconds.
            wall_fract: Percentage of wall-time.
            ncalls: Number of calls.
            gflops: Gigaflops.
        """
        self.name = name.strip()
        self.cpu_time = float(cpu_time)
        self.cpu_fract = float(cpu_fract)
        self.wall_time = float(wall_time)
        self.wall_fract = float(wall_fract)
        self.ncalls = int(ncalls)
        self.gflops = float(gflops)

    def to_tuple(self):
        """Convert object to tuple (attributes in FIELDS order)."""
        return tuple(self.__dict__[at] for at in AbinitTimerSection.FIELDS)

    def to_dict(self):
        """Convert object to dictionary."""
        return {at: self.__dict__[at] for at in AbinitTimerSection.FIELDS}

    def to_csvline(self, with_header=False):
        """Return a string with data in CSV format. Add header if `with_header`."""
        string = ""
        if with_header:
            string += "# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n"

        string += ", ".join(str(v) for v in self.to_tuple()) + "\n"
        return string

    def __str__(self):
        """String representation."""
        # BUG FIX: numeric fields must be converted with str() before string
        # concatenation; the previous implementation raised TypeError.
        return ",".join(
            a + " = " + str(self.__dict__[a]) for a in AbinitTimerSection.FIELDS
        )
class AbinitTimer:
"""Container class storing the timing results."""
def __init__(self, sections, info, cpu_time, wall_time):
"""
Args:
sections: List of sections
info: Dictionary with extra info.
cpu_time: Cpu-time in seconds.
wall_time: Wall-time in seconds.
"""
# Store sections and names
self.sections = tuple(sections)
self.section_names = tuple(s.name for s in self.sections)
self.info = info
self.cpu_time = float(cpu_time)
self.wall_time = float(wall_time)
self.mpi_nprocs = int(info["mpi_nprocs"])
self.omp_nthreads = int(info["omp_nthreads"])
self.mpi_rank = info["mpi_rank"].strip()
self.fname = info["fname"].strip()
def __str__(self):
string = "file=%s, wall_time=%.1f, mpi_nprocs=%d, omp_nthreads=%d" % (
self.fname,
self.wall_time,
self.mpi_nprocs,
self.omp_nthreads,
)
# string += ", rank = " + self.mpi_rank
return string
@property
def ncpus(self):
"""Total number of CPUs employed."""
return self.mpi_nprocs * self.omp_nthreads
def get_section(self, section_name):
"""Return section associated to `section_name`."""
try:
idx = self.section_names.index(section_name)
except Exception:
raise
sect = self.sections[idx]
assert sect.name == section_name
return sect
def to_csv(self, fileobj=sys.stdout):
"""Write data on file fileobj using CSV format."""
openclose = is_string(fileobj)
if openclose:
fileobj = open(fileobj, "w") # pylint: disable=R1732
for idx, section in enumerate(self.sections):
fileobj.write(section.to_csvline(with_header=(idx == 0)))
fileobj.flush()
if openclose:
fileobj.close()
def to_table(self, sort_key="wall_time", stop=None):
"""Return a table (list of lists) with timer data"""
table = [
list(AbinitTimerSection.FIELDS),
]
ord_sections = self.order_sections(sort_key)
if stop is not None:
ord_sections = ord_sections[:stop]
for osect in ord_sections:
row = [str(item) for item in osect.to_tuple()]
table.append(row)
return table
# Maintain old API
totable = to_table
def get_dataframe(self, sort_key="wall_time", **kwargs):
"""
Return a pandas DataFrame with entries sorted according to `sort_key`.
"""
import pandas as pd
frame = pd.DataFrame(columns=AbinitTimerSection.FIELDS)
for osect in self.order_sections(sort_key):
frame = frame.append(osect.to_dict(), ignore_index=True)
# Monkey patch
frame.info = self.info
frame.cpu_time = self.cpu_time
frame.wall_time = self.wall_time
frame.mpi_nprocs = self.mpi_nprocs
frame.omp_nthreads = self.omp_nthreads
frame.mpi_rank = self.mpi_rank
frame.fname = self.fname
return frame
def get_values(self, keys):
"""
Return a list of values associated to a particular list of keys.
"""
if is_string(keys):
return [s.__dict__[keys] for s in self.sections]
values = []
for k in keys:
values.append([s.__dict__[k] for s in self.sections])
return values
def names_and_values(self, key, minval=None, minfract=None, sorted=True):
"""
Select the entries whose value[key] is >= minval or whose fraction[key] is >= minfract
Return the names of the sections and the corresponding values.
"""
values = self.get_values(key)
names = self.get_values("name")
new_names, new_values = [], []
other_val = 0.0
if minval is not None:
assert minfract is None
for n, v in zip(names, values):
if v >= minval:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minval " + str(minval))
new_values.append(other_val)
elif minfract is not None:
assert minval is None
total = self.sum_sections(key)
for n, v in zip(names, values):
if v / total >= minfract:
new_names.append(n)
new_values.append(v)
else:
other_val += v
new_names.append("below minfract " + str(minfract))
new_values.append(other_val)
else:
# all values
new_names, new_values = names, values
if sorted:
# Sort new_values and rearrange new_names.
nandv = list(zip(new_names, new_values))
nandv.sort(key=lambda t: t[1])
new_names, new_values = [n[0] for n in nandv], [n[1] for n in nandv]
return new_names, new_values
def _reduce_sections(self, keys, operator):
return operator(self.get_values(keys))
def sum_sections(self, keys):
"""Sum value of keys."""
return self._reduce_sections(keys, sum)
def order_sections(self, key, reverse=True):
"""Sort sections according to the value of key."""
return sorted(self.sections, key=lambda s: s.__dict__[key], reverse=reverse)
@add_fig_kwargs
def cpuwall_histogram(self, ax=None, **kwargs):
    """
    Plot grouped bars comparing cpu-time and wall-time of each section on axis `ax`.

    Args:
        ax: matplotlib :class:`Axes` or None if a new figure should be created.

    Returns: `matplotlib` figure
    """
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    nk = len(self.sections)
    ind = np.arange(nk)  # the x locations for the groups
    width = 0.35  # the width of the bars
    cpu_times = self.get_values("cpu_time")
    # Draw on `ax` rather than through the pyplot state machine (plt.bar),
    # so the bars land on the axes the caller actually passed in.
    rects1 = ax.bar(ind, cpu_times, width, color="r")
    wall_times = self.get_values("wall_time")
    rects2 = ax.bar(ind + width, wall_times, width, color="y")
    # Add ylabel and title
    ax.set_ylabel("Time (s)")
    # plt.title('CPU-time and Wall-time for the different sections of the code')
    ticks = self.get_values("name")
    # set_xticks takes only tick positions; the labels must go through
    # set_xticklabels (passing them as 2nd positional arg is wrong).
    ax.set_xticks(ind + width)
    ax.set_xticklabels(ticks)
    ax.legend((rects1[0], rects2[0]), ("CPU", "Wall"), loc="best")
    return fig
@add_fig_kwargs
def pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
    """
    Plot pie chart for this timer.

    Args:
        key: Keyword used to extract data from the timer.
        minfract: Don't show sections whose relative weight is less that minfract.
        ax: matplotlib :class:`Axes` or None if a new figure should be created.

    Returns: `matplotlib` figure
    """
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    # A pie should be drawn as a circle: force an equal aspect ratio.
    ax.axis("equal")
    # Sections below the minfract threshold are lumped together by
    # names_and_values into a single "below minfract" slice.
    slice_names, slice_values = self.names_and_values(key, minfract=minfract)
    ax.pie(slice_values, explode=None, labels=slice_names, autopct="%1.1f%%", shadow=True)
    return fig
@add_fig_kwargs
def scatter_hist(self, ax=None, **kwargs):
    """
    Scatter plot of cpu-time vs wall-time plus marginal histograms.

    Args:
        ax: matplotlib :class:`Axes` or None if a new figure should be created.
            NOTE(review): the scatter axes are created with ``plt.subplot``
            below, so the ``ax`` obtained here appears to be unused — confirm.

    Returns: `matplotlib` figure
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    x = np.asarray(self.get_values("cpu_time"))
    y = np.asarray(self.get_values("wall_time"))
    # the scatter plot:
    axScatter = plt.subplot(1, 1, 1)
    axScatter.scatter(x, y)
    axScatter.set_aspect("auto")
    # create new axes on the right and on the top of the current axes
    # The first argument of the new_vertical(new_horizontal) method is
    # the height (width) of the axes to be created in inches.
    divider = make_axes_locatable(axScatter)
    axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
    axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
    # make some labels invisible
    plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(), visible=False)
    # now determine nice limits by hand:
    # same symmetric bins for both histograms, based on the largest |value|
    binwidth = 0.25
    xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
    lim = (int(xymax / binwidth) + 1) * binwidth
    bins = np.arange(-lim, lim + binwidth, binwidth)
    axHistx.hist(x, bins=bins)
    axHisty.hist(y, bins=bins, orientation="horizontal")
    # the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
    # thus there is no need to manually adjust the xlim and ylim of these axis.
    # axHistx.axis["bottom"].major_ticklabels.set_visible(False)
    for tl in axHistx.get_xticklabels():
        tl.set_visible(False)
    axHistx.set_yticks([0, 50, 100])
    # axHisty.axis["left"].major_ticklabels.set_visible(False)
    for tl in axHisty.get_yticklabels():
        tl.set_visible(False)
    axHisty.set_xticks([0, 50, 100])
    # plt.draw()
    return fig
| |
#Author: Miguel Molero <miguel.molero@gmail.com>
import numpy as np
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal as Signal
from vtk import vtkInteractorStyleImage, vtkInteractorStyleTrackballCamera, vtkActor, vtkImageActor
from vtk import vtkAreaPicker, vtkContourWidget, vtkOrientedGlyphContourRepresentation, vtkImplicitSelectionLoop
from vtk import vtkCleanPolyData, vtkImplicitBoolean, vtkDataSetSurfaceFilter, vtkExtractGeometry, vtkExtractPolyDataGeometry
from ..graphics.QVTKWidget import QVTKWidget
from ..utils.qhelpers import *
from ..ManagerLayer import ManagerLayer
from pcloudpy.core.utils.vtkhelpers import actor_from_polydata
class QVTKWidgetKeyEvents(QVTKWidget):
    """QVTKWidget that re-broadcasts key presses via ``keyEventRequested``."""

    # Emitted with the Qt key code of every key press received by the widget.
    keyEventRequested = Signal(int)

    def __init__(self, parent=None):
        super(QVTKWidgetKeyEvents, self).__init__(parent)

    def keyPressEvent(self, event):
        """Let the base class handle the event, then emit its key code."""
        super(QVTKWidgetKeyEvents, self).keyPressEvent(event)
        self.keyEventRequested.emit(event.key())
class ViewWidget(QWidget):
    """
    Composite widget combining a VTK render window with a toolbar that
    offers camera presets plus contour-based point selection, extraction
    and cleaning for the current layer of a :class:`ManagerLayer`.
    """

    #: Emitted when the layers displayed by this view have been modified.
    layersModified = Signal()

    def __init__(self, parent=None):
        super(ViewWidget, self).__init__(parent)
        self.setAttribute(Qt.WA_DeleteOnClose)
        # True while the user is drawing a selection contour.
        self._is_extract = False
        self._contour_widget = None
        self.manager_layer = ManagerLayer()
        self._current_layer = None
        layout = QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        self.toolbar = QToolBar()
        self.toolbar.setStyleSheet("""
            QToolBar { border: 0px }
        """)
        self.toolbar.setIconSize(QSize(16, 16))
        self.vtkWidget = QVTKWidgetKeyEvents(self)
        self.vtkWidget.keyEventRequested.connect(self.key_press_event)
        layout.addWidget(self.toolbar)
        layout.addWidget(self.vtkWidget)
        self.setLayout(layout)
        self.setup_toolbar()
        self.init_model()

    def setup_toolbar(self):
        """Create the camera/selection actions and populate the toolbar."""
        self.reset_view_action = QAction(QIcon(":/pqResetCamera32.png"), "Reset View/Camera", self)
        self.reset_view_action.setStatusTip("Reset View/Camera")
        self.reset_view_action.setToolTip("Reset View/Camera")
        self.reset_view_action.triggered.connect(self.vtkWidget.reset_view)
        self.set_view_direction_to_mx_action = QAction(QIcon(":/pqXMinus24.png"), "View Direction -x", self)
        self.set_view_direction_to_mx_action.setStatusTip("View Direction -x")
        self.set_view_direction_to_mx_action.setToolTip("View Direction -x")
        self.set_view_direction_to_mx_action.triggered.connect(self.vtkWidget.viewMX)
        self.set_view_direction_to_my_action = QAction(QIcon(":/pqYMinus24.png"), "View Direction -y", self)
        self.set_view_direction_to_my_action.setStatusTip("View Direction -y")
        self.set_view_direction_to_my_action.setToolTip("View Direction -y")
        self.set_view_direction_to_my_action.triggered.connect(self.vtkWidget.viewMY)
        self.set_view_direction_to_mz_action = QAction(QIcon(":/pqZMinus24.png"), "View Direction -z", self)
        self.set_view_direction_to_mz_action.setStatusTip("View Direction -z")
        self.set_view_direction_to_mz_action.setToolTip("View Direction -z")
        self.set_view_direction_to_mz_action.triggered.connect(self.vtkWidget.viewMZ)
        self.set_view_direction_to_px_action = QAction(QIcon(":/pqXPlus24.png"), "View Direction +x", self)
        self.set_view_direction_to_px_action.setStatusTip("View Direction +x")
        self.set_view_direction_to_px_action.setToolTip("View Direction +x")
        self.set_view_direction_to_px_action.triggered.connect(self.vtkWidget.viewPX)
        self.set_view_direction_to_py_action = QAction(QIcon(":/pqYPlus24.png"), "View Direction +y", self)
        self.set_view_direction_to_py_action.setStatusTip("View Direction +y")
        self.set_view_direction_to_py_action.setToolTip("View Direction +y")
        self.set_view_direction_to_py_action.triggered.connect(self.vtkWidget.viewPY)
        self.set_view_direction_to_pz_action = QAction(QIcon(":/pqZPlus24.png"), "View Direction +z", self)
        self.set_view_direction_to_pz_action.setStatusTip("View Direction +z")
        self.set_view_direction_to_pz_action.setToolTip("View Direction +z")
        self.set_view_direction_to_pz_action.triggered.connect(self.vtkWidget.viewPZ)
        self.selection_action = QAction(QIcon(":/pqSelectSurfPoints24.png"), "Select Points", self)
        self.selection_action.setStatusTip("Select Points")
        self.selection_action.setToolTip("Select Points")
        self.selection_action.triggered.connect(self.select_points)
        self.extract_action = QAction(QIcon(":/pqExtractSelection.png"), "Extract Selected Points", self)
        self.extract_action.setStatusTip("Extract Selected Points")
        self.extract_action.setToolTip("Extract Selected Points")
        self.extract_action.triggered.connect(self.extract_points)
        # NOTE(review): a previous version also built this action with
        # createAction() and immediately overwrote it; the dead assignment
        # has been removed.
        self.clean_action = QAction(QIcon(":/clean.png"), "Clean Selected Points", self)
        self.clean_action.setStatusTip("Clean Selected Points")
        self.clean_action.setToolTip("Clean Selected Points")
        self.clean_action.triggered.connect(self.clean_points)
        addActions(self.toolbar, self.reset_view_action)
        addActions(self.toolbar, self.set_view_direction_to_mx_action)
        addActions(self.toolbar, self.set_view_direction_to_my_action)
        addActions(self.toolbar, self.set_view_direction_to_mz_action)
        addActions(self.toolbar, self.set_view_direction_to_px_action)
        addActions(self.toolbar, self.set_view_direction_to_py_action)
        addActions(self.toolbar, self.set_view_direction_to_pz_action)
        self.toolbar.addSeparator()
        addActions(self.toolbar, self.selection_action)
        addActions(self.toolbar, self.extract_action)
        addActions(self.toolbar, self.clean_action)
        # Extraction/cleaning only make sense once a selection exists.
        self.extract_action.setEnabled(False)
        self.clean_action.setEnabled(False)

    def init_model(self):
        """Build the (read-only) item model describing this view."""
        self.model = QStandardItemModel()
        root = self.model.invisibleRootItem()
        item = QStandardItem("build:")
        item.setIcon(QIcon(":/pqServer16.png"))
        item.setFlags(item.flags() & ~Qt.ItemIsEditable)
        root.appendRow(item)

    def set_interactor_style_image(self):
        """Switch the interactor to the 2D image style."""
        self.vtkWidget.set_interactor_style(vtkInteractorStyleImage())

    def set_interactor_style_trackball(self):
        """Switch the interactor to the 3D trackball-camera style."""
        self.vtkWidget.set_interactor_style(vtkInteractorStyleTrackballCamera())

    def add_layer(self, layer):
        """Register *layer*, display its actor and make it current."""
        self.manager_layer += layer
        self.add_actor(layer.get_container().get_actor())
        self._current_layer = layer

    def set_current_layer(self, layer):
        """Make *layer* the target of subsequent extraction/cleaning."""
        self._current_layer = layer

    def add_actor(self, actor):
        """Add *actor* to the renderer, picking the matching interactor style."""
        if isinstance(actor, vtkActor):
            self.vtkWidget.renderer.AddActor(actor)
            self.set_interactor_style_trackball()
        elif isinstance(actor, vtkImageActor):
            self.vtkWidget.renderer.AddActor2D(actor)
            self.set_interactor_style_image()

    def remove_actor(self, actor):
        """Remove *actor* from the renderer and refresh the view."""
        self.vtkWidget.renderer.RemoveActor(actor)
        self.update_render()

    def update_render(self):
        """Reset the camera and re-render the VTK widget."""
        self.vtkWidget.reset_view()
        self.vtkWidget.Render()
        self.vtkWidget.show()

    def select_points(self):
        """Toggle the contour-based point selection mode."""
        self._is_extract = not self._is_extract
        if self._is_extract:
            # Enter selection mode: let the user draw a closed contour.
            self.setCursor(QCursor(Qt.CrossCursor))
            self._contour_widget = vtkContourWidget()
            self._contour_widget.SetInteractor(self.vtkWidget.get_interactor())
            self._contour_representation = vtkOrientedGlyphContourRepresentation()
            self._contour_widget.SetRepresentation(self._contour_representation)
            self._contour_representation.GetLinesProperty().SetColor(1, 1, 0)
            self._contour_representation.GetLinesProperty().SetLineWidth(2.0)
            self._contour_representation.GetLinesProperty().SetPointSize(10.0)
            self._contour_representation.SetAlwaysOnTop(1)
            self._contour_widget.EnabledOn()
        else:
            # Leave selection mode: freeze the contour and allow extraction.
            self.vtkWidget.get_interactor().GetRenderWindow().SetCurrentCursor(0)
            self._contour_widget.CloseLoop()
            self._contour_widget.ProcessEventsOff()
            self.vtkWidget.get_interactor().GetRenderWindow().Render()
            self.extract_action.setEnabled(True)
            self.clean_action.setEnabled(True)

    def _extract_polygon(self, PolyData, is_clean):
        """
        Extract from *PolyData* the region delimited by the drawn contour.

        When *is_clean* is True the selection is inverted (enclosed points
        are removed) and the output is run through vtkCleanPolyData.
        Returns the resulting surface polydata.
        """
        self._contour_widget.EnabledOff()
        print("Init Extracting")
        self.setCursor(QCursor(Qt.WaitCursor))
        QApplication.processEvents()
        polydata_rep = self._contour_representation.GetContourRepresentationAsPolyData()
        planes = self.get_frustrum()
        normal = planes.GetNormals()
        nor = np.array([0, 0, 0])
        # Use the normal of frustum plane 5 to orient the selection loop.
        normal.GetTuple(5, nor)
        selection = vtkImplicitSelectionLoop()
        selection.SetLoop(polydata_rep.GetPoints())
        selection.SetNormal(nor[0], nor[1], nor[2])
        # Implicit intersection of the contour loop and the pick frustum.
        tip = vtkImplicitBoolean()
        tip.AddFunction(selection)
        tip.AddFunction(planes)
        tip.SetOperationTypeToIntersection()
        tip.Modified()
        if is_clean:
            extractGeometry = vtkExtractPolyDataGeometry()
        else:
            extractGeometry = vtkExtractGeometry()
        extractGeometry.SetInputData(PolyData)
        extractGeometry.SetImplicitFunction(tip)
        if is_clean:
            # Cleaning keeps what is OUTSIDE the selection.
            extractGeometry.ExtractInsideOff()
        extractGeometry.Update()
        if is_clean:
            clean = vtkCleanPolyData()
            clean.SetInputConnection(extractGeometry.GetOutputPort())
            clean.Update()
        filter = vtkDataSetSurfaceFilter()
        if is_clean:
            filter.SetInputConnection(clean.GetOutputPort())
        else:
            filter.SetInputConnection(extractGeometry.GetOutputPort())
        filter.Update()
        self.setCursor(QCursor(Qt.ArrowCursor))
        QApplication.processEvents()
        self.extract_action.setEnabled(False)
        self.clean_action.setEnabled(False)
        print("End Extracting")
        return filter.GetOutput()

    def extract_points(self):
        """Keep only the points inside the drawn contour."""
        self._apply_extraction(is_clean=False)

    def clean_points(self):
        """Remove the points inside the drawn contour."""
        self._apply_extraction(is_clean=True)

    def _apply_extraction(self, is_clean):
        """Run the pending extraction on the current layer and refresh it."""
        if self._is_extract:
            self._is_extract = False
            self.vtkWidget.get_interactor().GetRenderWindow().SetCurrentCursor(0)
            self._contour_widget.CloseLoop()
            self._contour_widget.ProcessEventsOff()
            self.vtkWidget.get_interactor().GetRenderWindow().Render()
            layer = self._current_layer
            actor = layer.get_container().get_actor()
            if actor.GetVisibility():
                polydata = self._extract_polygon(actor.GetMapper().GetInput(), is_clean=is_clean)
                self.vtkWidget.renderer.RemoveActor(actor)
                actor = actor_from_polydata(polydata)
                layer.get_container().update_data_from(polydata)
                layer.get_container().set_actor(actor)
                self.add_actor(actor)
                self.update_render()

    def get_frustrum(self):
        """
        Return the vtkPlanes frustum spanned by the display-coordinate
        bounding box of the contour nodes.
        """
        render = self._contour_representation.GetRenderer()
        numberNodes = self._contour_representation.GetNumberOfNodes()
        V = list()
        for i in range(numberNodes):
            v = np.array([0, 0])
            self._contour_representation.GetNthNodeDisplayPosition(i, v)
            V.append(v)
        xmin = np.min(np.array(V)[:, 0])
        ymin = np.min(np.array(V)[:, 1])
        xmax = np.max(np.array(V)[:, 0])
        ymax = np.max(np.array(V)[:, 1])
        p1 = np.array([xmin, ymax])
        p2 = np.array([xmax, ymin])
        picker = vtkAreaPicker()
        picker.AreaPick(p1[0], p1[1], p2[0], p2[1], render)
        planes = picker.GetFrustum()
        return planes

    def key_press_event(self, key):
        """Escape aborts an in-progress contour selection."""
        if key == Qt.Key_Escape:
            if isinstance(self._contour_widget, vtkContourWidget):
                self.vtkWidget.get_interactor().GetRenderWindow().SetCurrentCursor(0)
                self._contour_widget.ProcessEventsOff()
                self._contour_widget.SetEnabled(0)
                self.vtkWidget.get_interactor().GetRenderWindow().Render()
| |
from __future__ import unicode_literals
import unittest
from functools import wraps
import weakref
from elasticsearch import helpers
from elasticsearch.client import Elasticsearch
from elasticsearch.helpers import scan
from slingshot.indices_manager import IndicesManagerClient
from slingshot.exceptions import IndexDoesNotExist, SameIndex, IndexAlreadyExists, IndexNotManaged
# Sample documents for helpers.bulk: two "employee" docs and one
# "organization" doc, all addressed to the write alias of the managed
# "slingshot" index.  The _id values follow the "<type>/<pk>" convention.
DOCS = [
    {
        '_index': 'slingshot.write',
        '_id': 'employee/53',
        '_type': 'employee',
        '_source': {
            'first_name': 'John',
            'last_name': 'Doe',
        }
    },
    {
        '_index': 'slingshot.write',
        '_id': 'employee/57',
        '_type': 'employee',
        '_source': {
            'first_name': 'Jane',
            'last_name': 'Doe',
        }
    },
    {
        '_index': 'slingshot.write',
        '_id': 'organization/3',
        '_type': 'organization',
        '_source': {
            'name': 'Acme',
        }
    }
]

# Index body used when creating the managed test index: a single shard with
# no replicas (fast test setup), one extra alias, and explicit string
# mappings for both document types.
CONFIG = {
    'settings': {
        'number_of_shards': '1',
        'number_of_replicas': '0',
    },
    'aliases': {
        'slingshot.test_alias': {},
    },
    'mappings': {
        'employee': {
            'properties': {
                'first_name': {
                    'type': 'string'
                },
                'last_name': {
                    'type': 'string'
                }
            }
        },
        'organization': {
            'properties': {
                'name': {
                    'type': 'string'
                }
            }
        }
    }
}
def with_managed_index(index_name, config=None, docs=None):
    """
    Decorator factory for test methods: create a *managed* index named
    ``index_name`` (optionally configured with ``config`` and pre-populated
    with ``docs``) before the test runs, and delete it afterwards.

    The wrapped method must belong to a test case exposing ``self.client``
    with an ``indices_manager`` attached.
    """
    def wrapper(f):
        @wraps(f)
        def decorator(self, *args, **kwargs):
            self.client.indices_manager.create(index_name, body=config)
            self.client.indices.refresh(index_name)
            if docs:
                helpers.bulk(self.client, docs, stats_only=True)
                self.client.indices.refresh(index_name)
            try:
                f(self, *args, **kwargs)
            finally:
                # Best-effort cleanup: the test itself may already have
                # deleted the index.  Narrowed from a bare ``except:`` so
                # that SystemExit/KeyboardInterrupt are not swallowed.
                try:
                    self.client.indices.delete(index_name)
                except Exception:
                    pass
        return decorator
    return wrapper
def with_unmanaged_index(index_name, config=None, docs=None):
    """
    Decorator factory for test methods: create a plain (unmanaged) index
    called ``index_name`` before the wrapped method runs and always delete
    it afterwards.  ``config`` is passed as the index body and ``docs`` are
    bulk-indexed when given.
    """
    def wrapper(test_method):
        @wraps(test_method)
        def decorator(self, *args, **kwargs):
            client = self.client
            client.indices.create(index_name, body=config)
            client.indices.refresh(index_name)
            if docs:
                helpers.bulk(client, docs, stats_only=True)
                client.indices.refresh(index_name)
            try:
                test_method(self, *args, **kwargs)
            finally:
                client.indices.delete(index_name)
        return decorator
    return wrapper
class TestIndicesManagerClient(unittest.TestCase):
    """
    Integration tests for :class:`IndicesManagerClient`, run against a live
    local Elasticsearch instance.
    """

    def setUp(self):
        self.client = Elasticsearch()
        # weakref.proxy avoids a client <-> manager reference cycle.
        self.client.indices_manager = IndicesManagerClient(weakref.proxy(self.client))

    def tearDown(self):
        pass

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_real_name(self):
        # Leftover debug ``raise Exception("Bla")`` removed: it made this
        # test fail unconditionally.
        real_names = self.client.indices_manager.real_names('slingshot.write')
        self.assertEqual(1, len(real_names))
        self.assertEqual([real_names[0]], self.client.indices_manager.real_names(real_names[0]))
        with self.assertRaises(IndexDoesNotExist):
            self.client.indices_manager.real_names('does_not_exist')

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_has_alias(self):
        self.assertTrue(self.client.indices_manager.has_alias('slingshot', 'slingshot.test_alias'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_has_read_alias(self):
        self.assertTrue(self.client.indices_manager.has_read_alias('slingshot'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_has_write_alias(self):
        self.assertTrue(self.client.indices_manager.has_write_alias('slingshot'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_add_alias(self):
        self.client.indices_manager.add_alias('slingshot', 'slingshot.added_alias')
        self.assertTrue(self.client.indices_manager.has_alias('slingshot', 'slingshot.added_alias'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_remove_alias(self):
        self.client.indices_manager.remove_alias('slingshot', 'slingshot.test_alias')
        self.assertFalse(self.client.indices_manager.has_alias('slingshot', 'slingshot.test_alias'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_rename_alias(self):
        self.client.indices_manager.rename_alias('slingshot', 'slingshot.test_alias', 'slingshot.renamed_alias')
        self.assertFalse(self.client.indices_manager.has_alias('slingshot', 'slingshot.test_alias'))
        self.assertTrue(self.client.indices_manager.has_alias('slingshot', 'slingshot.renamed_alias'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_move_alias(self):
        self.client.indices_manager.move_alias('slingshot', 'slingshot_tmp', 'slingshot.test_alias')
        self.assertFalse(self.client.indices_manager.has_alias('slingshot', 'slingshot.test_alias'))
        self.assertTrue(self.client.indices_manager.has_alias('slingshot_tmp', 'slingshot.test_alias'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_swap_alias_when_alias_exists(self):
        self.client.indices_manager.swap_alias('slingshot', 'slingshot_tmp', 'slingshot.test_alias')
        self.assertFalse(self.client.indices_manager.has_alias('slingshot', 'slingshot.test_alias'))
        self.assertTrue(self.client.indices_manager.has_alias('slingshot_tmp', 'slingshot.test_alias'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_swap_alias_when_alias_does_not_exist(self):
        self.assertFalse(self.client.indices_manager.has_alias('slingshot', 'slingshot.added_alias'))
        self.client.indices_manager.swap_alias('slingshot', 'slingshot_tmp', 'slingshot.added_alias')
        self.assertTrue(self.client.indices_manager.has_alias('slingshot_tmp', 'slingshot.added_alias'))

    @with_unmanaged_index('slingshot_tmp')
    def test_copy_pre_conditions(self):
        with self.assertRaises(SameIndex):
            self.client.indices_manager.copy('slingshot_tmp', 'slingshot_tmp')
        with self.assertRaises(IndexDoesNotExist):
            self.client.indices_manager.copy('slingshot_tmp', 'slingshot')
        with self.assertRaises(IndexDoesNotExist):
            self.client.indices_manager.copy('slingshot', 'slingshot_tmp')

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_copy(self):
        self.client.indices_manager.copy('slingshot', 'slingshot_tmp')
        self.client.indices.refresh('slingshot_tmp')
        self.assertEqual(self.client.count('slingshot')['count'], self.client.count('slingshot_tmp')['count'])

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_parallel_copy(self):
        self.client.indices_manager.copy('slingshot', 'slingshot_tmp', parallel=True, bulk_kwargs={'chunk_size': 2})
        self.client.indices.refresh('slingshot_tmp')
        self.assertEqual(self.client.count('slingshot')['count'], self.client.count('slingshot_tmp')['count'])

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_copy_with_transform(self):
        # The transform renames a field for organizations and drops
        # (returns None for) every other document type.
        def rename_name_to_legal_name(doc):
            if doc['_type'] == 'organization':
                doc['_source']['legal_name'] = doc['_source'].pop('name')
                return doc
            return None
        self.client.indices_manager.copy('slingshot', 'slingshot_tmp', transform=rename_name_to_legal_name)
        self.client.indices.refresh('slingshot_tmp')
        docs = list(scan(self.client, index='slingshot_tmp'))
        self.assertEqual(1, len(docs))
        for doc in docs:
            self.assertFalse('name' in doc['_source'])
            self.assertTrue('legal_name' in doc['_source'])

    @with_managed_index("slingshot", CONFIG, DOCS)
    @with_unmanaged_index('slingshot_tmp')
    def test_copy_with_ignore_types(self):
        self.client.indices_manager.copy('slingshot', 'slingshot_tmp', ignore_types=['employee'])
        self.client.indices.refresh('slingshot_tmp')
        self.assertEqual(1, self.client.count('slingshot_tmp')['count'])

    def test_create(self):
        try:
            self.client.indices_manager.create('slingshot')
            self.assertTrue(self.client.indices_manager.has_read_alias('slingshot'))
            self.assertTrue(self.client.indices_manager.has_write_alias('slingshot'))
            self.assertTrue(self.client.indices_manager.is_managed('slingshot'))
        finally:
            self.client.indices.delete('slingshot')

    @with_unmanaged_index("slingshot")
    def test_create_preconditions(self):
        with self.assertRaises(IndexAlreadyExists):
            self.client.indices_manager.create('slingshot')

    @with_unmanaged_index('slingshot')
    def test_manage(self):
        self.assertFalse(self.client.indices_manager.is_managed('slingshot'))
        self.client.indices_manager.manage('slingshot')
        self.assertTrue(self.client.indices_manager.is_managed('slingshot'))

    @with_managed_index("slingshot", CONFIG, DOCS)
    def test_migrate(self):
        real_names = self.client.indices_manager.real_names('slingshot')
        self.assertEqual(3, self.client.count('slingshot')['count'])
        self.client.indices_manager.migrate('slingshot', CONFIG)
        # Migration must move the alias to a fresh physical index ...
        self.assertNotEqual(real_names, self.client.indices_manager.real_names('slingshot'))
        self.client.indices.refresh('slingshot')
        # ... while preserving all documents.
        self.assertEqual(3, self.client.count('slingshot')['count'])
        self.client.indices.refresh('slingshot')

    @with_unmanaged_index("slingshot")
    def test_manage_migrate_pre_conditions(self):
        # Renamed from ``manage_migrate_pre_conditions``: without the
        # ``test_`` prefix the runner silently skipped this test.
        with self.assertRaises(IndexDoesNotExist):
            self.client.indices_manager.migrate('does_not_exist')
        with self.assertRaises(IndexNotManaged):
            self.client.indices_manager.migrate('slingshot')
| |
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
This module deals with the creation of ISO jobs. It can be used for all
ISO types.
AAB
"""
from thelma.tools.base import BaseTool
from thelma.tools.utils.base import is_valid_number
from thelma.entities.iso import ISO_TYPES
from thelma.entities.iso import IsoRequest
from thelma.entities.iso import LabIsoRequest
from thelma.entities.iso import StockSampleCreationIsoRequest
from thelma.entities.job import IsoJob
from thelma.entities.user import User
__docformat__ = 'reStructuredText en'
__all__ = ['IsoJobCreator',
'IsoProvider']
class IsoJobCreator(BaseTool):
    """
    Creates, copies or populates ISOs for an ISO request and summarises them
    in an ISO job. The class is abstract - however, sub classes only need to
    provide the :class:`IsoProvider` class.

    **Return Value:** :class:`thelma.entities.job.IsoJob` with all new ISOs
    """
    NAME = 'ISO Job Creator'

    #: The supported ISO type (see :class:`thelma.entities.iso.ISO_TYPES`).
    _ISO_TYPE = None

    #: Maps the supported ISO types onto the expected ISO request class.
    __ISO_REQUEST_CLS = {ISO_TYPES.LAB : LabIsoRequest,
                 ISO_TYPES.STOCK_SAMPLE_GENERATION : StockSampleCreationIsoRequest}

    def __init__(self, iso_request, job_owner, number_isos,
                 excluded_racks=None, requested_tubes=None, parent=None):
        """
        Constructor.

        :param iso_request: The ISO request that will take up the ISOs.
        :type iso_request: :class:`thelma.entities.iso.IsoRequest` subclass
        :param job_owner: The job owner will be set as user for the ISO job.
        :type job_owner: :class:`thelma.entities.user.User`
        :param int number_isos: The number of ISOs ordered (positive number).
        :param excluded_racks: A list of barcodes from stock racks that shall
            not be used for stock sample picking.
        :type excluded_racks: A list of rack barcodes
        :param requested_tubes: A list of barcodes from stock tubes that are
            supposed to be used.
        :type requested_tubes: A list of tube barcodes.
        """
        BaseTool.__init__(self, parent=parent)
        #: The ISO request that will take up the ISOs.
        self.iso_request = iso_request
        #: The job owner will be set as user for the ISO job.
        self.job_owner = job_owner
        #: The number of ISOs ordered.
        self.number_isos = number_isos
        # None arguments are normalised to empty lists, consistently for
        # both optional list parameters.
        #: A list of barcodes from stock racks that shall not be used for
        #: stock sample (molecule design pool) picking.
        self.excluded_racks = excluded_racks if excluded_racks is not None \
                              else []
        #: A list of barcodes from stock tubes that are supposed to be used
        #: (for fixed positions).
        self.requested_tubes = requested_tubes if requested_tubes is not None \
                               else []
        #: The ISOs for the new job.
        self._isos = None
        #: The new ISO job.
        self._iso_job = None

    def reset(self):
        BaseTool.reset(self)
        self._isos = None
        self._iso_job = None

    def run(self):
        self.reset()
        self.add_info('Start ISO job generation ...')
        self._check_input()
        if not self.has_errors():
            self._get_isos()
        if not self.has_errors():
            self.__create_iso_job()
        if not self.has_errors():
            self.return_value = self._iso_job
            self.add_info('ISO job generation completed.')

    def _check_input(self):
        """
        Checks the initialisation values.
        """
        self.add_debug('Check initialisation values ...')
        self._check_input_class('ISO request', self.iso_request,
                                self.__ISO_REQUEST_CLS[self._ISO_TYPE])
        self._check_input_class('job owner', self.job_owner, User)
        if not is_valid_number(self.number_isos, is_integer=True):
            msg = 'The number of ISOs ordered must be a positive integer ' \
                  '(obtained: %s).' % (self.number_isos)
            self.add_error(msg)
        self._check_input_list_classes('excluded rack', self.excluded_racks,
                                       basestring, may_be_empty=True)
        self._check_input_list_classes('requested tube', self.requested_tubes,
                                       basestring, may_be_empty=True)

    def _get_isos(self):
        """
        Creates or populates the request number of ISOs (depending on the
        ISO type).
        """
        raise NotImplementedError('Abstract method.')

    def _get_iso_provider_keywords(self):
        """
        Returns the keyword dictionary for the ISO providing tool.
        """
        return dict(iso_request=self.iso_request,
                    number_isos=self.number_isos,
                    excluded_racks=self.excluded_racks,
                    requested_tubes=self.requested_tubes,
                    parent=self)

    def __create_iso_job(self):
        """
        Creates an :class:`IsoJob` summarizing the ISOs. The label for
        the job is derived from the ISO request label.
        """
        self.add_debug('Create ISO job ...')
        job_label = self._get_job_label()
        number_stock_racks = self._get_number_stock_racks()
        worklist_series = self._create_iso_job_worklist_series()
        self._iso_job = IsoJob(job_label, self.job_owner, self._isos,
                               number_stock_racks,
                               worklist_series=worklist_series)
        self._create_iso_job_racks()

    def _get_job_label(self):
        """
        Returns the label for the new job.
        """
        raise NotImplementedError('Abstract method.')

    def _get_number_stock_racks(self):
        """
        Returns the (maximum) number of stock racks expected for this ISO job.
        """
        raise NotImplementedError('Abstract method.')

    def _create_iso_job_racks(self):
        """
        Creates plates and ISOs specific to an ISO job. By default we do not
        create any racks.
        """
        pass

    def _create_iso_job_worklist_series(self):
        """
        Creates the worklist series containing the worklists that are specific
        to the ISO job. By default, there is no worklist series.
        """
        return None
class IsoProvider(BaseTool):
    """
    Creates, copies or populates ISOs for an ISO request. This includes
    tube picking, layout generation and in some cases also worklist generation.
    There are different subclass for the different :class:`ISO_TYPES`.

    **Return Value:** depends on the subclass
    """
    #: The supported ISO type (see :class:`thelma.entities.iso.ISO_TYPES`).
    _ISO_TYPE = None

    #: Maps the supported ISO types onto the expected ISO request class.
    __ISO_REQUEST_CLS = {ISO_TYPES.LAB : LabIsoRequest,
                 ISO_TYPES.STOCK_SAMPLE_GENERATION : StockSampleCreationIsoRequest}

    def __init__(self, iso_request, number_isos,
                 excluded_racks=None, requested_tubes=None, parent=None):
        """
        Constructor.

        :param iso_request: The ISO request containing the ISO layout for the
            ISO (and experiment metadata with the molecule design pools).
        :type iso_request: :class:`thelma.entities.iso.IsoRequest`
        :param int number_isos: The number of ISOs ordered.
        :param excluded_racks: A list of barcodes from stock racks that shall
            not be used for stock sample picking.
        :type excluded_racks: A list of rack barcodes
        :param requested_tubes: A list of barcodes from stock tubes that are
            supposed to be used.
        :type requested_tubes: A list of tube barcodes.
        """
        BaseTool.__init__(self, parent=parent)
        #: The ISO request defining the ISO layout
        #: (:class:`thelma.entities.iso.IsoRequest`)
        self.iso_request = iso_request
        #: The number of ISOs ordered.
        self.number_isos = number_isos
        # None arguments are normalised to empty lists, consistently for
        # both optional list parameters.
        #: A list of barcodes from stock racks that shall not be used for
        #: stock sample (molecule design pool) picking.
        self.excluded_racks = excluded_racks if excluded_racks is not None \
                              else []
        #: A list of barcodes from stock tubes that are supposed to be used
        #: (for fixed positions).
        self.requested_tubes = requested_tubes if requested_tubes is not None \
                               else []

    def run(self):
        self.add_info('Start ISO request analysis ...')
        self.reset()
        self._check_input()
        if not self.has_errors():
            self._collect_iso_data()
        if not self.has_errors():
            self.add_info('ISO preparation completed.')

    def _check_input(self):
        """
        Checks the initialisation values.
        """
        self.add_debug('Check initialization values ...')
        if self._check_input_class('ISO request', self.iso_request,
                                   IsoRequest):
            iso_type = self.iso_request.iso_type
            # ``in`` replaces the Python-2-only dict.has_key().
            if iso_type not in self.__ISO_REQUEST_CLS:
                msg = 'Unsupported ISO type "%s"!' % (iso_type)
                self.add_error(msg)
            else:
                ir_cls = self.__ISO_REQUEST_CLS[self._ISO_TYPE]
                self._check_input_class('ISO request', self.iso_request,
                                        ir_cls)
        if not is_valid_number(self.number_isos, is_integer=True):
            msg = 'The number of ISOs ordered must be a positive integer ' \
                  '(obtained: %s).' % (self.number_isos)
            self.add_error(msg)
        self._check_input_list_classes('excluded rack', self.excluded_racks,
                                       basestring, may_be_empty=True)
        self._check_input_list_classes('requested tube', self.requested_tubes,
                                       basestring, may_be_empty=True)

    def _collect_iso_data(self):
        """
        Does the actual generation or population job. Includes tube picking.
        """
        raise NotImplementedError('Abstract method.')
| |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytz
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isna, date_range,
MultiIndex, Index, Timestamp, NaT, IntervalIndex)
from pandas.compat import range
from pandas._libs.tslib import iNaT
from pandas.core.series import remove_na
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from .common import TestData
try:
import scipy
_is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
except:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.Akima1DInterpolator missing')
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestSeriesMissingData(TestData):
def test_remove_na_deprecation(self):
# see gh-16971
with tm.assert_produces_warning(FutureWarning):
remove_na(Series([]))
    def test_timedelta_fillna(self):
        # GH 3371: filling missing values in a timedelta64[ns] Series
        s = Series([Timestamp('20130101'), Timestamp('20130101'),
                    Timestamp('20130102'), Timestamp('20130103 9:01:01')])
        # diff() produces a timedelta64 Series with NaT in the first slot
        td = s.diff()
        # reg fillna: scalar 0 becomes timedelta(0)
        result = td.fillna(0)
        expected = Series([timedelta(0), timedelta(0), timedelta(1),
                           timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
        assert_series_equal(result, expected)
        # an integer fill value is interpreted as seconds
        result = td.fillna(1)
        expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
                           timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
        assert_series_equal(result, expected)
        # a timedelta fill value is used as-is
        result = td.fillna(timedelta(days=1, seconds=1))
        expected = Series([timedelta(days=1, seconds=1), timedelta(0),
                           timedelta(1),
                           timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
        assert_series_equal(result, expected)
        # np.timedelta64 counts nanoseconds: 1e9 ns == 1 second
        result = td.fillna(np.timedelta64(int(1e9)))
        expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
                           timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
        assert_series_equal(result, expected)
        # filling with NaT keeps the missing slot and the m8[ns] dtype
        result = td.fillna(NaT)
        expected = Series([NaT, timedelta(0), timedelta(1),
                           timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
                          dtype='m8[ns]')
        assert_series_equal(result, expected)
        # ffill: the leading NaT has nothing to fill from and stays missing
        td[2] = np.nan
        result = td.ffill()
        expected = td.fillna(0)
        expected[0] = np.nan
        assert_series_equal(result, expected)
        # bfill: position 2 is filled from the following value
        td[2] = np.nan
        result = td.bfill()
        expected = td.fillna(0)
        expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
        assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
result = s.fillna(NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-01-03 10:00'), pd.NaT])
null_loc = pd.Series([False, True, False, True])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, {0}]'.format(tz)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(pd.Timestamp('2011-01-02 10:00',
tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2011-01-02 10:00', tz=tz),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-04 10:00', tz=tz)])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
tm.assert_series_equal(expected, result)
tm.assert_series_equal(pd.isna(s), null_loc)
# with timezone
# GH 15855
df = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='pad'), exp)
df = pd.Series([pd.NaT, pd.Timestamp('2012-11-11 00:00:00+01:00')])
exp = pd.Series([pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')])
assert_series_equal(df.fillna(method='bfill'), exp)
def test_fillna_consistency(self):
# GH 16402
# fillna with a tz aware to a tz-naive, should result in object
s = Series([Timestamp('20130101'), pd.NaT])
result = s.fillna(Timestamp('20130101', tz='US/Eastern'))
expected = Series([Timestamp('20130101'),
Timestamp('2013-01-01', tz='US/Eastern')],
dtype='object')
assert_series_equal(result, expected)
# where (we ignore the raise_on_error)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
raise_on_error=False)
assert_series_equal(result, expected)
result = s.where([True, False],
Timestamp('20130101', tz='US/Eastern'),
raise_on_error=True)
assert_series_equal(result, expected)
# with a non-datetime
result = s.fillna('foo')
expected = Series([Timestamp('20130101'),
'foo'])
assert_series_equal(result, expected)
# assignment
s2 = s.copy()
s2[1] = 'foo'
assert_series_equal(s2, expected)
def test_datetime64tz_fillna_round_issue(self):
# GH 14872
data = pd.Series([pd.NaT, pd.NaT,
datetime(2016, 12, 12, 22, 24, 6, 100001,
tzinfo=pytz.utc)])
filled = data.fillna(method='bfill')
expected = pd.Series([datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc),
datetime(2016, 12, 12, 22, 24, 6,
100001, tzinfo=pytz.utc)])
assert_series_equal(filled, expected)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
s = pd.Series([1., np.nan])
result = s.fillna(0, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
s = pd.Series([1., np.nan])
result = s.fillna({1: 0}, downcast='infer')
expected = pd.Series([1, 0])
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
pytest.raises(TypeError, s.fillna, [1, 2])
pytest.raises(TypeError, s.fillna, (1, 2))
# related GH 9217, make sure limit is an int and greater than 0
s = Series([1, 2, 3, None])
for limit in [-1, 0, 1., 2.]:
for method in ['backfill', 'bfill', 'pad', 'ffill', None]:
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_isna_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_na', True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
@tm.capture_stdout
def test_isnull_for_inf_deprecated(self):
# gh-17115
s = Series(['a', np.inf, np.nan, 1.0])
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
pd.set_option('mode.use_inf_as_null', True)
r = s.isna()
dr = s.dropna()
pd.reset_option('mode.use_inf_as_null')
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
pytest.raises(ValueError, ts.fillna)
pytest.raises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
assert 'ffil' in str(inst)
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_ffill_mixed_dtypes_without_missing_data(self):
# GH14956
series = pd.Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])
result = series.ffill()
assert_series_equal(series, result)
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0].value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
td1[1] = iNaT
assert isna(td1[1])
assert td1[1].value == iNaT
td1[1] = td[1]
assert not isna(td1[1])
td1[2] = NaT
assert isna(td1[2])
assert td1[2].value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
# NumPy limitiation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
assert len(s.dropna()) == 0
s.dropna(inplace=True)
assert len(s) == 0
# invalid axis
pytest.raises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
tm.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
assert s.dtype == 'datetime64[ns, Asia/Tokyo]'
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
assert result.dtype == 'datetime64[ns, Asia/Tokyo]'
tm.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
tm.assert_series_equal(result, s)
assert result is not s
s2 = s.copy()
s2.dropna(inplace=True)
tm.assert_series_equal(s2, s)
def test_dropna_intervals(self):
s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays(
[np.nan, 0, 1, 2],
[np.nan, 1, 2, 3]))
result = s.dropna()
expected = s.iloc[1:]
assert_series_equal(result, expected)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_isna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isna(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isna(), Series([False, False, True]).values)
def test_notna(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notna(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notna(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
assert np.isnan(x[0]), np.isnan(expected[0])
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
# neither monotonic increasing or decreasing
rng2 = rng[[1, 0, 2]]
pytest.raises(ValueError, rng2.get_indexer, rng, method='pad')
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
assert result.name == self.ts.name
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
assert ts.name == name
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
    def test_sparse_series_fillna_limit(self):
        index = np.arange(10)
        s = Series(np.random.randn(10), index=index)
        ss = s[:2].reindex(index).to_sparse()
        result = ss.fillna(method='pad', limit=5)
        # NOTE(review): ``expected`` below is built from the *identical*
        # call as ``result``, so this branch only checks the round-trip
        # through to_dense/to_sparse, not the limit semantics -- TODO
        # confirm intent (the backfill branch below omits the limit).
        expected = ss.fillna(method='pad', limit=5)
        expected = expected.to_dense()
        expected[-3:] = np.nan
        expected = expected.to_sparse()
        assert_series_equal(result, expected)
        ss = s[-2:].reindex(index).to_sparse()
        result = ss.fillna(method='backfill', limit=5)
        expected = ss.fillna(method='backfill')
        expected = expected.to_dense()
        expected[:3] = np.nan
        expected = expected.to_sparse()
        assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
class TestSeriesInterpolateData(TestData):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
tm.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
pytest.raises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with pytest.raises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected = Series([1, 3., 6.823529, 12., 18.058824, 25.])
else:
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
# GH 9217, make sure limit is an int and greater than 0
methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', None,
'from_derivatives', 'pchip', 'akima']
s = pd.Series([1, 2, np.nan, np.nan, 5])
for limit in [-1, 0, 1., 2.]:
for method in methods:
with pytest.raises(ValueError):
s.interpolate(limit=limit, method=method)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_unlimited(self):
# these test are for issue #16282 default Limit=None is unlimited
s = Series([np.nan, 1., 3., np.nan, np.nan, np.nan, 11., np.nan])
expected = Series([1., 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([np.nan, 1., 3., 5., 7., 9., 11., 11.])
result = s.interpolate(method='linear',
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([1., 1., 3., 5., 7., 9., 11., np.nan])
result = s.interpolate(method='linear',
limit_direction='backward')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
pytest.raises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
pytest.raises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These test are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These test are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with pytest.raises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with pytest.raises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_no_order(self):
    """polynomial/spline interpolation require an explicit ``order``."""
    tm._skip_if_no_scipy()
    ser = Series([0, 1, np.nan, 3])
    for method in ('polynomial', 'spline'):
        with pytest.raises(ValueError):
            ser.interpolate(method=method)
def test_spline(self):
    """An order-1 spline fills interior NaNs linearly."""
    tm._skip_if_no_scipy()
    holes = Series([1, 2, np.nan, 4, 5, np.nan, 7])
    filled = holes.interpolate(method='spline', order=1)
    assert_series_equal(filled, Series([1., 2., 3., 4., 5., 6., 7.]))
def test_spline_extrapolate(self):
    """The scipy >= 0.15 ``ext`` keyword controls end-point extrapolation."""
    tm.skip_if_no_package(
        'scipy', min_version='0.15',
        app='setting ext on scipy.interpolate.UnivariateSpline')
    holes = Series([1, 2, 3, 4, np.nan, 6, np.nan])
    # ext=3 clamps the trailing NaN to the boundary value
    assert_series_equal(holes.interpolate(method='spline', order=1, ext=3),
                        Series([1., 2., 3., 4., 5., 6., 6.]))
    # ext=0 extrapolates past the last known point
    assert_series_equal(holes.interpolate(method='spline', order=1, ext=0),
                        Series([1., 2., 3., 4., 5., 6., 7.]))
def test_spline_smooth(self):
    """Passing a smoothing factor ``s`` changes the cubic-spline fit."""
    tm._skip_if_no_scipy()
    noisy = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
    exact_fit = noisy.interpolate(method='spline', order=3, s=0)[5]
    smoothed_fit = noisy.interpolate(method='spline', order=3)[5]
    assert exact_fit != smoothed_fit
def test_spline_interpolation(self):
    """Order-1 spline interpolation runs cleanly on randomly-punched data."""
    tm._skip_if_no_scipy()
    s = Series(np.arange(10) ** 2)
    # NOTE(review): the RNG is unseeded, so NaN positions differ per run.
    s[np.random.randint(0, 9, 3)] = np.nan
    result1 = s.interpolate(method='spline', order=1)
    # NOTE(review): 'expected' is produced by the exact same call as
    # 'result', so this asserts determinism / absence of errors rather
    # than any externally-known values.
    expected1 = s.interpolate(method='spline', order=1)
    assert_series_equal(result1, expected1)
def test_spline_error(self):
    """Spline interpolation rejects a missing or non-positive order (gh-10633)."""
    tm._skip_if_no_scipy()
    data = pd.Series(np.arange(10) ** 2)
    data[np.random.randint(0, 9, 3)] = np.nan
    for kwargs in ({}, {'order': 0}):
        with pytest.raises(ValueError):
            data.interpolate(method='spline', **kwargs)
def test_interp_timedelta64(self):
# GH 6424
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 3]))
result = df.interpolate(method='time')
expected = Series([1., 2., 3.],
index=pd.to_timedelta([1, 2, 3]))
assert_series_equal(result, expected)
# test for non uniform spacing
df = Series([1, np.nan, 3],
index=pd.to_timedelta([1, 2, 4]))
result = df.interpolate(method='time')
expected = Series([1., 1.666667, 3.],
index=pd.to_timedelta([1, 2, 4]))
assert_series_equal(result, expected)
def test_series_interpolate_method_values(self):
    """method='values' matches the default method on a datetime index (#1646)."""
    ts = _simple_ts('1/1/2000', '1/20/2000')
    ts[::2] = np.nan
    by_values = ts.interpolate(method='values')
    by_default = ts.interpolate()
    assert_series_equal(by_values, by_default)
def test_series_interpolate_intraday(self):
    """Time interpolation handles sub-day spacing identically to daily (#1698)."""
    daily_index = pd.date_range('1/1/2012', periods=4, freq='12D')
    daily = pd.Series([0, 12, 24, 36], daily_index)
    exp = daily.reindex(
        daily_index.append(daily_index + pd.DateOffset(days=1)).sort_values()
    ).interpolate(method='time')
    hourly_index = pd.date_range('1/1/2012', periods=4, freq='12H')
    hourly = pd.Series([0, 12, 24, 36], hourly_index)
    result = hourly.reindex(
        hourly_index.append(hourly_index + pd.DateOffset(hours=1)).sort_values()
    ).interpolate(method='time')
    tm.assert_numpy_array_equal(result.values, exp.values)
| |
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from distutils.version import LooseVersion
from f5.bigip.tm.gtm.pool import A
from f5.bigip.tm.gtm.pool import Aaaa
from f5.bigip.tm.gtm.pool import Cname
from f5.bigip.tm.gtm.pool import MembersResource_v11
from f5.bigip.tm.gtm.pool import MembersResourceA
from f5.bigip.tm.gtm.pool import MembersResourceAAAA
from f5.bigip.tm.gtm.pool import MembersResourceCname
from f5.bigip.tm.gtm.pool import MembersResourceMx
from f5.bigip.tm.gtm.pool import MembersResourceNaptr
from f5.bigip.tm.gtm.pool import MembersResourceSrv
from f5.bigip.tm.gtm.pool import Mx
from f5.bigip.tm.gtm.pool import Naptr
from f5.bigip.tm.gtm.pool import Pool
from f5.bigip.tm.gtm.pool import Srv
from f5.sdk_exception import URICreationCollision
import mock
import pytest
from requests.exceptions import HTTPError
from six import iteritems
# Shared fixture constants for the GTM pool tests below.
GTM_SERVER = 'fake_serv1'  # name of the throwaway GTM server
GTM_VS = 'fakeVS'  # name of the virtual server hosted on that server
RES_NAME = GTM_SERVER + ':' + GTM_VS  # pool-member name: "<server>:<vs>"
WIDEIPNAME = 'fake.wide.net'  # wide IP used by cname/mx/naptr/srv members
TESTDESCRIPTION = "TESTDESCRIPTION"  # marker value for description round-trips
# Dependencies setup to be shared between v11 and v12 tests
class MockResponse(object):
    """Minimal stand-in for a ``requests`` response object.

    The constructor adopts *attr_dict* wholesale as the instance
    ``__dict__`` (aliasing, not copying), so every key is readable as an
    attribute, and ``json()`` hands the very same mapping back.
    """

    def __init__(self, attr_dict):
        self.__dict__ = attr_dict

    def json(self):
        return vars(self)
def delete_gtm_server(mgmt_root, name):
    """Remove the named GTM server if it exists; a 404 is a no-op."""
    try:
        server = mgmt_root.tm.gtm.servers.server.load(name=name)
    except HTTPError as err:
        if err.response.status_code == 404:
            return
        raise
    server.delete()
def delete_dc(mgmt_root, name, partition):
    """Remove the GTM server (which references the DC) and then the
    datacenter itself; a 404 on either lookup is a no-op."""
    try:
        delete_gtm_server(mgmt_root, GTM_SERVER)
        datacenter = mgmt_root.tm.gtm.datacenters.datacenter.load(
            name=name, partition=partition)
    except HTTPError as err:
        if err.response.status_code == 404:
            return
        raise
    datacenter.delete()
def create_dc(request, mgmt_root, name, partition):
    """Create a datacenter and register its removal as a finalizer."""
    def teardown():
        delete_dc(mgmt_root, name, partition)

    datacenter = mgmt_root.tm.gtm.datacenters.datacenter.create(
        name=name, partition=partition)
    request.addfinalizer(teardown)
    return datacenter
def setup_gtm_server(request, mgmt_root, name, partition, **kwargs):
    """Create a GTM server (plus its 'dc1' datacenter), with teardown."""
    def teardown():
        delete_gtm_server(mgmt_root, name)

    datacenter = create_dc(request, mgmt_root, 'dc1', partition)
    server = mgmt_root.tm.gtm.servers.server.create(
        name=name, datacenter=datacenter.name, **kwargs)
    request.addfinalizer(teardown)
    return server
def delete_gtm_vs(mgmt_root, name):
    """Remove the named virtual server from GTM_SERVER; a 404 is a no-op."""
    server = mgmt_root.tm.gtm.servers.server.load(name=GTM_SERVER)
    try:
        vs = server.virtual_servers_s.virtual_server.load(name=name)
    except HTTPError as err:
        if err.response.status_code == 404:
            return
        raise
    vs.delete()
def setup_gtm_vs(request, mgmt_root, name, destination, **kwargs):
    """Create a virtual server on a freshly-created GTM_SERVER, with teardown."""
    def teardown():
        delete_gtm_vs(mgmt_root, name)

    server = setup_gtm_server(request, mgmt_root, GTM_SERVER, 'Common',
                              **kwargs)
    vs = server.virtual_servers_s.virtual_server.create(
        name=name, destination=destination)
    request.addfinalizer(teardown)
    return vs
def delete_wideip_v12(mgmt_root, name):
    """Remove the named A-type wide IP (v12 endpoint); a 404 is a no-op."""
    try:
        wideip = mgmt_root.tm.gtm.wideips.a_s.a.load(name=name)
    except HTTPError as err:
        if err.response.status_code == 404:
            return
        raise
    wideip.delete()
def setup_wideip_v12(request, mgmt_root, name, **kwargs):
    """Create an A-type wide IP (v12 endpoint), with teardown."""
    def teardown():
        delete_wideip_v12(mgmt_root, name)

    wideip = mgmt_root.tm.gtm.wideips.a_s.a.create(name=name, **kwargs)
    request.addfinalizer(teardown)
    return wideip
# Start of V12.x Tests here
# Helper class to limit code repetition
class HelperTest(object):
    """Parameterised driver for the v12.x per-record-type pool tests.

    ``collection_name`` is the attribute name of a typed pool collection
    under ``tm.gtm.pools`` (e.g. ``'A_s'``, ``'Aaaas'``, ``'Srvs'``).
    The helper derives the element name and the expected ``kind`` strings
    from it, and implements the create/update/refresh/modify/load and
    member scenarios that the thin ``TestPool*`` classes delegate to.
    """

    def __init__(self, collection_name):
        # Partition used for every resource this helper creates.
        self.partition = 'Common'
        self.lowered = collection_name.lower()
        self.test_name = 'fakepool_' + self.urielementname()
        # Expected 'kind' strings for pools, keyed by record type.
        self.poolkinds = {'a': 'tm:gtm:pool:a:astate',
                          'aaaa': 'tm:gtm:pool:aaaa:aaaastate',
                          'cname': 'tm:gtm:pool:cname:cnamestate',
                          'mx': 'tm:gtm:pool:mx:mxstate',
                          'naptr': 'tm:gtm:pool:naptr:naptrstate',
                          'srv': 'tm:gtm:pool:srv:srvstate'}
        # Expected 'kind' strings for pool members, keyed by record type.
        self.memkinds = {'a': 'tm:gtm:pool:a:members:membersstate',
                         'aaaa': 'tm:gtm:pool:aaaa:members:membersstate',
                         'cname': 'tm:gtm:pool:cname:members:membersstate',
                         'mx': 'tm:gtm:pool:mx:members:membersstate',
                         'naptr': 'tm:gtm:pool:naptr:members:membersstate',
                         'srv': 'tm:gtm:pool:srv:members:membersstate'}

    def urielementname(self):
        """Strip the collection suffix ('_s' or 's') to get the element name."""
        if self.lowered[-2:] == '_s':
            endind = 2
        else:
            endind = 1
        return self.lowered[:-endind]

    def delete_resource(self, resource):
        """Delete this helper's named resource if present; 404 is a no-op."""
        try:
            foo = resource.load(name=self.test_name, partition=self.partition)
        except HTTPError as err:
            if err.response.status_code != 404:
                raise
            return
        foo.delete()

    def setup_test(self, request, mgmt_root, **kwargs):
        """Create this helper's pool (removing any leftover first) and
        register teardown; return (created pool, its collection)."""
        def teardown():
            self.delete_resource(resource)

        resourcecollection = \
            getattr(getattr(getattr(mgmt_root.tm, 'gtm'), 'pools'),
                    self.lowered)
        resource = getattr(resourcecollection, self.urielementname())
        # Clear out any resource left behind by a previous failed run.
        self.delete_resource(resource)
        created = resource.create(name=self.test_name,
                                  partition=self.partition,
                                  **kwargs)
        request.addfinalizer(teardown)
        return created, resourcecollection

    def test_MCURDL(self, request, mgmt_root, **kwargs):
        """Exercise Modify/Create/Update/Refresh/Delete/Load on a pool."""
        # Testing create
        pool, rescollection = self.setup_test(request, mgmt_root, **kwargs)
        assert pool.name == self.test_name
        assert pool.fullPath == '/Common/'+self.test_name
        assert pool.generation and isinstance(pool.generation, int)
        assert pool.kind == self.poolkinds[self.urielementname()]
        # Testing update
        pool.description = TESTDESCRIPTION
        pool.update()
        if hasattr(pool, 'description'):
            assert pool.description == TESTDESCRIPTION
        # Testing refresh: the local change is discarded in favour of the
        # server-side value set by the update above.
        pool.description = ''
        pool.refresh()
        if hasattr(pool, 'description'):
            assert pool.description == TESTDESCRIPTION
        # Testing modify: snapshot the attributes, excluding _meta_data so
        # deepcopy does not have to copy live connection objects; it is
        # restored immediately after.
        meta_data = pool.__dict__.pop('_meta_data')
        start_dict = copy.deepcopy(pool.__dict__)
        pool.__dict__['_meta_data'] = meta_data
        pool.modify(description='MODIFIED')
        desc = 'description'
        for k, v in iteritems(pool.__dict__):
            if k != desc:
                start_dict[k] = pool.__dict__[k]
                assert getattr(pool, k) == start_dict[k]
            elif k == desc:
                assert getattr(pool, desc) == 'MODIFIED'
        # Testing load
        p2 = getattr(rescollection, self.urielementname())
        pool2 = p2.load(partition=self.partition, name=self.test_name)
        assert pool.selfLink == pool2.selfLink

    def test_collection(self, request, mgmt_root, **kwargs):
        """Create a pool, then verify get_collection() yields typed objects."""
        pool, rescollection = self.setup_test(request, mgmt_root, **kwargs)
        assert pool.name == self.test_name
        assert pool.fullPath == '/Common/'+self.test_name
        assert pool.generation and isinstance(pool.generation, int)
        assert pool.kind == self.poolkinds[self.urielementname()]
        coll = rescollection.get_collection()
        assert isinstance(coll, list)
        assert len(coll)
        # Each collection name maps to its record-type class.
        if self.lowered == 'a_s':
            assert isinstance(coll[0], A)
        elif self.lowered == 'aaaas':
            assert isinstance(coll[0], Aaaa)
        elif self.lowered == 'cnames':
            assert isinstance(coll[0], Cname)
        elif self.lowered == 'mxs':
            assert isinstance(coll[0], Mx)
        elif self.lowered == 'naptrs':
            assert isinstance(coll[0], Naptr)
        elif self.lowered == 'srvs':
            assert isinstance(coll[0], Srv)

    def setup_members_test(self, request, mgmt_root, **kwargs):
        """Create a pool plus one member of the matching record type.

        A/AAAA members are backed by a real GTM virtual server; the other
        types reference a wide IP. Returns (member, members collection,
        member name).
        """
        pool, rescollection = self.setup_test(request, mgmt_root, **kwargs)
        mem_coll = pool.members_s
        if isinstance(pool, A):
            setup_gtm_vs(request, mgmt_root, GTM_VS, '20.20.20.20:80',
                         addresses=[{'name': '1.1.1.1'}])
            member = mem_coll.member.create(name=RES_NAME,
                                            partition=self.partition)
        elif isinstance(pool, Aaaa):
            setup_gtm_vs(request, mgmt_root, GTM_VS,
                         'fd00:7967:71a5::.80',
                         addresses=[{'name': 'fda8:e5d6:5ef6::'}])
            member = mem_coll.member.create(name=RES_NAME,
                                            partition=self.partition)
        elif isinstance(pool, Cname) or isinstance(pool, Mx):
            setup_wideip_v12(request, mgmt_root, WIDEIPNAME,
                             partition=self.partition)
            member = mem_coll.member.create(name=WIDEIPNAME)
        elif isinstance(pool, Naptr):
            setup_wideip_v12(request, mgmt_root, WIDEIPNAME,
                             partition=self.partition)
            # NAPTR members additionally require flags and service.
            member = mem_coll.member.create(name=WIDEIPNAME,
                                            flags='a', service='http')
        elif isinstance(pool, Srv):
            setup_wideip_v12(request, mgmt_root, WIDEIPNAME,
                             partition=self.partition)
            # SRV members additionally require a port.
            member = mem_coll.member.create(name=WIDEIPNAME, port=80)
        return member, mem_coll, member.name

    def test_members_MCURDL(self, request, mgmt_root, **kwargs):
        """Exercise create/update/refresh/modify on a pool member."""
        # Testing create
        member, rescollection, name = self.setup_members_test(
            request, mgmt_root, **kwargs)
        assert member.name == name
        assert member.generation and isinstance(member.generation, int)
        assert member.kind == self.memkinds[self.urielementname()]
        # Testing update
        member.description = TESTDESCRIPTION
        member.update()
        if hasattr(member, 'description'):
            assert member.description == TESTDESCRIPTION
        # Testing refresh: local change discarded for the server-side value.
        member.description = ''
        member.refresh()
        if hasattr(member, 'description'):
            assert member.description == TESTDESCRIPTION
        # Testing modify (same _meta_data exclusion trick as test_MCURDL).
        meta_data = member.__dict__.pop('_meta_data')
        start_dict = copy.deepcopy(member.__dict__)
        member.__dict__['_meta_data'] = meta_data
        member.modify(description='MODIFIED')
        desc = 'description'
        for k, v in iteritems(member.__dict__):
            if k != desc:
                start_dict[k] = member.__dict__[k]
                assert getattr(member, k) == start_dict[k]
            elif k == desc:
                assert getattr(member, desc) == 'MODIFIED'

    def test_members_sucollection(self, request, mgmt_root, **kwargs):
        """Verify the members subcollection yields typed member objects."""
        member, rescollection, name = self.setup_members_test(
            request, mgmt_root, **kwargs)
        assert member.name == name
        assert member.generation and isinstance(member.generation, int)
        assert member.kind == self.memkinds[self.urielementname()]
        coll = rescollection.get_collection()
        assert isinstance(coll, list)
        assert len(coll)
        if self.lowered == 'a_s':
            assert isinstance(coll[0], MembersResourceA)
            assert rescollection.kind == \
                'tm:gtm:pool:a:members:memberscollectionstate'
        elif self.lowered == 'aaaas':
            assert isinstance(coll[0], MembersResourceAAAA)
            assert rescollection.kind == \
                'tm:gtm:pool:aaaa:members:memberscollectionstate'
        elif self.lowered == 'cnames':
            assert isinstance(coll[0], MembersResourceCname)
            assert rescollection.kind == \
                'tm:gtm:pool:cname:members:memberscollectionstate'
        elif self.lowered == 'mxs':
            assert isinstance(coll[0], MembersResourceMx)
            assert rescollection.kind == \
                'tm:gtm:pool:mx:members:memberscollectionstate'
        elif self.lowered == 'naptrs':
            assert isinstance(coll[0], MembersResourceNaptr)
            assert rescollection.kind == \
                'tm:gtm:pool:naptr:members:memberscollectionstate'
        elif self.lowered == 'srvs':
            assert isinstance(coll[0], MembersResourceSrv)
            assert rescollection.kind == \
                'tm:gtm:pool:srv:members:memberscollectionstate'

    def test_v12_A_AAAA_create_URICollision(self, request, mgmt_root,
                                            **kwargs):
        """Creating with an already-assigned URI raises URICreationCollision."""
        pool, rescollection = self.setup_test(request, mgmt_root, **kwargs)
        mem_res = pool.members_s.member
        mem_res._meta_data['uri'] = 'URI'
        with pytest.raises(URICreationCollision) as UCCEIO:
            mem_res.create(uri='URI')
        assert str(UCCEIO.value) == \
            "There was an attempt to assign a new uri to this " \
            "resource, the _meta_data['uri'] " \
            "is URI and it should not be changed."

    def test_v12_A_AAAA_create_non_404(self, request, mgmt_root, **kwargs):
        """Non-404 HTTP errors during member create propagate to the caller.

        The live icr_session is swapped for a mock that raises a 500, and
        restored afterwards so later tests keep a working session.
        """
        pool, rescollection = self.setup_test(request, mgmt_root, **kwargs)
        mem_coll = pool.members_s
        if isinstance(pool, A):
            setup_gtm_vs(request, mgmt_root, GTM_VS, '20.20.20.20:80',
                         addresses=[{'name': '1.1.1.1'}])
            mock_response = mock.MagicMock()
            mock_response.status_code = 500
            mock_response.text = 'Internal Server Error'
            error = HTTPError(response=mock_response)
            fake_session = mock.MagicMock(name='mock_session')
            fake_session.post.side_effect = error
            session = mem_coll._meta_data['bigip']._meta_data['icr_session']
            mem_coll._meta_data['bigip']._meta_data['icr_session'] = \
                fake_session
            with pytest.raises(HTTPError) as err:
                mem_coll.member.create(name=RES_NAME, partition=self.partition)
            assert err.value.response.status_code == 500
            # Restore the real session for subsequent requests/teardown.
            mem_coll._meta_data['bigip']._meta_data['icr_session'] = session
        elif isinstance(pool, Aaaa):
            setup_gtm_vs(request, mgmt_root, GTM_VS,
                         'fd00:7967:71a5::.80',
                         addresses=[{'name': 'fda8:e5d6:5ef6::'}])
            mock_response = mock.MagicMock()
            mock_response.status_code = 500
            mock_response.text = 'Internal Server Error'
            error = HTTPError(response=mock_response)
            fake_session = mock.MagicMock(name='mock_session')
            fake_session.post.side_effect = error
            session = mem_coll._meta_data['bigip']._meta_data['icr_session']
            mem_coll._meta_data['bigip']._meta_data['icr_session'] = \
                fake_session
            with pytest.raises(HTTPError) as err:
                mem_coll.member.create(name=RES_NAME, partition=self.partition)
            assert err.value.response.status_code == 500
            # Restore the real session for subsequent requests/teardown.
            mem_coll._meta_data['bigip']._meta_data['icr_session'] = session

    def test_v12_A_AAAA_create_200_OK(self, request, mgmt_root, **kwargs):
        """A successful (mocked) POST yields a member built from the JSON body.

        The icr_session is swapped for a mock returning a canned payload,
        and restored afterwards.
        """
        pool, rescollection = self.setup_test(request, mgmt_root, **kwargs)
        mem_coll = pool.members_s
        MRO = MockResponse({"kind": self.memkinds[self.urielementname()],
                            "selfLink":
                            ".../~Common~testpool/members/~Common~fake"})
        fake_session = mock.MagicMock(name='mock_session')
        fake_session.post.return_value = MRO
        session = mem_coll._meta_data['bigip']._meta_data['icr_session']
        mem_coll._meta_data['bigip']._meta_data['icr_session'] = fake_session
        a = mem_coll.member.create(name='fake', partition='Common')
        assert a.selfLink == '.../~Common~testpool/members/~Common~fake'
        assert a.kind == self.memkinds[self.urielementname()]
        # Restore the real session for subsequent requests/teardown.
        mem_coll._meta_data['bigip']._meta_data['icr_session'] = session
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolAtype(object):
    """CRUD and collection coverage for A-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('A_s').test_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('A_s').test_collection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolATypeSubcollMembers(object):
    """Member-subcollection coverage for A-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('A_s').test_members_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('A_s').test_members_sucollection(request, mgmt_root)

    def test_A_create_URICollision(self, request, mgmt_root):
        HelperTest('A_s').test_v12_A_AAAA_create_URICollision(
            request, mgmt_root)

    def test_A_create_non404(self, request, mgmt_root):
        HelperTest('A_s').test_v12_A_AAAA_create_non_404(request, mgmt_root)

    def test_A_create_200OK(self, request, mgmt_root):
        HelperTest('A_s').test_v12_A_AAAA_create_200_OK(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolAAAAtype(object):
    """CRUD, collection and create-error coverage for AAAA-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Aaaas').test_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Aaaas').test_collection(request, mgmt_root)

    def test_A_create_URICollision(self, request, mgmt_root):
        HelperTest('Aaaas').test_v12_A_AAAA_create_URICollision(
            request, mgmt_root)

    def test_A_create_non404(self, request, mgmt_root):
        HelperTest('Aaaas').test_v12_A_AAAA_create_non_404(request, mgmt_root)

    def test_A_create_200OK(self, request, mgmt_root):
        HelperTest('Aaaas').test_v12_A_AAAA_create_200_OK(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolAAAATypeSubcollMembers(object):
    """Member-subcollection coverage for AAAA-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Aaaas').test_members_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Aaaas').test_members_sucollection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolCnametype(object):
    """CRUD and collection coverage for CNAME-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Cnames').test_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Cnames').test_collection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolCnameTypeSubcollMembers(object):
    """Member-subcollection coverage for CNAME-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Cnames').test_members_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Cnames').test_members_sucollection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolMxstype(object):
    """CRUD and collection coverage for MX-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Mxs').test_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Mxs').test_collection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolMxsTypeSubcollMembers(object):
    """Member-subcollection coverage for MX-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Mxs').test_members_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Mxs').test_members_sucollection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolNaptrtype(object):
    """CRUD and collection coverage for NAPTR-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Naptrs').test_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Naptrs').test_collection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolNaptrTypeSubcollMembers(object):
    """Member-subcollection coverage for NAPTR-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Naptrs').test_members_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Naptrs').test_members_sucollection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPooSrvAtype(object):
    """CRUD and collection coverage for SRV-type pools (v12+).

    NOTE(review): class name looks like a typo for ``TestPoolSrvtype``;
    kept as-is so external test selection by name keeps working.
    """

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Srvs').test_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Srvs').test_collection(request, mgmt_root)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion(
        '12.0.0'),
    reason='This collection exists on 12.0.0 or greater.'
)
class TestPoolSrvTypeSubcollMembers(object):
    """Member-subcollection coverage for SRV-type pools (v12+)."""

    def test_MCURDL(self, request, mgmt_root):
        HelperTest('Srvs').test_members_MCURDL(request, mgmt_root)

    def test_collection(self, request, mgmt_root):
        HelperTest('Srvs').test_members_sucollection(request, mgmt_root)
# End of v12.x Tests
# Start of v11.x Tests
def delete_pool(mgmt_root, name):
    """Remove the named v11 (typeless) pool; a 404 is a no-op."""
    try:
        pool = mgmt_root.tm.gtm.pools.pool.load(name=name)
    except HTTPError as err:
        if err.response.status_code == 404:
            return
        raise
    pool.delete()
def setup_pool_basic_test(request, mgmt_root, name, partition):
    """Create a v11 pool, with teardown; return (pool, pool collection)."""
    def teardown():
        delete_pool(mgmt_root, name)

    collection = mgmt_root.tm.gtm.pools
    pool = collection.pool.create(name=name, partition=partition)
    request.addfinalizer(teardown)
    return pool, collection
def setup_create_pool_test(request, mgmt_root, name):
    """Register pool deletion for a test that creates the pool itself."""
    request.addfinalizer(lambda: delete_pool(mgmt_root, name))
def setup_create_member_test(request, mgmt_root, name):
    """Register pool deletion for a member-creation test (members are
    removed along with their pool)."""
    request.addfinalizer(lambda: delete_pool(mgmt_root, name))
def setup_member_basic_test(request, mgmt_root, name, partition, poolname):
    """Create a pool plus one member backed by a real GTM virtual server;
    return (member, members subcollection)."""
    def teardown():
        delete_pool(mgmt_root, poolname)

    setup_gtm_vs(request, mgmt_root, GTM_VS, '20.20.20.20:80',
                 addresses=[{'name': '1.1.1.1'}])
    pool, _ = setup_pool_basic_test(request, mgmt_root, poolname, partition)
    members = pool.members_s
    member = members.member.create(name=name, partition=partition)
    request.addfinalizer(teardown)
    return member, members
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) >= LooseVersion(
        '12.0.0'),
    reason='This collection exists on 11.x'
)
class TestPools_v11(object):
    """CRUD coverage for the v11.x typeless ``tm.gtm.pools`` endpoint."""

    def test_create_req_arg(self, request, mgmt_root):
        # Create with only the required arguments and check identity fields.
        setup_create_pool_test(request, mgmt_root, 'fake_pool')
        pool1 = mgmt_root.tm.gtm.pools.pool.create(
            name='fake_pool', partition='Common')
        assert pool1.name == 'fake_pool'
        assert pool1.generation and isinstance(pool1.generation, int)
        assert pool1.kind == 'tm:gtm:pool:poolstate'
        assert pool1.selfLink.startswith(
            'https://localhost/mgmt/tm/gtm/pool/~Common~fake_pool')

    def test_create_optional_args(self, request, mgmt_root):
        # Optional kwargs are applied; unspecified fields get BIG-IP defaults.
        setup_create_pool_test(request, mgmt_root, 'fake_pool')
        pool1 = mgmt_root.tm.gtm.pools.pool.create(
            name='fake_pool', partition='Common', description=TESTDESCRIPTION)
        assert pool1.description == TESTDESCRIPTION
        assert pool1.limitMaxBpsStatus == 'disabled'

    def test_create_duplicate(self, request, mgmt_root):
        # Creating an already-existing pool yields HTTP 409 Conflict.
        setup_pool_basic_test(request, mgmt_root, 'fake_pool', 'Common')
        try:
            mgmt_root.tm.gtm.pools.pool.create(name='fake_pool',
                                               partition='Common')
        except HTTPError as err:
            assert err.response.status_code == 409

    def test_refresh(self, request, mgmt_root):
        # Two independent handles; refresh() pulls server-side changes
        # made through the other handle.
        setup_pool_basic_test(request, mgmt_root, 'fake_pool', 'Common')
        p1 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        p2 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        assert p1.limitMaxBpsStatus == 'disabled'
        assert p2.limitMaxBpsStatus == 'disabled'
        p2.update(limitMaxBpsStatus='enabled')
        assert p1.limitMaxBpsStatus == 'disabled'
        assert p2.limitMaxBpsStatus == 'enabled'
        p1.refresh()
        assert p1.limitMaxBpsStatus == 'enabled'

    def test_load_no_object(self, mgmt_root):
        # Loading a nonexistent pool raises HTTP 404.
        with pytest.raises(HTTPError) as err:
            mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        assert err.value.response.status_code == 404

    def test_load(self, request, mgmt_root):
        # A fresh load reflects changes pushed by a previous handle.
        setup_pool_basic_test(request, mgmt_root, 'fake_pool', 'Common')
        p1 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        assert p1.limitMaxBpsStatus == 'disabled'
        p1.limitMaxBpsStatus = 'enabled'
        p1.update()
        p2 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        assert p2.limitMaxBpsStatus == 'enabled'

    def test_update(self, request, mgmt_root):
        # update() both pushes and locally applies changed attributes.
        p1, sc = setup_pool_basic_test(request, mgmt_root, 'fake_pool',
                                       'Common')
        assert p1.limitMaxBpsStatus == 'disabled'
        assert not hasattr(p1, 'description')
        p1.update(limitMaxBpsStatus='enabled', description=TESTDESCRIPTION)
        assert p1.limitMaxBpsStatus == 'enabled'
        assert hasattr(p1, 'description')
        assert p1.description == TESTDESCRIPTION

    def test_modify(self, request, mgmt_root):
        # modify() changes only the named attribute.
        p1, sc = setup_pool_basic_test(request, mgmt_root, 'fake_pool',
                                       'Common')
        original_dict = copy.copy(p1.__dict__)
        limit = 'limitMaxBpsStatus'
        p1.modify(limitMaxBpsStatus='enabled')
        for k, v in iteritems(original_dict):
            # NOTE(review): the non-limit branch records post-modify values
            # but asserts nothing, unlike HelperTest.test_MCURDL.
            if k != limit:
                original_dict[k] = p1.__dict__[k]
            elif k == limit:
                assert p1.__dict__[k] == 'enabled'

    def test_delete(self, request, mgmt_root):
        # After delete(), a load of the same name yields 404.
        p1, sc = setup_pool_basic_test(request, mgmt_root, 'fake_pool',
                                       'Common')
        p1.delete()
        with pytest.raises(HTTPError) as err:
            mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        assert err.value.response.status_code == 404

    def test_pool_collection(self, request, mgmt_root):
        # get_collection() returns a non-empty list of Pool objects.
        pool1, pcoll = setup_pool_basic_test(request, mgmt_root,
                                             'fake_pool', 'Common')
        assert pool1.fullPath == '/Common/fake_pool'
        assert pool1.name == 'fake_pool'
        assert pool1.generation and isinstance(pool1.generation, int)
        assert pool1.kind == 'tm:gtm:pool:poolstate'
        assert pool1.selfLink.startswith(
            'https://localhost/mgmt/tm/gtm/pool/~Common~fake_pool')
        pc = pcoll.get_collection()
        assert isinstance(pc, list)
        assert len(pc)
        assert isinstance(pc[0], Pool)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) >= LooseVersion(
        '12.0.0'),
    reason='This collection exists on 11.x'
)
class TestMembersSubCollection_v11(object):
    """CRUD coverage for the v11.x pool ``members`` subcollection."""

    def test_create_req_arg(self, request, mgmt_root):
        # Create a member with only the required arguments.
        setup_create_member_test(request, mgmt_root, 'fake_pool')
        setup_gtm_vs(request, mgmt_root, GTM_VS, '20.20.20.20:80',
                     addresses=[{'name': '1.1.1.1'}])
        p1, pc = setup_pool_basic_test(request, mgmt_root, 'fake_pool',
                                       'Common')
        m1 = p1.members_s.member.create(name=RES_NAME, partition='Common')
        uri = 'https://localhost/mgmt/tm/gtm/pool/~Common~fake_pool/' \
              'members/~Common~fake_serv1:fakeVS'
        assert m1.name == RES_NAME
        assert m1.generation and isinstance(m1.generation, int)
        assert m1.kind == 'tm:gtm:pool:members:membersstate'
        assert m1.selfLink.startswith(uri)

    def test_create_optional_args(self, request, mgmt_root):
        # Optional kwargs are applied on create.
        setup_create_member_test(request, mgmt_root, 'fake_pool')
        setup_gtm_vs(request, mgmt_root, GTM_VS, '20.20.20.20:80',
                     addresses=[{'name': '1.1.1.1'}])
        p1, pc = setup_pool_basic_test(request, mgmt_root, 'fake_pool',
                                       'Common')
        m1 = p1.members_s.member.create(name=RES_NAME, partition='Common',
                                        description='FancyFakeMember',
                                        limitMaxBpsStatus='enabled',
                                        limitMaxBps=1337)
        assert m1.name == RES_NAME
        assert m1.description == 'FancyFakeMember'
        assert m1.limitMaxBpsStatus == 'enabled'
        assert m1.limitMaxBps == 1337

    def test_create_duplicate(self, request, mgmt_root):
        # Creating an already-existing member yields HTTP 409 Conflict.
        setup_member_basic_test(request, mgmt_root, RES_NAME, 'Common',
                                'fake_pool')
        p1 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        try:
            p1.members_s.member.create(name=RES_NAME, partition='Common')
        except HTTPError as err:
            assert err.response.status_code == 409

    def test_refresh(self, request, mgmt_root):
        # Two independent handles; refresh() pulls the change pushed
        # through the other handle (mirrors TestPools_v11.test_refresh).
        setup_member_basic_test(request, mgmt_root, RES_NAME, 'Common',
                                'fake_pool')
        p1 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        m1 = p1.members_s.member.load(name=RES_NAME, partition='Common')
        m2 = p1.members_s.member.load(name=RES_NAME, partition='Common')
        assert m1.limitMaxBpsStatus == 'disabled'
        # BUG FIX: this precondition previously re-checked m1; it must
        # check the independent handle m2.
        assert m2.limitMaxBpsStatus == 'disabled'
        m2.update(limitMaxBpsStatus='enabled')
        assert m1.limitMaxBpsStatus == 'disabled'
        assert m2.limitMaxBpsStatus == 'enabled'
        m1.refresh()
        # BUG FIX: after refreshing m1 the assertion previously re-checked
        # m2; it must verify m1 picked up the server-side change.
        assert m1.limitMaxBpsStatus == 'enabled'

    def test_load_no_object(self, request, mgmt_root):
        # Loading a nonexistent member yields HTTP 404.
        p1, pc = setup_pool_basic_test(request, mgmt_root, 'fake_pool',
                                       'Common')
        try:
            p1.members_s.member.load(name=RES_NAME)
        except HTTPError as err:
            assert err.response.status_code == 404

    def test_load(self, request, mgmt_root):
        # A fresh load reflects changes pushed by a previous handle.
        m1, mc = setup_member_basic_test(request, mgmt_root, RES_NAME,
                                         'Common', 'fake_pool')
        assert m1.name == RES_NAME
        assert m1.limitMaxBpsStatus == 'disabled'
        m1.limitMaxBpsStatus = 'enabled'
        m1.update()
        m2 = mc.member.load(name=RES_NAME, partition='Common')
        assert m2.name == RES_NAME
        assert m2.limitMaxBpsStatus == 'enabled'

    def test_update(self, request, mgmt_root):
        # update() pushes and locally applies the changed attribute.
        m1, mc = setup_member_basic_test(request, mgmt_root, RES_NAME,
                                         'Common', 'fake_pool')
        assert m1.limitMaxBpsStatus == 'disabled'
        m1.update(limitMaxBpsStatus='enabled')
        assert m1.limitMaxBpsStatus == 'enabled'

    def test_modify(self, request, mgmt_root):
        # modify() changes only the named attribute.
        m1, mc = setup_member_basic_test(request, mgmt_root, RES_NAME,
                                         'Common', 'fake_pool')
        original_dict = copy.copy(m1.__dict__)
        limit = 'limitMaxBpsStatus'
        m1.modify(limitMaxBpsStatus='enabled')
        for k, v in iteritems(original_dict):
            if k != limit:
                original_dict[k] = m1.__dict__[k]
            elif k == limit:
                assert m1.__dict__[k] == 'enabled'

    @pytest.mark.skipif(pytest.config.getoption('--release') == '11.6.0',
                        reason='Due to a bug in 11.6.0 Final this test '
                        'fails')
    def test_delete(self, request, mgmt_root):
        # After delete(), a load of the same member yields 404.
        m1, mc = setup_member_basic_test(request, mgmt_root, RES_NAME,
                                         'Common', 'fake_pool')
        m1.delete()
        p1 = mgmt_root.tm.gtm.pools.pool.load(name='fake_pool')
        try:
            p1.members_s.member.load(name=RES_NAME)
        except HTTPError as err:
            assert err.response.status_code == 404

    def test_member_collection(self, request, mgmt_root):
        # get_collection() returns a non-empty list of member resources.
        m1, mc = setup_member_basic_test(request, mgmt_root, RES_NAME,
                                         'Common', 'fake_pool')
        uri = 'https://localhost/mgmt/tm/gtm/pool/~Common~fake_pool/' \
              'members/~Common~fake_serv1:fakeVS'
        assert m1.name == RES_NAME
        assert m1.generation and isinstance(m1.generation, int)
        assert m1.kind == 'tm:gtm:pool:members:membersstate'
        assert m1.selfLink.startswith(uri)
        msc = mc.get_collection()
        assert isinstance(msc, list)
        assert len(msc)
        assert isinstance(msc[0], MembersResource_v11)
| |
# Copyright 2013, Big Switch Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: KC Wang
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
# Validator shared by the source/destination port form fields below.
port_validator = validators.validate_port_or_colon_separated_port_range

LOG = logging.getLogger(__name__)
class UpdateRule(forms.SelfHandlingForm):
    """Self-handling form for editing an existing FWaaS firewall rule.

    Protocol and action are choice fields whose first entry is the rule's
    current value so the displayed default matches the existing rule.
    """

    name = forms.CharField(max_length=80, label=_("Name"), required=False)
    description = forms.CharField(
        required=False,
        max_length=80, label=_("Description"))
    protocol = forms.ChoiceField(
        label=_("Protocol"), required=False,
        help_text=_('Protocol for the firewall rule'))
    action = forms.ChoiceField(
        label=_("Action"), required=False,
        help_text=_('Action for the firewall rule'))
    source_ip_address = forms.IPField(
        label=_("Source IP Address/Subnet"),
        version=forms.IPv4 | forms.IPv6,
        required=False, mask=True,
        help_text=_('Source IP address or subnet'))
    destination_ip_address = forms.IPField(
        label=_('Destination IP Address/Subnet'),
        version=forms.IPv4 | forms.IPv6,
        required=False, mask=True,
        help_text=_('Destination IP address or subnet'))
    source_port = forms.CharField(
        max_length=80,
        label=_("Source Port/Port Range"),
        required=False,
        validators=[port_validator],
        help_text=_('Source port (integer in [1, 65535] or range in a:b)'))
    destination_port = forms.CharField(
        max_length=80,
        label=_("Destination Port/Port Range"),
        required=False,
        validators=[port_validator],
        help_text=_('Destination port (integer in [1, 65535] or range'
                    ' in a:b)'))
    shared = forms.BooleanField(label=_("Shared"), required=False)
    enabled = forms.BooleanField(label=_("Enabled"), required=False)

    # Where the user is redirected when the update fails.
    failure_url = 'horizon:project:firewalls:index'

    def __init__(self, request, *args, **kwargs):
        """Build protocol/action choice lists with the current value first."""
        super(UpdateRule, self).__init__(request, *args, **kwargs)
        protocol = kwargs['initial']['protocol']
        # A rule may have no protocol set; display that as 'ANY'.
        protocol = protocol.upper() if protocol else 'ANY'
        action = kwargs['initial']['action'].upper()
        protocol_choices = [(protocol, protocol)]
        for tup in [('TCP', _('TCP')), ('UDP', _('UDP')), ('ICMP', _('ICMP')),
                    ('ANY', _('ANY'))]:
            if tup[0] != protocol:
                protocol_choices.append(tup)
        self.fields['protocol'].choices = protocol_choices
        action_choices = [(action, action)]
        for tup in [('ALLOW', _('ALLOW')), ('DENY', _('DENY'))]:
            if tup[0] != action:
                action_choices.append(tup)
        self.fields['action'].choices = action_choices

    def handle(self, request, context):
        """Send the edited rule to FWaaS; redirect with an error on failure."""
        rule_id = self.initial['rule_id']
        name_or_id = context.get('name') or rule_id
        # 'ANY' is a UI-only placeholder; the API expects a null protocol.
        if context['protocol'] == 'ANY':
            context['protocol'] = None
        # Blank optional fields must be sent as None, not empty strings.
        for f in ['source_ip_address', 'destination_ip_address',
                  'source_port', 'destination_port']:
            if not context[f]:
                context[f] = None
        try:
            rule = api.fwaas.rule_update(request, rule_id, **context)
            msg = _('Rule %s was successfully updated.') % name_or_id
            LOG.debug(msg)
            messages.success(request, msg)
            return rule
        except Exception as e:
            msg = (_('Failed to update rule %(name)s: %(reason)s') %
                   {'name': name_or_id, 'reason': e})
            LOG.error(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
class UpdatePolicy(forms.SelfHandlingForm):
    """Self-handling form for editing an existing firewall policy."""

    name = forms.CharField(label=_("Name"), max_length=80, required=False)
    description = forms.CharField(label=_("Description"), max_length=80,
                                  required=False)
    shared = forms.BooleanField(label=_("Shared"), required=False)
    audited = forms.BooleanField(label=_("Audited"), required=False)

    # Where the user is redirected when the update fails.
    failure_url = 'horizon:project:firewalls:index'

    def handle(self, request, context):
        """Push the edited attributes to FWaaS; redirect on failure."""
        policy_id = self.initial['policy_id']
        name_or_id = context.get('name') or policy_id
        try:
            updated = api.fwaas.policy_update(request, policy_id, **context)
        except Exception as exc:
            msg = _('Failed to update policy %(name)s: %(reason)s') % {
                'name': name_or_id, 'reason': exc}
            LOG.error(msg)
            exceptions.handle(request, msg,
                              redirect=reverse(self.failure_url))
        else:
            msg = _('Policy %s was successfully updated.') % name_or_id
            LOG.debug(msg)
            messages.success(request, msg)
            return updated
class UpdateFirewall(forms.SelfHandlingForm):
    """Form for editing a firewall's name, description, policy and state."""

    name = forms.CharField(max_length=80,
                           label=_("Name"),
                           required=False)
    description = forms.CharField(max_length=80,
                                  label=_("Description"),
                                  required=False)
    firewall_policy_id = forms.ChoiceField(label=_("Policy"))
    # TODO(amotoki): make UP/DOWN translatable
    admin_state_up = forms.ChoiceField(choices=[(True, 'UP'), (False, 'DOWN')],
                                       label=_("Admin State"))

    # Where the user is redirected when the update fails.
    failure_url = 'horizon:project:firewalls:index'

    def __init__(self, request, *args, **kwargs):
        """Populate the policy choices with the current policy first."""
        super(UpdateFirewall, self).__init__(request, *args, **kwargs)
        try:
            tenant_id = self.request.user.tenant_id
            policies = api.fwaas.policy_list(request, tenant_id=tenant_id)
            policies = sorted(policies, key=lambda policy: policy.name)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve policy list.'))
            policies = []
        policy_id = kwargs['initial']['firewall_policy_id']
        # Fall back to the raw id when the current policy is not in the
        # listing (e.g. the listing above failed and ``policies`` is empty);
        # the previous ``[...][0]`` indexing raised IndexError in that case.
        policy_name = next((p.name for p in policies if p.id == policy_id),
                           policy_id)
        firewall_policy_id_choices = [(policy_id, policy_name)]
        for p in policies:
            if p.id != policy_id:
                p.set_id_as_name_if_empty()
                firewall_policy_id_choices.append((p.id, p.name))
        self.fields['firewall_policy_id'].choices = firewall_policy_id_choices

    def handle(self, request, context):
        """Update the firewall; report success or redirect on failure."""
        firewall_id = self.initial['firewall_id']
        name_or_id = context.get('name') or firewall_id
        # The ChoiceField submits the string 'True'/'False'; coerce to bool.
        context['admin_state_up'] = (context['admin_state_up'] == 'True')
        try:
            firewall = api.fwaas.firewall_update(request, firewall_id,
                                                 **context)
            msg = _('Firewall %s was successfully updated.') % name_or_id
            LOG.debug(msg)
            messages.success(request, msg)
            return firewall
        except Exception as e:
            msg = _('Failed to update firewall %(name)s: %(reason)s') % {
                'name': name_or_id, 'reason': e}
            LOG.error(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
class InsertRuleToPolicy(forms.SelfHandlingForm):
    """Form that inserts an unassociated rule into a firewall policy."""

    firewall_rule_id = forms.ChoiceField(label=_("Insert Rule"))
    insert_before = forms.ChoiceField(label=_("Before"),
                                      required=False)
    insert_after = forms.ChoiceField(label=_("After"),
                                     required=False)

    # Where the user is redirected when an operation fails.
    failure_url = 'horizon:project:firewalls:index'

    def __init__(self, request, *args, **kwargs):
        """Populate rule choices: insertable rules and current policy rules."""
        super(InsertRuleToPolicy, self).__init__(request, *args, **kwargs)
        tenant_id = self.request.user.tenant_id
        try:
            all_rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
            for r in all_rules:
                r.set_id_as_name_if_empty()
            all_rules = sorted(all_rules, key=lambda rule: rule.name)
            # Only rules not yet bound to any policy may be inserted.
            available_rules = [r for r in all_rules
                               if not r.firewall_policy_id]
            current_rules = []
            for r in kwargs['initial']['firewall_rules']:
                r_obj = [rule for rule in all_rules if r == rule.id][0]
                current_rules.append(r_obj)
            available_choices = [(r.id, r.name) for r in available_rules]
            current_choices = [(r.id, r.name) for r in current_rules]
        except Exception as e:
            msg = _('Failed to retrieve available rules: %s') % e
            LOG.error(msg)
            redirect = reverse(self.failure_url)
            # NOTE(review): this relies on exceptions.handle() aborting the
            # request via redirect; if it returns normally, the *_choices
            # names below are unbound -- confirm the horizon behavior.
            exceptions.handle(request, msg, redirect=redirect)
        self.fields['firewall_rule_id'].choices = available_choices
        # The empty first entry means "insert at the default position".
        self.fields['insert_before'].choices = [('', '')] + current_choices
        self.fields['insert_after'].choices = [('', '')] + current_choices

    def handle(self, request, context):
        """Ask FWaaS to insert the chosen rule at the requested position."""
        policy_id = self.initial['policy_id']
        policy_name_or_id = self.initial['name'] or policy_id
        try:
            insert_rule_id = context['firewall_rule_id']
            # Fetch the rule so the success message can show its name.
            insert_rule = api.fwaas.rule_get(request, insert_rule_id)
            body = {'firewall_rule_id': insert_rule_id,
                    'insert_before': context['insert_before'],
                    'insert_after': context['insert_after']}
            policy = api.fwaas.policy_insert_rule(request, policy_id, **body)
            msg = _('Rule %(rule)s was successfully inserted to policy '
                    '%(policy)s.') % {
                        'rule': insert_rule.name or insert_rule.id,
                        'policy': policy_name_or_id}
            LOG.debug(msg)
            messages.success(request, msg)
            return policy
        except Exception as e:
            msg = _('Failed to insert rule to policy %(name)s: %(reason)s') % {
                'name': policy_id, 'reason': e}
            LOG.error(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
class RemoveRuleFromPolicy(forms.SelfHandlingForm):
    """Form that removes one rule from a firewall policy."""

    firewall_rule_id = forms.ChoiceField(label=_("Remove Rule"))

    # Where the user is redirected when an operation fails.
    failure_url = 'horizon:project:firewalls:index'

    def __init__(self, request, *args, **kwargs):
        """Offer only the rules currently attached to this policy."""
        super(RemoveRuleFromPolicy, self).__init__(request, *args, **kwargs)
        tenant_id = request.user.tenant_id
        try:
            all_rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
            for r in all_rules:
                r.set_id_as_name_if_empty()
            current_rules = []
            for r in kwargs['initial']['firewall_rules']:
                r_obj = [rule for rule in all_rules if r == rule.id][0]
                current_rules.append(r_obj)
            current_choices = [(r.id, r.name) for r in current_rules]
        except Exception as e:
            msg = _('Failed to retrieve current rules in policy %(name)s: '
                    '%(reason)s') % {'name': self.initial['name'], 'reason': e}
            LOG.error(msg)
            redirect = reverse(self.failure_url)
            # NOTE(review): as in InsertRuleToPolicy, if exceptions.handle()
            # returns instead of redirecting, current_choices is unbound.
            exceptions.handle(request, msg, redirect=redirect)
        self.fields['firewall_rule_id'].choices = current_choices

    def handle(self, request, context):
        """Ask FWaaS to detach the chosen rule from the policy."""
        policy_id = self.initial['policy_id']
        policy_name_or_id = self.initial['name'] or policy_id
        try:
            remove_rule_id = context['firewall_rule_id']
            # Fetch the rule so the success message can show its name.
            remove_rule = api.fwaas.rule_get(request, remove_rule_id)
            body = {'firewall_rule_id': remove_rule_id}
            policy = api.fwaas.policy_remove_rule(request, policy_id, **body)
            msg = _('Rule %(rule)s was successfully removed from policy '
                    '%(policy)s.') % {
                        'rule': remove_rule.name or remove_rule.id,
                        'policy': policy_name_or_id}
            LOG.debug(msg)
            messages.success(request, msg)
            return policy
        except Exception as e:
            msg = _('Failed to remove rule from policy %(name)s: '
                    '%(reason)s') % {'name': self.initial['name'],
                                     'reason': e}
            LOG.error(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from ..utils.decorators import wraps
import os
import shutil
import sys
# Public API of this module: the directory getters and the two
# temporary-path context managers.
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
           'set_temp_cache']
def _find_home():
    """Locate and return the home directory (or best approximation).

    Raises
    ------
    OSError
        If the home directory cannot be located - usually means you are
        running on some obscure platform without standard home directories.
    """
    # Inspired by the scheme ipython uses to identify "home".
    env = os.environ

    if os.name == 'posix':
        # Linux, Unix, AIX, OS X: $HOME is the only sensible answer.
        if 'HOME' in env:
            return env['HOME']
        raise OSError('Could not find unix home directory to search for '
                      'astropy config dir')

    if os.name == 'nt':  # all modern Windows (NT or after)
        if 'MSYSTEM' in env and env.get('HOME'):
            # Likely an msys shell; trust whatever it uses for $HOME.
            return env['HOME']
        if 'HOMESHARE' in env:
            # A network home.
            return env['HOMESHARE']
        if 'HOMEDRIVE' in env and 'HOMEPATH' in env:
            # A local home.
            return os.path.join(env['HOMEDRIVE'], env['HOMEPATH'])
        if 'USERPROFILE' in env:
            # Maybe a user profile?
            return os.path.join(env['USERPROFILE'])
        try:
            # Last structured option: ask the registry for "Personal".
            import winreg as wreg
            shell_folders = (r'Software\Microsoft\Windows\CurrentVersion'
                             r'\Explorer\Shell Folders')
            key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
            homedir = wreg.QueryValueEx(key, 'Personal')[0]
            key.Close()
            return homedir
        except Exception:
            # As a final possible resort, see if HOME is present.
            if 'HOME' in env:
                return env['HOME']
            raise OSError('Could not find windows home directory to '
                          'search for astropy config dir')

    # Other platforms: try HOME, although it probably isn't there.
    if 'HOME' in env:
        return env['HOME']
    raise OSError('Could not find a home directory to search for '
                  'astropy config dir - are you on an unspported '
                  'platform?')
def get_config_dir(create=True):
    """
    Determines the Astropy configuration directory name and creates the
    directory if it doesn't exist.

    This directory is typically ``$HOME/.astropy/config``, but if the
    XDG_CONFIG_HOME environment variable is set and the
    ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
    If neither exists, the former will be created and symlinked to the latter.

    Parameters
    ----------
    create : bool
        NOTE(review): currently unused -- the directory is always created
        when missing regardless of this flag; confirm whether callers
        expect it to be honored.

    Returns
    -------
    configdir : str
        The absolute path to the configuration directory.
    """
    # symlink will be set to this if the directory is created
    linkto = None

    # If using set_temp_config, that overrides all
    if set_temp_config._temp_path is not None:
        xch = set_temp_config._temp_path
        config_path = os.path.join(xch, 'astropy')
        if not os.path.exists(config_path):
            os.mkdir(config_path)
        return os.path.abspath(config_path)

    # first look for XDG_CONFIG_HOME
    xch = os.environ.get('XDG_CONFIG_HOME')

    if xch is not None and os.path.exists(xch):
        xchpth = os.path.join(xch, 'astropy')
        if not os.path.islink(xchpth):
            if os.path.exists(xchpth):
                return os.path.abspath(xchpth)
            else:
                # Created below; the default dir gets symlinked to here.
                linkto = xchpth
    return os.path.abspath(_find_or_create_astropy_dir('config', linkto))
def get_cache_dir():
    """Return the absolute path of the Astropy cache directory, creating
    it when necessary.

    The usual location is ``$HOME/.astropy/cache``; an existing
    ``$XDG_CACHE_HOME/astropy`` directory takes precedence, and an active
    `set_temp_cache` context overrides everything.

    Returns
    -------
    cachedir : str
        The absolute path to the cache directory.
    """
    # An active set_temp_cache wins over every other location.
    temp_root = set_temp_cache._temp_path
    if temp_root is not None:
        cache_path = os.path.join(temp_root, 'astropy')
        if not os.path.exists(cache_path):
            os.mkdir(cache_path)
        return os.path.abspath(cache_path)

    # Honor $XDG_CACHE_HOME when it points at an existing directory.
    link_target = None
    xdg_root = os.environ.get('XDG_CACHE_HOME')
    if xdg_root is not None and os.path.exists(xdg_root):
        candidate = os.path.join(xdg_root, 'astropy')
        if os.path.islink(candidate):
            # The link already points into ~/.astropy; use the default dir.
            pass
        elif os.path.exists(candidate):
            return os.path.abspath(candidate)
        else:
            # Remember it so the default dir can be symlinked from here.
            link_target = candidate

    return os.path.abspath(_find_or_create_astropy_dir('cache', link_target))
class _SetTempPath:
    """Base context manager / decorator that temporarily overrides the
    directory returned by ``_default_path_getter``.

    Subclasses set ``_default_path_getter`` to the module-level getter they
    override.  The override is stored on the class so the getters can see it.
    """

    # Active override path (class-level, consulted by the getters).
    _temp_path = None
    # Set by subclasses to get_config_dir / get_cache_dir.
    _default_path_getter = None

    def __init__(self, path=None, delete=False):
        # Normalize up front so later comparisons and removal use one form.
        if path is not None:
            path = os.path.abspath(path)

        self._path = path
        self._delete = delete
        # Remember the previous override so these managers nest correctly.
        self._prev_path = self.__class__._temp_path

    def __enter__(self):
        self.__class__._temp_path = self._path
        return self._default_path_getter()

    def __exit__(self, *args):
        self.__class__._temp_path = self._prev_path
        if self._delete and self._path is not None:
            shutil.rmtree(self._path)

    def __call__(self, func):
        """Implements use as a decorator."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                # Bug fix: propagate the wrapped function's return value;
                # previously the wrapper always returned None.
                return func(*args, **kwargs)

        return wrapper
class set_temp_config(_SetTempPath):
    """
    Context manager to set a temporary path for the Astropy config, primarily
    for use with testing.

    If the path set by this context manager does not already exist it will be
    created, if possible.

    This may also be used as a decorator on a function to set the config path
    just within that function.

    Parameters
    ----------
    path : str, optional
        The directory (which must exist) in which to find the Astropy config
        files, or create them if they do not already exist.  If None, this
        restores the config path to the user's default config path as returned
        by `get_config_dir` as though this context manager were not in effect
        (this is useful for testing).  In this case the ``delete`` argument is
        always ignored.

    delete : bool, optional
        If True, cleans up the temporary directory after exiting the temp
        context (default: False).
    """

    _default_path_getter = staticmethod(get_config_dir)

    def __enter__(self):
        # Special case for the config case, where we need to reset all the
        # cached config objects so they are re-read from the new location.
        from .configuration import _cfgobjs
        path = super().__enter__()
        _cfgobjs.clear()
        return path

    def __exit__(self, *args):
        from .configuration import _cfgobjs
        super().__exit__(*args)
        # Drop anything cached while the temporary path was active.
        _cfgobjs.clear()
class set_temp_cache(_SetTempPath):
    """
    Context manager to set a temporary path for the Astropy download cache,
    primarily for use with testing (though there may be other applications
    for setting a different cache directory, for example to switch to a cache
    dedicated to large files).

    If the path set by this context manager does not already exist it will be
    created, if possible.

    This may also be used as a decorator on a function to set the cache path
    just within that function.

    Parameters
    ----------
    path : str
        The directory (which must exist) in which to find the Astropy cache
        files, or create them if they do not already exist.  If None, this
        restores the cache path to the user's default cache path as returned
        by `get_cache_dir` as though this context manager were not in effect
        (this is useful for testing).  In this case the ``delete`` argument is
        always ignored.

    delete : bool, optional
        If True, cleans up the temporary directory after exiting the temp
        context (default: False).
    """

    # Unlike set_temp_config, no extra cache invalidation is needed here;
    # the base class enter/exit behavior suffices.
    _default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_astropy_dir(dirnm, linkto):
    """Return the absolute path of ``~/.astropy/<dirnm>``, creating both
    ``~/.astropy`` and the subdirectory if needed.

    If *linkto* is a non-existing path (e.g. an XDG location) and we are
    not on Windows, a symlink from *linkto* to the created directory is made.
    """
    innerdir = os.path.join(_find_home(), '.astropy')
    maindir = os.path.join(_find_home(), '.astropy', dirnm)
    if not os.path.exists(maindir):
        # first create .astropy dir if needed
        if not os.path.exists(innerdir):
            try:
                os.mkdir(innerdir)
            except OSError:
                # Another process may have created it between the exists()
                # check and the mkdir; only re-raise if it's truly absent.
                if not os.path.isdir(innerdir):
                    raise
        elif not os.path.isdir(innerdir):
            msg = 'Intended Astropy directory {0} is actually a file.'
            raise OSError(msg.format(innerdir))

        try:
            os.mkdir(maindir)
        except OSError:
            # Same creation race as above.
            if not os.path.isdir(maindir):
                raise

        # Symlinking is skipped on Windows (needs special privileges) and
        # when the target already exists.
        if (not sys.platform.startswith('win') and
                linkto is not None and
                not os.path.exists(linkto)):
            os.symlink(maindir, linkto)

    elif not os.path.isdir(maindir):
        msg = 'Intended Astropy {0} directory {1} is actually a file.'
        raise OSError(msg.format(dirnm, maindir))

    return os.path.abspath(maindir)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import io
import os
import tempfile
import unittest
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import MagicMock
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.cli.commands import dag_command
from airflow.exceptions import AirflowException
from airflow.models import DagBag, DagModel, DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs
# Directory containing this test module; used to locate the sibling DAGs dir.
dag_folder_path = '/'.join(os.path.realpath(__file__).split('/')[:-1])

# Fixed, timezone-aware anchor date used throughout these CLI tests.
DEFAULT_DATE = timezone.make_aware(datetime(2015, 1, 1), timezone=timezone.utc)
TEST_DAG_FOLDER = os.path.join(os.path.dirname(dag_folder_path), 'dags')
TEST_DAG_ID = 'unit_tests'

# Path to Airflow's bundled example DAGs (three levels up from this file).
EXAMPLE_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))), "airflow/example_dags"
)
# TODO: Check if tests needs side effects - locally there's missing DAG
class TestCliDags(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Build the example DagBag once and persist it to the DB so the CLI
        # commands under test (which read from the DB) can see the DAGs.
        cls.dagbag = DagBag(include_examples=True)
        cls.dagbag.sync_to_db()
        cls.parser = cli_parser.get_parser()
    @classmethod
    def tearDownClass(cls) -> None:
        # Leave a clean database for other test modules.
        clear_db_runs()
        clear_db_dags()
    def test_reserialize(self):
        """``dags reserialize`` clears and then rebuilds serialized DAGs."""
        # Assert that there are serialized Dags
        with create_session() as session:
            serialized_dags_before_command = session.query(SerializedDagModel).all()
            assert len(serialized_dags_before_command)  # There are serialized DAGs to delete

        # Run clear of serialized dags
        dag_command.dag_reserialize(self.parser.parse_args(['dags', 'reserialize', "--clear-only"]))
        # Assert no serialized Dags
        with create_session() as session:
            serialized_dags_after_clear = session.query(SerializedDagModel).all()
            assert not len(serialized_dags_after_clear)

        # Serialize manually
        dag_command.dag_reserialize(self.parser.parse_args(['dags', 'reserialize']))

        # Check serialized DAGs are back
        with create_session() as session:
            serialized_dags_after_reserialize = session.query(SerializedDagModel).all()
            assert len(serialized_dags_after_reserialize) >= 40  # Serialized DAGs back
    @mock.patch("airflow.cli.commands.dag_command.DAG.run")
    def test_backfill(self, mock_run):
        """CLI backfill forwards the expected kwargs to DAG.run().

        Covers the plain invocation, dry runs (with and without
        ``--task-regex``) and the ``--local`` flag.
        """
        dag_command.dag_backfill(
            self.parser.parse_args(
                ['dags', 'backfill', 'example_bash_operator', '--start-date', DEFAULT_DATE.isoformat()]
            )
        )

        mock_run.assert_called_once_with(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            conf=None,
            delay_on_limit_secs=1.0,
            donot_pickle=False,
            ignore_first_depends_on_past=True,
            ignore_task_deps=False,
            local=False,
            mark_success=False,
            pool=None,
            rerun_failed_tasks=False,
            run_backwards=False,
            verbose=False,
        )
        mock_run.reset_mock()
        dag = self.dagbag.get_dag('example_bash_operator')

        # Dry run restricted to one task: prints the plan, runs nothing.
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            dag_command.dag_backfill(
                self.parser.parse_args(
                    [
                        'dags',
                        'backfill',
                        'example_bash_operator',
                        '--task-regex',
                        'runme_0',
                        '--dry-run',
                        '--start-date',
                        DEFAULT_DATE.isoformat(),
                    ]
                ),
                dag=dag,
            )

        output = stdout.getvalue()
        assert f"Dry run of DAG example_bash_operator on {DEFAULT_DATE.isoformat()}\n" in output
        assert "Task runme_0\n" in output
        mock_run.assert_not_called()  # Dry run shouldn't run the backfill

        dag_command.dag_backfill(
            self.parser.parse_args(
                [
                    'dags',
                    'backfill',
                    'example_bash_operator',
                    '--dry-run',
                    '--start-date',
                    DEFAULT_DATE.isoformat(),
                ]
            ),
            dag=dag,
        )

        mock_run.assert_not_called()  # Dry run shouldn't run the backfill

        dag_command.dag_backfill(
            self.parser.parse_args(
                [
                    'dags',
                    'backfill',
                    'example_bash_operator',
                    '--local',
                    '--start-date',
                    DEFAULT_DATE.isoformat(),
                ]
            ),
            dag=dag,
        )

        mock_run.assert_called_once_with(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            conf=None,
            delay_on_limit_secs=1.0,
            donot_pickle=False,
            ignore_first_depends_on_past=True,
            ignore_task_deps=False,
            local=True,
            mark_success=False,
            pool=None,
            rerun_failed_tasks=False,
            run_backwards=False,
            verbose=False,
        )
        mock_run.reset_mock()
@mock.patch("airflow.cli.commands.dag_command.get_dag")
def test_backfill_fails_without_loading_dags(self, mock_get_dag):
cli_args = self.parser.parse_args(['dags', 'backfill', 'example_bash_operator'])
with pytest.raises(AirflowException):
dag_command.dag_backfill(cli_args)
mock_get_dag.assert_not_called()
def test_show_dag_dependencies_print(self):
with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
dag_command.dag_dependencies_show(self.parser.parse_args(['dags', 'show-dependencies']))
out = temp_stdout.getvalue()
assert "digraph" in out
assert "graph [rankdir=LR]" in out
    @mock.patch("airflow.cli.commands.dag_command.render_dag_dependencies")
    def test_show_dag_dependencies_save(self, mock_render_dag_dependencies):
        """--save hands filename/format to the graphviz render and reports it."""
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_dependencies_show(
                self.parser.parse_args(['dags', 'show-dependencies', '--save', 'output.png'])
            )
            out = temp_stdout.getvalue()

        mock_render_dag_dependencies.return_value.render.assert_called_once_with(
            cleanup=True, filename='output', format='png'
        )
        assert "File output.png saved" in out
    def test_show_dag_print(self):
        """``dags show`` prints the DAG's graphviz DOT source to stdout."""
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_show(self.parser.parse_args(['dags', 'show', 'example_bash_operator']))
            out = temp_stdout.getvalue()
        assert "label=example_bash_operator" in out
        assert "graph [label=example_bash_operator labelloc=t rankdir=LR]" in out
        assert "runme_2 -> run_after_loop" in out
    @mock.patch("airflow.cli.commands.dag_command.render_dag")
    def test_show_dag_save(self, mock_render_dag):
        """--save renders the graph to the requested file name and format."""
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_show(
                self.parser.parse_args(['dags', 'show', 'example_bash_operator', '--save', 'awesome.png'])
            )
            out = temp_stdout.getvalue()
        mock_render_dag.return_value.render.assert_called_once_with(
            cleanup=True, filename='awesome', format='png'
        )
        assert "File awesome.png saved" in out
    @mock.patch("airflow.cli.commands.dag_command.subprocess.Popen")
    @mock.patch("airflow.cli.commands.dag_command.render_dag")
    def test_show_dag_imgcat(self, mock_render_dag, mock_popen):
        """--imgcat pipes the rendered PNG bytes into the imgcat subprocess."""
        mock_render_dag.return_value.pipe.return_value = b"DOT_DATA"
        mock_proc = mock.MagicMock()
        mock_proc.returncode = 0
        mock_proc.communicate.return_value = (b"OUT", b"ERR")
        mock_popen.return_value.__enter__.return_value = mock_proc
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_show(
                self.parser.parse_args(['dags', 'show', 'example_bash_operator', '--imgcat'])
            )
            out = temp_stdout.getvalue()
        mock_render_dag.return_value.pipe.assert_called_once_with(format='png')
        mock_proc.communicate.assert_called_once_with(b'DOT_DATA')
        assert "OUT" in out
        assert "ERR" in out
    @mock.patch("airflow.cli.commands.dag_command.DAG.run")
    def test_cli_backfill_depends_on_past(self, mock_run):
        """
        Test that CLI respects -I argument

        We just check we call dag.run() right. The behaviour of that kwarg is
        tested in test_jobs
        """
        dag_id = 'test_dagrun_states_deadlock'
        run_date = DEFAULT_DATE + timedelta(days=1)
        args = [
            'dags',
            'backfill',
            dag_id,
            '--local',
            '--start-date',
            run_date.isoformat(),
            '--ignore-first-depends-on-past',
        ]
        dag = self.dagbag.get_dag(dag_id)

        dag_command.dag_backfill(self.parser.parse_args(args), dag=dag)

        mock_run.assert_called_once_with(
            start_date=run_date,
            end_date=run_date,
            conf=None,
            delay_on_limit_secs=1.0,
            donot_pickle=False,
            ignore_first_depends_on_past=True,
            ignore_task_deps=False,
            local=True,
            mark_success=False,
            pool=None,
            rerun_failed_tasks=False,
            run_backwards=False,
            verbose=False,
        )
    @mock.patch("airflow.cli.commands.dag_command.DAG.run")
    def test_cli_backfill_depends_on_past_backwards(self, mock_run):
        """
        Test that CLI respects -B argument and raises on interaction with depends_on_past
        """
        dag_id = 'test_depends_on_past'
        start_date = DEFAULT_DATE + timedelta(days=1)
        end_date = start_date + timedelta(days=1)
        args = [
            'dags',
            'backfill',
            dag_id,
            '--local',
            '--start-date',
            start_date.isoformat(),
            '--end-date',
            end_date.isoformat(),
            '--ignore-first-depends-on-past',
            '--run-backwards',
        ]
        dag = self.dagbag.get_dag(dag_id)

        dag_command.dag_backfill(self.parser.parse_args(args), dag=dag)
        mock_run.assert_called_once_with(
            start_date=start_date,
            end_date=end_date,
            conf=None,
            delay_on_limit_secs=1.0,
            donot_pickle=False,
            ignore_first_depends_on_past=True,
            ignore_task_deps=False,
            local=True,
            mark_success=False,
            pool=None,
            rerun_failed_tasks=False,
            run_backwards=True,
            verbose=False,
        )
    def test_next_execution(self):
        """``dags next-execution`` prints upcoming run dates per schedule."""
        dag_ids = [
            'example_bash_operator',  # schedule_interval is '0 0 * * *'
            'latest_only',  # schedule_interval is timedelta(hours=4)
            'example_python_operator',  # schedule_interval=None
            'example_xcom',
        ]  # schedule_interval="@once"

        # Delete DagRuns
        with create_session() as session:
            dr = session.query(DagRun).filter(DagRun.dag_id.in_(dag_ids))
            dr.delete(synchronize_session=False)

        # Test None output
        args = self.parser.parse_args(['dags', 'next-execution', dag_ids[0]])
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_next_execution(args)
            out = temp_stdout.getvalue()
        # `next_execution` function is inapplicable if no execution record found
        # It prints `None` in such cases
        assert "None" in out

        # The details below is determined by the schedule_interval of example DAGs
        now = DEFAULT_DATE
        expected_output = [
            (now + timedelta(days=1)).isoformat(),
            (now + timedelta(hours=4)).isoformat(),
            "None",
            "None",
        ]
        expected_output_2 = [
            (now + timedelta(days=1)).isoformat() + os.linesep + (now + timedelta(days=2)).isoformat(),
            (now + timedelta(hours=4)).isoformat() + os.linesep + (now + timedelta(hours=8)).isoformat(),
            "None",
            "None",
        ]

        for i, dag_id in enumerate(dag_ids):
            dag = self.dagbag.dags[dag_id]
            # Create a DagRun for each DAG, to prepare for next step
            dag.create_dagrun(
                run_type=DagRunType.SCHEDULED,
                execution_date=now,
                start_date=now,
                state=State.FAILED,
            )

            # Test num-executions = 1 (default)
            args = self.parser.parse_args(['dags', 'next-execution', dag_id])
            with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
                dag_command.dag_next_execution(args)
                out = temp_stdout.getvalue()
            assert expected_output[i] in out

            # Test num-executions = 2
            args = self.parser.parse_args(['dags', 'next-execution', dag_id, '--num-executions', '2'])
            with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
                dag_command.dag_next_execution(args)
                out = temp_stdout.getvalue()
            assert expected_output_2[i] in out

        # Clean up before leaving
        with create_session() as session:
            dr = session.query(DagRun).filter(DagRun.dag_id.in_(dag_ids))
            dr.delete(synchronize_session=False)
    @conf_vars({('core', 'load_examples'): 'true'})
    def test_cli_report(self):
        """``dags report`` lists the example DAG files in its JSON output."""
        args = self.parser.parse_args(['dags', 'report', '--output', 'json'])
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_report(args)
            out = temp_stdout.getvalue()

        assert "airflow/example_dags/example_complex.py" in out
        assert "example_complex" in out
    @conf_vars({('core', 'load_examples'): 'true'})
    def test_cli_list_dags(self):
        """``dags list`` in YAML mode includes the key DAG attributes."""
        args = self.parser.parse_args(['dags', 'list', '--output', 'yaml'])
        with contextlib.redirect_stdout(io.StringIO()) as temp_stdout:
            dag_command.dag_list_dags(args)
            out = temp_stdout.getvalue()

        assert "owner" in out
        assert "airflow" in out
        assert "paused" in out
        assert "airflow/example_dags/example_complex.py" in out
        assert "- dag_id:" in out
def test_cli_list_dag_runs(self):
dag_command.dag_trigger(
self.parser.parse_args(
[
'dags',
'trigger',
'example_bash_operator',
]
)
)
args = self.parser.parse_args(
[
'dags',
'list-runs',
'--dag-id',
'example_bash_operator',
'--no-backfill',
'--start-date',
DEFAULT_DATE.isoformat(),
'--end-date',
timezone.make_aware(datetime.max).isoformat(),
]
)
dag_command.dag_list_dag_runs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(
[
'dags',
'list-jobs',
'--dag-id',
'example_bash_operator',
'--state',
'success',
'--limit',
'100',
'--output',
'json',
]
)
dag_command.dag_list_jobs(args)
def test_pause(self):
args = self.parser.parse_args(['dags', 'pause', 'example_bash_operator'])
dag_command.dag_pause(args)
assert self.dagbag.dags['example_bash_operator'].get_is_paused() in [True, 1]
args = self.parser.parse_args(['dags', 'unpause', 'example_bash_operator'])
dag_command.dag_unpause(args)
assert self.dagbag.dags['example_bash_operator'].get_is_paused() in [False, 0]
    def test_trigger_dag(self):
        """``dags trigger`` creates a manual run with conf and a UTC-coerced date."""
        dag_command.dag_trigger(
            self.parser.parse_args(
                [
                    'dags',
                    'trigger',
                    'example_bash_operator',
                    '--run-id=test_trigger_dag',
                    '--exec-date=2021-06-04T09:00:00+08:00',
                    '--conf={"foo": "bar"}',
                ],
            ),
        )
        with create_session() as session:
            dagrun = session.query(DagRun).filter(DagRun.run_id == "test_trigger_dag").one()

        assert dagrun, "DagRun not created"
        assert dagrun.run_type == DagRunType.MANUAL
        assert dagrun.external_trigger
        assert dagrun.conf == {"foo": "bar"}

        # Coerced to UTC.
        assert dagrun.execution_date.isoformat(timespec="seconds") == "2021-06-04T01:00:00+00:00"

        # example_bash_operator runs every day at midnight, so the data interval
        # should be aligned to the previous day.
        assert dagrun.data_interval_start.isoformat(timespec="seconds") == "2021-06-03T00:00:00+00:00"
        assert dagrun.data_interval_end.isoformat(timespec="seconds") == "2021-06-04T00:00:00+00:00"
def test_trigger_dag_invalid_conf(self):
with pytest.raises(ValueError):
dag_command.dag_trigger(
self.parser.parse_args(
[
'dags',
'trigger',
'example_bash_operator',
'--run-id',
'trigger_dag_xxx',
'--conf',
'NOT JSON',
]
),
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
dag_command.dag_delete(self.parser.parse_args(['dags', 'delete', key, '--yes']))
assert session.query(DM).filter_by(dag_id=key).count() == 0
with pytest.raises(AirflowException):
dag_command.dag_delete(
self.parser.parse_args(['dags', 'delete', 'does_not_exist_dag', '--yes']),
)
def test_delete_dag_existing_file(self):
# Test to check that the DAG should be deleted even if
# the file containing it is not deleted
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
dag_command.dag_delete(self.parser.parse_args(['dags', 'delete', key, '--yes']))
assert session.query(DM).filter_by(dag_id=key).count() == 0
def test_cli_list_jobs(self):
args = self.parser.parse_args(['dags', 'list-jobs'])
dag_command.dag_list_jobs(args)
def test_dag_state(self):
assert (
dag_command.dag_state(
self.parser.parse_args(['dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])
)
is None
)
    @mock.patch("airflow.cli.commands.dag_command.DebugExecutor")
    @mock.patch("airflow.cli.commands.dag_command.get_dag")
    def test_dag_test(self, mock_get_dag, mock_executor):
        """``dags test`` clears prior state, then runs the DAG via DebugExecutor."""
        cli_args = self.parser.parse_args(['dags', 'test', 'example_bash_operator', DEFAULT_DATE.isoformat()])
        dag_command.dag_test(cli_args)
        # The command must first fetch the DAG, then clear any prior state for
        # the execution date, and finally run over that same single-date window.
        mock_get_dag.assert_has_calls(
            [
                mock.call(subdir=cli_args.subdir, dag_id='example_bash_operator'),
                mock.call().clear(
                    start_date=cli_args.execution_date,
                    end_date=cli_args.execution_date,
                    dag_run_state=State.NONE,
                ),
                mock.call().run(
                    # The patched DebugExecutor instance is handed to DAG.run.
                    executor=mock_executor.return_value,
                    start_date=cli_args.execution_date,
                    end_date=cli_args.execution_date,
                    run_at_least_once=True,
                ),
            ]
        )
    @mock.patch("airflow.cli.commands.dag_command.render_dag", return_value=MagicMock(source="SOURCE"))
    @mock.patch("airflow.cli.commands.dag_command.DebugExecutor")
    @mock.patch("airflow.cli.commands.dag_command.get_dag")
    def test_dag_test_show_dag(self, mock_get_dag, mock_executor, mock_render_dag):
        """``dags test --show-dagrun`` additionally renders the DAG graph to stdout."""
        cli_args = self.parser.parse_args(
            ['dags', 'test', 'example_bash_operator', DEFAULT_DATE.isoformat(), '--show-dagrun']
        )
        # Capture stdout so the rendered graph source can be asserted on.
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            dag_command.dag_test(cli_args)
        output = stdout.getvalue()
        # Same fetch/clear/run sequence as ``dags test`` without --show-dagrun.
        mock_get_dag.assert_has_calls(
            [
                mock.call(subdir=cli_args.subdir, dag_id='example_bash_operator'),
                mock.call().clear(
                    start_date=cli_args.execution_date,
                    end_date=cli_args.execution_date,
                    dag_run_state=State.NONE,
                ),
                mock.call().run(
                    executor=mock_executor.return_value,
                    start_date=cli_args.execution_date,
                    end_date=cli_args.execution_date,
                    run_at_least_once=True,
                ),
            ]
        )
        # render_dag is called on the fetched DAG; its mocked ``source``
        # attribute ("SOURCE") must have been printed.
        mock_render_dag.assert_has_calls([mock.call(mock_get_dag.return_value, tis=[])])
        assert "SOURCE" in output
| |
"""
Supports flushing statsite metrics to Librato
"""
import sys
import socket
import logging
import ConfigParser
import re
import base64
import urllib2
import json
import os
##
# Librato sink for statsite
# =========================
#
# Use with the following stream command:
#
# stream_cmd = python sinks/librato.py librato.ini
#
# The Librato sink takes an INI format configuration file as a single
# argument. The following is an example configuration:
#
# Configuration example:
# ---------------------
#
# [librato]
# email = john@example.com
# token = 02ac4003c4fcd11bf9cee34e34263155dc7ba1906c322d167db6ab4b2cd2082b
# source_regex = ^([^-]+)--
# floor_time_secs = 60
#
# Options:
# -------
#
# - email / token: Librato account credentials (required).
# - source: Source name to use for samples, defaults to hostname if not set.
# - source_regex: Source name regex extraction see:
# https://github.com/librato/statsd-librato-backend#setting-the-source-per-metric
# - floor_time_secs: Floor samples to this time (should match statsite flush_interval.
# - prefix: Metric name prefix to set for all metrics.
#
###
class LibratoStore(object):
    """Statsite sink that batches gauges and POSTs them to the Librato API.

    Note: this module targets Python 2 (ConfigParser / urllib2 /
    base64.encodestring are the Python 2 stdlib APIs).
    """

    def __init__(self, conffile="/etc/statsite/librato.ini"):
        """
        Implements an interface that allows metrics to be persisted to Librato.

        Raises a :class:`ValueError` on bad arguments or `Exception` on missing
        configuration section.

        :Parameters:
            - `conffile`: INI configuration file.
        """
        self.logger = logging.getLogger("statsite.librato")
        self.api = "https://metrics-api.librato.com"
        self.parse_conf(conffile)

        self.sink_name = "statsite-librato"
        self.sink_version = "0.0.1"
        self.flush_timeout_secs = 5

        # Pending gauges keyed by "name\tsource".
        self.gauges = {}

        # Limit our payload sizes
        self.max_metrics_payload = 500

        self.timer_re = re.compile(r"^timers\.")
        self.type_re = re.compile(r"^(kv|timers|counts|gauges|sets)\.(.+)$")
        # Map statsite timer suffixes to Librato gauge fields; None means the
        # sub-metric is dropped.
        self.sfx_map = {
            'sum': 'sum',
            'sum_sq': 'sum_squares',
            'count': 'count',
            'stdev': None,
            'lower': 'min',
            'upper': 'max',
            'mean': None
        }
        self.sfx_re = re.compile(r"(.+)\.(sum|sum_sq|count|stdev|lower|upper|mean)$")
        self.sanitize_re = re.compile(r"[^-A-Za-z0-9.:_]")

    def parse_conf(self, conffile):
        """
        Loads configuration from an INI format file.

        Required options: ``email`` and ``token``.  Optional: ``api``,
        ``source`` (defaults to the local hostname), ``source_regex``,
        ``floor_time_secs`` and ``prefix``.
        """
        sect = "librato"

        config = ConfigParser.RawConfigParser()
        config.read(conffile)

        if not config.has_section(sect):
            raise Exception("Can not locate config section 'librato'")

        if config.has_option(sect, 'email'):
            self.email = config.get(sect, 'email')
        else:
            raise ValueError("email must be set in config")

        if config.has_option(sect, 'token'):
            self.token = config.get(sect, 'token')
        else:
            raise ValueError("token must be set in config")

        if config.has_option(sect, 'api'):
            self.api = config.get(sect, 'api')

        if config.has_option(sect, 'source'):
            self.source = config.get(sect, 'source')
        else:
            self.source = socket.gethostname()

        if config.has_option(sect, 'source_regex'):
            reg = config.get(sect, 'source_regex')
            # Strip enclosing /'s, e.g. "/^([^-]+)--/" -> "^([^-]+)--"
            if len(reg) > 2 and reg[0] == '/' and \
               reg[len(reg) - 1] == "/":
                reg = reg[1:len(reg) - 1]
            self.source_re = re.compile(reg)
        else:
            self.source_re = None

        if config.has_option(sect, 'floor_time_secs'):
            self.floor_time_secs = config.getint(sect, 'floor_time_secs')
        else:
            self.floor_time_secs = None

        if config.has_option(sect, "prefix"):
            self.prefix = config.get(sect, "prefix")
        else:
            self.prefix = None

    def split_timer_metric(self, name):
        """
        Split a timer metric name into (base name, Librato gauge field).

        Returns ``(None, None)`` for suffixes that are deliberately dropped
        (stdev, mean) and ``(name, None)`` when there is no known suffix.
        """
        m = self.sfx_re.match(name)
        if m is not None:
            if self.sfx_map[m.group(2)] is not None:
                return m.group(1), self.sfx_map[m.group(2)]
            else:
                # These we drop
                return None, None
        else:
            return name, None

    def sanitize(self, name):
        """Replace characters Librato rejects in metric/source names with '_'."""
        return self.sanitize_re.sub("_", name)

    def add_measure(self, key, value, time):
        """
        Fold one "type.name" sample into the pending gauge payload.

        Timer sub-metrics (sum, count, ...) are merged into a single gauge
        keyed by (name, source).
        """
        ts = int(time)
        if self.floor_time_secs is not None:
            # Floor to the configured interval; '//' keeps integer semantics
            # explicit (identical to '/' on Python 2 ints, correct on 3).
            ts = (ts // self.floor_time_secs) * self.floor_time_secs

        value = float(value)
        source = self.source

        istimer = self.timer_re.match(key) is not None
        name = self.type_re.match(key).group(2)

        # Match the source regex: the first capture group becomes the source
        # and the matched span is removed from the metric name.
        if self.source_re is not None:
            m = self.source_re.search(name)
            if m is not None:
                source = m.group(1)
                name = name[0:m.start(0)] + name[m.end(0):]

        subf = None
        if istimer:
            name, subf = self.split_timer_metric(name)
        if subf is None:
            subf = 'value'

        # Bail if skipping
        if name is None:
            return

        # Add a metric prefix
        if self.prefix:
            name = "%s.%s" % (self.prefix, name)

        name = self.sanitize(name)
        source = self.sanitize(source)

        k = "%s\t%s" % (name, source)
        if k not in self.gauges:
            self.gauges[k] = {
                'name': name,
                'source': source,
                'measure_time': ts,
            }

        self.gauges[k][subf] = value

    def build(self, metrics):
        """
        Build metric data to send to Librato

        :Parameters:
            - `metrics` : A list of "key|value|timestamp" strings.
        """
        if not metrics:
            return
        # Construct the output
        for m in metrics:
            k, vs, ts = m.split("|")
            self.add_measure(k, vs, ts)

    def flush_payload(self, headers, g):
        """
        POST a payload to Librato.
        """
        body = json.dumps({'gauges': g})
        url = "%s/v1/metrics" % (self.api)
        req = urllib2.Request(url, body, headers)

        try:
            f = urllib2.urlopen(req, timeout=self.flush_timeout_secs)
            # Drain the response so the connection can be reused/closed cleanly.
            f.read()
            f.close()
        except urllib2.HTTPError as error:
            body = error.read()
            self.logger.warning('Failed to send metrics to Librato: Code: %d. Response: %s' % \
                                (error.code, body))
        except IOError as error:
            if hasattr(error, 'reason'):
                self.logger.warning('Error when sending metrics Librato (%s)' % (error.reason))
            elif hasattr(error, 'code'):
                self.logger.warning('Error when sending metrics Librato (%s)' % (error.code))
            else:
                self.logger.warning('Error when sending metrics Librato and I dunno why')

    def flush(self):
        """
        POST the collected gauges to Librato, in chunks of at most
        ``max_metrics_payload`` metrics per request.
        """
        # Nothing to do
        if len(self.gauges) == 0:
            return

        headers = {
            'Content-Type': 'application/json',
            'User-Agent': self.build_user_agent(),
            'Authorization': 'Basic %s' % self.build_basic_auth()
        }

        metrics = []
        count = 0
        for g in self.gauges.values():
            metrics.append(g)
            count += 1

            if count >= self.max_metrics_payload:
                self.flush_payload(headers, metrics)
                count = 0
                metrics = []

        if count > 0:
            self.flush_payload(headers, metrics)

    def build_basic_auth(self):
        """Return the base64-encoded "email:token" pair for HTTP Basic auth."""
        # base64.encodestring / str.translate(None, ...) are the Python 2 APIs
        # this sink targets.
        base64string = base64.encodestring('%s:%s' % (self.email, self.token))
        return base64string.translate(None, '\n')

    def build_user_agent(self):
        """Return a User-Agent string identifying this sink and platform."""
        try:
            uname = os.uname()
            system = "; ".join([uname[0], uname[4]])
        except (AttributeError, OSError):
            # os.uname() does not exist on all platforms.  os.name is a plain
            # string attribute; the original code called it as a function,
            # which would raise TypeError here.
            system = os.name
        pver = sys.version_info
        user_agent = '%s/%s (%s) Python-Urllib2/%d.%d' % \
            (self.sink_name, self.sink_version, system, pver[0], pver[1])
        return user_agent
if __name__ == "__main__":
    # Initialize the logger
    logging.basicConfig()

    # Initialize from our arguments (optionally a single config-file path)
    librato = LibratoStore(*sys.argv[1:])

    # Get all the inputs: statsite pipes "key|value|timestamp" lines on stdin
    metrics = sys.stdin.read()

    # Flush everything that was accumulated to Librato
    librato.build(metrics.splitlines())
    librato.flush()
| |
import numpy as np
from featureflow import Node
from scipy.fftpack import dct
from scipy.stats.mstats import gmean
from .functional import fft, mdct
from .frequencyscale import LinearScale, ChromaScale, BarkScale
from .weighting import AWeighting
from .tfrepresentation import FrequencyDimension
from .frequencyadaptive import FrequencyAdaptive
from zounds.core import ArrayWithUnits, IdentityDimension
from zounds.nputil import safe_log
from zounds.timeseries import audio_sample_rate
from .sliding_window import HanningWindowingFunc
class FrequencyWeighting(Node):
    """
    Processing node that multiplies incoming frames by a frequency weighting.

    `FrequencyWeighting` expects to be passed an
    :class:`~zounds.core.ArrayWithUnits` instance whose last dimension is a
    :class:`~zounds.spectral.FrequencyDimension`

    Args:
        weighting (FrequencyWeighting): the frequency weighting to apply
        needs (Node): a processing node on which this node depends whose last
            dimension is a :class:`~zounds.spectral.FrequencyDimension`
    """

    def __init__(self, weighting=None, needs=None):
        super(FrequencyWeighting, self).__init__(needs=needs)
        self.weighting = weighting

    def _process(self, data):
        weighted = data * self.weighting
        yield weighted
class FFT(Node):
    """
    A processing node computing the FFT of a real-valued signal.

    Args:
        axis (int): The axis over which the FFT should be computed
        padding_samples (int): number of zero samples to pad each window with
            before applying the FFT
        needs (Node): a processing node on which this one depends

    See Also:
        :class:`~zounds.synthesize.FFTSynthesizer`
    """

    def __init__(self, needs=None, axis=-1, padding_samples=0):
        super(FFT, self).__init__(needs=needs)
        self._axis = axis
        self._padding_samples = padding_samples

    def _process(self, data):
        transformed = fft(
            data, axis=self._axis, padding_samples=self._padding_samples)
        yield transformed
class DCT(Node):
    """
    A processing node that performs a Type II Discrete Cosine Transform
    (https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II) of the
    input

    Args:
        axis (int): The axis over which to perform the DCT transform
        needs (Node): a processing node on which this one depends

    See Also:
        :class:`~zounds.synthesize.DctSynthesizer`
    """

    def __init__(self, axis=-1, scale_always_even=False, needs=None):
        super(DCT, self).__init__(needs=needs)
        self.scale_always_even = scale_always_even
        self._axis = axis

    def _process(self, data):
        coeffs = dct(data, norm='ortho', axis=self._axis)
        # Recover the audio sample rate from samples-per-window / duration.
        samples_per_window = data.shape[1]
        sr = audio_sample_rate(
            int(samples_per_window / data.dimensions[0].duration_in_seconds))
        scale = LinearScale.from_sample_rate(
            sr, coeffs.shape[-1], always_even=self.scale_always_even)
        new_dims = [data.dimensions[0], FrequencyDimension(scale)]
        yield ArrayWithUnits(coeffs, new_dims)
class DCTIV(Node):
    """
    A processing node that performs a Type IV Discrete Cosine Transform
    (https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-IV) of the
    input

    Args:
        needs (Node): a processing node on which this one depends

    See Also:
        :class:`~zounds.synthesize.DCTIVSynthesizer`
    """
    def __init__(self, scale_always_even=False, needs=None):
        super(DCTIV, self).__init__(needs=needs)
        self.scale_always_even = scale_always_even
    def _process_raw(self, data):
        # Compute the DCT-IV via a length-2l complex FFT: pre-twiddle the
        # (real) input, zero-pad to 2l, FFT, then post-twiddle and keep the
        # real part.  Statement order and the exact twiddle factors matter.
        l = data.shape[1]
        tf = np.arange(0, l)
        z = np.zeros((len(data), l * 2))
        z[:, :l] = (data * np.exp(-1j * np.pi * tf / 2 / l)).real
        z = np.fft.fft(z)[:, :l]
        # NOTE(review): 2 / l assumes Python 3 true division; under Python 2
        # this would floor to 0 for l > 2 — confirm the supported runtime.
        raw = np.sqrt(2 / l) * \
            (z * np.exp(-1j * np.pi * (tf + 0.5) / 2 / l)).real
        return raw
    def _process(self, data):
        raw = self._process_raw(data)
        # Derive the sample rate from samples-per-window over window duration.
        sr = audio_sample_rate(
            int(data.shape[1] / data.dimensions[0].duration_in_seconds))
        scale = LinearScale.from_sample_rate(
            sr, data.shape[1], always_even=self.scale_always_even)
        yield ArrayWithUnits(
            raw, [data.dimensions[0], FrequencyDimension(scale)])
class MDCT(Node):
    """
    A processing node that performs a modified discrete cosine transform
    (https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform) of the
    input.

    This is really just a lapped version of the DCT-IV transform

    Args:
        needs (Node): a processing node on which this one depends

    See Also:
        :class:`~zounds.synthesize.MDCTSynthesizer`
    """

    def __init__(self, needs=None):
        super(MDCT, self).__init__(needs=needs)

    def _process(self, data):
        coeffs = mdct(data)
        rate = audio_sample_rate(data.dimensions[1].samples_per_second)
        scale = LinearScale.from_sample_rate(rate, coeffs.shape[1])
        new_dims = [data.dimensions[0], FrequencyDimension(scale)]
        yield ArrayWithUnits(coeffs, new_dims)
class FrequencyAdaptiveTransform(Node):
    """
    A processing node that expects to receive the input from a frequency domain
    transformation (e.g. :class:`~zounds.spectral.FFT`), and produces a
    :class:`~zounds.spectral.FrequencyAdaptive` instance where time resolution
    can vary by frequency. This is similar to, but not precisely the same as
    ideas introduced in:

    * `A quasi-orthogonal, invertible, and perceptually relevant time-frequency transform for audio coding <https://hal-amu.archives-ouvertes.fr/hal-01194806/document>`_
    * `A FRAMEWORK FOR INVERTIBLE, REAL-TIME CONSTANT-Q TRANSFORMS <http://www.univie.ac.at/nonstatgab/pdf_files/dogrhove12_amsart.pdf>`_

    Args:
        transform (function): the transform to be applied to each frequency band
        scale (FrequencyScale): the scale used to take frequency band slices
        window_func (numpy.ndarray): the windowing function to apply each band
            before the transform is applied
        check_scale_overlap_ratio (bool): If this feature is to be used for
            resynthesis later, ensure that each frequency band overlaps with
            the previous one by at least half, to ensure artifact-free synthesis

    See Also:
        :class:`~zounds.spectral.FrequencyAdaptive`
        :class:`~zounds.synthesize.FrequencyAdaptiveDCTSynthesizer`
        :class:`~zounds.synthesize.FrequencyAdaptiveFFTSynthesizer`
    """

    def __init__(
            self,
            transform=None,
            scale=None,
            window_func=None,
            check_scale_overlap_ratio=False,
            needs=None):
        super(FrequencyAdaptiveTransform, self).__init__(needs=needs)

        if check_scale_overlap_ratio:
            # Surface the scale's overlap assertion as a ValueError so callers
            # see a consistent exception type.
            try:
                scale.ensure_overlap_ratio(0.5)
            except AssertionError as e:
                raise ValueError(*e.args)

        self._window_func = window_func or np.ones
        self._scale = scale
        self._transform = transform

    def _process_band(self, data, band):
        try:
            band_coeffs = data[:, band]
        except IndexError:
            raise ValueError(
                'data must have FrequencyDimension as its last dimension, '
                'but it was {dim}'.format(dim=data.dimensions[-1]))
        window = self._window_func(band_coeffs.shape[1])
        windowed = band_coeffs * window[None, :]
        return self._transform(windowed, norm='ortho')

    def _process(self, data):
        transformed_bands = [
            self._process_band(data, band) for band in self._scale]
        yield FrequencyAdaptive(
            transformed_bands, data.dimensions[0], self._scale)
class BaseScaleApplication(Node):
    """Base node that applies a frequency scale to each incoming frame."""

    def __init__(self, scale, window, needs=None):
        super(BaseScaleApplication, self).__init__(needs=needs)
        self.window = window
        self.scale = scale

    def _new_dim(self):
        # Subclasses may override this to change the output dimension type.
        return FrequencyDimension(self.scale)

    def _preprocess(self, data):
        # Hook for subclasses; identity by default.
        return data

    def _process(self, data):
        preprocessed = self._preprocess(data)
        applied = self.scale.apply(preprocessed, self.window)
        new_dims = data.dimensions[:-1] + (self._new_dim(),)
        yield ArrayWithUnits(applied, new_dims)
class Chroma(BaseScaleApplication):
    """Apply an A-weighted chroma scale to incoming frequency-domain frames."""

    def __init__(
            self, frequency_band, window=HanningWindowingFunc(), needs=None):
        super(Chroma, self).__init__(
            ChromaScale(frequency_band), window, needs=needs)

    def _new_dim(self):
        # Chroma bins do not form a frequency scale, so the output carries an
        # identity dimension instead.
        return IdentityDimension()

    def _preprocess(self, data):
        magnitudes = np.abs(data)
        return magnitudes * AWeighting()
class BarkBands(BaseScaleApplication):
    """Apply a Bark scale of ``n_bands`` bands to incoming magnitude spectra."""

    def __init__(
            self,
            frequency_band,
            n_bands=100,
            window=HanningWindowingFunc(),
            needs=None):
        super(BarkBands, self).__init__(
            BarkScale(frequency_band, n_bands), window, needs=needs)

    def _preprocess(self, data):
        # Work with magnitudes; phase is discarded.
        return np.abs(data)
class SpectralCentroid(Node):
    """
    Indicates where the "center of mass" of the spectrum is. Perceptually,
    it has a robust connection with the impression of "brightness" of a
    sound. It is calculated as the weighted mean of the frequencies
    present in the signal, determined using a Fourier transform, with
    their magnitudes as the weights...

    -- http://en.wikipedia.org/wiki/Spectral_centroid
    """

    def __init__(self, needs=None):
        super(SpectralCentroid, self).__init__(needs=needs)

    def _first_chunk(self, data):
        # Bin indices start at 1 so the lowest bin still carries weight.
        self._bins = np.arange(1, data.shape[-1] + 1)
        self._bins_sum = np.sum(self._bins)
        return data

    def _process(self, data):
        magnitudes = np.abs(data)
        weighted = (magnitudes * self._bins).sum(axis=1)
        yield weighted / self._bins_sum
class SpectralFlatness(Node):
    """
    Spectral flatness or tonality coefficient, also known as Wiener
    entropy, is a measure used in digital signal processing to characterize an
    audio spectrum. Spectral flatness is typically measured in decibels, and
    provides a way to quantify how tone-like a sound is, as opposed to being
    noise-like. The meaning of tonal in this context is in the sense of the
    amount of peaks or resonant structure in a power spectrum, as opposed to
    flat spectrum of a white noise. A high spectral flatness indicates that
    the spectrum has a similar amount of power in all spectral bands - this
    would sound similar to white noise, and the graph of the spectrum would
    appear relatively flat and smooth. A low spectral flatness indicates that
    the spectral power is concentrated in a relatively small number of
    bands - this would typically sound like a mixture of sine waves, and the
    spectrum would appear "spiky"...

    -- http://en.wikipedia.org/wiki/Spectral_flatness
    """

    def __init__(self, needs=None):
        super(SpectralFlatness, self).__init__(needs=needs)

    def _process(self, data):
        magnitudes = np.abs(data)
        arithmetic_mean = magnitudes.mean(axis=1)
        # Replace zero means with a sentinel so the division below never
        # divides by zero (silent frames).
        arithmetic_mean[arithmetic_mean == 0] = -1e5
        flatness = gmean(magnitudes, axis=1) / arithmetic_mean
        yield ArrayWithUnits(flatness, magnitudes.dimensions[:1])
class BFCC(Node):
    """
    Bark frequency cepstral coefficients: a DCT of the log magnitude
    spectrum, keeping ``n_coeffs`` coefficients after skipping the first
    ``exclude`` of them.
    """

    def __init__(self, needs=None, n_coeffs=13, exclude=1):
        super(BFCC, self).__init__(needs=needs)
        self._n_coeffs = n_coeffs
        self._exclude = exclude

    def _process(self, data):
        magnitudes = np.abs(data)
        cepstrum = dct(safe_log(magnitudes), axis=1)
        start = self._exclude
        stop = self._exclude + self._n_coeffs
        selected = cepstrum[:, start:stop]
        yield ArrayWithUnits(
            selected.copy(), [magnitudes.dimensions[0], IdentityDimension()])
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import namedtuple
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.subsystems.zinc_language_mixin import ZincLanguageMixin
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.build_graph.address import Address
from pants.subsystem.subsystem import Subsystem
# full_version - the full scala version to use.
# style_version - the version of org.scalastyle.scalastyle to use.
major_version_info = namedtuple('major_version_info', ['full_version', 'style_version'])

# Known scala major versions and the exact compiler/scalastyle versions to
# resolve for each; keys match the non-custom choices of --scala-platform-version.
# Note that the compiler has two roles here: as a tool (invoked by the compile task), and as a
# runtime library (when compiling plugins, which require the compiler library as a dependency).
scala_build_info = {
  '2.10':
    major_version_info(
      full_version='2.10.4',
      style_version='0.3.2'),
  '2.11':
    major_version_info(
      full_version='2.11.7',
      style_version='0.8.0'),
}
class ScalaPlatform(JvmToolMixin, ZincLanguageMixin, Subsystem):
  """A scala platform.

  :API: public
  """
  options_scope = 'scala-platform'

  @classmethod
  def _create_jardep(cls, name, version):
    """Return a JarDependency on the org.scala-lang artifact ``name`` for ``version``."""
    return JarDependency(org='org.scala-lang',
                         name=name,
                         rev=scala_build_info[version].full_version)

  @classmethod
  def _create_runtime_jardep(cls, version):
    """Return the scala runtime library dependency for ``version``."""
    return cls._create_jardep('scala-library', version)

  @classmethod
  def _create_compiler_jardep(cls, version):
    """Return the scala compiler dependency for ``version``."""
    return cls._create_jardep('scala-compiler', version)

  @classmethod
  def _key_for_tool_version(cls, tool, version):
    """Return the jvm-tool registration key for ``tool`` at ``version``.

    Custom versions use the bare tool name; fixed versions get a suffixed key,
    e.g. ``scalac_2_10``.
    """
    if version == 'custom':
      return tool
    else:
      return '{}_{}'.format(tool, version.replace('.', '_'))

  @classmethod
  def register_options(cls, register):
    """Register the --version/--suffix-version options plus one jvm tool per
    (tool, scala version) combination."""
    def register_scala_compiler_tool(version):
      cls.register_jvm_tool(register,
                            cls._key_for_tool_version('scalac', version),
                            classpath=[cls._create_compiler_jardep(version)])

    def register_scala_repl_tool(version, with_jline=False):
      classpath = [cls._create_compiler_jardep(version)]  # Note: the REPL is in the compiler jar.
      if with_jline:
        jline_dep = JarDependency(
          org = 'org.scala-lang',
          name = 'jline',
          rev = scala_build_info['2.10'].full_version
        )
        classpath.append(jline_dep)
      cls.register_jvm_tool(register,
                            cls._key_for_tool_version('scala-repl', version),
                            classpath=classpath)

    def register_style_tool(version):
      # Note: Since we can't use ScalaJarDependency without creating a import loop we need to
      # specify the version info in the name.
      style_version = scala_build_info[version].style_version
      jardep = JarDependency('org.scalastyle', 'scalastyle_{}'.format(version), style_version)
      cls.register_jvm_tool(register,
                            cls._key_for_tool_version('scalastyle', version),
                            classpath=[jardep])

    super(ScalaPlatform, cls).register_options(register)
    register('--version', advanced=True, default='2.10',
             choices=['2.10', '2.11', 'custom'], fingerprint=True,
             help='The scala platform version. If --version=custom, the targets '
                  '//:scala-library, //:scalac, //:scala-repl and //:scalastyle will be used, '
                  'and must exist. Otherwise, defaults for the specified version will be used.')

    register('--suffix-version', advanced=True, default=None,
             help='Scala suffix to be used when a custom version is specified. For example 2.10.')

    # Register the fixed version tools.
    register_scala_compiler_tool('2.10')
    register_scala_repl_tool('2.10', with_jline=True)  # 2.10 repl requires jline.
    register_style_tool('2.10')

    register_scala_compiler_tool('2.11')
    register_scala_repl_tool('2.11')
    register_style_tool('2.11')

    # Register the custom tools. We provide a dummy classpath, so that register_jvm_tool won't
    # require that a target with the given spec actually exist (not everyone will define custom
    # scala platforms). However if the custom tool is actually resolved, we want that to
    # fail with a useful error, hence the dummy jardep with rev=None.
    def register_custom_tool(key):
      dummy_jardep = JarDependency('missing spec', ' //:{}'.format(key))
      cls.register_jvm_tool(register, cls._key_for_tool_version(key, 'custom'),
                            classpath=[dummy_jardep])

    register_custom_tool('scalac')
    register_custom_tool('scala-repl')
    register_custom_tool('scalastyle')

  def _tool_classpath(self, tool, products):
    """Return the proper classpath based on products and scala version."""
    return self.tool_classpath_from_products(products,
                                             self._key_for_tool_version(tool, self.version),
                                             scope=self.options_scope)

  def compiler_classpath(self, products):
    """Return the scala compiler classpath for the configured version."""
    return self._tool_classpath('scalac', products)

  def style_classpath(self, products):
    """Return the scalastyle classpath for the configured version."""
    return self._tool_classpath('scalastyle', products)

  @property
  def version(self):
    """The configured scala platform version ('2.10', '2.11' or 'custom')."""
    return self.get_options().version

  def suffix_version(self, name):
    """Appends the platform version to the given artifact name.

    Also validates that the name doesn't already end with the version.
    """
    if self.version == 'custom':
      suffix = self.get_options().suffix_version
      if suffix:
        return '{0}_{1}'.format(name, suffix)
      else:
        # Fix: added the missing inter-literal space and corrected the wording
        # (the suffix is used when a custom version IS specified).
        raise RuntimeError('Suffix version must be specified if using a custom scala version. '
                           'Suffix version is used for bootstrapping jars. If a custom '
                           'scala version is specified, then the version specified in '
                           '--scala-platform-suffix-version is used. For example for Scala '
                           '2.10.7 you would use the suffix version "2.10".')

    elif name.endswith(self.version):
      raise ValueError('The name "{0}" should not be suffixed with the scala platform version '
                      '({1}): it will be added automatically.'.format(name, self.version))

    return '{0}_{1}'.format(name, self.version)

  @property
  def repl(self):
    """Return the repl tool key."""
    return self._key_for_tool_version('scala-repl', self.version)

  @classmethod
  def compiler_library_target_spec(cls, buildgraph):
    """Returns a target spec for the scala compiler library.

    Synthesizes one into the buildgraph if necessary.

    :param pants.build_graph.build_graph.BuildGraph buildgraph: buildgraph object.
    :return a target spec:
    """
    return ScalaPlatform.global_instance()._library_target_spec(buildgraph, 'scalac',
                                                                cls._create_compiler_jardep)

  @classmethod
  def runtime_library_target_spec(cls, buildgraph):
    """Returns a target spec for the scala runtime library.

    Synthesizes one into the buildgraph if necessary.

    :param pants.build_graph.build_graph.BuildGraph buildgraph: buildgraph object.
    :return a target spec:
    """
    return ScalaPlatform.global_instance()._library_target_spec(buildgraph, 'scala-library',
                                                                cls._create_runtime_jardep)

  def _library_target_spec(self, buildgraph, key, create_jardep_func):
    # For custom platforms the user must define the //:<key> target; otherwise
    # synthesize a JarLibrary target (exactly once) and return its spec.
    if self.version == 'custom':
      return '//:{}'.format(key)
    else:
      synthetic_address = Address.parse('//:{}-synthetic'.format(key))
      if not buildgraph.contains_address(synthetic_address):
        jars = [create_jardep_func(self.version)]
        buildgraph.inject_synthetic_target(synthetic_address, JarLibrary, jars=jars)
      elif not buildgraph.get_target(synthetic_address).is_synthetic:
        raise buildgraph.ManualSyntheticTargetError(synthetic_address)

      return buildgraph.get_target(synthetic_address).address.spec
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import time
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document2
from notebook.connectors.base import Api, QueryError
LOG = logging.getLogger(__name__)
try:
from oozie.models2 import Workflow, WorkflowBuilder
from oozie.views.api import get_log as get_workflow_logs
from oozie.views.dashboard import check_job_access_permission, check_job_edition_permission
from oozie.views.editor2 import _submit_workflow
except Exception, e:
LOG.exception('Oozie application is not enabled: %s' % e)
class OozieApi(Api):
LOG_START_PATTERN = '(>>> Invoking Main class now >>>.+)'
LOG_END_PATTERN = '<<< Invocation of Main class completed <<<'
RESULTS_PATTERN = "(?P<results>>>> Invoking Beeline command line now >>>.+<<< Invocation of Beeline command completed <<<)"
RESULTS_PATTERN_GENERIC = "(?P<results>>>> Invoking Main class now >>>.+<<< Invocation of Main class completed <<<)"
RESULTS_PATTERN_MAPREDUCE = "(?P<results>.+)"
RESULTS_PATTERN_PIG = "(?P<results>>>> Invoking Pig command line now >>>.+<<< Invocation of Pig command completed <<<)"
BATCH_JOB_PREFIX = 'Hue_Batch'
SCHEDULE_JOB_PREFIX = 'Hue_Schedule'
def __init__(self, *args, **kwargs):
Api.__init__(self, *args, **kwargs)
self.fs = self.request.fs
self.jt = self.request.jt
def execute(self, notebook, snippet):
# Get document from notebook
if not notebook.get('uuid', ''):
raise PopupException(_('Notebook is missing a uuid, please save the notebook before executing as a batch job.'))
if notebook['type'] == 'notebook':
# Convert notebook to workflow
workflow_doc = WorkflowBuilder().create_notebook_workflow(notebook=notebook, user=self.user, managed=True, name=_("%s for %s") % (OozieApi.BATCH_JOB_PREFIX, notebook['name'] or notebook['type']))
workflow = Workflow(document=workflow_doc, user=self.user)
else:
notebook_doc = Document2.objects.get_by_uuid(user=self.user, uuid=notebook['uuid'], perm_type='read')
# Create a managed workflow from the notebook doc
workflow_doc = WorkflowBuilder().create_workflow(document=notebook_doc, user=self.user, managed=True, name=_("Batch job for %s") % (notebook_doc.name or notebook_doc.type))
workflow = Workflow(document=workflow_doc, user=self.user)
# Submit workflow
job_id = _submit_workflow(user=self.user, fs=self.fs, jt=self.jt, workflow=workflow, mapping=None)
return {
'id': job_id,
'has_result_set': True,
}
def check_status(self, notebook, snippet):
response = {'status': 'running'}
job_id = snippet['result']['handle']['id']
oozie_job = check_job_access_permission(self.request, job_id)
if oozie_job.is_running():
return response
elif oozie_job.status in ('KILLED', 'FAILED'):
raise QueryError(_('Job was %s') % oozie_job.status)
else:
# Check if job results are actually available, since YARN takes a while to move logs to JHS,
log_output = self.get_log(notebook, snippet)
if log_output:
results = self._get_results(log_output, snippet['type'])
if results:
response['status'] = 'available'
else:
LOG.warn('No log result could be matched for %s' % job_id)
else:
response['status'] = 'failed'
return response
def fetch_result(self, notebook, snippet, rows, start_over):
log_output = self.get_log(notebook, snippet)
results = self._get_results(log_output, snippet['type'])
return {
'data': [[line] for line in results.split('\n')], # hdfs_link()
'meta': [{'name': 'Header', 'type': 'STRING_TYPE', 'comment': ''}],
'type': 'table',
'has_more': False,
}
def cancel(self, notebook, snippet):
job_id = snippet['result']['handle']['id']
job = check_job_access_permission(self, job_id)
oozie_job = check_job_edition_permission(job, self.user)
oozie_job.kill()
return {'status': 0}
def get_log(self, notebook, snippet, startFrom=0, size=None):
job_id = snippet['result']['handle']['id']
oozie_job = check_job_access_permission(self.request, job_id)
return self._get_log_output(oozie_job)
def progress(self, snippet, logs):
job_id = snippet['result']['handle']['id']
oozie_job = check_job_access_permission(self.request, job_id)
return oozie_job.get_progress(),
def get_jobs(self, notebook, snippet, logs):
jobs = []
job_id = snippet['result']['handle']['id']
oozie_job = check_job_access_permission(self.request, job_id)
actions = oozie_job.get_working_actions()
for action in actions:
if action.externalId is not None:
jobs.append({
'name': action.externalId,
'url': reverse('jobbrowser.views.single_job', kwargs={'job': action.externalId}),
'started': action.startTime is not None,
'finished': action.endTime is not None
})
return jobs
def close_statement(self, snippet):
    """No-op: an Oozie statement holds no server-side resources to release."""
    pass
def close_session(self, session):
    """No-op: Oozie submissions are stateless, there is no session to close."""
    pass
def _get_log_output(self, oozie_workflow):
    """Poll for the workflow's task-attempt logs and return them as a string.

    YARN moves finished job logs to the Job History Server asynchronously, so
    the logs may not be available immediately; retry up to ``max_attempts``
    times with a 2 s sleep between attempts. Returns '' when nothing is found.
    """
    log_output = ''
    # Rebuild request.GET mutably so the format override can be injected.
    q = QueryDict(self.request.GET, mutable=True)
    q['format'] = 'python'  # Hack for triggering the good section in single_task_attempt_logs
    self.request.GET = q
    attempts = 0
    max_attempts = 10
    logs_found = False
    while not logs_found and attempts < max_attempts:
        # get_workflow_logs returns (logs_dict, actions, done); only the logs are used here.
        logs, workflow_actions, is_really_done = get_workflow_logs(self.request, oozie_workflow, make_links=False,
                                                                   log_start_pattern=self.LOG_START_PATTERN,
                                                                   log_end_pattern=self.LOG_END_PATTERN)
        if logs:
            # NOTE: dict.values()[0] is Python 2 only; py3 would need list(...)[0].
            log_output = logs.values()[0]
            if log_output.startswith('Unable to locate'):
                # Logs exist but are still being archived to the JHS — wait and retry.
                LOG.debug('Failed to get job attempt logs, possibly due to YARN archiving job to JHS. Will sleep and try again.')
                time.sleep(2.0)
            else:
                logs_found = True
        attempts += 1
    return log_output
def _get_results(self, log_output, action_type):
results = ''
if action_type == 'hive':
pattern = self.RESULTS_PATTERN
elif action_type == 'pig':
pattern = self.RESULTS_PATTERN_PIG
elif action_type == 'mapreduce':
pattern = self.RESULTS_PATTERN_MAPREDUCE
else:
pattern = self.RESULTS_PATTERN_GENERIC
re_results = re.compile(pattern, re.M | re.DOTALL)
if re_results.search(log_output):
results = re.search(re_results, log_output).group('results').strip()
return results
| |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 28 19:18:27 2016
@author: stuart
"""
import os
import os.path
import zipfile
from shutil import copyfile
import numpy as np
from analysis import Analysis
from ..reporting import data_sharing_reports as reports
from ..configuration.analysis_configuration import AnalysisConfiguration
from ..configuration.dataset_configuration import DatasetConfiguration
from ..configuration.base_configuration import Filter
from ..exceptions.handling import ExceptionHandler
from ..core.status import Status
import version as ver
class ShareAnalysis(Analysis):
    """Analysis specialised for PCWG-Share-X submissions.

    Skips calculations the share reports do not need, forces a power > 0
    filter on every dataset, auto-activates corrections for which input data
    is available, and computes the PCWG error metrics on construction.
    """

    def __init__(self, config):
        Analysis.__init__(self, config)
        # Error metrics are always computed as part of a share analysis.
        self.pcwg_share_metrics_calc()

    def calculate_power_deviation_matrices(self):
        #speed optimisation (output power deviation matrices not required for PCWG-Share-X)
        pass

    def calculate_sensitivity_analysis(self):
        #speed optimisation (sensitivity analysis not required for PCWG-Share-X)
        pass

    def calculate_scatter_metric(self):
        #speed optimisation (scatter metric not required for PCWG-Share-X)
        pass

    def load_dataset(self, dataset_config, analysis_config):
        # Exclude rows with power below zero before the standard load.
        power_filter = Filter(True, dataset_config.power, 'Below', False, 0.0)
        dataset_config.filters.append(power_filter)
        return Analysis.load_dataset(self, dataset_config, analysis_config)

    def loadData(self, config):
        Analysis.loadData(self, config)
        self.auto_activate_corrections()

    def auto_activate_corrections(self):
        """Switch on each correction whose required input data is present,
        saving the config if anything was activated."""
        Status.add("Automatically activating corrections based on available data.")
        save_conf = False
        if self.hasDensity:
            self.config.densityCorrectionActive = True
            Status.add("Density Correction activated.")
            save_conf = True
        if self.hubTurbulence in self.dataFrame.columns:
            self.config.turbRenormActive = True
            Status.add("TI Renormalisation activated.")
            save_conf = True
        if self.rewsDefined:
            self.config.rewsActive = True
            Status.add("REWS activated.")
            save_conf = True
        # NOTE: `unicode` makes this Python 2 only.
        if (type(self.config.specified_power_deviation_matrix.absolute_path) in (str, unicode)) and (len(self.config.specified_power_deviation_matrix.absolute_path) > 0):
            self.config.powerDeviationMatrixActive = True
            Status.add("PDM activated.")
            save_conf = True
        if save_conf:
            self.config.save()

    def pcwg_share_metrics_calc(self):
        """Compute per-row error fields plus overall and binned PCWG metrics.

        Raises when the power curve mode is not 'InnerMeasured', which the
        share report requires.
        """
        if self.powerCurveMode != "InnerMeasured":
            raise Exception("Power Curve Mode must be set to Inner to export PCWG Sharing Initiative 1 Report.")
        else:
            self.calculate_pcwg_error_fields()
            self.calculate_pcwg_overall_metrics()
            self.calculate_pcwg_binned_metrics()

    def calculate_pcwg_error_fields(self):
        """Add per-row error columns (predicted minus actual power) for the
        baseline and each active correction, and mark rows whose wind-speed
        bin is complete in the baseline power curve."""
        self.calculate_anonymous_values()
        self.pcwgErrorBaseline = 'Baseline Error'
        self.pcwgErrorCols = [self.pcwgErrorBaseline]
        self.dataFrame[self.pcwgErrorBaseline] = self.dataFrame[self.hubPower] - self.dataFrame[self.actualPower]
        if self.turbRenormActive:
            self.pcwgErrorTurbRenor = 'TI Renormalisation Error'
            self.dataFrame[self.pcwgErrorTurbRenor] = self.dataFrame[self.turbulencePower] - self.dataFrame[self.actualPower]
            self.pcwgErrorCols.append(self.pcwgErrorTurbRenor)
        if self.rewsActive:
            self.pcwgErrorRews = 'REWS Error'
            self.dataFrame[self.pcwgErrorRews] = self.dataFrame[self.rewsPower] - self.dataFrame[self.actualPower]
            self.pcwgErrorCols.append(self.pcwgErrorRews)
        if (self.turbRenormActive and self.rewsActive):
            self.pcwgErrorTiRewsCombined = 'Combined TI Renorm and REWS Error'
            self.dataFrame[self.pcwgErrorTiRewsCombined] = self.dataFrame[self.combinedPower] - self.dataFrame[self.actualPower]
            self.pcwgErrorCols.append(self.pcwgErrorTiRewsCombined)
        if self.powerDeviationMatrixActive:
            self.pcwgErrorPdm = 'PDM Error'
            self.dataFrame[self.pcwgErrorPdm] = self.dataFrame[self.powerDeviationMatrixPower] - self.dataFrame[self.actualPower]
            self.pcwgErrorCols.append(self.pcwgErrorPdm)
        # A bin is 'complete' when the baseline power curve has data in it;
        # only rows in complete bins count towards the error metrics.
        self.powerCurveCompleteBins = self.powerCurve.powerCurveLevels.index[self.powerCurve.powerCurveLevels[self.dataCount] > 0]
        self.number_of_complete_bins = len(self.powerCurveCompleteBins)
        self.pcwgErrorValid = 'Baseline Power Curve WS Bin Complete'
        self.dataFrame[self.pcwgErrorValid] = self.dataFrame[self.windSpeedBin].isin(self.powerCurveCompleteBins)

    def calculate_pcwg_overall_metrics(self):
        """Compute whole-dataset NME/NMAE for the baseline and each active correction."""
        self.overall_pcwg_err_metrics = {}
        NME, NMAE, data_count = self._calculate_pcwg_error_metric(self.pcwgErrorBaseline)
        self.overall_pcwg_err_metrics[self.dataCount] = data_count
        self.overall_pcwg_err_metrics['Baseline NME'] = NME
        self.overall_pcwg_err_metrics['Baseline NMAE'] = NMAE
        if self.turbRenormActive:
            NME, NMAE, _ = self._calculate_pcwg_error_metric(self.pcwgErrorTurbRenor)
            self.overall_pcwg_err_metrics['TI Renorm NME'] = NME
            self.overall_pcwg_err_metrics['TI Renorm NMAE'] = NMAE
        if self.rewsActive:
            NME, NMAE, _ = self._calculate_pcwg_error_metric(self.pcwgErrorRews)
            self.overall_pcwg_err_metrics['REWS NME'] = NME
            self.overall_pcwg_err_metrics['REWS NMAE'] = NMAE
        if (self.turbRenormActive and self.rewsActive):
            NME, NMAE, _ = self._calculate_pcwg_error_metric(self.pcwgErrorTiRewsCombined)
            self.overall_pcwg_err_metrics['REWS and TI Renorm NME'] = NME
            self.overall_pcwg_err_metrics['REWS and TI Renorm NMAE'] = NMAE
        if self.powerDeviationMatrixActive:
            NME, NMAE, _ = self._calculate_pcwg_error_metric(self.pcwgErrorPdm)
            self.overall_pcwg_err_metrics['PDM NME'] = NME
            self.overall_pcwg_err_metrics['PDM NMAE'] = NMAE

    def calculate_pcwg_binned_metrics(self):
        """Compute NME/NMAE per bin for several bin columns, plus the
        normalised-WS bins restricted to Inner and Outer range data."""
        reporting_bins = [self.normalisedWSBin, self.hourOfDay, self.calendarMonth, self.pcwgFourCellMatrixGroup, self.pcwgRange]
        if self.hasDirection:
            reporting_bins.append(self.pcwgDirectionBin)
        self.binned_pcwg_err_metrics = {}
        for bin_col_name in reporting_bins:
            self.binned_pcwg_err_metrics[bin_col_name] = {}
            self.binned_pcwg_err_metrics[bin_col_name][self.pcwgErrorBaseline] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorBaseline, bin_col_name)
            if self.turbRenormActive:
                self.binned_pcwg_err_metrics[bin_col_name][self.pcwgErrorTurbRenor] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorTurbRenor, bin_col_name)
            if self.rewsActive:
                self.binned_pcwg_err_metrics[bin_col_name][self.pcwgErrorRews] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorRews, bin_col_name)
            if (self.turbRenormActive and self.rewsActive):
                self.binned_pcwg_err_metrics[bin_col_name][self.pcwgErrorTiRewsCombined] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorTiRewsCombined, bin_col_name)
            if self.powerDeviationMatrixActive:
                self.binned_pcwg_err_metrics[bin_col_name][self.pcwgErrorPdm] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorPdm, bin_col_name)
        #Using Inner and Outer range data only to calculate error metrics binned by normalised WS
        bin_col_name = self.normalisedWSBin
        for pcwg_range in ['Inner', 'Outer']:
            dict_key = bin_col_name + ' ' + pcwg_range + ' Range'
            self.binned_pcwg_err_metrics[dict_key] = {}
            self.binned_pcwg_err_metrics[dict_key][self.pcwgErrorBaseline] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorBaseline, bin_col_name, pcwg_range = pcwg_range)
            if self.turbRenormActive:
                self.binned_pcwg_err_metrics[dict_key][self.pcwgErrorTurbRenor] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorTurbRenor, bin_col_name, pcwg_range = pcwg_range)
            if self.rewsActive:
                self.binned_pcwg_err_metrics[dict_key][self.pcwgErrorRews] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorRews, bin_col_name, pcwg_range = pcwg_range)
            if (self.turbRenormActive and self.rewsActive):
                self.binned_pcwg_err_metrics[dict_key][self.pcwgErrorTiRewsCombined] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorTiRewsCombined, bin_col_name, pcwg_range = pcwg_range)
            if self.powerDeviationMatrixActive:
                self.binned_pcwg_err_metrics[dict_key][self.pcwgErrorPdm] = self._calculate_pcwg_error_metric_by_bin(self.pcwgErrorPdm, bin_col_name, pcwg_range = pcwg_range)

    def _calculate_pcwg_error_metric_by_bin(self, candidate_error, bin_col_name, pcwg_range = 'All'):
        """Return a DataFrame of per-bin NME, NMAE and data count for the given
        error column, optionally restricted to the Inner or Outer range."""
        def sum_abs(x):
            return x.abs().sum()
        if pcwg_range == 'All':
            grouped = self.dataFrame.loc[self.dataFrame[self.pcwgErrorValid], :].groupby(bin_col_name)
        elif pcwg_range == 'Inner':
            grouped = self.dataFrame.loc[np.logical_and(self.dataFrame[self.pcwgErrorValid], (self.dataFrame[self.pcwgRange] == 'Inner')), :].groupby(bin_col_name)
        elif pcwg_range == 'Outer':
            grouped = self.dataFrame.loc[np.logical_and(self.dataFrame[self.pcwgErrorValid], (self.dataFrame[self.pcwgRange] == 'Outer')), :].groupby(bin_col_name)
        else:
            raise Exception('Unrecognised pcwg_range argument %s passed to Analysis._calculate_pcwg_error_metric_by_bin() method. Must be Inner, Outer or All.' % pcwg_range)
        # NME = sum(error)/sum(actual power); NMAE = sum(|error|)/sum(actual power), per bin.
        agg = grouped.agg({candidate_error: ['sum', sum_abs, 'count'], self.actualPower: 'sum'})
        agg.loc[:, (candidate_error, 'NME')] = agg.loc[:, (candidate_error, 'sum')] / agg.loc[:, (self.actualPower, 'sum')]
        agg.loc[:, (candidate_error, 'NMAE')] = agg.loc[:, (candidate_error, 'sum_abs')] / agg.loc[:, (self.actualPower, 'sum')]
        return agg.loc[:, candidate_error].drop(['sum', 'sum_abs'], axis = 1).rename(columns = {'count': self.dataCount})

    def _calculate_pcwg_error_metric(self, candidate_error):
        """Return (NME, NMAE, data_count) over all rows in complete bins."""
        data_count = len(self.dataFrame.loc[self.dataFrame[self.pcwgErrorValid], candidate_error].dropna())
        NME = (self.dataFrame.loc[self.dataFrame[self.pcwgErrorValid], candidate_error].sum() / self.dataFrame.loc[self.dataFrame[self.pcwgErrorValid], self.actualPower].sum())
        NMAE = (np.abs(self.dataFrame.loc[self.dataFrame[self.pcwgErrorValid], candidate_error]).sum() / self.dataFrame.loc[self.dataFrame[self.pcwgErrorValid], self.actualPower].sum())
        return NME, NMAE, data_count
class PcwgShare01Config(AnalysisConfiguration):
    """Fixed analysis configuration for a PCWG-Share-01 run.

    ``pcwg_inner_ranges`` maps each candidate inner-range id to its lower and
    upper turbulence-intensity (LTI/UTI) and shear (LSh/USh) bounds.
    """

    # Candidate inner-range definitions tried in turn by PcwgShare01.
    pcwg_inner_ranges = {'A': {'LTI': 0.08, 'UTI': 0.12, 'LSh': 0.05, 'USh': 0.25},
                         'B': {'LTI': 0.05, 'UTI': 0.09, 'LSh': 0.05, 'USh': 0.25},
                         'C': {'LTI': 0.1, 'UTI': 0.14, 'LSh': 0.1, 'USh': 0.3}}

    def __init__(self, dataset, inner_range_id):
        AnalysisConfiguration.__init__(self)
        self.inner_range_id = inner_range_id
        self.set_config_values(dataset)

    def set_config_values(self, dataset):
        """Apply the mandated Share-01 settings plus the chosen inner-range bounds."""
        self.powerCurveMinimumCount = 10
        self.filterMode = "All"
        self.baseLineMode = "Hub"
        self.interpolationMode = self.get_interpolation_mode()
        self.powerCurveMode = "InnerMeasured"
        self.powerCurvePaddingMode = "Max"
        self.nominalWindSpeedDistribution = None
        self.powerCurveFirstBin = 1.0
        self.powerCurveLastBin = 30.0
        self.powerCurveBinSize = 1.0
        self.innerRangeLowerTurbulence = PcwgShare01Config.pcwg_inner_ranges[self.inner_range_id]['LTI']
        self.innerRangeUpperTurbulence = PcwgShare01Config.pcwg_inner_ranges[self.inner_range_id]['UTI']
        self.innerRangeLowerShear = PcwgShare01Config.pcwg_inner_ranges[self.inner_range_id]['LSh']
        self.innerRangeUpperShear = PcwgShare01Config.pcwg_inner_ranges[self.inner_range_id]['USh']
        self.specifiedPowerCurve = None
        # All corrections start disabled; ShareAnalysis auto-activates them
        # from the available data.
        self.densityCorrectionActive = False
        self.turbRenormActive = False
        self.rewsActive = False
        self.powerDeviationMatrixActive = False
        self.specified_power_deviation_matrix.absolute_path = os.getcwd() + os.sep + 'Data' + os.sep + 'HypothesisMatrix.xml'
        self.datasets.append_absolute(dataset.path)

    def get_interpolation_mode(self):
        # Overridden by PcwgShare01dot1Config to return "Marmander".
        return "Cubic"
class PcwgShare01dot1Config(PcwgShare01Config):
    """Share-01.1 configuration: identical to Share-01 except the power curve
    interpolation mode is Marmander instead of Cubic."""

    def get_interpolation_mode(self):
        return "Marmander"
class PcwgShare01:
    """Runs a single-dataset PCWG-Share-01 calculation and exports its report.

    Each candidate inner-range definition is tried until one yields an
    analysis with enough complete power-curve bins; the anonymised report is
    then written into the supplied zip archive.

    Improvements: ``== None`` replaced with ``is None``; the boolean helper
    returns the comparison directly instead of if/else True/False.
    """

    # Minimum number of complete baseline power-curve bins for a valid share.
    MINIMUM_COMPLETE_BINS = 10

    def __init__(self, dataset, output_zip):
        self.dataset = dataset
        self.calculate()
        if self.success:
            self.export_report(output_zip)
        else:
            Status.add("Calculation unsuccessful. No results to export.", red = True)

    def calculate(self):
        self.analysis, self.success = self.calculate_best_inner_range()

    def calculate_best_inner_range(self):
        """Return (analysis, True) for the first inner range producing enough
        complete bins, else (None, False).

        NOTE(review): iteration order over pcwg_inner_ranges is plain dict
        order, which is arbitrary on Python 2 — 'first' is not guaranteed to
        be range 'A'. Confirm whether a deterministic order is intended.
        """
        successes = 0
        for inner_range_id in PcwgShare01Config.pcwg_inner_ranges:
            analysis, success = self.attempt_calculation(self.dataset, inner_range_id)
            if success:
                successes += 1
                if self._is_sufficient_complete_bins(analysis):
                    Status.add("Inner Range {0} Selected with {1} complete bins.".format(inner_range_id, analysis.number_of_complete_bins))
                    return (analysis, True)
        if successes < 1:
            Status.add("No successful calculation for any inner range")
            return (None, False)
        else:
            Status.add("No successful calculation for any inner range (insufficient complete bins).")
            return (None, False)

    def _is_sufficient_complete_bins(self, analysis):
        #Todo refine to be fully consistent with PCWG-Share-01 definition document
        return analysis.number_of_complete_bins >= PcwgShare01.MINIMUM_COMPLETE_BINS

    def new_config(self, dataset, inner_range_id):
        """Factory hook; subclasses override to supply their config flavour."""
        return PcwgShare01Config(dataset, inner_range_id)

    def attempt_calculation(self, dataset, inner_range_id):
        """Run one ShareAnalysis for the given inner range.

        Returns (analysis, True) on success, (None, False) on failure. The
        temporary config file is removed on both paths.
        """
        temp_path = "temp_config.xml"
        config = self.new_config(dataset, inner_range_id)
        config.save(temp_path)
        Status.add("Attempting PCWG analysis using Inner Range definition %s." % inner_range_id)
        try:
            analysis = ShareAnalysis(config)
            if not self._is_sufficient_complete_bins(analysis):
                raise Exception('Insufficient complete power curve bins')
            os.remove(temp_path)
            Status.add("Analysis success using Inner Range definition %s." % inner_range_id)
            return (analysis, True)
        except ExceptionHandler.ExceptionType as e:
            Status.add(str(e), red = True)
            os.remove(temp_path)
            Status.add("Analysis failed using Inner Range definition %s." % inner_range_id, red = True)
            return (None, False)

    def export_report(self, output_zip):
        """Write the anonymised report to a temporary xls, add it to the zip,
        then delete the temporary file."""
        if self.analysis is None:
            Status.add("ERROR: Analysis not yet calculated", red = True)
            return
        if not self.analysis.hasActualPower or not self.analysis.config.turbRenormActive:
            Status.add("ERROR: Anonymous report can only be generated if analysis has actual power and turbulence renormalisation is active.", red = True)
            return
        try:
            self.analysis.pcwg_share_metrics_calc()
            if not self._is_sufficient_complete_bins(self.analysis):
                Status.add('Insufficient complete power curve bins', red = True)
            else:
                temp_file_name = "{0}.xls".format(self.analysis.uniqueAnalysisId)
                Status.add("Exporting results to {0}".format(temp_file_name))
                self.pcwg_data_share_report(output_fname = temp_file_name)
                Status.add("Report written to {0}".format(temp_file_name))
                Status.add("Adding {0} to output zip.".format(temp_file_name))
                output_zip.write(temp_file_name)
                Status.add("{0} added to output zip.".format(temp_file_name))
                Status.add("Deleting {0}.".format(temp_file_name))
                os.remove(temp_file_name)
        except ExceptionHandler.ExceptionType as e:
            Status.add("ERROR Exporting Report: %s" % e, red = True)

    def pcwg_data_share_report(self, output_fname):
        """Build and run the Share-01 report for this analysis; returns the report."""
        rpt = reports.pcwg_share1_rpt(self.analysis, template = "Share_1_template.xls", version = ver.version, output_fname = output_fname, pcwg_inner_ranges = PcwgShare01Config.pcwg_inner_ranges)
        rpt.report()
        return rpt
class PcwgShare01dot1(PcwgShare01):
    """Share-01.1 variant: same workflow as PcwgShare01, but built on the
    01.1 configuration (Marmander interpolation)."""

    def new_config(self, dataset, inner_range_id):
        return PcwgShare01dot1Config(dataset, inner_range_id)
class BaseSharePortfolio(object):
    """Runs a share calculation for every dataset in a portfolio and bundles
    per-dataset reports plus a summary into a zip archive.

    Subclasses implement share_name() and new_share().

    Fix: the "Dataset ... loaded" status format string had one placeholder but
    two arguments, so the dataset name was silently dropped from the message.
    The is_invalid_* helpers are also simplified to direct boolean expressions.
    """

    def __init__(self, portfolio_configuration):
        Status.add("Running Portfolio: {0}".format(self.share_name()))
        self.portfolio_path = portfolio_configuration.path
        # Results are named after the portfolio file (extension stripped) and
        # written alongside it.
        self.results_base_path = os.path.join(os.path.dirname(self.portfolio_path), self.portfolio_path.split('/')[-1].split('.')[0])
        self.portfolio = portfolio_configuration
        self.calculate()

    def share_name(self):
        raise Exception("Not implemented")

    def calculate(self):
        """Run every dataset, collecting successful shares, then write the summary."""
        Status.add("Running portfolio: {0}".format(self.portfolio_path))
        self.shares = []
        zip_file = "{0} ({1}).zip".format(self.results_base_path, self.share_name())
        summary_file = "{0} ({1}).xls".format(self.results_base_path, self.share_name())
        # Start from a clean slate so stale results are never shipped.
        if os.path.exists(zip_file):
            os.remove(zip_file)
        if os.path.exists(summary_file):
            os.remove(summary_file)
        Status.add("Detailed results will be stored in: {0}".format(zip_file))
        Status.add("Summary results will be stored in: {0}".format(summary_file))
        with zipfile.ZipFile(zip_file, 'w') as output_zip:
            for index, item in enumerate(self.portfolio.datasets):
                Status.add("Loading dataset {0}".format(index + 1))
                dataset = DatasetConfiguration(item.absolute_path)
                # Fix: second placeholder was missing, losing the dataset name.
                Status.add("Dataset {0} loaded = {1}".format(index + 1, dataset.name))
                Status.add("Verifying dataset {0}".format(dataset.name))
                if self.verify_share_configs(dataset) == False:
                    Status.add("Dataset Verification Failed for {0}".format(dataset.name), red=True)
                else:
                    Status.add("Dataset {0} Verified".format(dataset.name))
                    Status.add("Running: {0}".format(dataset.name))
                    share = self.new_share(dataset, output_zip)
                    if share.success:
                        self.shares.append(share)
            if len(self.shares) < 1:
                Status.add("No successful results to summarise")
            # NOTE(review): the summary is still written even with zero
            # successful shares — presumably PortfolioReport handles an empty
            # list; confirm.
            self.report_summary(summary_file, output_zip)
        Status.add("Portfolio Run Complete")

    def verify_share_configs(self, config):
        """Return True when the dataset config defines every field a share needs."""
        valid = True
        if self.is_invalid_float(config.cutInWindSpeed):
            Status.add("Cut in wind speed not defined", red=True)
            valid = False
        if self.is_invalid_float(config.cutOutWindSpeed):
            Status.add("Cut out wind speed not defined", red=True)
            valid = False
        if self.is_invalid_float(config.ratedPower):
            Status.add("Rated Power not defined", red=True)
            valid = False
        if self.is_invalid_float(config.diameter):
            Status.add("Diameter not defined", red=True)
            valid = False
        if self.is_invalid_float(config.hubHeight):
            Status.add("Hub height not defined", red=True)
            valid = False
        if self.is_invalid_string(config.power):
            Status.add("Power not defined", red=True)
            valid = False
        # Turbulence may come either directly or via a reference std dev.
        if self.is_invalid_string(config.hubTurbulence):
            if self.is_invalid_string(config.referenceWindSpeedStdDev):
                Status.add("No turbulence defined", red=True)
                valid = False
        if self.is_invalid_list(config.referenceShearMeasurements):
            Status.add("No shear defined", red=True)
            valid = False
        return valid

    def is_invalid_list(self, value):
        """A list value is invalid when missing or empty."""
        return value is None or len(value) < 1

    def is_invalid_string(self, value):
        """A string value is invalid when missing or blank."""
        return value is None or len(value.strip()) < 1

    def is_invalid_float(self, value):
        """A numeric value is invalid only when missing."""
        return value is None

    def report_summary(self, summary_file, output_zip):
        """Write the portfolio summary xls, add a copy named Summary.xls to the
        zip, then delete the copy."""
        Status.add("Exporting results to {0}".format(summary_file))
        report = reports.PortfolioReport()
        report.report(self.shares, summary_file)
        Status.add("Report written to {0}".format(summary_file))
        summary_file_for_zip = "Summary.xls"
        if os.path.isfile(summary_file_for_zip):
            os.remove(summary_file_for_zip)
        Status.add("Copying to {0}".format(summary_file_for_zip))
        copyfile(summary_file, summary_file_for_zip)
        Status.add("Adding {0} to output zip.".format(summary_file_for_zip))
        output_zip.write(summary_file_for_zip)
        Status.add("{0} added to output zip.".format(summary_file_for_zip))
        Status.add("Deleting {0}".format(summary_file_for_zip))
        os.remove(summary_file_for_zip)

    def new_share(self, dataset, output_zip):
        raise Exception("Not implemented")
class PcwgShare01Portfolio(BaseSharePortfolio):
    """Portfolio runner for PCWG-Share-01."""

    def __init__(self, portfolio_configuration):
        BaseSharePortfolio.__init__(self, portfolio_configuration)

    def share_name(self):
        return "PCWG-Share-01"

    def new_share(self, dataset, output_zip):
        return PcwgShare01(dataset, output_zip = output_zip)
class PcwgShare01dot1Portfolio(BaseSharePortfolio):
    """Portfolio runner for PCWG-Share-01.1."""

    def __init__(self, portfolio_configuration):
        BaseSharePortfolio.__init__(self, portfolio_configuration)

    def new_share(self, dataset, output_zip):
        return PcwgShare01dot1(dataset, output_zip = output_zip)

    def share_name(self):
        return "PCWG-Share-01.1"
| |
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
import httplib as http
from django.utils import timezone
from django.core.exceptions import ValidationError
from requests.exceptions import SSLError
from flask import request
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from addons.base import generic_views
from addons.dataverse import client
from addons.dataverse.models import DataverseProvider
from addons.dataverse.settings import DEFAULT_HOSTS
from addons.dataverse.serializer import DataverseSerializer
from dataverse.exceptions import VersionJsonNotFoundError
from website.oauth.models import ExternalAccount
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_contributor_or_public
)
from website.util import rubeus, api_url_for
from website.util.sanitize import assert_clean
SHORT_NAME = 'dataverse'
FULL_NAME = 'Dataverse'

# Standard add-on views produced by the shared generic_views factories;
# each is a ready-made Flask view bound to this add-on's serializer.
dataverse_account_list = generic_views.account_list(
    SHORT_NAME,
    DataverseSerializer
)

dataverse_import_auth = generic_views.import_auth(
    SHORT_NAME,
    DataverseSerializer
)

dataverse_deauthorize_node = generic_views.deauthorize_node(
    SHORT_NAME
)

dataverse_get_config = generic_views.get_config(
    SHORT_NAME,
    DataverseSerializer
)
## Auth ##
@must_be_logged_in
def dataverse_user_config_get(auth, **kwargs):
    """View for getting a JSON representation of the logged-in user's
    Dataverse user settings.
    """
    user_addon = auth.user.get_addon('dataverse')
    return {
        'result': {
            'userHasAuth': user_addon.has_auth if user_addon else False,
            'urls': {
                'create': api_url_for('dataverse_add_user_account'),
                'accounts': api_url_for('dataverse_account_list'),
            },
            'hosts': DEFAULT_HOSTS,
        },
    }, http.OK
## Config ##
@must_be_logged_in
def dataverse_add_user_account(auth, **kwargs):
    """Verifies new external account credentials and adds to user's list.

    Expects a JSON body with 'host' and 'api_token'; credentials are validated
    against the Dataverse server before anything is stored.
    """
    user = auth.user
    provider = DataverseProvider()
    host = request.json.get('host').rstrip('/')
    api_token = request.json.get('api_token')

    # Verify that credentials are valid
    client.connect_or_error(host, api_token)

    # Note: `DataverseSerializer` expects display_name to be a URL
    try:
        provider.account = ExternalAccount(
            provider=provider.short_name,
            provider_name=provider.name,
            display_name=host,  # no username; show host
            oauth_key=host,  # hijacked; now host
            oauth_secret=api_token,  # hijacked; now api_token
            provider_id=api_token,  # Change to username if Dataverse allows
        )
        provider.account.save()
    except ValidationError:
        # ... or get the old one
        # A uniqueness violation means this token is already registered;
        # reuse the existing external account instead of creating a new one.
        provider.account = ExternalAccount.objects.get(
            provider=provider.short_name,
            provider_id=api_token
        )

    # Link the account to the user if it is not already linked.
    if not user.external_accounts.filter(id=provider.account.id).exists():
        user.external_accounts.add(provider.account)

    user_addon = auth.user.get_addon('dataverse')
    if not user_addon:
        user.add_addon('dataverse')
    user.save()

    # Need to ensure that the user has dataverse enabled at this point
    user.get_or_add_addon('dataverse', auth=auth)
    user.save()

    return {}
@must_have_permission('write')
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dataverse_set_config(node_addon, auth, **kwargs):
    """Saves selected Dataverse and dataset to node settings.

    Expects a JSON body with 'dataverse.alias' and 'dataset.doi'.
    Raises HTTPError(403) when the addon is owned by another user,
    406 on unsanitary input, and 400 when alias or doi is missing.
    """
    user_settings = node_addon.user_settings
    user = auth.user

    if user_settings and user_settings.owner != user:
        raise HTTPError(http.FORBIDDEN)

    try:
        assert_clean(request.json)
    except AssertionError:
        # TODO: Test me!
        raise HTTPError(http.NOT_ACCEPTABLE)

    alias = request.json.get('dataverse', {}).get('alias')
    doi = request.json.get('dataset', {}).get('doi')

    if doi is None or alias is None:
        # Fix: this previously *returned* the HTTPError instance instead of
        # raising it, so no 400 response was ever produced for missing fields.
        raise HTTPError(http.BAD_REQUEST)

    connection = client.connect_from_settings(node_addon)
    dataverse = client.get_dataverse(connection, alias)
    dataset = client.get_dataset(dataverse, doi)

    node_addon.set_folder(dataverse, dataset, auth)

    return {'dataverse': dataverse.title, 'dataset': dataset.title}, http.OK
@must_have_permission('write')
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
def dataverse_get_datasets(node_addon, **kwargs):
    """Get list of datasets from provided Dataverse alias"""
    alias = request.json.get('alias')
    connection = client.connect_from_settings(node_addon)
    dataverse = client.get_dataverse(connection, alias)
    dataset_list = [
        {'title': dataset.title, 'doi': dataset.doi}
        for dataset in client.get_datasets(dataverse)
    ]
    return {
        'alias': alias,  # include alias to verify dataset container
        'datasets': dataset_list,
    }, http.OK
## Crud ##
@must_have_permission('write')
@must_not_be_registration
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
def dataverse_publish_dataset(node_addon, auth, **kwargs):
    """Publish the node's linked dataset, optionally publishing the parent
    Dataverse first (JSON body flag 'publish_both'), and log the action.
    """
    node = node_addon.owner
    publish_both = request.json.get('publish_both', False)
    now = timezone.now()

    connection = client.connect_from_settings_or_401(node_addon)
    dataverse = client.get_dataverse(connection, node_addon.dataverse_alias)
    dataset = client.get_dataset(dataverse, node_addon.dataset_doi)

    if publish_both:
        client.publish_dataverse(dataverse)
    client.publish_dataset(dataset)

    # Add a log
    node.add_log(
        action='dataverse_dataset_published',
        params={
            'project': node.parent_id,
            'node': node._id,
            'dataset': dataset.title,
        },
        auth=auth,
        log_date=now,
    )

    return {'dataset': dataset.title}, http.OK
## HGRID ##
def _dataverse_root_folder(node_addon, auth, **kwargs):
    """Build the rubeus/HGrid root-folder description for the linked dataset.

    Returns a one-element list (the add-on root) or [] when no dataset is
    linked, the DOI resolves to nothing, or an unpublished dataset is viewed
    by a non-editor.
    """
    node = node_addon.owner

    default_version = 'latest-published'
    # NOTE(review): both branches of this ternary yield 'latest-published';
    # `version` only changes below when a draft exists and the user can edit.
    version = 'latest-published' if not node.can_edit(auth) else default_version

    # Quit if no dataset linked
    if not node_addon.complete:
        return []

    can_edit = node.can_edit(auth)

    permissions = {
        'edit': can_edit and not node.is_registration,
        'view': node.can_view(auth)
    }

    try:
        connection = client.connect_from_settings(node_addon)
        dataverse = client.get_dataverse(connection, node_addon.dataverse_alias)
        dataset = client.get_dataset(dataverse, node_addon.dataset_doi)
    except SSLError:
        # Connection failed: fall back to a bare root with no remote metadata.
        return [rubeus.build_addon_root(
            node_addon,
            node_addon.dataset,
            permissions=permissions,
            private_key=kwargs.get('view_only', None),
        )]

    # Quit if doi does not produce a dataset
    if dataset is None:
        return []

    published_files = client.get_files(dataset, published=True)

    # Produce draft version or quit if no published version is available
    if not published_files:
        if can_edit:
            version = 'latest'
        else:
            return []

    urls = {
        'publish': node.api_url_for('dataverse_publish_dataset'),
    }

    # determine if there are any changes between the published and draft
    # versions of the dataset
    try:
        dataset.get_metadata('latest-published')
        dataset_is_published = True
        dataset_draft_modified = dataset.get_state() == 'DRAFT'
    except VersionJsonNotFoundError:
        # No published version exists yet: everything is an unsaved draft.
        dataset_is_published = False
        dataset_draft_modified = True

    # Get the dataverse host
    # (stored in oauth_key because dataverse doesn't use that)
    dataverse_host = node_addon.external_account.oauth_key

    return [rubeus.build_addon_root(
        node_addon,
        node_addon.dataset,
        urls=urls,
        permissions=permissions,
        dataset=node_addon.dataset,
        doi=dataset.doi,
        dataverse=dataverse.title,
        hasPublishedFiles=bool(published_files),
        dataverseIsPublished=dataverse.is_published,
        datasetIsPublished=dataset_is_published,
        datasetDraftModified=dataset_draft_modified,
        version=version,
        host=dataverse_host,
        private_key=kwargs.get('view_only', None),
    )]
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_root_folder(node_addon, auth, **kwargs):
    """Public HGrid endpoint: delegates to the shared root-folder builder."""
    return _dataverse_root_folder(node_addon, auth=auth)
## Widget ##
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_widget(node_addon, **kwargs):
    """Return the widget configuration for the node's Dataverse add-on."""
    node = node_addon.owner
    payload = {
        'complete': node_addon.complete,
        'widget_url': node.api_url_for('dataverse_get_widget_contents'),
    }
    payload.update(node_addon.config.to_json())
    return payload, http.OK
@must_be_contributor_or_public
@must_have_addon(SHORT_NAME, 'node')
def dataverse_get_widget_contents(node_addon, **kwargs):
    """Return the widget payload describing the linked Dataverse dataset.

    Responds with {'connected': False} when no dataset is configured, and
    HTTP 400 when the stored DOI no longer resolves to a dataset.
    """
    data = {
        'connected': False,
    }

    if not node_addon.complete:
        return {'data': data}, http.OK

    doi = node_addon.dataset_doi
    alias = node_addon.dataverse_alias

    connection = client.connect_from_settings_or_401(node_addon)
    dataverse = client.get_dataverse(connection, alias)
    dataset = client.get_dataset(dataverse, doi)

    if dataset is None:
        return {'data': data}, http.BAD_REQUEST

    # Host is stashed in oauth_key (Dataverse does not use an OAuth key proper).
    dataverse_host = node_addon.external_account.oauth_key
    # NOTE(review): plain http:// here while the dataset URL uses https —
    # confirm whether this should also be https.
    dataverse_url = 'http://{0}/dataverse/{1}'.format(dataverse_host, alias)
    dataset_url = 'https://doi.org/' + doi

    data.update({
        'connected': True,
        'dataverse': node_addon.dataverse,
        'dataverseUrl': dataverse_url,
        'dataset': node_addon.dataset,
        'doi': doi,
        'datasetUrl': dataset_url,
        'citation': dataset.citation,
    })
    return {'data': data}, http.OK
| |
"""
This file is part of PUQ
Copyright (c) 2013-2016 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import sys
import puq
import numpy as np
import copy
import pymc
from scipy.stats.kde import gaussian_kde
class Calibrate(object):
"""
Bayesian calibration of variables.
Args:
model(string or function): An expression or function.
The standard python math functions defined at
https://docs.python.org/2/library/math.html are implemented.
'pi' and 'e' are not defined.
cvars(dict): Dictionary containing calibration variable
prior PDFs and calibration types. Example:
{
'a': {'prior': 'Uniform(5,20)', 'type': 'D'},
'b': {'prior': 'Normal(1,100)', 'type': 'S'},
}
nvars(dict): A dictionary of variable names
and data arrays representing observed values.
Example: {'x': xdata, 'y': ydata} where
the data arrays are nx2 (two- column) with the first
column values and the second column deviations (measurement errors).
outvar(dict): Dictionary with one entry containing the
observed output data and uncertainty as a two-column array.
Returns: A Calibrate object. You should use it to call the run method
to do the actual MCMC calibration.
-------------------
Prior PDFs:
Normal(mean,dev) : For example, "Normal(100,1)"
Uniform(min,max) : For example, "Uniform(100,120)"
Calibration Types:
'D': Deterministic. Calibrate for a fixed value.
'S': Stochastic. Calibrate the variable mean and deviation.
"""
# model can be response surface, equation, python function
    def __init__(self, model, cvars, nvars, outvar, MAP='fmin_powell'):
        """Build the pymc model for Bayesian calibration.

        Args:
            model: A callable, or a string equation of the form
                'outvarname = expression' (the output name may appear on
                either side of the '=').
            cvars (dict): Calibration variables. Each value is a dict with
                'prior' (e.g. "Normal(100,1)" or "Uniform(0,1)") and
                'type' ('D' deterministic, 'S' stochastic).
            nvars (dict): Non-calibration variables with measured data as
                two-column (value, deviation) arrays.
            outvar (dict): Single entry mapping the output variable name to
                a two-column (value, deviation) array of observations.
            MAP: scipy optimizer name used for a maximum a posteriori fit to
                seed the MCMC start point, or None to skip the MAP step.

        Raises:
            ValueError: If the model expression is malformed or uses
                variables not present in cvars/nvars.
        """
        out_var_name = list(outvar.keys())[0]
        if callable(model):
            # model is a python function
            self.model = model
        else:
            try:
                if model.find('=') != -1:
                    # Accept the output variable on either side of '='.
                    m = model.split('=')
                    if m[0].strip() == out_var_name:
                        model = m[1].strip()
                    elif m[1].strip() == out_var_name:
                        model = m[0].strip()
                    else:
                        # Neither side names the output variable. A bare
                        # 'raise' with no active exception is itself an error,
                        # but it is swallowed by the bare 'except' below, which
                        # converts it into the ValueError.
                        raise
                model = sympy.sympify(model, _clash)
            except:
                err = "Model expression must be of form 'varname=expression'\
and varname must have measured data in the data table."
                raise ValueError(err)
            # find our variable names
            val_names = list(map(str, model.free_symbols))
            # all variables in the model must be defined
            if set(val_names) != set(nvars.keys()).union(set(cvars.keys())):
                missing = list(set(val_names).difference(set(nvars.keys()).union(set(cvars.keys()))))
                if missing == []:
                    errstr = 'Error: Model did not use all parameters'
                else:
                    errstr = "Error: Not all parameters in the model were defined."
                    errstr += "\'%s\' not defined." % missing[0]
                raise ValueError(errstr)
            # turn our symbolic expression into a fast, safe function call
            self.model = lambdify(model.free_symbols, model, dummify=False, modules=['numpy', 'mpmath', 'sympy'])
        out_var = outvar[out_var_name]
        var = {}
        means = {}
        devs = {}
        dlen = out_var.shape[0]
        # Default sampling parameters; scaled up 10x below when any 'S'
        # (stochastic) variable is present.
        self.num_samples = 100000
        self.num_burn = 20000
        self.num_thin = 8
        # Calibration variables
        for v in cvars.keys():
            v = str(v)  # pymc doesn't like unicode
            # convert to lowercase and parse, e.g. "Normal(100,1)" -> ['normal', 100.0, 1.0]
            d = cvars[v]['prior'].lower().replace('(', ',').replace(')', '').split(',')
            d[1] = float(d[1])
            d[2] = float(d[2])
            if cvars[v]['type'] == 'S':
                # Stochastic calibration: learn both the mean and deviation.
                if d[0] == 'normal':
                    # normal prior with mean d[1] and deviation d[2]
                    means[v] = pymc.Normal(v + '_mean', mu=d[1], tau=1 / d[2] ** 2, value=d[1])
                    values = np.linspace(d[1] - 3 * d[2], d[1] + 3 * d[2], out_var.shape[0])
                    dval = d[2]
                elif d[0] == 'uniform':
                    # uniform prior from d[1] to d[2]
                    means[v] = pymc.Uniform(v + '_mean', d[1], d[2], value=(d[1] + d[2]) / 2.0)
                    values = np.linspace(d[1], d[2], out_var.shape[0])
                    dval = 1
                else:
                    # Any other prior name falls back to a Jeffreys prior on the mean.
                    start_val = (d[1] + d[2]) / 2.0
                    values = np.linspace(d[1], d[2], out_var.shape[0])
                    dval = 1
                    means[v] = pymc.Stochastic(name=v + '_mean',
                                               logp=lambda value: -np.log(value),
                                               doc='',
                                               parents={},
                                               value=start_val)
                # Jeffrey prior for deviation
                devs[v] = pymc.Stochastic(name=v + '_dev',
                                          logp=lambda value: -np.log(value),
                                          doc='',
                                          parents={},
                                          value=dval)
                # to get a reliable deviation, we need more samples
                if self.num_samples < 1000000:
                    self.num_samples *= 10
                    self.num_burn *= 10
                    self.num_thin *= 10
                # create a stochastic node for pymc with the mean and
                # dev from above and some initial values to try
                var[v] = pymc.Normal(v, mu=means[v], tau=1.0 / devs[v] ** 2, value=values)
            else:
                # Deterministic calibration: a single fixed (but unknown) value.
                if d[0] == 'normal':
                    var[v] = pymc.Normal(v, mu=d[1], tau=1 / d[2] ** 2)
                elif d[0] == 'uniform':
                    var[v] = pymc.Uniform(v, lower=d[1], upper=d[2])
                elif d[0] == 'jeffreys':
                    start_val = (d[1] + d[2]) / 2.0
                    var[v] = pymc.Stochastic(name=v, doc='',
                                             logp=lambda value: -np.log(value),
                                             parents={},
                                             value=start_val)
                else:
                    # NOTE(review): 'return None' from __init__ leaves a
                    # half-constructed object; raising would be safer.
                    print('Unknown probability distribution: %s' % d[0])
                    return None
        # Non-calibration variables initially contribute only their measured
        # values (column 0); measurement error is folded in further below.
        for v in nvars.keys():
            var[v] = nvars[v][:, 0]
        results = pymc.Deterministic(
            eval=self.model,
            name='results',
            parents=var,
            doc='',
            trace=True,
            verbose=0,
            dtype=float,
            plot=False,
            cache_depth=2)
        mdata = out_var[:, 0]
        mdata_err = out_var[:, 1]
        # Observed output node: measured data with per-point precision.
        mcmc_model_out = pymc.Normal('model_out', mu=results, tau=1.0 / mdata_err ** 2, value=mdata, observed=True)
        self.mcmc_model = pymc.Model(list(var.values()) + list(means.values()) + list(devs.values()) + [mcmc_model_out])
        if MAP is not None:
            # compute MAP and use that as start for MCMC
            map_ = pymc.MAP(self.mcmc_model)
            map_.fit(method=MAP)
            print('\nmaximum a posteriori (MAP) using', MAP)
            for v in cvars.keys():
                print('%s=%s' % (v, var[v].value))
            print()
        # NOT calibration variables
        for v in nvars.keys():
            data = nvars[v][:, 0]
            err = nvars[v][:, 1]
            if np.all(err <= 0.0):
                # No usable error information: treat as exact data.
                var[v] = data
            else:
                # Avoid division by zero in tau for error-free points.
                err[err == 0] = 1e-100
                # norm_err = pymc.Normal(v + '_err', mu=0, tau=1.0 / err ** 2)
                # var[v] = data + norm_err
                var[v] = pymc.Normal(v + '_err', mu=data, tau=1.0 / err ** 2)
        # Rebuild the deterministic node and model now that nvars with nonzero
        # error are stochastic nodes.
        # NOTE(review): mcmc_model_out still has the *earlier* 'results' node
        # as its mu parent; confirm pymc rewires this as intended.
        results = pymc.Deterministic(
            eval=self.model,
            name='results',
            parents=var,
            doc='',
            trace=True,
            verbose=0,
            dtype=float,
            plot=False,
            cache_depth=2)
        self.mcmc_model = pymc.Model(list(var.values()) + list(means.values()) + list(devs.values()) + [mcmc_model_out])
        self.cvars = cvars
        self.var = var
        self.dlen = dlen
        self.means = means
        self.devs = devs
    def run(self, samples=None, progress=True):
        """
        Perform MCMC calibration. Returns pdfs and MCMC traces.
        Args:
            samples(integer or tuple): A tuple containing the
                number of samples, the number to burn, and the number to thin.
                If samples is an integer, burn will be 20% of the samples and
                thin will be 8. Default will use between 10000 and 1000000
                samples, depending on the number of stochastic variables
                being calibrated.
            progress(boolean): If True, will display a progress bar.
        Returns(tuple):
            Returns a tuple containing cvars and a pdf.
            cvars is modified to include key 'trace'
            which will be an array. It will also have a key 'pdf' which
            will be a PDF function. For GAUSSIAN type, it will also
            include traces 'mtrace' and 'dtrace' and 'jpdf' corresponding
            to the mean and deviation traces and the joint PDF.
        """
        # Resolve the sampling schedule from the argument or the defaults
        # computed in __init__.
        if samples is None:
            num_samples = self.num_samples
            num_burn = self.num_burn
            num_thin = self.num_thin
        else:
            if type(samples) == tuple:
                if len(samples) != 3:
                    raise ValueError("Error: samples should be a number or tuple of length 3.")
                num_samples, num_burn, num_thin = samples
            else:
                num_samples = samples
                num_burn = int(samples * 0.20)
                num_thin = 8
        # NOTE(review): the sampler is stored as a *class* attribute, so
        # concurrent Calibrate instances would clobber each other's sampler.
        Calibrate.mcmc = pymc.MCMC(self.mcmc_model)
        Calibrate.mcmc.sample(
            iter=num_samples,
            burn=num_burn,
            thin=num_thin,
            tune_interval=10000,
            tune_throughout=True,
            progress_bar=progress)
        if Calibrate.mcmc is None:
            return None
        # Collect the posterior trace of each calibration variable.
        for v in self.cvars.keys():
            t = self.var[v].trace[:]
            if len(t.shape) == 2:
                self.cvars[v]['ntraces'] = t.shape[1]
            else:
                self.cvars[v]['ntraces'] = 1
            self.cvars[v]['trace'] = t.ravel()
        # Mean/deviation traces exist only for stochastic ('S') variables.
        for v in self.means.keys():
            self.cvars[v]['mtrace'] = self.means[v].trace[:]
            self.cvars[v]['dtrace'] = self.devs[v].trace[:]
        # collect all the independent variables and compute KDE
        col_count = max([self.cvars[v]['ntraces'] for v in self.cvars])
        for cv in self.cvars.keys():
            if self.cvars[cv]['type'] == 'S':
                # Joint PDF over (deviation, mean) for stochastic variables.
                data = np.column_stack((self.cvars[cv]['dtrace'], self.cvars[cv]['mtrace']))
                try:
                    self.cvars[cv]['jpdf'] = gaussian_kde(data.T)
                except:
                    # Degenerate trace (e.g. zero variance): no joint PDF.
                    self.cvars[cv]['jpdf'] = None
            # multidimensional traces get flattened and others
            # get repeated to match size.
            if self.cvars[cv]['ntraces'] == col_count:
                n = 1
            else:
                n = col_count
            try:
                self.cvars[cv]['pdf'] = gaussian_kde(self.cvars[cv]['trace'].ravel())
            except:
                self.cvars[cv]['pdf'] = None
            self.cvars[cv]['trace'] = self.cvars[cv]['trace'].ravel().repeat(n)
        # Joint PDF over all calibration variables (sorted for stable order).
        data = np.column_stack([self.cvars[v]['trace'] for v in sorted(self.cvars.keys())])
        try:
            k = gaussian_kde(data.T)
        except:
            k = None
        return (self.cvars, k)
def calibrate(params, caldata, err, func, num_samples=None):
    """
    Performs Bayesian calibration for a variable or variables. This
    function provides a convenient interface for use with PUQ.
    Args:
        params (list): PUQ input parameters. Parameters which have caldata
            included are not calibrated. Parameters without caldata must
            have a caltype.
        caldata (list or array): Experimental output values.
        err (float or array of floats): Standard deviation of the measurement
            error in **caldata**. If this is a scalar, then use the same error
            for every data point.
        func: Response function.
        num_samples: A tuple containing the number of samples, the number
            to burn, and the number to thin. If samples is an integer, burn
            will be 20% of the samples and thin will be 8. Default will use
            between 10000 and 1000000 samples, depending on the number of
            stochastic variables being calibrated.
    Returns:
        A copy of **params** modified with the calibrated variables.
    """
    print("Performing Bayesian Calibration...")
    cvars = {}
    nvars = {}
    ovar = {}
    # Expand a scalar error to one value per measurement.
    err = np.broadcast_to(err, caldata.shape)
    ovar['out'] = np.column_stack((caldata, err))
    # For each PUQ parameter
    for p in params:
        p.name = str(p.name)
        if hasattr(p, 'caldata') and p.caldata is not None:
            # noncalibration parameter with measurements and errors
            if len(p.caldata.shape) != 2:
                print('\nWarning: caldata for %s should have two columns.' % p.name)
                print('Column 1 is the data and column 2 is the error.')
                print("Assuming error is zero and continuing.\n")
                err = np.broadcast_to(0, p.caldata.shape)
                p.caldata = np.column_stack((p.caldata, err))
            nvars[p.name] = p.caldata
        else:
            # Calibration parameter: derive a prior string from its PUQ PDF.
            if isinstance(p, puq.NormalParameter):
                prior = 'Normal(%s,%s)' % (p.pdf.mean, p.pdf.dev)
            elif isinstance(p, puq.UniformParameter):
                prior = 'Uniform(%s,%s)' % (p.pdf.range[0], p.pdf.range[1])
            else:
                print('\nWARNING: Only Normal and Uniform priors are currently supported.')
                print('\tContinuing using Uniform\n')
                prior = 'Uniform(%s,%s)' % (p.pdf.range[0], p.pdf.range[1])
            cvars[p.name] = {'prior': prior, 'type': p.caltype}
    c = Calibrate(func, cvars, nvars, ovar)
    cvars, kpdf = c.run(samples=num_samples)
    # Replace each calibrated parameter with an experimental PDF fitted to
    # its MCMC trace; non-calibrated (caldata) parameters pass through.
    newparams = copy.copy(params)
    for i, p in enumerate(newparams):
        if hasattr(p, 'caldata') and p.caldata is not None:
            continue
        vals = cvars[p.name]['trace']
        print("Calibrated %s to a PDF with mean=%s and dev=%s" % (p.name, np.mean(vals), np.std(vals)))
        pdf = puq.ExperimentalPDF(vals, fit=True)
        newparams[i] = puq.CustomParameter(newparams[i].name,
                                           newparams[i].description,
                                           pdf=pdf,
                                           use_samples=True)
        try:
            newparams[i].values = copy.copy(p.values)
        except:
            # Parameter had no precomputed values; nothing to carry over.
            pass
        newparams[i].trace = vals
        newparams[i].original_parameter = p
    return newparams, kpdf
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declares some predicates useful for expressing IF/AND/OR conditions."""
from .predicate import ValuePredicate
from .sequenced_predicate_result import SequencedPredicateResult
class ConjunctivePredicate(ValuePredicate):
    """A ValuePredicate that evaluates predicates in order, stopping at the first failure (AND)."""

    @property
    def predicates(self):
        """The list of predicates that are ANDed together."""
        return self.__terms

    def __init__(self, conjunction, **kwargs):
        super(ConjunctivePredicate, self).__init__(**kwargs)
        self.__terms = [] + conjunction  # Elements are ValuePredicate

    def append(self, pred):
        """Adds predicate to the conjunction."""
        self.__terms.append(pred)

    def __str__(self):
        return ' AND '.join(str(term) for term in self.__terms)

    def __repr__(self):
        return ' AND '.join(repr(term) for term in self.__terms)

    def __eq__(self, pred):
        return (self.__class__ == pred.__class__
                and self.predicates == pred.predicates)

    def export_to_json_snapshot(self, snapshot, entity):
        """Implements JsonSnapshotableEntity interface."""
        snapshot.edge_builder.make(entity, 'Conjunction', self.__terms,
                                   join='AND')

    def __call__(self, context, value):
        collected = []
        for term in self.__terms:
            outcome = term(context, value)
            collected.append(outcome)
            if not outcome:
                # Short-circuit: the whole conjunction is invalid.
                return SequencedPredicateResult(
                    valid=False, pred=self, results=collected)
        return SequencedPredicateResult(
            valid=True, pred=self, results=collected)
class DisjunctivePredicate(ValuePredicate):
    """A ValuePredicate that evaluates predicates in order until one succeeds (OR)."""

    @property
    def predicates(self):
        """The list of predicates that are ORed together."""
        return self.__terms

    def __init__(self, disjunction, **kwargs):
        super(DisjunctivePredicate, self).__init__(**kwargs)
        self.__terms = [] + disjunction  # Elements are ValuePredicate

    def __str__(self):
        return ' OR '.join(str(term) for term in self.__terms)

    def __repr__(self):
        return ' OR '.join(repr(term) for term in self.__terms)

    def __eq__(self, pred):
        return (self.__class__ == pred.__class__
                and self.predicates == pred.predicates)

    def append(self, pred):
        """Adds predicate to the disjunction."""
        self.__terms.append(pred)

    def export_to_json_snapshot(self, snapshot, entity):
        """Implements JsonSnapshotableEntity interface."""
        snapshot.edge_builder.make(entity, 'Disjunction', self.__terms,
                                   join='OR')

    def __call__(self, context, value):
        collected = []
        for term in self.__terms:
            outcome = term(context, value)
            collected.append(outcome)
            if outcome:
                # Short-circuit: one success makes the disjunction valid.
                return SequencedPredicateResult(
                    valid=True, pred=self, results=collected)
        return SequencedPredicateResult(
            valid=False, pred=self, results=collected)
class NegationPredicate(ValuePredicate):
    """A ValuePredicate that inverts the validity of another predicate (NOT)."""

    @property
    def predicate(self):
        """The predicate being negated."""
        return self.__pred

    def __init__(self, pred, **kwargs):
        super(NegationPredicate, self).__init__(**kwargs)
        self.__pred = pred

    def __str__(self):
        return 'NOT ({0})'.format(self.__pred)

    def __repr__(self):
        return 'NOT ({0!r})'.format(self.__pred)

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.__pred == other.predicate)

    def export_to_json_snapshot(self, snapshot, entity):
        """Implements JsonSnapshotableEntity interface."""
        snapshot.edge_builder.make_mechanism(entity, 'Not', self.__pred)

    def __call__(self, context, value):
        inner = self.__pred(context, value)
        return SequencedPredicateResult(
            valid=not inner.valid, pred=self, results=[inner])
class ConditionalPredicate(ValuePredicate):
    """A ValuePredicate that implements IF/THEN.
    A conditional has an optional ELSE clause.
    If the else clause is provided then the IF condition is evaluated
    then either the THEN or ELSE condition depending on whether the IF was valid
    or not. The final validity is the result of that second (THEN or ELSE)
    result.
    If the ELSE clause is not provided, then the condition evaluates as
    NOT IF or THEN (transforming the expression using demorgan's law).
    """
    @property
    def if_predicate(self):
        """The predicate forming the IF condition."""
        return self.__if_pred

    @property
    def then_predicate(self):
        """The predicate forming the THEN clause."""
        return self.__then_pred

    @property
    def else_predicate(self):
        """The predicate forming the ELSE clause, or None if not provided."""
        # BUG FIX: this previously returned self.__then_pred, which also made
        # __eq__ compare the THEN clause twice and ignore the ELSE clause.
        return self.__else_pred

    def __init__(self, if_predicate, then_predicate, else_predicate=None,
                 **kwargs):
        """Constructs an if/then clause.
        Args:
            if_predicate: The ValuePredicate acting as the antecedent
            then_predicate: The ValuePredicate acting as the consequent
                is only executed when the if_predicate returns true.
            else_predicate: The ValuePredicate is only executed when the
                if_predicate returns false, if any is provided.
            See the base class (ValuePredicate) for additional kwargs.
        """
        self.__if_pred = if_predicate
        self.__then_pred = then_predicate
        self.__else_pred = else_predicate
        self.__demorgan_pred = None  # If else is None, this is Demorgans Law.
        if not else_predicate:
            # The clause is implemented using DeMorgan's law.
            self.__demorgan_pred = DisjunctivePredicate(
                [NegationPredicate(if_predicate), then_predicate])
        super(ConditionalPredicate, self).__init__(**kwargs)

    def __str__(self):
        # NOTE(review): the ELSE clause, when present, is not shown here.
        return 'IF ({0}) THEN ({1})'.format(self.__if_pred, self.__then_pred)

    def __repr__(self):
        return 'IF ({0!r}) THEN ({1!r})'.format(self.__if_pred, self.__then_pred)

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.__if_pred == other.if_predicate
                and self.__then_pred == other.then_predicate
                and self.__else_pred == other.else_predicate)

    def export_to_json_snapshot(self, snapshot, entity):
        """Implements JsonSnapshotableEntity interface."""
        snapshot.edge_builder.make_mechanism(entity, 'If', self.__if_pred)
        snapshot.edge_builder.make_mechanism(entity, 'Then', self.__then_pred)
        if self.__else_pred:
            snapshot.edge_builder.make_mechanism(entity, 'Else', self.__else_pred)

    def __call__(self, context, value):
        if self.__demorgan_pred:
            # No ELSE clause: evaluate the DeMorgan transformation instead.
            return self.__demorgan_pred(context, value)
        # Run the "if" predicate
        # then, depending on the result, run either "then" or "else" predicate.
        result = self.__if_pred(context, value)
        tried = [result]
        if result:
            result = self.__then_pred(context, value)
        else:
            result = self.__else_pred(context, value)
        tried.append(result)
        return SequencedPredicateResult(
            valid=result.valid, pred=self, results=tried)
# Convenience aliases for composing predicate expressions,
# e.g. AND([p1, p2]) or IF(p1, p2).
AND = ConjunctivePredicate
OR = DisjunctivePredicate
NOT = NegationPredicate
IF = ConditionalPredicate
| |
"""
Running in dev:
python app.py
If you want to create a new db when you run:
DROP_CREATE=true python app.py
"""
from flask import Flask, render_template, jsonify, request
# FIX: the 'flask.ext.*' import proxy was deprecated and then removed in
# Flask 1.0; importing the extension package directly works on old and new
# releases alike.
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.schema import Index
from datetime import datetime
from os import environ
from json import loads, dumps

app = Flask(__name__)

env_db = environ.get('DATABASE_URL')
drop_and_create_db = environ.get('DROP_CREATE')
if env_db:
    app.config['SQLALCHEMY_DATABASE_URI'] = env_db
    # Never drop/recreate the schema against a configured (real) database.
    drop_and_create_db = False
else:
    # just assume dev, who cares
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'

db = SQLAlchemy(app)

# Format used both for report dates posted by clients and query parameters.
TIME_FORMAT = "%a, %d %b %Y %H:%M:%S %Z"
# API
def render(content):
    '''Render inner template into base'''
    return render_template("base.html", content=content)
@app.route("/")
def add_form():
    """Render the map view used to submit a new report."""
    # NOTE(review): Google Maps API key is hard-coded in source; consider
    # moving it to configuration.
    api_key = "AIzaSyAF_o9iMtsGlET7yYVhAWoLFsRGBU9ge4o"
    return render(render_template("form.html", key=api_key))
@app.route("/save", methods=['POST'])
def post_save():
    """Persist a posted report, answering JSON {success, message}."""
    message = None
    success = False
    try:
        payload = loads(request.data)
        success = save(payload)
    except Exception as e:
        # Report the failure to the client rather than a 500.
        app.logger.error(e)
        message = str(e)
    return jsonify({"success": success, "message": message})
@app.route("/view")
def view():
    """Render the report-browsing page."""
    return render(render_template("view.html"))
@app.route("/view-json")
def view_json():
    """Return a page of reports and their events as JSON.

    Query params: event_types (comma separated), start/end (TIME_FORMAT
    dates), page (1-based), limit.
    """
    event_types = request.args.get('event_types', '').split(',')
    start_date = request.args.get('start', None)
    end_date = request.args.get('end', None)
    page = int(request.args.get('page', 1)) - 1
    limit = int(request.args.get('limit', 50))
    start_date = datetime.strptime(start_date, TIME_FORMAT) if start_date else None
    end_date = datetime.strptime(end_date, TIME_FORMAT) if end_date else None
    report_resp, total = load_reports(page=page, limit=limit, event_types=event_types,
                                      start_date=start_date, end_date=end_date)
    reports = [r for r in report_resp] if report_resp else []
    report_map = {r.id: r for r in reports}
    events = load_events_for_reports(reports) if reports else []
    for e in events:
        # BUG FIX: the id->report map was previously clobbered with [] when a
        # report had no events yet, breaking every later lookup. Now we just
        # guard against events whose report is not on this page.
        report = report_map.get(e.report_id)
        if report is not None:
            report.events.append(e)
    return jsonify({
        "events": [e.to_dict() for e in events],
        "reports": [r.to_dict() for r in reports],
        "total": total
    })
@app.route("/recent")
def view_recent():
    """Render the ten most recent reports."""
    recents, total = load_reports(limit=10)
    recent_html = render_template("recent.html", recents=recents, total=total)
    return render(recent_html)
# Service
def save(form):
    """Validate and persist one report plus all of its events.

    Raises Exception with a user-facing message on any validation failure;
    returns True once everything is committed.
    """
    email = form.get('email')
    conditions = form.get('conditions')
    starting = form.get('starting')
    ending = form.get('ending')
    input_date = form.get('date')
    association = form.get('association')
    events = form.get('events')
    if not events:
        raise Exception("Please include events")
    date = datetime.strptime(input_date, TIME_FORMAT)
    report = Report(email, conditions, starting, ending, date, association)
    if not report.is_valid():
        raise Exception('Report not valid. {}'.format(report))
    db.session.add(report)
    for f_event in events:
        event = Event(f_event.get('type'),
                      f_event.get('k'),   # latitude
                      f_event.get('d'),   # longitude
                      f_event.get('people_involved'),
                      f_event.get('comment'),
                      report)
        if not event.is_valid():
            raise Exception('Event {} is not valid'.format(event))
        db.session.add(event)
    # Single commit so a report is stored only with all its events.
    db.session.commit()
    return True
def load_reports(page=0, limit=50, event_types=None, reporter=None,
                 start_date=None, end_date=None):
    """Return (reports, total) for one page, optionally filtered by date range."""
    query = Report.query
    if start_date:
        query = query.filter(Report.date >= start_date)
    if end_date:
        query = query.filter(Report.date <= end_date)
    if event_types:
        # TODO - will actually require rewriting how these queries are performed
        pass
        # query = query.filter(.type.in_((123,456))
    # Count before paging so callers get the full match total.
    total = query.count()
    paged = (query.order_by("ID DESC")
                  .limit(limit)
                  .offset(page * limit))
    return paged.all(), total
def load_events_for_reports(reports):
    """Fetch all events belonging to the given reports.

    BUG FIX: the filter previously matched Event.id against *report* ids,
    returning unrelated events; it now matches Event.report_id.
    """
    report_ids = [r.id for r in reports]
    query = Event.query.filter(Event.report_id.in_(report_ids))
    return query.all()
# DB Models
class Report(db.Model):
    """A submitted report; Event rows attach via the 'events' backref."""
    id = db.Column(db.Integer, primary_key=True)
    # Obfuscated reporter email (see munge_email).
    reporter = db.Column(db.String(120))
    conditions = db.Column(db.String(2000))
    starting = db.Column(db.String(120))
    ending = db.Column(db.String(120))
    date = db.Column(db.DateTime(), index=True)
    association = db.Column(db.String(255))

    def __init__(self, email, conditions, starting, ending, date, association):
        """Create a report; the reporter email is obfuscated before storage."""
        self.reporter = self.munge_email(email)
        self.conditions = conditions
        self.starting = starting
        self.ending = ending
        self.date = date
        self.association = association

    def __repr__(self):
        return '<Report {} {}>'.format(self.id, self.date)

    def munge_email(self, email):
        """Keep the first two characters and the domain, e.g. 'ab...@x.com'."""
        try:
            at_pos = email.index("@")
            return email[:2] + "..." + email[at_pos:]
        except Exception:
            # Missing '@' or non-string input.
            return "no email"

    def to_dict(self):
        """JSON-serializable view of the report including its events."""
        return {
            'id': self.id,
            'reporter': self.reporter,
            'conditions': self.conditions,
            'starting': self.starting,
            'ending': self.ending,
            # NOTE(review): '%-d' (no zero padding) is a glibc strftime
            # extension; it fails on Windows.
            'date': self.date.strftime("%B %-d, %Y"),
            'association': self.association,
            'events': [e.to_dict() for e in self.events] if self.events else []
        }

    def is_valid(self):
        # True when a date is present; implicitly returns None (falsy)
        # otherwise.
        if self.date:
            return True
class Event(db.Model):
    """A single geolocated event belonging to a Report."""
    id = db.Column(db.Integer, primary_key=True)
    event_type = db.Column(db.String(3))
    latitude = db.Column(db.Float())
    longitude = db.Column(db.Float())
    comment = db.Column(db.String(500))
    people_involved = db.Column(db.Integer)
    # BUG FIX: report_id was declared twice; the redundant second
    # db.Column definition has been removed.
    report_id = db.Column(db.Integer, db.ForeignKey('report.id'))
    report = db.relationship('Report',
                             backref=db.backref('events', lazy='dynamic'))

    def __init__(self, event_type, latitude, longitude, people_involved, comment, report):
        """Create an event attached to *report*."""
        self.event_type = event_type
        self.latitude = latitude
        self.longitude = longitude
        self.people_involved = people_involved
        self.comment = comment
        self.report = report

    def __repr__(self):
        return '<Event {}: {} ({}, {}) report: {}>'.format(self.id,
                                                           self.event_type,
                                                           self.latitude,
                                                           self.longitude,
                                                           self.report_id)

    def to_dict(self):
        """JSON-serializable view of the event."""
        return {
            'id': self.id,
            'event_type': self.event_type,
            'latitude': self.latitude,
            'longitude': self.longitude,
            'report_id': self.report_id,
            'comment': self.comment,
            'people_involved': self.people_involved
        }

    def is_valid(self):
        """True when type, coordinates and owning report are all present."""
        required = [self.event_type, self.latitude, self.longitude, self.report]
        for prop in required:
            if not prop:
                return False
        return True
# Composite index to speed event lookups by report and type.
Index('event_type_index', Event.report_id, Event.event_type)

if __name__ == "__main__":
    # Optionally rebuild the schema (DROP_CREATE=true), then run the dev
    # server. Note: Python 2 print statements.
    if drop_and_create_db:
        print db.drop_all()
        print db.create_all()
    app.run(debug=True)
| |
from __future__ import absolute_import, division, unicode_literals
import copy
import random
import socket
import string
import sys
from argparse import ArgumentParser as ArgParser
from argparse import (_VersionAction, Action, ArgumentError, ArgumentTypeError, Namespace, PARSER, REMAINDER, SUPPRESS,
_SubParsersAction)
import pkg_resources
import flexget
from flexget.entry import Entry
from flexget.event import fire_event
from flexget.logger import console
from flexget.utils import requests
# Sentinel distinct from None for "no value supplied".
_UNSET = object()

# Lazily-instantiated CoreArgumentParser singleton; see get_parser().
core_parser = None
def unicode_argv():
    """Like sys.argv, but decodes all arguments."""
    fs_encoding = sys.getfilesystemencoding()
    return [argument.decode(fs_encoding) for argument in sys.argv]
def get_parser(command=None):
    """Return the global CoreArgumentParser, creating it on first use.

    If *command* is given, return that command's subparser instead.
    """
    global core_parser
    if core_parser is None:
        core_parser = CoreArgumentParser()
        # Add all plugin options to the parser
        fire_event('options.register')
    return core_parser.get_subparser(command) if command else core_parser
def register_command(command, callback, **kwargs):
    """
    Register a callback function to be executed when flexget is launched with the given `command`.
    :param command: The command being defined.
    :param callback: Callback function executed when this command is invoked from the CLI. Should take manager instance
        and parsed argparse namespace as parameters.
    :param kwargs: Other keyword arguments will be passed to the :class:`arparse.ArgumentParser` constructor
    :returns: An :class:`argparse.ArgumentParser` instance ready to be configured with the options for this command.
    """
    defaults = {'cli_command_callback': callback}
    return get_parser().add_subparser(command, parent_defaults=defaults, **kwargs)
def required_length(nmin, nmax):
    """Generates a custom Action enforcing between *nmin* and *nmax* values."""
    class RequiredLength(Action):
        def __call__(self, parser, args, values, option_string=None):
            count = len(values)
            if count < nmin or count > nmax:
                raise ArgumentError(self, 'requires between %s and %s arguments' % (nmin, nmax))
            setattr(args, self.dest, values)
    return RequiredLength
class VersionAction(_VersionAction):
    """Action to print the current version. Also checks latest release revision."""

    def __call__(self, parser, namespace, values, option_string=None):
        # Print the version number
        console('%s' % self.version)
        # Check for latest version from server
        try:
            page = requests.get('http://download.flexget.com/latestversion')
        except requests.RequestException:
            console('Error getting latest version number from download.flexget.com')
        else:
            latest = page.text.strip()
            if self.version == latest:
                message = 'You are on the latest release.'
            else:
                message = 'Latest release: %s' % latest
            console(message)
        parser.exit()
class DebugAction(Action):
    """Flag that turns on its dest and forces the log level to 'debug'."""

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, True)
        setattr(namespace, 'loglevel', 'debug')
class DebugTraceAction(Action):
    """Flag that enables trace logging and implies debug mode."""

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, True)
        setattr(namespace, 'debug', True)
        setattr(namespace, 'log_level', 'trace')
class CronAction(Action):
    """Flag for scheduled runs; quiets logging unless a level was chosen."""

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, True)
        # Only set loglevel if it has not already explicitly been set
        if not hasattr(namespace, 'loglevel'):
            setattr(namespace, 'loglevel', 'info')
# This makes the old --inject form forwards compatible
class InjectAction(Action):
    """Translate legacy --inject arguments into an accepted/immortal Entry."""

    def __call__(self, parser, namespace, values, option_string=None):
        kwargs = {'title': values.pop(0)}
        if values:
            kwargs['url'] = values.pop(0)
        else:
            # Fabricate a unique placeholder URL when none was given.
            kwargs['url'] = 'http://localhost/inject/%s' % ''.join(random.sample(string.letters + string.digits, 30))
        flags = [v.lower() for v in values]
        if 'force' in flags:
            kwargs['immortal'] = True
        entry = Entry(**kwargs)
        if 'accept' in flags:
            entry.accept(reason='accepted by --inject')
        existing = getattr(namespace, self.dest, None) or []
        setattr(namespace, self.dest, existing + [entry])
class ParseExtrasAction(Action):
    """This action will take extra arguments, and parse them with a different parser."""

    def __init__(self, option_strings, parser, help=None, metavar=None, dest=None, required=False):
        self._parser = parser
        if metavar is None:
            metavar = '<%s arguments>' % parser.prog
        if help is None:
            help = 'arguments for the `%s` command are allowed here' % parser.prog
        super(ParseExtrasAction, self).__init__(
            option_strings=option_strings,
            dest=SUPPRESS,
            help=help,
            metavar=metavar,
            nargs=REMAINDER,
            required=required)

    def __call__(self, parser, namespace, values, option_string=None):
        namespace, leftover = self._parser.parse_known_args(values, namespace)
        if leftover:
            parser.error('unrecognized arguments: %s' % ' '.join(leftover))
class ScopedNamespace(Namespace):
    """Namespace supporting dotted access into nested child namespaces.

    Lookups that miss locally fall back to the parent namespace.
    """

    def __init__(self, **kwargs):
        super(ScopedNamespace, self).__init__(**kwargs)
        self.__parent__ = None

    def __getattr__(self, key):
        if '.' in key:
            scope, remainder = key.split('.', 1)
            return getattr(getattr(self, scope), remainder)
        if self.__parent__:
            # Fall back to the enclosing namespace.
            return getattr(self.__parent__, key)
        raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, key))

    def __setattr__(self, key, value):
        if '.' in key:
            scope, remainder = key.split('.', 1)
            if not hasattr(self, scope):
                # Create the intermediate namespace on demand.
                setattr(self, scope, type(self)())
            child = getattr(self, scope, None)
            return object.__setattr__(child, remainder, value)
        # Let child namespaces keep track of us
        if key != '__parent__' and isinstance(value, ScopedNamespace):
            value.__parent__ = self
        return object.__setattr__(self, key, value)

    def __iter__(self):
        return (item for item in self.__dict__.iteritems() if item[0] != '__parent__')

    def __copy__(self):
        duplicate = self.__class__()
        duplicate.__dict__.update(self.__dict__)
        # Make copies of any nested namespaces
        for key, value in self:
            if isinstance(value, ScopedNamespace):
                setattr(duplicate, key, copy.copy(value))
        return duplicate
class NestedSubparserAction(_SubParsersAction):
    """_SubParsersAction supporting per-subparser parent defaults and
    optional nesting of subcommand options under their own namespace."""

    def __init__(self, *args, **kwargs):
        self.nested_namespaces = kwargs.pop('nested_namespaces', False)
        self.parent_defaults = {}
        super(NestedSubparserAction, self).__init__(*args, **kwargs)

    def add_parser(self, name, parent_defaults=None, **kwargs):
        if parent_defaults:
            self.parent_defaults[name] = parent_defaults
        return super(NestedSubparserAction, self).add_parser(name, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        chosen = values[0]
        defaults = self.parent_defaults.get(chosen)
        if defaults:
            # Apply this subcommand's parent defaults without overriding
            # anything already set.
            for dest in defaults:
                if not hasattr(namespace, dest):
                    setattr(namespace, dest, defaults[dest])
        if not self.nested_namespaces:
            super(NestedSubparserAction, self).__call__(parser, namespace, values, option_string)
            return
        subnamespace = ScopedNamespace()
        super(NestedSubparserAction, self).__call__(parser, subnamespace, values, option_string)
        # If dest is set, it should be set on the parent namespace, not subnamespace
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, chosen)
            delattr(subnamespace, self.dest)
        setattr(namespace, chosen, subnamespace)
class ParserError(Exception):
    """Raised by ArgumentParser.error instead of exiting the process."""

    def __init__(self, message, parser):
        self.message = message
        self.parser = parser

    def __unicode__(self):
        return self.message

    def __repr__(self):
        return 'ParserError({0}, {1})'.format(self.message, self.parser)
class ArgumentParser(ArgParser):
"""
Mimics the default :class:`argparse.ArgumentParser` class, with a few distinctions, mostly to ease subparser usage:
- If `add_subparsers` is called with the `nested_namespaces` kwarg, all subcommand options will be stored in a
nested namespace based on the command name for the subparser
- Adds the `add_subparser` method. After `add_subparsers` has been called, the `add_subparser` method can be used
instead of the `add_parser` method of the object returned by the `add_subparsers` call.
- `add_subparser` takes takes the `parent_defaults` argument, which will set/change the defaults for the parent
parser when that subparser is selected.
- The `get_subparser` method will get the :class:`ArgumentParser` instance for an existing subparser on this parser
- For any arguments defined both in this parser and one of its subparsers, the selected subparser default will
override the main one.
- Adds the `set_post_defaults` method. This works like the normal argparse `set_defaults` method, but all actions
and subparsers will be run before any of these defaults are set.
- Command shortening: If the command for a subparser is abbreviated unambiguously, it will still be accepted.
- The add_argument `nargs` keyword argument supports a range of arguments, e.g. `"2-4"
- If the `raise_errors` keyword argument to `parse_args` is True, a `ParserError` will be raised instead of sys.exit
- If the `file` argument is given to `parse_args`, output will be printed there instead of sys.stdout or stderr
"""
file = None # This is created as a class attribute so that we can set it for parser and all subparsers at once
    def __init__(self, **kwargs):
        """
        :param nested_namespace_name: When used as a subparser, options from this parser will be stored nested under
            this attribute name in the root parser's namespace
        """
        # Do this early, so even option processing stuff is caught
        if '--bugreport' in unicode_argv():
            self._debug_tb_callback()
        self.subparsers = None
        self.raise_errors = None
        ArgParser.__init__(self, **kwargs)
        # Overwrite _SubparserAction with our custom one
        self.register('action', 'parsers', NestedSubparserAction)
        self.post_defaults = {}
        # Inherit post-parse defaults declared on any parent parsers.
        if kwargs.get('parents'):
            for parent in kwargs['parents']:
                if hasattr(parent, 'post_defaults'):
                    self.set_post_defaults(**parent.post_defaults)
def add_argument(self, *args, **kwargs):
if isinstance(kwargs.get('nargs'), basestring) and '-' in kwargs['nargs']:
# Handle a custom range of arguments
min, max = kwargs['nargs'].split('-')
min, max = int(min), int(max)
kwargs['action'] = required_length(min, max)
# Make the usage string a bit better depending on whether the first argument is optional
if min == 0:
kwargs['nargs'] = '*'
else:
kwargs['nargs'] = '+'
return super(ArgumentParser, self).add_argument(*args, **kwargs)
def _print_message(self, message, file=None):
    """Route output to the file given to `parse_args`, when one was set."""
    # The class attribute takes precedence over the per-call argument.
    super(ArgumentParser, self)._print_message(message, self.file or file)
def set_post_defaults(self, **kwargs):
    """Like the `set_defaults` method, but the defaults are applied after parsing instead of before."""
    self.post_defaults.update(kwargs)
    # Any argument matching one of these dests has its normal default
    # suppressed, so the post default can fill the slot after parsing.
    for action in (a for a in self._actions if a.dest in kwargs):
        action.default = SUPPRESS
def error(self, msg):
    """Raise a `ParserError` instead of argparse's default sys.exit behavior.

    `parse_args` catches this and, depending on `raise_errors`, either
    re-raises it or falls back to the stock error handling.
    """
    raise ParserError(msg, self)
def parse_args(self, args=None, namespace=None, raise_errors=False, file=None):
    """
    Parse command line arguments.

    :param raise_errors: If this is true, errors will be raised as `ParserError`s instead of calling sys.exit
    :param file: If given, all parser output is written here instead of stdout/stderr.
    """
    # Set on the CLASS so this parser and every subparser share the output
    # file for the duration of this call (see _print_message).
    ArgumentParser.file = file
    try:
        return super(ArgumentParser, self).parse_args(args, namespace)
    except ParserError as e:
        if raise_errors:
            raise
        # Delegate to stock argparse error handling (prints usage and exits)
        # on the parser that actually raised, which may be a subparser.
        # NOTE(review): `e.message` is Python-2-only exception API.
        super(ArgumentParser, e.parser).error(e.message)
    finally:
        # Always reset so later parses are not redirected.
        ArgumentParser.file = None
def parse_known_args(self, args=None, namespace=None):
    """Parse known args (decoded to unicode when taken from sys.argv) and apply post defaults."""
    if args is None:
        # Decode all arguments to unicode before parsing
        args = unicode_argv()[1:]
    if namespace is None:
        namespace = ScopedNamespace()
    namespace, args = super(ArgumentParser, self).parse_known_args(args, namespace)
    # Fill in any post default whose dest was left unset by parsing.
    for dest, value in self.post_defaults.items():
        if not hasattr(namespace, dest):
            setattr(namespace, dest, value)
    return namespace, args
def add_subparsers(self, **kwargs):
    """
    Create and remember the subparsers action for this parser.

    :param nested_namespaces: If True, options from subparsers will appear in nested namespace under the subparser
        name.
    """
    # Set the parser class so subparsers don't end up being an instance of a
    # subclass, like CoreArgumentParser.
    if 'parser_class' not in kwargs:
        kwargs['parser_class'] = ArgumentParser
    self.subparsers = super(ArgumentParser, self).add_subparsers(**kwargs)
    return self.subparsers
def add_subparser(self, name, **kwargs):
    """
    Adds a parser for a new subcommand and returns it.

    :param name: Name of the subcommand
    :param parent_defaults: Default argument values which should be supplied to the parent parser if this subparser
        is selected.
    :raises TypeError: If `add_subparsers` was never called on this parser.
    """
    if self.subparsers is None:
        raise TypeError('This parser does not have subparsers')
    return self.subparsers.add_parser(name, **kwargs)
def get_subparser(self, name, default=_UNSET):
    """Return the subparser registered under `name`, or `default` when provided.

    :raises TypeError: If this parser has no subparsers.
    :raises ValueError: If `name` is unknown and no `default` was given.
    """
    if not self.subparsers:
        raise TypeError('This parser does not have subparsers')
    try:
        return self.subparsers.choices[name]
    except KeyError:
        if default is _UNSET:
            raise ValueError('%s is not an existing subparser name' % name)
        return default
def _get_values(self, action, arg_strings):
    """Complete the full name for partial subcommands."""
    if action.nargs == PARSER and self.subparsers:
        prefix = arg_strings[0]
        if prefix not in self.subparsers.choices:
            # Expand an unambiguous abbreviation to the full command name.
            candidates = [name for name in self.subparsers.choices if name.startswith(prefix)]
            if len(candidates) == 1:
                arg_strings[0] = candidates[0]
    return super(ArgumentParser, self)._get_values(action, arg_strings)
def _debug_tb_callback(self, *dummy):
    # Install verbose plain-text tracebacks for the --bugreport option.
    # NOTE(review): cgitb was removed from the stdlib in Python 3.13.
    import cgitb
    cgitb.enable(format="text")
# This will hold just the arguments directly for Manager. Webui needs this clean, to build its parser.
manager_parser = ArgumentParser(add_help=False)
manager_parser.add_argument('-V', '--version', action=VersionAction, version=flexget.__version__,
                            help='Print FlexGet version and exit.')
# Fix: the default for a store_true flag was 0; use False for type consistency
# (falsy either way, so callers observe the same truth value).
manager_parser.add_argument('--test', action='store_true', dest='test', default=False,
                            help='Verbose what would happen on normal execution.')
manager_parser.add_argument('-c', dest='config', default='config.yml',
                            help='Specify configuration file. Default: %(default)s')
manager_parser.add_argument('--logfile', '-l', default='flexget.log',
                            help='Specify a custom logfile name/location. '
                                 'Default: %(default)s in the config directory.')
manager_parser.add_argument('--loglevel', '-L', metavar='LEVEL',
                            help='Set the verbosity of the logger. Levels: %(choices)s',
                            choices=['none', 'critical', 'error', 'warning', 'info', 'verbose', 'debug', 'trace'])
# Applied after parsing so a subparser (e.g. daemon) may override it first.
manager_parser.set_post_defaults(loglevel='verbose')
# This option is already handled above.
manager_parser.add_argument('--bugreport', action='store_true', dest='debug_tb',
                            help='Use this option to create a detailed bug report, '
                                 'note that the output might contain PRIVATE data, so edit that out')
manager_parser.add_argument('--profile', metavar='OUTFILE', nargs='?', const='flexget.profile',
                            help='Use the python profiler for this run to debug performance issues.')
# Hidden developer/diagnostic options.
manager_parser.add_argument('--debug', action=DebugAction, nargs=0, help=SUPPRESS)
manager_parser.add_argument('--debug-trace', action=DebugTraceAction, nargs=0, help=SUPPRESS)
manager_parser.add_argument('--debug-sql', action='store_true', default=False, help=SUPPRESS)
manager_parser.add_argument('--experimental', action='store_true', default=False, help=SUPPRESS)
manager_parser.add_argument('--ipc-port', type=int, help=SUPPRESS)
manager_parser.add_argument('--cron', action=CronAction, default=False, nargs=0,
                            help='use when executing FlexGet non-interactively: allows background '
                                 'maintenance to run, disables stdout and stderr output, reduces logging level')
class CoreArgumentParser(ArgumentParser):
    """
    The core argument parser, contains the manager arguments, command parsers, and plugin arguments.

    Warning: Only gets plugin arguments if instantiated after plugins have been loaded.
    """

    def __init__(self, **kwargs):
        # Inherit the manager-level options and present as the `flexget` program.
        kwargs.setdefault('parents', [manager_parser])
        kwargs.setdefault('prog', 'flexget')
        super(CoreArgumentParser, self).__init__(**kwargs)
        self.add_subparsers(title='commands', metavar='<command>', dest='cli_command', nested_namespaces=True)

        # The parser for the execute command
        exec_parser = self.add_subparser('execute', help='execute tasks now')
        exec_parser.add_argument('--tasks', nargs='+', metavar='TASK',
                                 help='run only specified task(s), optionally using glob patterns ("tv-*"). '
                                      'matching is case-insensitive')
        exec_parser.add_argument('--learn', action='store_true', dest='learn', default=False,
                                 help='matches are not downloaded but will be skipped in the future')
        exec_parser.add_argument('--profile', action='store_true', default=False, help=SUPPRESS)
        exec_parser.add_argument('--disable-phases', nargs='*', help=SUPPRESS)
        exec_parser.add_argument('--inject', nargs='+', action=InjectAction, help=SUPPRESS)
        # Plugins should respect these flags where appropriate
        exec_parser.add_argument('--retry', action='store_true', dest='retry', default=False, help=SUPPRESS)
        exec_parser.add_argument('--no-cache', action='store_true', dest='nocache', default=False,
                                 help='disable caches. works only in plugins that have explicit support')

        # Daemonizing is not supported on Windows, so hide the flag there.
        daemonize_help = SUPPRESS
        if not sys.platform.startswith('win'):
            daemonize_help = 'causes process to daemonize after starting'

        # The parser for the daemon command
        daemon_parser = self.add_subparser('daemon', parent_defaults={'loglevel': 'info'},
                                           help='run continuously, executing tasks according to schedules defined '
                                                'in config')
        daemon_parser.add_subparsers(title='actions', metavar='<action>', dest='action')
        start_parser = daemon_parser.add_subparser('start', help='start the daemon')
        start_parser.add_argument('-d', '--daemonize', action='store_true', help=daemonize_help)
        stop_parser = daemon_parser.add_subparser('stop', help='shutdown the running daemon')
        stop_parser.add_argument('--wait', action='store_true',
                                 help='wait for all queued tasks to finish before stopping daemon')
        daemon_parser.add_subparser('status', help='check if a daemon is running')
        daemon_parser.add_subparser('reload', help='causes a running daemon to reload the config from disk')

        # The parser for the webui
        # Hide the webui command if deps aren't available
        webui_kwargs = {}
        try:
            pkg_resources.require('flexget[webui]')
            webui_kwargs['help'] = 'run continuously, with a web interface to configure and interact with the daemon'
        except pkg_resources.DistributionNotFound:
            pass
        webui_parser = self.add_subparser('webui', **webui_kwargs)

        def ip_type(value):
            # argparse `type=` validator: accept only parseable IPv4 addresses.
            try:
                socket.inet_aton(value)
            except socket.error:
                raise ArgumentTypeError('must be a valid ip address to bind to')
            return value

        webui_parser.add_argument('--bind', type=ip_type, default='0.0.0.0', metavar='IP',
                                  help='IP address to bind to when serving the web interface [default: %(default)s]')
        webui_parser.add_argument('--port', type=int, default=5050,
                                  help='run FlexGet webui on port [default: %(default)s]')
        webui_parser.add_argument('-d', '--daemonize', action='store_true', help=daemonize_help)
        # TODO: move these to authentication plugin?
        webui_parser.add_argument('--no-auth', action='store_true',
                                  help='runs without authentication required (dangerous)')
        webui_parser.add_argument('--no-local-auth', action='store_true',
                                  help='runs without any authentication required when accessed from localhost')
        webui_parser.add_argument('--username', help='username needed to login [default: flexget]')
        webui_parser.add_argument('--password', help='password needed to login [default: flexget]')
        # enable flask autoreloading (development)
        webui_parser.add_argument('--autoreload', action='store_true', help=SUPPRESS)
        webui_parser.set_defaults(loglevel='info')

    def add_subparsers(self, **kwargs):
        """Create subparsers; forces plain ArgumentParser children."""
        # The subparsers should not be CoreArgumentParsers
        kwargs.setdefault('parser_class', ArgumentParser)
        return super(CoreArgumentParser, self).add_subparsers(**kwargs)

    def parse_args(self, *args, **kwargs):
        """Parse args, guaranteeing `result.execute` is always populated."""
        result = super(CoreArgumentParser, self).parse_args(*args, **kwargs)
        # Make sure we always have execute parser settings even when other commands called
        if not result.cli_command == 'execute':
            exec_options = get_parser('execute').parse_args([])
            if hasattr(result, 'execute'):
                exec_options.__dict__.update(result.execute.__dict__)
            result.execute = exec_options
        return result
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Defines the `Topology` class, that describes a TPU fabric topology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf.tpu import topology_pb2
def _tpu_device_name(job, task, device):
"""Returns the device name for the TPU `device` on `task` of `job`."""
if job is None:
return "/task:%d/device:TPU:%d" % (task, device)
else:
return "/job:%s/task:%d/device:TPU:%d" % (job, task, device)
def _tpu_host_device_name(job, task):
"""Returns the device name for the CPU device on `task` of `job`."""
if job is None:
return "/task:%d/device:CPU:0" % task
else:
return "/job:%s/task:%d/device:CPU:0" % (job, task)
class Topology(object):
    """Describes a set of TPU devices.

    Represents both the shape of the physical mesh, and the mapping between
    TensorFlow TPU devices to physical mesh coordinates.
    """

    def __init__(self, serialized=None, mesh_shape=None, device_coordinates=None):
        """Builds a Topology object.

        If `serialized` is not `None`, the topology is parsed from `serialized` and
        the other arguments are ignored. Otherwise, the topology is computed from
        `mesh_shape` and `device_coordinates`.

        Args:
          serialized: A serialized `TopologyProto`, or `None`. If not `None`, the
            serialized proto is parsed to discover the topology.
          mesh_shape: A sequence of 3 positive integers, or `None`. If not `None`,
            the shape of the TPU topology, in number of cores. Ignored if
            `serialized` is not `None`.
          device_coordinates: A rank 3 numpy array that describes the mapping from
            TensorFlow TPU devices to TPU fabric coordinates, or `None`. Ignored
            if `serialized` is not `None`.

        Raises:
          ValueError: If `serialized` does not describe a well-formed topology.
          ValueError: If `serialized` is `None` and `mesh_shape` is not a sequence
            of 3 positive integers.
          ValueError: If `serialized` is `None` and `device_coordinates` is not a
            rank 3 numpy int32 array that describes a valid coordinate mapping.
        """
        self._serialized = serialized

        if serialized:
            self._parse_topology(serialized)
        else:
            self._mesh_shape = np.asarray(mesh_shape, dtype=np.int32)
            self._device_coordinates = np.asarray(device_coordinates, np.int32)
            if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):
                raise ValueError("`mesh_shape` must be a sequence of 3 positive "
                                 "entries; got {}".format(self._mesh_shape))

            if (len(self._device_coordinates.shape) != 3 or
                    self._device_coordinates.shape[2] != len(self._mesh_shape)):
                raise ValueError("`device_coordinates` must be a rank 3 int32 array "
                                 "with minor dimension equal to the mesh shape rank")

        # Precompute the inverse mapping: fabric coordinate -> (task, device).
        self._topology_tasks, self._topology_devices = self._invert_topology()
        # Coordinates of devices that are missing (fabric positions with no
        # assigned task; -1 entries in the inverted task map).
        self._missing_devices = np.argwhere(self._topology_tasks < 0)

    def _parse_topology(self, serialized):
        """Parses a serialized `TopologyProto` into `self`."""
        proto = topology_pb2.TopologyProto()
        proto.ParseFromString(serialized)

        self._mesh_shape = np.array(proto.mesh_shape, dtype=np.int32)
        if len(self._mesh_shape) != 3 or any(self._mesh_shape < 1):
            raise ValueError("`mesh_shape` must be a vector of size 3 with positive "
                             "entries; got {}".format(self._mesh_shape))

        if proto.num_tasks < 0:
            raise ValueError("`num_tasks` must be >= 0; got {}".format(
                proto.num_tasks))
        if proto.num_tpu_devices_per_task < 0:
            raise ValueError("`num_tpu_devices_per_task` must be >= 0; got {}".format(
                proto.num_tpu_devices_per_task))

        # The flat coordinate list must factor exactly into
        # tasks x devices-per-task x mesh rank.
        expected_coordinates_size = (
            proto.num_tasks * proto.num_tpu_devices_per_task * len(
                proto.mesh_shape))
        if len(proto.device_coordinates) != expected_coordinates_size:
            raise ValueError("`device_coordinates` must have shape num_tasks ({}) * "
                             "num_tpu_devices_per_task ({}) * len(mesh_shape) ({}); "
                             "got shape {}".format(proto.num_tasks,
                                                   proto.num_tpu_devices_per_task,
                                                   proto.mesh_shape,
                                                   len(proto.device_coordinates)))

        coords = np.array(proto.device_coordinates, dtype=np.int32)
        if any(coords < 0):
            raise ValueError("`device_coordinates` must be >= 0")
        coords = coords.reshape((proto.num_tasks, proto.num_tpu_devices_per_task,
                                 len(proto.mesh_shape)))
        self._device_coordinates = coords

    def _invert_topology(self):
        """Inverts a [task,device,axis] topology to [x,y,z] -> task/device maps."""
        # Positions not claimed by any device stay at -1 ("missing").
        tasks = np.full(list(self.mesh_shape), -1, dtype=np.int32)
        devices = np.full(list(self.mesh_shape), -1, dtype=np.int32)
        for task in xrange(self.device_coordinates.shape[0]):
            for device in xrange(self.device_coordinates.shape[1]):
                x, y, z = self.device_coordinates[task, device, :]
                tasks[x, y, z] = task
                devices[x, y, z] = device
        return tasks, devices

    @property
    def mesh_shape(self):
        """A rank 1 int32 array describing the shape of the TPU topology."""
        return self._mesh_shape

    @property
    def mesh_rank(self):
        """Returns the number of dimensions in the mesh."""
        return len(self._mesh_shape)

    @property
    def device_coordinates(self):
        """Describes the mapping from TPU devices to topology coordinates.

        Returns:
          A rank 3 int32 array with shape `[tasks, devices, axis]`.
          `tasks` is the number of tasks in the TPU cluster, `devices` is the number
          of TPU devices per task, and `axis` is the number of axes in the TPU
          cluster topology. Each entry gives the `axis`-th coordinate in the
          topology of a task/device pair. TPU topologies are 3-dimensional, with
          dimensions `(x, y, core number)`.
        """
        return self._device_coordinates

    @property
    def missing_devices(self):
        """Array of fabric coordinates of positions with no attached device."""
        return self._missing_devices

    def task_ordinal_at_coordinates(self, device_coordinates):
        """Returns the TensorFlow task number attached to `device_coordinates`.

        Args:
          device_coordinates: An integer sequence describing a device's physical
            coordinates in the TPU fabric.

        Returns:
          Returns the TensorFlow task number that contains the TPU device with those
          physical coordinates.
        """
        return self._topology_tasks[tuple(device_coordinates)]

    def tpu_device_ordinal_at_coordinates(self, device_coordinates):
        """Returns the TensorFlow device number at `device_coordinates`.

        Args:
          device_coordinates: An integer sequence describing a device's physical
            coordinates in the TPU fabric.

        Returns:
          Returns the TensorFlow device number within the task corresponding to
          the device with those physical coordinates.
        """
        return self._topology_devices[tuple(device_coordinates)]

    def cpu_device_name_at_coordinates(self, device_coordinates, job=None):
        """Returns the CPU device attached to a logical core."""
        return _tpu_host_device_name(
            job, self._topology_tasks[tuple(device_coordinates)])

    def tpu_device_name_at_coordinates(self, device_coordinates, job=None):
        """Returns the name of the TPU device assigned to a logical core."""
        return _tpu_device_name(job,
                                self._topology_tasks[tuple(device_coordinates)],
                                self._topology_devices[tuple(device_coordinates)])

    @property
    def num_tasks(self):
        """Returns the number of TensorFlow tasks in the TPU slice."""
        return self._device_coordinates.shape[0]

    @property
    def num_tpus_per_task(self):
        """Returns the number of TPU devices per task in the TPU slice."""
        return self._device_coordinates.shape[1]

    def serialized(self):
        """Returns the serialized form of the topology."""
        # Built lazily and cached; topologies constructed from mesh_shape /
        # device_coordinates have no serialized form until first requested.
        if self._serialized is None:
            proto = topology_pb2.TopologyProto()
            proto.mesh_shape[:] = list(self._mesh_shape)
            proto.num_tasks = self._device_coordinates.shape[0]
            proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]
            proto.device_coordinates.extend(list(self._device_coordinates.flatten()))
            self._serialized = proto.SerializeToString()

        return self._serialized
| |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fractal demo."""
from __future__ import with_statement
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import json
import logging
import os
import time
import lib_path
import google_cloud.gce as gce
import google_cloud.gce_appengine as gce_appengine
import google_cloud.oauth as oauth
import jinja2
import oauth2client.appengine as oauth2client
import user_data
import webapp2
from google.appengine.api import urlfetch
# Demo identity and GCE resource configuration.
DEMO_NAME = 'fractal'
# Pre-built VM image preferred when it exists in the project (see _get_image).
CUSTOM_IMAGE = 'fractal-demo-image'
MACHINE_TYPE='n1-highcpu-2'
FIREWALL = 'www-fractal'
FIREWALL_DESCRIPTION = 'Fractal Demo Firewall'
GCE_SCOPE = 'https://www.googleapis.com/auth/compute'
# Seconds to wait on each instance/LB health-check urlfetch RPC.
HEALTH_CHECK_TIMEOUT = 1

# Files shipped to the VM via instance metadata at boot.
VM_FILES = os.path.join(os.path.dirname(__file__), 'vm_files')
STARTUP_SCRIPT = os.path.join(VM_FILES, 'startup.sh')
GO_PROGRAM = os.path.join(VM_FILES, 'mandelbrot.go')
# Command-line flags handed to the Go tile server on the VM.
GO_ARGS = '--portBase=80 --numPorts=1'
GO_TILESERVER_FLAG = '--tileServers='

jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(''))
oauth_decorator = oauth.decorator
# User-supplied settings this demo needs before it can run.
parameters = [
    user_data.DEFAULTS[user_data.GCE_PROJECT_ID],
    user_data.DEFAULTS[user_data.GCE_ZONE_NAME],
    user_data.DEFAULTS[user_data.GCE_LOAD_BALANCER_IP],
]
data_handler = user_data.DataHandler(DEMO_NAME, parameters)
class ServerVarsAggregator(object):
    """Aggregate stats across multiple servers and produce a summary."""

    def __init__(self):
        """Constructor for ServerVarsAggregator."""
        # A map of tile-size -> total number of tiles rendered
        self.tile_counts = {}
        # A map of tile-size -> cumulative render time, in nanoseconds
        self.tile_times = {}
        # The uptime of the server that has been up and running the longest.
        self.max_uptime = 0

    def aggregate_vars(self, instance_vars):
        """Integrate instance_vars into the running aggregates.

        Args:
          instance_vars: A parsed JSON object returned from /debug/vars.
        """
        self._aggregate_map(instance_vars['tileCount'], self.tile_counts)
        self._aggregate_map(instance_vars['tileTime'], self.tile_times)
        self.max_uptime = max(self.max_uptime, instance_vars['uptime'])

    def _aggregate_map(self, src_map, dest_map):
        """Aggregate one map from src_map into dest_map.

        Fix: uses plain ints instead of the Python-2-only `0L`/`long` —
        Python ints have arbitrary precision, so behavior is unchanged.
        """
        for k, v in src_map.items():
            dest_map[k] = dest_map.get(k, 0) + int(v)

    def get_aggregate(self):
        """Get the overall aggregate, including derived values."""
        tile_time_avg = {}
        result = {
            'tileCount': self.tile_counts.copy(),
            'tileTime': self.tile_times.copy(),
            'tileTimeAvgMs': tile_time_avg,
            'maxUptime': self.max_uptime,
        }
        for size, count in self.tile_counts.items():
            # Renamed from `time` so the imported time module is not shadowed.
            total_time = self.tile_times.get(size, 0)
            if total_time and count:
                # Compute average tile time in milliseconds. The raw time is in
                # nanoseconds. Fix: divide as floats *before* rounding —
                # `float(time / count)` truncated via integer division first.
                tile_time_avg[size] = (float(total_time) / count) / float(1000 * 1000)
                logging.debug('tile-size: %s count: %d time: %d avg: %f',
                              size, count, total_time, tile_time_avg[size])
        return result
class Fractal(webapp2.RequestHandler):
    """Fractal demo: manages a fleet of GCE VMs that render Mandelbrot tiles."""

    @oauth_decorator.oauth_required
    @data_handler.data_required
    def get(self):
        """Show main page of Fractal demo."""
        template = jinja_environment.get_template(
            'demos/%s/templates/index.html' % DEMO_NAME)

        data = data_handler.stored_user_data
        # NOTE(review): gce_project_id is fetched but never used below.
        gce_project_id = data[user_data.GCE_PROJECT_ID]
        gce_load_balancer_ip = self._get_lb_servers()
        self.response.out.write(template.render({
            'demo_name': DEMO_NAME,
            'lb_enabled': bool(gce_load_balancer_ip),
            'lb_ip': ', '.join(gce_load_balancer_ip),
        }))

    @oauth_decorator.oauth_required
    @data_handler.data_required
    def get_instances(self):
        """List instances.

        Uses app engine app identity to retrieve an access token for the app
        engine service account. No client OAuth required. External IP is used
        to determine if the instance is actually running.
        """
        gce_project = self._create_gce()
        instances = gce_appengine.GceAppEngine().run_gce_request(
            self,
            gce_project.list_instances,
            'Error listing instances: ',
            filter='name eq ^%s-.*' % self.instance_prefix())

        # A map of instanceName -> (ip, RPC)
        health_rpcs = {}

        # Convert instance info to dict and check server status.
        num_running = 0
        instance_dict = {}
        if instances:
            for instance in instances:
                instance_record = {}
                instance_dict[instance.name] = instance_record
                if instance.status:
                    instance_record['status'] = instance.status
                else:
                    instance_record['status'] = 'OTHER'
                # Find the first NAT'd external IP on any interface.
                ip = None
                for interface in instance.network_interfaces:
                    for config in interface.get('accessConfigs', []):
                        if 'natIP' in config:
                            ip = config['natIP']
                            instance_record['externalIp'] = ip
                            break
                    if ip: break

                # Ping the instance server. Grab stats from /debug/vars.
                if ip and instance.status == 'RUNNING':
                    num_running += 1
                    # Cache-busting timestamp query param.
                    health_url = 'http://%s/debug/vars?t=%d' % (ip, int(time.time()))
                    logging.debug('Health checking %s', health_url)
                    rpc = urlfetch.create_rpc(deadline = HEALTH_CHECK_TIMEOUT)
                    urlfetch.make_fetch_call(rpc, url=health_url)
                    health_rpcs[instance.name] = rpc

        # Ping through a LBs too. Only if we get success there do we know we are
        # really serving.
        loadbalancers = []
        lb_rpcs = {}
        if instances and len(instances) > 1:
            loadbalancers = self._get_lb_servers()
        if num_running > 0 and loadbalancers:
            for lb in loadbalancers:
                health_url = 'http://%s/health?t=%d' % (lb, int(time.time()))
                logging.debug('Health checking %s', health_url)
                rpc = urlfetch.create_rpc(deadline = HEALTH_CHECK_TIMEOUT)
                urlfetch.make_fetch_call(rpc, url=health_url)
                lb_rpcs[lb] = rpc

        # wait for RPCs to complete and update dict as necessary
        vars_aggregator = ServerVarsAggregator()

        # TODO: there is significant duplication here. Refactor.
        for (instance_name, rpc) in health_rpcs.items():
            result = None
            instance_record = instance_dict[instance_name]
            try:
                result = rpc.get_result()
                # "memstats" in the body is the marker that the Go server
                # answered (rather than an error page).
                if result and "memstats" in result.content:
                    logging.debug('%s healthy!', instance_name)
                    instance_record['status'] = 'SERVING'
                    instance_vars = {}
                    try:
                        instance_vars = json.loads(result.content)
                        instance_record['vars'] = instance_vars
                        vars_aggregator.aggregate_vars(instance_vars)
                    except ValueError as error:
                        logging.error('Error decoding vars json for %s: %s', instance_name, error)
                else:
                    logging.debug('%s unhealthy. Content: %s', instance_name, result.content)
            except urlfetch.Error as error:
                # Timeouts/connection failures just mean "not serving yet".
                logging.debug('%s unhealthy: %s', instance_name, str(error))

        # Check health status through the load balancer.
        # Healthy only if *every* LB answers "ok"; no LBs means unhealthy.
        loadbalancer_healthy = bool(lb_rpcs)
        for (lb, lb_rpc) in lb_rpcs.items():
            result = None
            try:
                result = lb_rpc.get_result()
                if result and "ok" in result.content:
                    logging.info('LB %s healthy: %s\n%s', lb, result.headers, result.content)
                else:
                    logging.info('LB %s result not okay: %s, %s', lb, result.status_code, result.content)
                    loadbalancer_healthy = False
                    break
            except urlfetch.Error as error:
                logging.info('LB %s fetch error: %s', lb, str(error))
                loadbalancer_healthy = False
                break

        response_dict = {
            'instances': instance_dict,
            'vars': vars_aggregator.get_aggregate(),
            'loadbalancers': loadbalancers,
            'loadbalancer_healthy': loadbalancer_healthy,
        }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(response_dict))

    @oauth_decorator.oauth_required
    @data_handler.data_required
    def set_instances(self):
        """Start/stop instances so we have the requested number running."""
        gce_project = self._create_gce()
        self._setup_firewall(gce_project)
        image = self._get_image(gce_project)
        disks = self._get_disks(gce_project)

        # Get the list of instances to insert.
        num_instances = int(self.request.get('num_instances'))
        target = self._get_instance_list(
            gce_project, num_instances, image, disks)
        target_set = set()
        target_map = {}
        for instance in target:
            target_set.add(instance.name)
            target_map[instance.name] = instance

        # Get the list of instances running
        current = gce_appengine.GceAppEngine().run_gce_request(
            self,
            gce_project.list_instances,
            'Error listing instances: ',
            filter='name eq ^%s-.*' % self.instance_prefix())
        current_set = set()
        current_map = {}
        for instance in current:
            current_set.add(instance.name)
            current_map[instance.name] = instance

        # Add the new instances (reconcile by set difference on names).
        to_add_set = target_set - current_set
        to_add = [target_map[name] for name in to_add_set]
        if to_add:
            gce_appengine.GceAppEngine().run_gce_request(
                self,
                gce_project.bulk_insert,
                'Error inserting instances: ',
                resources=to_add)

        # Remove the old instances
        to_remove_set = current_set - target_set
        to_remove = [current_map[name] for name in to_remove_set]
        if to_remove:
            gce_appengine.GceAppEngine().run_gce_request(
                self,
                gce_project.bulk_delete,
                'Error deleting instances: ',
                resources=to_remove)

        logging.info("current_set: %s", current_set)
        logging.info("target_set: %s", target_set)
        logging.info("to_add_set: %s", to_add_set)
        logging.info("to_remove_set: %s", to_remove_set)

    @oauth_decorator.oauth_required
    @data_handler.data_required
    def cleanup(self):
        """Stop instances using the gce_appengine helper class."""
        gce_project = self._create_gce()
        gce_appengine.GceAppEngine().delete_demo_instances(
            self, gce_project, self.instance_prefix())

    def _get_lb_servers(self):
        # Returns the configured load-balancer IPs, or [] when none are set.
        data = data_handler.stored_user_data
        return data.get(user_data.GCE_LOAD_BALANCER_IP, [])

    def instance_prefix(self):
        """Return a prefix based on a request/query params."""
        tag = self.request.get('tag')
        prefix = DEMO_NAME
        if tag:
            prefix = prefix + '-' + tag
        return prefix

    def _create_gce(self):
        # Build a GceProject bound to the stored project/zone and the
        # current user's OAuth credentials.
        gce_project_id = data_handler.stored_user_data[user_data.GCE_PROJECT_ID]
        gce_zone_name = data_handler.stored_user_data[user_data.GCE_ZONE_NAME]
        return gce.GceProject(oauth_decorator.credentials,
                              project_id=gce_project_id,
                              zone_name=gce_zone_name)

    def _setup_firewall(self, gce_project):
        "Create the firewall if it doesn't exist."
        firewalls = gce_project.list_firewalls()
        firewall_names = [firewall.name for firewall in firewalls]
        if not FIREWALL in firewall_names:
            firewall = gce.Firewall(
                name=FIREWALL,
                target_tags=[DEMO_NAME],
                description=FIREWALL_DESCRIPTION)
            gce_project.insert(firewall)

    def _get_image(self, gce_project):
        """Returns the appropriate image to use.

        Args:
          gce_project: An instance of gce.GceProject

        Returns: (project, image_name) for the image to use.
        """
        images = gce_project.list_images(filter='name eq ^%s$' % CUSTOM_IMAGE)
        if images:
            return (gce_project.project_id, CUSTOM_IMAGE)
        # Fall back to a stock Google image (name chosen by the GCE library).
        return ('google', None)

    def _get_disks(self, gce_project):
        """Get boot disks for VMs."""
        disks_array = gce_project.list_disks(
            filter='name eq ^boot-%s-.*' % self.instance_prefix())
        disks = {}
        for d in disks_array:
            disks[d.name] = d
        return disks

    def _get_instance_metadata(self, gce_project, instance_names):
        """The metadata values to pass into the instance."""
        inline_values = {
            'goargs': GO_ARGS,
        }
        file_values = {
            'startup-script': STARTUP_SCRIPT,
            'goprog': GO_PROGRAM,
        }

        # Try and use LBs if we have any. But only do that if we have more than one
        # instance.
        if instance_names:
            tile_servers = ''
            if len(instance_names) > 1:
                tile_servers = self._get_lb_servers()
            if not tile_servers:
                tile_servers = instance_names
            tile_servers = ','.join(tile_servers)
            inline_values['goargs'] += ' %s%s' %(GO_TILESERVER_FLAG, tile_servers)

        metadata = []
        for k, v in inline_values.items():
            metadata.append({'key': k, 'value': v})
        # File-backed values are read from disk and inlined into metadata.
        for k, fv in file_values.items():
            v = open(fv, 'r').read()
            metadata.append({'key': k, 'value': v})
        return metadata

    def _get_instance_list(self, gce_project, num_instances, image, disks):
        """Get a list of instances to start.

        Args:
          gce_project: An instance of gce.GceProject.
          num_instances: The number of instances to start.
          image: tuple with (project_name, image_name) for the image to use.
          disks: A dictionary of disk_name -> disk resources

        Returns:
          A list of gce.Instances.
        """
        instance_names = []
        for i in range(num_instances):
            instance_names.append('%s-%02d' % (self.instance_prefix(), i))

        instance_list = []
        for instance_name in instance_names:
            disk_name = 'boot-%s' % instance_name
            disk = disks.get(disk_name, None)
            disk_mounts = []
            image_project_id = None
            image_name = None
            kernel = None
            if disk:
                # Reuse an existing boot disk; the kernel must then be
                # specified explicitly.
                dm = gce.DiskMount(disk=disk, boot=True)
                kernel = gce_project.settings['compute']['kernel']
                disk_mounts.append(dm)
            else:
                image_project_id, image_name = image

            gce_zone_name = data_handler.stored_user_data[user_data.GCE_ZONE_NAME]

            # Define a network interfaces list here that requests an ephemeral
            # external IP address. We will apply this configuration to all VMs
            # started by the fractal app.
            network = gce.Network('default')
            network.gce_project = gce_project
            ext_net = [{ 'network': network.url,
                         'accessConfigs': [{ 'name': 'External IP access config',
                                             'type': 'ONE_TO_ONE_NAT'
                                           }]
                       }]

            instance = gce.Instance(
                name=instance_name,
                machine_type_name=MACHINE_TYPE,
                zone_name=gce_zone_name,
                image_name=image_name,
                image_project_id=image_project_id,
                disk_mounts=disk_mounts,
                kernel=kernel,
                network_interfaces=ext_net,
                tags=[DEMO_NAME, self.instance_prefix()],
                metadata=self._get_instance_metadata(gce_project, instance_names),
                service_accounts=gce_project.settings['cloud_service_account'])
            instance_list.append(instance)
        return instance_list
# WSGI routing: one handler class, dispatched by route + HTTP method.
app = webapp2.WSGIApplication(
    [
        ('/%s' % DEMO_NAME, Fractal),
        webapp2.Route('/%s/instance' % DEMO_NAME,
                      handler=Fractal, handler_method='get_instances',
                      methods=['GET']),
        webapp2.Route('/%s/instance' % DEMO_NAME,
                      handler=Fractal, handler_method='set_instances',
                      methods=['POST']),
        webapp2.Route('/%s/cleanup' % DEMO_NAME,
                      handler=Fractal, handler_method='cleanup',
                      methods=['POST']),
        (data_handler.url_path, data_handler.data_handler),
    ], debug=True)
| |
# -*- coding: utf-8 -*-
#!/usr/bin/python
import datetime
from passlib.hash import bcrypt
from faker import Factory
import json
import logging
import logging.handlers
import os
from peewee import *
from playhouse.postgres_ext import *
import shortuuid
from slugify import slugify, Slugify
import unittest
# Filesystem roots for generated site output and per-user JSON exports.
SITES_DIR = "/home/jason/Desktop/muckamuck_shit/sites"
json_dir = "/home/jason/Desktop/muckamuck_shit"
user_json_dir = os.path.join(json_dir, "user")
# Shared fake-data generator and lower-casing slugifier used module-wide.
fake = Factory.create()
sluggy = Slugify(to_lower=True)
def jsonifyer(someDict):
    """Serialize *someDict* to pretty-printed JSON with stable key order."""
    dump_options = {
        "sort_keys": True,
        "indent": 4,
        "separators": (',', ': '),
    }
    return json.dumps(someDict, **dump_options)
####################################################
# Logging Boilerplate
####################################################
# Module logger: DEBUG and above goes to both the console and a
# size-capped rotating log file.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)15s - %(levelname)s - %(message)s')
console_handle = logging.StreamHandler()
console_handle.setFormatter(formatter)
logger.addHandler(console_handle)
LOG_FILENAME = "muchamuck_models.log"
# Fix: the RotatingFileHandler used to be immediately overwritten by a plain
# FileHandler, so rotation (5 MB per file, 5 backups) never took effect.
file_handle = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=5 * 1024 * 1024, backupCount=5)
file_handle.setFormatter(formatter)
logger.addHandler(file_handle)
db = PostgresqlExtDatabase('directorium', user='jason', register_hstore=False)
####################################################
# Base Model
####################################################
class BaseModel(Model):
    """Common base for all tables: creation timestamp plus a short,
    URL-friendly unique identifier."""
    created_date = DateTimeField(default=datetime.datetime.now)
    uuid = CharField(index=True)
    class Meta:
        database = db
    def to_json(self):
        """Serialize this row via the subclass's to_dict()."""
        return jsonifyer(self.to_dict())
    def generate_UUID(self):
        """Assign a fresh random short UUID to this row (does not save)."""
        self.uuid = shortuuid.ShortUUID().random()
        #logger.debug("Generated UUID: " + self.uuid)
####################################################
# User Model
####################################################
class User(BaseModel):
    """Account record.  `email` is the unique login id; the public_* fields
    are the identity exposed on generated sites."""
    admin = BooleanField(default=False)
    email = CharField(index=True, unique=True)
    # bcrypt hash (see encrypt_password); NULL until a password is set.
    password = CharField(null=True)
    public_name = CharField(default="")
    public_email = CharField(default="")
    def json_location(self):
        """Path of this user's exported JSON file, keyed by uuid."""
        return os.path.join(user_json_dir, self.uuid + ".json")
    def encrypt_password(self, password):
        """Store a bcrypt hash of *password*; the plaintext is never kept."""
        self.password = bcrypt.encrypt(password)
    def verify_password(self, password):
        """Return True iff *password* matches the stored bcrypt hash.

        Raises TypeError when no password was ever set (self.password is None).
        """
        return bcrypt.verify(password, self.password)
    def to_dict(self):
        """Public-facing dict: exposes public_email/public_name, never the
        login email or password hash."""
        userDict = {}
        userDict["created_date"] = self.created_date.isoformat()
        userDict["email"] = self.public_email
        userDict["name"] = self.public_name
        userDict["uuid"] = self.uuid
        return userDict
    def dummy(self):
        """Fill this instance with randomly generated fake data (no save)."""
        self.generate_UUID()
        self.email = fake.free_email()
        self.encrypt_password(fake.password())
        self.public_name = fake.name()
        self.public_email = fake.free_email()
def create_dummy_user():
    """Return a new, unsaved User populated with fake data."""
    # Delegates to User.dummy(), which performs the exact same field
    # population sequence this helper used to inline.
    user = User()
    user.dummy()
    return user
####################################################
# Site Model
####################################################
class Site(BaseModel):
    """A generated static site owned by a User."""
    description = CharField(null=True)
    domain = CharField(unique=True)
    language = CharField(default="en-us")
    owner = ForeignKeyField(User)
    title = CharField()
    def site_dir(self):
        """Root output directory for this site, keyed by uuid."""
        return os.path.join(SITES_DIR, self.uuid)
    def site_post_dir(self):
        """Directory holding rendered posts."""
        return os.path.join(self.site_dir(), "post")
    def site_json_dir(self):
        """Directory holding per-post JSON exports."""
        return os.path.join(self.site_dir(), "json")
    def make_dir(self):
        """Create the site's output directory tree if missing (idempotent)."""
        if not os.path.exists(self.site_dir()):
            os.makedirs(self.site_dir())
        if not os.path.exists(self.site_post_dir()):
            os.makedirs(self.site_post_dir())
        if not os.path.exists(self.site_json_dir()):
            os.makedirs(self.site_json_dir())
    def to_dict(self):
        """Plain-dict representation, embedding the owner's public dict."""
        siteDict = {}
        siteDict["created_date"] = self.created_date.isoformat()
        siteDict['description'] = self.description
        siteDict['domain'] = self.domain
        siteDict['language'] = self.language
        siteDict['owner'] = self.owner.to_dict()
        siteDict['title'] = self.title
        siteDict['uuid'] = self.uuid
        return siteDict
    def dummy(self, owner):
        """Fill this instance with fake data owned by *owner* (no save)."""
        self.generate_UUID()
        self.description = fake.text(max_nb_chars=200)
        self.domain = fake.domain_name()
        self.language = fake.locale()
        self.owner = owner
        self.title = fake.company()
def create_dummy_site(owner):
    """Return a new, unsaved Site owned by *owner*, filled with fake data.

    Unlike Site.dummy(), the domain is a synthetic *.muckamuck.net name so
    collisions with real faker domains are unlikely.
    """
    new_site = Site()
    new_site.generate_UUID()
    new_site.description = fake.text(max_nb_chars=200)
    word_parts = (fake.word(), fake.word(), fake.word())
    new_site.domain = "%s_%s_%s.muckamuck.net" % word_parts
    new_site.language = fake.locale()
    new_site.owner = owner
    new_site.title = fake.company()
    return new_site
def get_random_site():
    """Pick one Site row at random (database-side ordering)."""
    random_pick = Site.select().order_by(fn.Random()).limit(1)
    return random_pick[0]
####################################################
# Theme Model
####################################################
class Theme(BaseModel):
    """HTML render template for a site; at most one theme per site."""
    site = ForeignKeyField(Site, unique=True)
    template = TextField()
def create_dummy_theme(site):
    """Return a new, unsaved Theme for *site* using the bundled dummy template.

    Fix: the template file is now read via a context manager so the handle is
    always closed (the original left the file open).
    """
    theme = Theme()
    theme.generate_UUID()
    theme.site = site
    with open("render_templates/dummy_theme.html", "r") as file_object:
        theme.template = file_object.read()
    return theme
####################################################
# Membership Model
####################################################
class Membership(BaseModel):
    """Links a User to a Site they belong to."""
    # NOTE(review): unique=True permits only ONE membership row per site,
    # which makes the composite (site, user) unique index below redundant —
    # confirm whether multiple members per site were intended.
    site = ForeignKeyField(Site, unique=True)
    user = ForeignKeyField(User)
    class Meta:
        # Unique together: at most one membership per (site, user) pair.
        indexes = (
            (('site', 'user'), True),
        )
def check_membership(site, user):
    """Return True iff *user* already has a Membership row for *site*."""
    pair_filter = (Membership.site == site) & (Membership.user == user)
    try:
        Membership.get(pair_filter)
        return True
    except Membership.DoesNotExist:
        return False
def make_member(site, user):
    """Return a new, unsaved Membership linking *user* to *site*."""
    link = Membership()
    link.generate_UUID()
    link.site = site
    link.user = user
    return link
####################################################
# Page Model
####################################################
class Page(BaseModel):
    """A standalone page on a site; slug is unique per site."""
    author = ForeignKeyField(User)
    body = TextField()
    description = CharField()
    slug = CharField()
    site = ForeignKeyField(Site)
    title = CharField()
    class Meta:
        # Unique together: a slug may appear once per site.
        indexes = (
            (('site', 'slug'), True),
        )
    def to_dict(self):
        """Plain-dict representation with author and site embedded.

        NOTE(review): `description` is not serialized here although
        Post.to_dict includes it — confirm whether that is intentional.
        """
        pageDict = {}
        pageDict["created_date"] = self.created_date.isoformat()
        pageDict['author'] = self.author.to_dict()
        pageDict['body'] = self.body
        pageDict['site'] = self.site.to_dict()
        pageDict['slug'] = self.slug
        pageDict['title'] = self.title
        pageDict['uuid'] = self.uuid
        return pageDict
def create_dummy_page(site, author):
    """Return a new, unsaved Page for *site* written by *author*.

    Fixes:
    * faker's words(nb=3) returns a LIST; the original assigned it straight
      to the CharField title and then slugified the list.  Join the words
      into a string first, mirroring how Post titles are built.
    * `description` is a required CharField but was never set, so save()
      could not succeed; populate it like create_dummy_post does.
    """
    page = Page()
    page.generate_UUID()
    page.author = author
    page.body = fake.paragraph(nb_sentences=5, variable_nb_sentences=True)
    page.description = fake.text(max_nb_chars=200)
    page.site = site
    page.title = " ".join(fake.words(nb=3))
    page.slug = sluggy(page.title)
    return page
####################################################
# Post Model
####################################################
class Post(Page):
    """A blog post: inherits author/body/description/slug/site/title from
    Page and adds free-form tags."""
    tags = ArrayField(CharField)
    def to_dict(self):
        """Plain-dict representation (includes description and tags)."""
        postDict = {}
        postDict["created_date"] = self.created_date.isoformat()
        postDict['author'] = self.author.to_dict()
        postDict['body'] = self.body
        postDict['description'] = self.description
        postDict['site'] = self.site.to_dict()
        postDict['slug'] = self.slug
        postDict['tags'] = self.tags
        postDict['title'] = self.title
        postDict['uuid'] = self.uuid
        return postDict
    def dummy(self, site, author):
        """Fill this instance with fake data for *site*/*author* (no save)."""
        self.generate_UUID()
        self.author = author
        self.body = fake.paragraph(nb_sentences=15, variable_nb_sentences=True)
        self.description = fake.text(max_nb_chars=200)
        self.site = site
        self.title = fake.word() + "_" + fake.company() + " " + fake.word() + " " + fake.word()
        self.slug = sluggy(self.title)
        self.tags = fake.words(nb=3)
        self.tags.append("tag")
    def json_path(self):
        """Path of this post's JSON export inside its site's json dir."""
        return os.path.join(self.site.site_json_dir(), self.slug + ".json")
    def write_json(self):
        """Write this post's JSON representation to json_path().

        Fixes: the original called self.post_json_path(), which does not
        exist (the method is named json_path), and leaked the file handle.
        NOTE(review): "wb" + a str payload suggests Python 2; under
        Python 3 this write would need bytes — confirm target runtime.
        """
        with open(self.json_path(), "wb") as file_object:
            file_object.write(self.to_json())
def create_dummy_post(site, author):
    """Return a new, unsaved Post for *site* written by *author*."""
    new_post = Post()
    new_post.generate_UUID()
    new_post.author = author
    new_post.body = fake.paragraph(nb_sentences=5, variable_nb_sentences=True)
    new_post.description = fake.text(max_nb_chars=200)
    new_post.site = site
    new_post.title = fake.word() + "_" + fake.company() + " " + fake.word() + " " + fake.word()
    new_post.slug = sluggy(new_post.title)
    new_post.tags = fake.words(nb=3)
    # Guarantee at least one known tag on every dummy post.
    new_post.tags.append("tag")
    return new_post
def get_random_post_from_site(site_uuid):
    """Return one random Post belonging to the site identified by *site_uuid*."""
    site = Site.select().where(Site.uuid == site_uuid).get()
    random_pick = Post.select().where(Post.site == site).order_by(fn.Random()).limit(1)
    return random_pick[0]
####################################################
# Tags
####################################################
def get_site_tags(uuid):
    """Return the set of distinct tags used by every Post of the given site."""
    site = Site.select().where(Site.uuid == uuid).get()
    posts = Post.select().where(Post.site == site).iterator()
    return {tag for post in posts for tag in post.tags}
def reset_db():
    """Drop and recreate every table — destroys all data."""
    all_models = [User, Site, Page, Post, Membership, Theme]
    db.drop_tables(all_models)
    db.create_tables(all_models)
####################################################
# Tests
####################################################
#create_dummy_user().save()
class FooTest(unittest.TestCase):
    """Round-trip tests for the models; each test runs on a fresh schema.

    Requires the `directorium` postgres database to be reachable.
    """
    def setUp(self):
        """Connect and rebuild every table from scratch."""
        db.connect()
        try:
            db.drop_tables([User, Site, Page, Post, Membership, Theme])
        except Exception:
            # Tables may not exist on the first run; narrowed from a bare
            # `except:` so KeyboardInterrupt/SystemExit still propagate.
            pass
        db.create_tables([User, Site, Page, Post, Membership, Theme])
    def tearDown(self):
        """Close the connection opened in setUp (previously leaked)."""
        db.close()
    ####################################################
    # Membership Tests
    ####################################################
    def test_Check_Membership(self):
        """A saved membership is reported by check_membership()."""
        user = create_dummy_user()
        user.save()
        site = create_dummy_site(user)
        site.save()
        membership = make_member(site, user)
        membership.save()
        self.assertTrue(check_membership(site, user))
    def test_Check_No_Membership(self):
        """check_membership() is False when no membership row exists."""
        user = create_dummy_user()
        user.save()
        site = create_dummy_site(user)
        site.save()
        self.assertFalse(check_membership(site, user))
    def test_Duplicate_Membership(self):
        """Saving the same (site, user) pair twice violates the unique index."""
        user = create_dummy_user()
        user.save()
        site = create_dummy_site(user)
        site.save()
        membership1 = make_member(site, user)
        membership1.save()
        with self.assertRaises(IntegrityError):
            membership2 = make_member(site, user)
            membership2.save()
    ####################################################
    # Page Tests
    ####################################################
    def test_Count_Page(self):
        """Ten saved pages are all visible through Page.select()."""
        user = create_dummy_user()
        user.save()
        site = create_dummy_site(user)
        site.save()
        for i in range(10):
            page = create_dummy_page(site, user)
            page.save()
        self.assertEqual(Page.select().count(), 10)
    ####################################################
    # Post Tests
    ####################################################
    def test_Count_Posts(self):
        """Ten saved posts are all visible through Post.select().

        Fix: this method was also named test_Count_Page, which silently
        shadowed the page-count test above so only one of the two ever ran.
        """
        user = create_dummy_user()
        user.save()
        site = create_dummy_site(user)
        site.save()
        for i in range(10):
            post = Post()
            post.dummy(site, user)
            post.save()
        self.assertEqual(Post.select().count(), 10)
    ####################################################
    # Site Tests
    ####################################################
    def test_Count_Sites(self):
        """Ten saved sites are all visible through Site.select()."""
        user = create_dummy_user()
        user.save()
        for i in range(10):
            site = create_dummy_site(user)
            site.save()
        self.assertEqual(Site.select().count(), 10)
    ####################################################
    # User Tests
    ####################################################
    def test_Count_Users(self):
        """Ten saved users are all visible through User.select()."""
        for i in range(10):
            user = create_dummy_user()
            user.save()
        self.assertEqual(User.select().count(), 10)
    def test_Create_User(self):
        """A saved user can be fetched back by email with the same uuid."""
        user_email = fake.free_email()
        user = User()
        user.generate_UUID()
        user.email = user_email
        user.save()
        fetched = User.get(User.email == user_email)
        self.assertEqual(fetched.uuid, user.uuid)
    def test_Create_Duplicate_User(self):
        """Saving a second user with the same email violates the unique index."""
        user_email = fake.free_email()
        user1 = User()
        user1.generate_UUID()
        user1.email = user_email
        user1.save()
        user2 = User()
        user2.generate_UUID()
        user2.email = user_email
        with self.assertRaises(IntegrityError):
            user2.save()
    def test_User_Password(self):
        """verify_password() accepts the password it was hashed from."""
        user_email = fake.free_email()
        user_password = fake.password()
        user = User()
        user.email = user_email
        user.encrypt_password(user_password)
        user.generate_UUID()
        user.save()
        self.assertTrue(user.verify_password(user_password))
    def test_User_BlankPassword(self):
        """verify_password() raises TypeError when no hash was ever stored."""
        user_email = fake.free_email()
        user_password = fake.password()
        user = User()
        user.email = user_email
        user.generate_UUID()
        user.save()
        with self.assertRaises(TypeError):
            user.verify_password(user_password)
    def test_User_BadPassword(self):
        """verify_password() rejects a wrong password."""
        user_email = fake.free_email()
        user_password = fake.password()
        user_BADpassword = fake.password()
        user = User()
        user.email = user_email
        user.encrypt_password(user_password)
        user.generate_UUID()
        user.save()
        self.assertFalse(user.verify_password(user_BADpassword))
#if __name__ == '__main__':
# unittest.main()
| |
"""
test cython .agg behavior
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_float_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
Timedelta,
Timestamp,
bdate_range,
)
import pandas._testing as tm
@pytest.mark.parametrize(
    "op_name",
    [
        "count",
        "sum",
        "std",
        "var",
        "sem",
        "mean",
        pytest.param(
            "median",
            # ignore mean of empty slice
            # and all-NaN
            marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
        ),
        "prod",
        "min",
        "max",
    ],
)
def test_cythonized_aggers(op_name):
    """Cython groupby reductions must match applying the same reduction to
    each group by hand, for single- and multi-key groupbys."""
    data = {
        "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
        "B": ["A", "B"] * 6,
        "C": np.random.randn(12),
    }
    df = DataFrame(data)
    # Inject NaNs so each op must handle missing values inside groups.
    df.loc[2:10:2, "C"] = np.nan
    op = lambda x: getattr(x, op_name)()
    # single column
    grouped = df.drop(["B"], axis=1).groupby("A")
    exp = {cat: op(group["C"]) for cat, group in grouped}
    exp = DataFrame({"C": exp})
    exp.index.name = "A"
    result = op(grouped)
    tm.assert_frame_equal(result, exp)
    # multiple columns
    grouped = df.groupby(["A", "B"])
    expd = {}
    for (cat1, cat2), group in grouped:
        expd.setdefault(cat1, {})[cat2] = op(group["C"])
    exp = DataFrame(expd).T.stack(dropna=False)
    exp.index.names = ["A", "B"]
    exp.name = "C"
    result = op(grouped)["C"]
    # NOTE(review): only sum/prod are asserted on the multi-key path —
    # presumably the other ops diverge on all-NaN groups; confirm before
    # widening this assertion.
    if op_name in ["sum", "prod"]:
        tm.assert_series_equal(result, exp)
def test_cython_agg_boolean():
    """Cython-path mean over a boolean column matches the generic agg path."""
    frame = DataFrame(
        {
            "a": np.random.randint(0, 5, 50),
            "b": np.random.randint(0, 2, 50).astype("bool"),
        }
    )
    grouped = frame.groupby("a")["b"]
    fast_path = grouped.mean()
    generic_path = grouped.agg(np.mean)
    tm.assert_series_equal(fast_path, generic_path)
def test_cython_agg_nothing_to_agg():
    """Aggregating a purely non-numeric column either raises, or — with the
    deprecated implicit numeric_only drop — warns and returns an empty frame."""
    frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
    with pytest.raises(NotImplementedError, match="does not implement"):
        frame.groupby("a")["b"].mean(numeric_only=True)
    with pytest.raises(TypeError, match="Could not convert (foo|bar)*"):
        frame.groupby("a")["b"].mean()
    frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
    # Deprecated path: silently dropping "b" must raise a FutureWarning and
    # yield an empty result indexed by the sorted unique group keys.
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        result = frame[["b"]].groupby(frame["a"]).mean()
    expected = DataFrame([], index=frame["a"].sort_values().drop_duplicates())
    tm.assert_frame_equal(result, expected)
def test_cython_agg_nothing_to_agg_with_dates():
    """numeric_only mean over a datetime column is not implemented."""
    frame = DataFrame(
        {
            "a": np.random.randint(0, 5, 50),
            "b": ["foo", "bar"] * 25,
            "dates": pd.date_range("now", periods=50, freq="T"),
        }
    )
    with pytest.raises(NotImplementedError, match="does not implement"):
        frame.groupby("b").dates.mean(numeric_only=True)
def test_cython_agg_frame_columns():
    # #2113: aggregating along the column axis must keep working when
    # repeated (exercises internal groupby caching).
    df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})
    for _ in range(4):
        df.groupby(level=0, axis="columns").mean()
def test_cython_agg_return_dict():
    # GH 16741: an agg UDF is allowed to return a dict per group.
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
            "C": np.random.randn(8),
            "D": np.random.randn(8),
        }
    )
    counted = df.groupby("A")["B"].agg(lambda grp: grp.value_counts().to_dict())
    expected = Series(
        [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
        index=Index(["bar", "foo"], name="A"),
        name="B",
    )
    tm.assert_series_equal(counted, expected)
def test_cython_fail_agg():
    # Grouped sum over object (string) data: the cython path must fall back
    # and agree with the explicit agg path.
    business_days = bdate_range("1/1/2000", periods=50)
    letters = Series(["A", "B", "C", "D", "E"] * 10, index=business_days)
    by_month = letters.groupby(lambda ts: ts.month)
    summed = by_month.sum()
    via_agg = by_month.agg(np.sum)
    tm.assert_series_equal(summed, via_agg)
@pytest.mark.parametrize(
    "op, targop",
    [
        ("mean", np.mean),
        ("median", np.median),
        ("var", np.var),
        ("add", np.sum),
        ("prod", np.prod),
        ("min", np.min),
        ("max", np.max),
        ("first", lambda x: x.iloc[0]),
        ("last", lambda x: x.iloc[-1]),
    ],
)
def test__cython_agg_general(op, targop):
    """The private _cython_agg_general fast path must agree with .agg of the
    corresponding numpy/positional reduction."""
    df = DataFrame(np.random.randn(1000))
    labels = np.random.randint(0, 50, size=1000).astype(float)
    result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True)
    expected = df.groupby(labels).agg(targop)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "op, targop",
    [
        ("mean", np.mean),
        ("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
        ("var", lambda x: np.var(x, ddof=1)),
        ("min", np.min),
        ("max", np.max),
    ],
)
def test_cython_agg_empty_buckets(op, targop, observed):
    """Cython aggregation over categorical bins that are mostly empty must
    match the python fallback (the `observed` fixture toggles empty bins)."""
    df = DataFrame([11, 12, 13])
    grps = range(0, 55, 5)
    # calling _cython_agg_general directly, instead of via the user API
    # which sets different values for min_count, so do that here.
    g = df.groupby(pd.cut(df[0], grps), observed=observed)
    result = g._cython_agg_general(op, alt=None, numeric_only=True)
    g = df.groupby(pd.cut(df[0], grps), observed=observed)
    expected = g.agg(lambda x: targop(x))
    tm.assert_frame_equal(result, expected)
def test_cython_agg_empty_buckets_nanops(observed):
    # GH-18869 can't call nanops on empty groups, so hardcode expected
    # for these
    df = DataFrame([11, 12, 13], columns=["a"])
    grps = range(0, 25, 5)
    # add / sum: empty bins must produce the additive identity (0).
    result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
        "add", alt=None, numeric_only=True
    )
    intervals = pd.interval_range(0, 20, freq=5)
    expected = DataFrame(
        {"a": [0, 0, 36, 0]},
        index=pd.CategoricalIndex(intervals, name="a", ordered=True),
    )
    if observed:
        # With observed=True the empty bins are dropped from the result.
        expected = expected[expected.a != 0]
    tm.assert_frame_equal(result, expected)
    # prod: empty bins must produce the multiplicative identity (1).
    result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
        "prod", alt=None, numeric_only=True
    )
    expected = DataFrame(
        {"a": [1, 1, 1716, 1]},
        index=pd.CategoricalIndex(intervals, name="a", ordered=True),
    )
    if observed:
        expected = expected[expected.a != 1]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["first", "last", "max", "min"])
@pytest.mark.parametrize(
    "data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
)
def test_cython_with_timestamp_and_nat(op, data):
    # https://github.com/pandas-dev/pandas/issues/19526
    # One group holds a real datetime-like value, the other only NaT; each
    # listed reduction must return the value and NaT respectively.
    df = DataFrame({"a": [0, 1], "b": [data, NaT]})
    expected = DataFrame({"b": [data, NaT]}, index=Index([0, 1], name="a"))
    result = df.groupby("a").aggregate(op)
    tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
    "agg",
    [
        "min",
        "max",
        "count",
        "sum",
        "prod",
        "var",
        "mean",
        "median",
        "ohlc",
        "cumprod",
        "cumsum",
        "shift",
        "any",
        "all",
        "quantile",
        "first",
        "last",
        "rank",
        "cummin",
        "cummax",
    ],
)
def test_read_only_buffer_source_agg(agg):
    # https://github.com/pandas-dev/pandas/issues/36014
    # Aggregations must not require a writeable buffer: results on a
    # read-only block must match results on a fresh (writeable) copy.
    df = DataFrame(
        {
            "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0],
            "species": ["setosa", "setosa", "setosa", "setosa", "setosa"],
        }
    )
    # Reaches into the block manager to mark the numeric array read-only.
    df._mgr.arrays[0].flags.writeable = False
    result = df.groupby(["species"]).agg({"sepal_length": agg})
    expected = df.copy().groupby(["species"]).agg({"sepal_length": agg})
    tm.assert_equal(result, expected)
@pytest.mark.parametrize(
    "op_name",
    [
        "count",
        "sum",
        "std",
        "var",
        "sem",
        "mean",
        "median",
        "prod",
        "min",
        "max",
    ],
)
def test_cython_agg_nullable_int(op_name):
    # ensure that the cython-based aggregations don't fail for nullable dtype
    # (eg https://github.com/pandas-dev/pandas/issues/37415)
    nullable_df = DataFrame(
        {
            "A": ["A", "B"] * 5,
            "B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"),
        }
    )
    result = getattr(nullable_df.groupby("A")["B"], op_name)()
    float_df = nullable_df.assign(B=nullable_df["B"].astype("float64"))
    expected = getattr(float_df.groupby("A")["B"], op_name)()
    if op_name != "count":
        # the result is not yet consistently using Int64/Float64 dtype,
        # so for now just checking the values by casting to float
        result = result.astype("float64")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("with_na", [True, False])
@pytest.mark.parametrize(
    "op_name, action",
    [
        # ("count", "always_int"),
        ("sum", "large_int"),
        # ("std", "always_float"),
        ("var", "always_float"),
        # ("sem", "always_float"),
        ("mean", "always_float"),
        ("median", "always_float"),
        ("prod", "large_int"),
        ("min", "preserve"),
        ("max", "preserve"),
        ("first", "preserve"),
        ("last", "preserve"),
    ],
)
@pytest.mark.parametrize(
    "data",
    [
        pd.array([1, 2, 3, 4], dtype="Int64"),
        pd.array([1, 2, 3, 4], dtype="Int8"),
        pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"),
        pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"),
        pd.array([True, True, False, False], dtype="boolean"),
    ],
)
def test_cython_agg_EA_known_dtypes(data, op_name, action, with_na):
    """Each masked-EA aggregation must produce the documented result dtype,
    via all four call paths (grouped frame/column, getattr/aggregate)."""
    if with_na:
        data[3] = pd.NA
    df = DataFrame({"key": ["a", "a", "b", "b"], "col": data})
    grouped = df.groupby("key")
    if action == "always_int":
        # always Int64
        expected_dtype = pd.Int64Dtype()
    elif action == "large_int":
        # for any int/bool use Int64, for float preserve dtype
        if is_float_dtype(data.dtype):
            expected_dtype = data.dtype
        else:
            expected_dtype = pd.Int64Dtype()
    elif action == "always_float":
        # for any int/bool use Float64, for float preserve dtype
        if is_float_dtype(data.dtype):
            expected_dtype = data.dtype
        else:
            expected_dtype = pd.Float64Dtype()
    elif action == "preserve":
        expected_dtype = data.dtype
    result = getattr(grouped, op_name)()
    assert result["col"].dtype == expected_dtype
    result = grouped.aggregate(op_name)
    assert result["col"].dtype == expected_dtype
    result = getattr(grouped["col"], op_name)()
    assert result.dtype == expected_dtype
    result = grouped["col"].aggregate(op_name)
    assert result.dtype == expected_dtype
| |
import os
import tempfile
import warnings
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.image import Image
except ImproperlyConfigured:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
from django.utils import six
class Foo(models.Model):
    # Simple model exercised by char/decimal field tests.
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
    """Callable default for Bar.a: the Foo row with pk 1."""
    return Foo.objects.get(id=1)
class Bar(models.Model):
    b = models.CharField(max_length=10)
    # ForeignKey whose default is supplied by a callable.
    a = models.ForeignKey(Foo, default=get_foo)
class Whiz(models.Model):
    # Grouped ("optgroup"-style) choices plus a loose trailing option.
    CHOICES = (
        ('Group 1', (
            (1, 'First'),
            (2, 'Second'),
        )
        ),
        ('Group 2', (
            (3, 'Third'),
            (4, 'Fourth'),
        )
        ),
        (0, 'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)
class Counter(six.Iterator):
    """Finite iterator of (n, 'val-n') pairs for n = 2..6, used as a lazily
    evaluated choices source."""
    def __init__(self):
        self.n = 1
    def __iter__(self):
        return self
    def __next__(self):
        # Guard clause instead of if/else: stop after five pairs.
        if self.n > 5:
            raise StopIteration
        self.n += 1
        return (self.n, 'val-' + str(self.n))
class WhizIter(models.Model):
    # choices supplied lazily by an iterator instance.
    c = models.IntegerField(choices=Counter(), null=True)
class WhizIterEmpty(models.Model):
    # choices supplied by an (immediately exhausted) generator.
    c = models.CharField(choices=(x for x in []), blank=True, max_length=1)
class BigD(models.Model):
    # Very high precision decimal: 38 digits, 30 of them decimal places.
    d = models.DecimalField(max_digits=38, decimal_places=30)
class FloatModel(models.Model):
    size = models.FloatField()
class BigS(models.Model):
    # Slug longer than the default 50-char limit.
    s = models.SlugField(max_length=255)
class SmallIntegerModel(models.Model):
    value = models.SmallIntegerField()
class IntegerModel(models.Model):
    value = models.IntegerField()
class BigIntegerModel(models.Model):
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null=True, blank=True)
class PositiveSmallIntegerModel(models.Model):
    value = models.PositiveSmallIntegerField()
class PositiveIntegerModel(models.Model):
    value = models.PositiveIntegerField()
class Post(models.Model):
    title = models.CharField(max_length=100)
    body = models.TextField()
class NullBooleanModel(models.Model):
    nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
    # default=None forces an explicit value on save.
    bfield = models.BooleanField(default=None)
    string = models.CharField(max_length=10, default='abc')
class DateTimeModel(models.Model):
    d = models.DateField()
    dt = models.DateTimeField()
    t = models.TimeField()
class PrimaryKeyCharModel(models.Model):
    # CharField used directly as the primary key.
    string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
    """Model with FKs to models with {Null,}BooleanField's, #15040"""
    bf = models.ForeignKey(BooleanModel)
    nbf = models.ForeignKey(NullBooleanModel)
class FkToChar(models.Model):
    """Model with FK to a model with a CharField primary key, #19299"""
    out = models.ForeignKey(PrimaryKeyCharModel)
class RenamedField(models.Model):
    # Field whose db/model name ("fieldname") differs from the attribute name.
    modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
    """One field of (nearly) every type, each with an explicit verbose name,
    for verbose_name introspection tests."""
    id = models.AutoField("verbose pk", primary_key=True)
    field1 = models.BigIntegerField("verbose field1")
    field2 = models.BooleanField("verbose field2", default=False)
    field3 = models.CharField("verbose field3", max_length=10)
    field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
    field5 = models.DateField("verbose field5")
    field6 = models.DateTimeField("verbose field6")
    field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
    field8 = models.EmailField("verbose field8")
    field9 = models.FileField("verbose field9", upload_to="unused")
    field10 = models.FilePathField("verbose field10")
    field11 = models.FloatField("verbose field11")
    # Don't want to depend on Pillow/PIL in this test
    #field_image = models.ImageField("verbose field")
    field12 = models.IntegerField("verbose field12")
    # Records (and thereby swallows) warnings raised while declaring
    # IPAddressField — presumably a deprecation warning; confirm.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        field13 = models.IPAddressField("verbose field13")
    field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
    field15 = models.NullBooleanField("verbose field15")
    field16 = models.PositiveIntegerField("verbose field16")
    field17 = models.PositiveSmallIntegerField("verbose field17")
    field18 = models.SlugField("verbose field18")
    field19 = models.SmallIntegerField("verbose field19")
    field20 = models.TextField("verbose field20")
    field21 = models.TimeField("verbose field21")
    field22 = models.URLField("verbose field22")
# This model isn't used in any test, just here to ensure it validates successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
    # max_digits == decimal_places: only magnitudes below 1 are representable.
    d = models.DecimalField(max_digits=3, decimal_places=3)
class DataModel(models.Model):
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()
###############################################################################
# FileField
class Document(models.Model):
    # Minimal model for FileField upload tests.
    myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If Pillow/PIL available, do these tests.
if Image:
    class TestImageFieldFile(ImageFieldFile):
        """
        Custom Field File class that records whether or not the underlying file
        was opened.
        """
        def __init__(self, *args, **kwargs):
            self.was_opened = False
            super(TestImageFieldFile, self).__init__(*args, **kwargs)
        def open(self):
            self.was_opened = True
            super(TestImageFieldFile, self).open()
    class TestImageField(ImageField):
        # ImageField variant whose file attribute tracks open() calls.
        attr_class = TestImageFieldFile
    # Set up a temp directory for file storage.
    temp_storage_dir = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
    temp_storage = FileSystemStorage(temp_storage_dir)
    temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
    class Person(models.Model):
        """
        Model that defines an ImageField with no dimension fields.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests')
    class AbsctractPersonWithHeight(models.Model):
        """
        Abstract model that defines an ImageField with only one dimension field
        to make sure the dimension update is correctly run on concrete subclass
        instance post-initialization.
        """
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height')
        mugshot_height = models.PositiveSmallIntegerField()
        class Meta:
            abstract = True
    class PersonWithHeight(AbsctractPersonWithHeight):
        """
        Concrete model that subclasses an abstract one with only one dimension
        field.
        """
        name = models.CharField(max_length=50)
    class PersonWithHeightAndWidth(models.Model):
        """
        Model that defines height and width fields after the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
    class PersonDimensionsFirst(models.Model):
        """
        Model that defines height and width fields before the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
    class PersonTwoImages(models.Model):
        """
        Model that:
        * Defines two ImageFields
        * Defines the height/width fields before the ImageFields
        * Has a nullable ImageField
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        headshot_height = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot_width = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot = TestImageField(blank=True, null=True,
                                  storage=temp_storage, upload_to='tests',
                                  height_field='headshot_height',
                                  width_field='headshot_width')
###############################################################################
| |
# Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from hy._compat import PY3
import hy.inspect
from hy.models import replace_hy_obj, HyExpression, HySymbol
from hy.lex.parser import mangle
from hy._compat import str_type
from hy.errors import HyTypeError, HyMacroExpansionError
from collections import defaultdict
# Macro modules loaded for every Hy module.
CORE_MACROS = [
    "hy.core.bootstrap",
]
# Additional macro modules for non-core Hy modules.
EXTRA_MACROS = [
    "hy.core.macros",
]
# module name (or None for hy.core) -> {macro name -> function} registries
# for regular macros and tag macros respectively.
_hy_macros = defaultdict(dict)
_hy_tag = defaultdict(dict)
def macro(name):
    """Decorator to define a macro called `name`.

    This stores the macro `name` in the namespace for the module where it is
    defined.

    If the module where it is defined is in `hy.core`, then the macro is stored
    in the default `None` namespace.

    This function is called from the `defmacro` special form in the compiler.
    """
    name = mangle(name)
    def register(fn):
        fn.__name__ = '({})'.format(name)
        try:
            pass_compiler = hy.inspect.has_kwargs(fn)
        except Exception:
            # Signature inspection can raise if fn has arguments with
            # names that are invalid in Python.
            pass_compiler = False
        fn._hy_macro_pass_compiler = pass_compiler
        namespace = fn.__module__
        if namespace.startswith("hy.core"):
            # Core macros live in the shared `None` namespace.
            namespace = None
        _hy_macros[namespace][name] = fn
        return fn
    return register
def tag(name):
    """Decorator to define a tag macro called `name`.

    This stores the macro `name` in the namespace for the module where it is
    defined.

    If the module where it is defined is in `hy.core`, then the macro is stored
    in the default `None` namespace.

    This function is called from the `deftag` special form in the compiler.
    """
    def register(fn):
        tag_name = mangle('#{}'.format(name))
        if not PY3:
            # Python 2 requires function names to be bytes.
            tag_name = tag_name.encode('UTF-8')
        fn.__name__ = tag_name
        namespace = fn.__module__
        if namespace.startswith("hy.core"):
            namespace = None
        _hy_tag[namespace][mangle(name)] = fn
        return fn
    return register
def require(source_module, target_module,
            all_macros=False, assignments=None, prefix=""):
    """Load macros from `source_module` into the namespace of `target_module`.

    :param source_module: module whose macros are imported.
    :param target_module: module that receives the macros.
    :param all_macros: when true, import every macro; `assignments` is then
        ignored.
    :param assignments: mapping of old macro names to new names.  Defaults to
        an empty mapping.  (Uses a ``None`` sentinel rather than ``{}`` to
        avoid the shared mutable-default-argument pitfall.)
    :param prefix: if nonempty, prepended (with a dot) to the name of each
        imported macro.  This produces names like "mymacromodule.mymacro",
        which looks like an attribute of a module, although it's actually
        just a symbol with a period in its name.
    :raises ImportError: if a requested name in `assignments` is not found
        in `source_module`.

    This function is called from the `require` special form in the compiler.
    """
    if assignments is None:
        assignments = {}
    seen_names = set()
    if prefix:
        prefix += "."
    # Normalize the requested names the same way macro names are stored.
    assignments = {mangle(str_type(k)): v for k, v in assignments.items()}

    for d in _hy_macros, _hy_tag:
        for name, macro in d[source_module].items():
            seen_names.add(name)
            if all_macros:
                d[target_module][mangle(prefix + name)] = macro
            elif name in assignments:
                d[target_module][mangle(prefix + assignments[name])] = macro

    if not all_macros:
        unseen = frozenset(assignments.keys()).difference(seen_names)
        if unseen:
            raise ImportError("cannot require names: " + repr(list(unseen)))
def load_macros(module_name):
    """Load the hy builtin macros for module `module_name`.

    Modules from `hy.core` can only use the macros from CORE_MACROS.
    Other modules get the macros from CORE_MACROS and EXTRA_MACROS.
    """
    def _import(module, module_name=module_name):
        "__import__ a module, avoiding recursions"
        if module != module_name:
            __import__(module)

    for mod in CORE_MACROS:
        _import(mod)
    # Core modules stop here; everything else also gets the extras.
    if not module_name.startswith("hy.core"):
        for mod in EXTRA_MACROS:
            _import(mod)
def make_empty_fn_copy(fn):
    """Return a do-nothing function with the same signature as `fn`.

    The copy is used to dry-run argument binding so a precise TypeError can
    surface before the macro body itself runs.
    """
    try:
        # Building the lambda may fail if fn has parameters with funny
        # names, like o!n.  In that case we fall back to a fully generic
        # function so the program can continue running; the downside is
        # that a later macro-expansion error message may be less precise.
        formatted_args = hy.inspect.format_args(fn)
        source = 'lambda {}: None'.format(
            formatted_args.lstrip('(').rstrip(')'))
        empty_fn = eval(source)
    except Exception:
        def empty_fn(*args, **kwargs):
            return None
    return empty_fn
def macroexpand(tree, compiler):
    """Expand the toplevel macros for the `tree`.

    Load the macros for the compiler's module, then repeatedly apply
    single-step expansion until the tree reaches a fixed point.
    """
    load_macros(compiler.module_name)
    previous = None
    while previous != tree:
        previous, tree = tree, macroexpand_1(tree, compiler)
    return tree
def macroexpand_1(tree, compiler):
    """Expand the toplevel macro from `tree` once, in the context of
    `module_name`.

    Non-expression trees, empty expressions, quoted forms, and calls whose
    head is not a registered macro are returned unchanged (possibly as a
    positioned copy).
    """
    if isinstance(tree, HyExpression):
        if tree == []:
            return tree

        fn = tree[0]
        # Quoted forms are never macro-expanded.
        if fn in ("quote", "quasiquote"):
            return tree
        # Work on a positioned copy so the original tree is not mutated.
        ntree = HyExpression(tree[:])
        ntree.replace(tree)

        opts = {}

        if isinstance(fn, HySymbol):
            fn = mangle(str_type(fn))
            # Look up the macro in the module namespace first, then fall
            # back to the shared/core (None) namespace.
            m = _hy_macros[compiler.module_name].get(fn)
            if m is None:
                m = _hy_macros[None].get(fn)
            if m is not None:
                if m._hy_macro_pass_compiler:
                    opts['compiler'] = compiler

                # Dry-run argument binding against an empty copy so a
                # signature mismatch raises a clean TypeError before the
                # macro body runs.
                try:
                    m_copy = make_empty_fn_copy(m)
                    m_copy(compiler.module_name, *ntree[1:], **opts)
                except TypeError as e:
                    msg = "expanding `" + str(tree[0]) + "': "
                    # Strip the "<lambda>()" artifact from the dry-run copy.
                    msg += str(e).replace("<lambda>()", "", 1).strip()
                    raise HyMacroExpansionError(tree, msg)

                # Real expansion; wrap unexpected failures with position info.
                try:
                    obj = m(compiler.module_name, *ntree[1:], **opts)
                except HyTypeError as e:
                    if e.expression is None:
                        e.expression = tree
                    raise
                except Exception as e:
                    msg = "expanding `" + str(tree[0]) + "': " + repr(e)
                    raise HyMacroExpansionError(tree, msg)
                # Propagate source position onto the expansion result.
                replace_hy_obj(obj, tree)
                return obj
        return ntree
    return tree
def tag_macroexpand(tag, tree, compiler):
    """Expand the tag macro "tag" with argument `tree`."""
    load_macros(compiler.module_name)

    # Module-local namespace first, then the shared/core (None) namespace.
    tag_macro = _hy_tag[compiler.module_name].get(tag)
    if tag_macro is None:
        if tag not in _hy_tag[None]:
            raise HyTypeError(
                tag,
                "`{0}' is not a defined tag macro.".format(tag)
            )
        tag_macro = _hy_tag[None][tag]

    expr = tag_macro(tree)
    return replace_hy_obj(expr, tree)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response-transform callback accepted by
# every operation in this module: (pipeline response, deserialized body,
# response headers) -> anything.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
    """InboundNatRulesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2017_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this file is generated by AutoRest; manual changes will be lost
    # on regeneration.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.InboundNatRuleListResult"]:
        """Gets all the inbound nat rules in a load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.InboundNatRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Build the first-page request from the operation URL template, or
            # a follow-up request straight from the service-supplied next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, mapping known error status codes to exceptions.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'}  # type: ignore

    async def _delete_initial(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        inbound_nat_rule_name: str,
        **kwargs: Any
    ) -> None:
        # Issue the initial DELETE request for the long-running delete
        # operation; polling is handled by begin_delete.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all valid first responses for an async delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        inbound_nat_rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified load balancer inbound nat rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param inbound_nat_rule_name: The name of the inbound nat rule.
        :type inbound_nat_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                inbound_nat_rule_name=inbound_nat_rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        inbound_nat_rule_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.InboundNatRule":
        """Gets the specified load balancer inbound nat rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param inbound_nat_rule_name: The name of the inbound nat rule.
        :type inbound_nat_rule_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: InboundNatRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_10_01.models.InboundNatRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is optional and only sent when the caller provided it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('InboundNatRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        inbound_nat_rule_name: str,
        inbound_nat_rule_parameters: "_models.InboundNatRule",
        **kwargs: Any
    ) -> "_models.InboundNatRule":
        # Issue the initial PUT request for the long-running create/update
        # operation; polling is handled by begin_create_or_update.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated, 201 = created; both carry an InboundNatRule body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('InboundNatRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('InboundNatRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        inbound_nat_rule_name: str,
        inbound_nat_rule_parameters: "_models.InboundNatRule",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.InboundNatRule"]:
        """Creates or updates a load balancer inbound nat rule.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param inbound_nat_rule_name: The name of the inbound nat rule.
        :type inbound_nat_rule_name: str
        :param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
         rule operation.
        :type inbound_nat_rule_parameters: ~azure.mgmt.network.v2017_10_01.models.InboundNatRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.InboundNatRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                inbound_nat_rule_name=inbound_nat_rule_name,
                inbound_nat_rule_parameters=inbound_nat_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('InboundNatRule', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_utils import excutils
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest import config
from tempest import exceptions
import tempest.test
# Global tempest configuration and module-level logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseComputeTest(tempest.test.BaseTestCase):
    """Base test case class for all Compute API tests."""

    _api_version = 2
    force_tenant_isolation = False

    # TODO(andreaf) We should care also for the alt_manager here
    # but only once client lazy load in the manager is done
    credentials = ['primary']

    @classmethod
    def skip_checks(cls):
        """Skip the whole class when an unsupported API version is requested."""
        super(BaseComputeTest, cls).skip_checks()
        if cls._api_version != 2:
            msg = ("Unexpected API version is specified (%s)" %
                   cls._api_version)
            raise exceptions.InvalidConfiguration(message=msg)

    @classmethod
    def setup_credentials(cls):
        # Network resources must be requested before the parent allocates
        # credentials.
        cls.set_network_resources()
        super(BaseComputeTest, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        """Bind every compute-related client from the primary manager."""
        super(BaseComputeTest, cls).setup_clients()
        cls.servers_client = cls.os.servers_client
        cls.flavors_client = cls.os.flavors_client
        cls.images_client = cls.os.images_client
        cls.extensions_client = cls.os.extensions_client
        cls.floating_ips_client = cls.os.floating_ips_client
        cls.keypairs_client = cls.os.keypairs_client
        cls.security_groups_client = cls.os.security_groups_client
        cls.quotas_client = cls.os.quotas_client
        # NOTE(mriedem): os-quota-class-sets is v2 API only
        cls.quota_classes_client = cls.os.quota_classes_client
        # NOTE(mriedem): os-networks is v2 API only
        cls.networks_client = cls.os.networks_client
        cls.limits_client = cls.os.limits_client
        cls.volumes_extensions_client = cls.os.volumes_extensions_client
        cls.volumes_client = cls.os.volumes_client
        cls.interfaces_client = cls.os.interfaces_client
        cls.fixed_ips_client = cls.os.fixed_ips_client
        cls.availability_zone_client = cls.os.availability_zone_client
        cls.agents_client = cls.os.agents_client
        cls.aggregates_client = cls.os.aggregates_client
        cls.services_client = cls.os.services_client
        cls.instance_usages_audit_log_client = (
            cls.os.instance_usages_audit_log_client)
        cls.hypervisor_client = cls.os.hypervisor_client
        cls.certificates_client = cls.os.certificates_client
        cls.migrations_client = cls.os.migrations_client
        cls.security_group_default_rules_client = (
            cls.os.security_group_default_rules_client)

    @classmethod
    def resource_setup(cls):
        """Cache config values and initialize per-class resource registries."""
        super(BaseComputeTest, cls).resource_setup()
        cls.build_interval = CONF.compute.build_interval
        cls.build_timeout = CONF.compute.build_timeout
        cls.ssh_user = CONF.compute.ssh_user
        cls.image_ref = CONF.compute.image_ref
        cls.image_ref_alt = CONF.compute.image_ref_alt
        cls.flavor_ref = CONF.compute.flavor_ref
        cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
        cls.image_ssh_user = CONF.compute.image_ssh_user
        cls.image_ssh_password = CONF.compute.image_ssh_password
        # Registries of resources created during the class run; cleaned up
        # in resource_cleanup.
        cls.servers = []
        cls.images = []
        cls.security_groups = []
        cls.server_groups = []

    @classmethod
    def resource_cleanup(cls):
        """Delete every resource registered during the class run."""
        cls.clear_images()
        cls.clear_servers()
        cls.clear_security_groups()
        cls.clear_server_groups()
        super(BaseComputeTest, cls).resource_cleanup()

    @classmethod
    def clear_servers(cls):
        """Delete all registered servers, then wait for their termination."""
        LOG.debug('Clearing servers: %s', ','.join(
            server['id'] for server in cls.servers))
        for server in cls.servers:
            try:
                cls.servers_client.delete_server(server['id'])
            except lib_exc.NotFound:
                # Something else already cleaned up the server, nothing to be
                # worried about
                pass
            except Exception:
                LOG.exception('Deleting server %s failed' % server['id'])

        # Deletes were issued above; now wait for each server to be gone.
        for server in cls.servers:
            try:
                cls.servers_client.wait_for_server_termination(server['id'])
            except Exception:
                LOG.exception('Waiting for deletion of server %s failed'
                              % server['id'])

    @classmethod
    def server_check_teardown(cls):
        """Check whether the shared server is clean enough for the next test.

        If the server is not ACTIVE, it is deleted and the stored id is
        cleared; the setUp method is then responsible for creating a new
        server.  Exceptions raised during teardown fail the test case.
        This method is meant to be used only by tearDown methods, when the
        shared server id is stored in the class attribute `server_id`.
        """
        if getattr(cls, 'server_id', None) is not None:
            try:
                cls.servers_client.wait_for_server_status(cls.server_id,
                                                          'ACTIVE')
            except Exception as exc:
                LOG.exception(exc)
                cls.servers_client.delete_server(cls.server_id)
                cls.servers_client.wait_for_server_termination(cls.server_id)
                cls.server_id = None
                raise

    @classmethod
    def clear_images(cls):
        """Delete all registered images, tolerating already-deleted ones."""
        LOG.debug('Clearing images: %s', ','.join(cls.images))
        for image_id in cls.images:
            try:
                cls.images_client.delete_image(image_id)
            except lib_exc.NotFound:
                # The image may have already been deleted which is OK.
                pass
            except Exception:
                LOG.exception('Exception raised deleting image %s' % image_id)

    @classmethod
    def clear_security_groups(cls):
        """Delete all registered security groups, tolerating missing ones."""
        LOG.debug('Clearing security groups: %s', ','.join(
            str(sg['id']) for sg in cls.security_groups))
        for sg in cls.security_groups:
            try:
                cls.security_groups_client.delete_security_group(sg['id'])
            except lib_exc.NotFound:
                # The security group may have already been deleted which is OK.
                pass
            except Exception as exc:
                LOG.info('Exception raised deleting security group %s',
                         sg['id'])
                LOG.exception(exc)

    @classmethod
    def clear_server_groups(cls):
        """Delete all registered server groups, tolerating missing ones."""
        LOG.debug('Clearing server groups: %s', ','.join(cls.server_groups))
        for server_group_id in cls.server_groups:
            try:
                cls.servers_client.delete_server_group(server_group_id)
            except lib_exc.NotFound:
                # The server-group may have already been deleted which is OK.
                pass
            except Exception:
                LOG.exception('Exception raised deleting server-group %s',
                              server_group_id)

    @classmethod
    def create_test_server(cls, **kwargs):
        """Wrapper utility that returns a test server.

        Supports the create_server kwargs plus `name`, `flavor`, `image_id`,
        `wait_until` and `preserve_server_on_error`.  All created servers
        are registered in cls.servers for cleanup.
        """
        name = data_utils.rand_name(cls.__name__ + "-instance")
        if 'name' in kwargs:
            name = kwargs.pop('name')
        flavor = kwargs.get('flavor', cls.flavor_ref)
        image_id = kwargs.get('image_id', cls.image_ref)

        # NOTE(review): set_networks_kwarg is assumed to return the caller's
        # kwargs augmented with network info; the 'min_count'/'wait_until'
        # checks below rely on those keys surviving -- confirm.
        kwargs = fixed_network.set_networks_kwarg(
            cls.get_tenant_network(), kwargs) or {}
        body = cls.servers_client.create_server(
            name, image_id, flavor, **kwargs)

        # handle the case of multiple servers
        servers = [body]
        if 'min_count' in kwargs or 'max_count' in kwargs:
            # Get servers created which name match with name param.
            b = cls.servers_client.list_servers()
            servers = [s for s in b['servers'] if s['name'].startswith(name)]

        if 'wait_until' in kwargs:
            for server in servers:
                try:
                    cls.servers_client.wait_for_server_status(
                        server['id'], kwargs['wait_until'])
                except Exception:
                    # On failure, delete the servers unless the caller asked
                    # to preserve them, then re-raise the original error.
                    with excutils.save_and_reraise_exception():
                        if ('preserve_server_on_error' not in kwargs
                            or kwargs['preserve_server_on_error'] is False):
                            for server in servers:
                                try:
                                    cls.servers_client.delete_server(
                                        server['id'])
                                except Exception:
                                    pass

        cls.servers.extend(servers)

        return body

    @classmethod
    def create_security_group(cls, name=None, description=None):
        """Create a security group (random name/description by default)."""
        if name is None:
            name = data_utils.rand_name(cls.__name__ + "-securitygroup")
        if description is None:
            description = data_utils.rand_name('description')
        body = \
            cls.security_groups_client.create_security_group(name,
                                                             description)
        cls.security_groups.append(body)

        return body

    @classmethod
    def create_test_server_group(cls, name="", policy=None):
        """Create a server group; defaults to an 'affinity' policy."""
        if not name:
            name = data_utils.rand_name(cls.__name__ + "-Server-Group")
        if policy is None:
            policy = ['affinity']
        body = cls.servers_client.create_server_group(name, policy)
        cls.server_groups.append(body['id'])
        return body

    def wait_for(self, condition):
        """Repeatedly calls condition() until a timeout."""
        start_time = int(time.time())
        while True:
            try:
                condition()
            except Exception:
                pass
            else:
                return
            if int(time.time()) - start_time >= self.build_timeout:
                # Last attempt: let the exception (if any) propagate.
                condition()
                return
            time.sleep(self.build_interval)

    @staticmethod
    def _delete_volume(volumes_client, volume_id):
        """Deletes the given volume and waits for it to be gone."""
        try:
            volumes_client.delete_volume(volume_id)
            # TODO(mriedem): We should move the wait_for_resource_deletion
            # into the delete_volume method as a convenience to the caller.
            volumes_client.wait_for_resource_deletion(volume_id)
        except lib_exc.NotFound:
            LOG.warn("Unable to delete volume '%s' since it was not found. "
                     "Maybe it was already deleted?" % volume_id)

    @classmethod
    def prepare_instance_network(cls):
        """Request full network resources when floating-IP SSH is in use."""
        if (CONF.compute.ssh_auth_method != 'disabled' and
                CONF.compute.ssh_connect_method == 'floating'):
            cls.set_network_resources(network=True, subnet=True, router=True,
                                      dhcp=True)

    @classmethod
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server.

        Supports `name` and `wait_until`; with wait_until='ACTIVE' the
        server is also waited on unless wait_for_server=False.
        """
        name = data_utils.rand_name(cls.__name__ + "-image")
        if 'name' in kwargs:
            name = kwargs.pop('name')

        image = cls.images_client.create_image(server_id, name)
        image_id = data_utils.parse_image_id(image.response['location'])
        cls.images.append(image_id)

        if 'wait_until' in kwargs:
            cls.images_client.wait_for_image_status(image_id,
                                                    kwargs['wait_until'])
            image = cls.images_client.get_image(image_id)

            if kwargs['wait_until'] == 'ACTIVE':
                if kwargs.get('wait_for_server', True):
                    cls.servers_client.wait_for_server_status(server_id,
                                                              'ACTIVE')
        return image

    @classmethod
    def rebuild_server(cls, server_id, **kwargs):
        # Destroy an existing server and creates a new one
        if server_id:
            try:
                cls.servers_client.delete_server(server_id)
                cls.servers_client.wait_for_server_termination(server_id)
            except Exception:
                LOG.exception('Failed to delete server %s' % server_id)
        server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
        cls.password = server['adminPass']
        return server['id']

    @classmethod
    def delete_server(cls, server_id):
        """Deletes an existing server and waits for it to be gone."""
        try:
            cls.servers_client.delete_server(server_id)
            cls.servers_client.wait_for_server_termination(server_id)
        except Exception:
            LOG.exception('Failed to delete server %s' % server_id)

    @classmethod
    def delete_volume(cls, volume_id):
        """Deletes the given volume and waits for it to be gone."""
        cls._delete_volume(cls.volumes_extensions_client, volume_id)
class BaseV2ComputeTest(BaseComputeTest):
    """Base test case class for Compute V2 API tests."""
    _api_version = 2
class BaseComputeAdminTest(BaseComputeTest):
    """Base test case class for Compute Admin API tests."""
    # Request both a primary (non-admin) and an admin credential set.
    credentials = ['primary', 'admin']
    @classmethod
    def setup_clients(cls):
        # Extend the parent's clients with the admin availability-zone client.
        super(BaseComputeAdminTest, cls).setup_clients()
        cls.availability_zone_admin_client = (
            cls.os_adm.availability_zone_client)
class BaseV2ComputeAdminTest(BaseComputeAdminTest):
    """Base test case class for Compute Admin V2 API tests."""
    # Pin the admin tests to the v2 API.
    _api_version = 2
| |
from sympy import sympify, Add, ImmutableMatrix as Matrix
from sympy.core.compatibility import u, unicode
from .printing import (VectorLatexPrinter, VectorPrettyPrinter,
VectorStrPrinter)
__all__ = ['Dyadic']
class Dyadic(object):
    """A Dyadic object.
    See:
    http://en.wikipedia.org/wiki/Dyadic_tensor
    Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
    A more powerful way to represent a rigid body's inertia. While it is more
    complex, by choosing Dyadic components to be in body fixed basis vectors,
    the resulting matrix is equivalent to the inertia tensor.
    """
    def __init__(self, inlist):
        """
        Just like Vector's init, you shouldn't call this unless creating a
        zero dyadic.
        zd = Dyadic(0)
        Stores a Dyadic as a list of lists; the inner list has the measure
        number and the two unit vectors; the outerlist holds each unique
        unit vector pair.
        """
        self.args = []
        if inlist == 0:
            inlist = []
        # Merge incoming terms: measure numbers of terms sharing the same
        # (left, right) unit-vector pair are summed, so each pair appears
        # at most once in self.args.  NOTE: this consumes `inlist`.
        while len(inlist) != 0:
            added = 0
            for i, v in enumerate(self.args):
                if ((str(inlist[0][1]) == str(self.args[i][1])) and
                        (str(inlist[0][2]) == str(self.args[i][2]))):
                    self.args[i] = (self.args[i][0] + inlist[0][0],
                                    inlist[0][1], inlist[0][2])
                    inlist.remove(inlist[0])
                    added = 1
                    break
            if added != 1:
                self.args.append(inlist[0])
                inlist.remove(inlist[0])
        i = 0
        # This code is to remove empty parts from the list
        while i < len(self.args):
            if ((self.args[i][0] == 0) | (self.args[i][1] == 0) |
                    (self.args[i][2] == 0)):
                self.args.remove(self.args[i])
                i -= 1
            i += 1
    def __add__(self, other):
        """The add operator for Dyadic. """
        other = _check_dyadic(other)
        return Dyadic(self.args + other.args)
    def __and__(self, other):
        """The inner product operator for a Dyadic and a Dyadic or Vector.
        Parameters
        ==========
        other : Dyadic or Vector
            The other Dyadic or Vector to take the inner product with
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, outer
        >>> N = ReferenceFrame('N')
        >>> D1 = outer(N.x, N.y)
        >>> D2 = outer(N.y, N.y)
        >>> D1.dot(D2)
        (N.x|N.y)
        >>> D1.dot(N.y)
        N.x
        """
        from sympy.physics.vector.vector import Vector, _check_vector
        # Dyadic . Dyadic yields a Dyadic; Dyadic . Vector yields a Vector.
        if isinstance(other, Dyadic):
            other = _check_dyadic(other)
            ol = Dyadic(0)
            for i, v in enumerate(self.args):
                for i2, v2 in enumerate(other.args):
                    ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2])
        else:
            other = _check_vector(other)
            ol = Vector(0)
            for i, v in enumerate(self.args):
                ol += v[0] * v[1] * (v[2] & other)
        return ol
    def __div__(self, other):
        """Divides the Dyadic by a sympifyable expression. """
        # Python 2 division hook; __truediv__ below covers Python 3.
        return self.__mul__(1 / other)
    __truediv__ = __div__
    def __eq__(self, other):
        """Tests for equality.
        Is currently weak; needs stronger comparison testing
        """
        # Allow comparison against the scalar 0 (the zero dyadic).
        if other == 0:
            other = Dyadic(0)
        other = _check_dyadic(other)
        if (self.args == []) and (other.args == []):
            return True
        elif (self.args == []) or (other.args == []):
            return False
        return set(self.args) == set(other.args)
    def __mul__(self, other):
        """Multiplies the Dyadic by a sympifyable expression.
        Parameters
        ==========
        other : Sympafiable
            The scalar to multiply this Dyadic with
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, outer
        >>> N = ReferenceFrame('N')
        >>> d = outer(N.x, N.x)
        >>> 5 * d
        5*(N.x|N.x)
        """
        newlist = [v for v in self.args]
        for i, v in enumerate(newlist):
            newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1],
                          newlist[i][2])
        return Dyadic(newlist)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __neg__(self):
        return self * -1
    def _latex(self, printer=None):
        ar = self.args  # just to shorten things
        if len(ar) == 0:
            return str(0)
        ol = []  # output list, to be concatenated to a string
        mlp = VectorLatexPrinter()
        for i, v in enumerate(ar):
            # if the coef of the dyadic is 1, we skip the 1
            if ar[i][0] == 1:
                ol.append(' + ' + mlp.doprint(ar[i][1]) + r"\otimes " +
                          mlp.doprint(ar[i][2]))
            # if the coef of the dyadic is -1, we skip the 1
            elif ar[i][0] == -1:
                ol.append(' - ' +
                          mlp.doprint(ar[i][1]) +
                          r"\otimes " +
                          mlp.doprint(ar[i][2]))
            # If the coefficient of the dyadic is not 1 or -1,
            # we might wrap it in parentheses, for readability.
            elif ar[i][0] != 0:
                arg_str = mlp.doprint(ar[i][0])
                if isinstance(ar[i][0], Add):
                    arg_str = '(%s)' % arg_str
                if arg_str.startswith('-'):
                    arg_str = arg_str[1:]
                    str_start = ' - '
                else:
                    str_start = ' + '
                ol.append(str_start + arg_str + mlp.doprint(ar[i][1]) +
                          r"\otimes " + mlp.doprint(ar[i][2]))
        outstr = ''.join(ol)
        # Strip the leading sign/space from the first term.
        if outstr.startswith(' + '):
            outstr = outstr[3:]
        elif outstr.startswith(' '):
            outstr = outstr[1:]
        return outstr
    def _pretty(self, printer=None):
        e = self
        # Fake mimics the pretty-printer's prettyForm interface; rendering
        # is deferred until render() is called by the printing machinery.
        class Fake(object):
            baseline = 0
            def render(self, *args, **kwargs):
                ar = e.args  # just to shorten things
                settings = printer._settings if printer else {}
                if printer:
                    use_unicode = printer._use_unicode
                else:
                    from sympy.printing.pretty.pretty_symbology import (
                        pretty_use_unicode)
                    use_unicode = pretty_use_unicode()
                mpp = printer if printer else VectorPrettyPrinter(settings)
                if len(ar) == 0:
                    return unicode(0)
                bar = u"\N{CIRCLED TIMES}" if use_unicode else "|"
                ol = []  # output list, to be concatenated to a string
                for i, v in enumerate(ar):
                    # if the coef of the dyadic is 1, we skip the 1
                    if ar[i][0] == 1:
                        ol.extend([u" + ",
                                   mpp.doprint(ar[i][1]),
                                   bar,
                                   mpp.doprint(ar[i][2])])
                    # if the coef of the dyadic is -1, we skip the 1
                    elif ar[i][0] == -1:
                        ol.extend([u" - ",
                                   mpp.doprint(ar[i][1]),
                                   bar,
                                   mpp.doprint(ar[i][2])])
                    # If the coefficient of the dyadic is not 1 or -1,
                    # we might wrap it in parentheses, for readability.
                    elif ar[i][0] != 0:
                        if isinstance(ar[i][0], Add):
                            arg_str = mpp._print(
                                ar[i][0]).parens()[0]
                        else:
                            arg_str = mpp.doprint(ar[i][0])
                        if arg_str.startswith(u"-"):
                            arg_str = arg_str[1:]
                            str_start = u" - "
                        else:
                            str_start = u" + "
                        ol.extend([str_start, arg_str, u" ",
                                   mpp.doprint(ar[i][1]),
                                   bar,
                                   mpp.doprint(ar[i][2])])
                outstr = u"".join(ol)
                # Strip the leading sign/space from the first term.
                if outstr.startswith(u" + "):
                    outstr = outstr[3:]
                elif outstr.startswith(" "):
                    outstr = outstr[1:]
                return outstr
        return Fake()
    def __rand__(self, other):
        """The inner product operator for a Vector or Dyadic, and a Dyadic
        This is for: Vector dot Dyadic
        Parameters
        ==========
        other : Vector
            The vector we are dotting with
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, dot, outer
        >>> N = ReferenceFrame('N')
        >>> d = outer(N.x, N.x)
        >>> dot(N.x, d)
        N.x
        """
        from sympy.physics.vector.vector import Vector, _check_vector
        other = _check_vector(other)
        ol = Vector(0)
        for i, v in enumerate(self.args):
            ol += v[0] * v[2] * (v[1] & other)
        return ol
    def __rsub__(self, other):
        return (-1 * self) + other
    def __rxor__(self, other):
        """For a cross product in the form: Vector x Dyadic
        Parameters
        ==========
        other : Vector
            The Vector that we are crossing this Dyadic with
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, outer, cross
        >>> N = ReferenceFrame('N')
        >>> d = outer(N.x, N.x)
        >>> cross(N.y, d)
        - (N.z|N.x)
        """
        from sympy.physics.vector.vector import _check_vector
        other = _check_vector(other)
        ol = Dyadic(0)
        for i, v in enumerate(self.args):
            ol += v[0] * ((other ^ v[1]) | v[2])
        return ol
    def __str__(self, printer=None):
        """Printing method. """
        ar = self.args  # just to shorten things
        if len(ar) == 0:
            return str(0)
        ol = []  # output list, to be concatenated to a string
        for i, v in enumerate(ar):
            # if the coef of the dyadic is 1, we skip the 1
            if ar[i][0] == 1:
                ol.append(' + (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')')
            # if the coef of the dyadic is -1, we skip the 1
            elif ar[i][0] == -1:
                ol.append(' - (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')')
            # If the coefficient of the dyadic is not 1 or -1,
            # we might wrap it in parentheses, for readability.
            elif ar[i][0] != 0:
                arg_str = VectorStrPrinter().doprint(ar[i][0])
                if isinstance(ar[i][0], Add):
                    arg_str = "(%s)" % arg_str
                if arg_str[0] == '-':
                    arg_str = arg_str[1:]
                    str_start = ' - '
                else:
                    str_start = ' + '
                ol.append(str_start + arg_str + '*(' + str(ar[i][1]) +
                          '|' + str(ar[i][2]) + ')')
        outstr = ''.join(ol)
        # Strip the leading sign/space from the first term.
        if outstr.startswith(' + '):
            outstr = outstr[3:]
        elif outstr.startswith(' '):
            outstr = outstr[1:]
        return outstr
    def __sub__(self, other):
        """The subtraction operator. """
        return self.__add__(other * -1)
    def __xor__(self, other):
        """For a cross product in the form: Dyadic x Vector.
        Parameters
        ==========
        other : Vector
            The Vector that we are crossing this Dyadic with
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, outer, cross
        >>> N = ReferenceFrame('N')
        >>> d = outer(N.x, N.x)
        >>> cross(d, N.y)
        (N.x|N.z)
        """
        from sympy.physics.vector.vector import _check_vector
        other = _check_vector(other)
        ol = Dyadic(0)
        for i, v in enumerate(self.args):
            ol += v[0] * (v[1] | (v[2] ^ other))
        return ol
    # Printing and arithmetic aliases.
    _sympystr = __str__
    _sympyrepr = _sympystr
    __repr__ = __str__
    __radd__ = __add__
    __rmul__ = __mul__
    def express(self, frame1, frame2=None):
        """Expresses this Dyadic in alternate frame(s)
        The first frame is the list side expression, the second frame is the
        right side; if Dyadic is in form A.x|B.y, you can express it in two
        different frames. If no second frame is given, the Dyadic is
        expressed in only one frame.
        Calls the global express function
        Parameters
        ==========
        frame1 : ReferenceFrame
            The frame to express the left side of the Dyadic in
        frame2 : ReferenceFrame
            If provided, the frame to express the right side of the Dyadic in
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
        >>> N = ReferenceFrame('N')
        >>> q = dynamicsymbols('q')
        >>> B = N.orientnew('B', 'Axis', [q, N.z])
        >>> d = outer(N.x, N.x)
        >>> d.express(B, N)
        cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
        """
        from sympy.physics.vector.functions import express
        return express(self, frame1, frame2)
    def to_matrix(self, reference_frame, second_reference_frame=None):
        """Returns the matrix form of the dyadic with respect to one or two
        reference frames.
        Parameters
        ----------
        reference_frame : ReferenceFrame
            The reference frame that the rows and columns of the matrix
            correspond to. If a second reference frame is provided, this
            only corresponds to the rows of the matrix.
        second_reference_frame : ReferenceFrame, optional, default=None
            The reference frame that the columns of the matrix correspond
            to.
        Returns
        -------
        matrix : ImmutableMatrix, shape(3,3)
            The matrix that gives the 2D tensor form.
        Examples
        ========
        >>> from sympy import symbols
        >>> from sympy.physics.vector import ReferenceFrame, Vector
        >>> Vector.simp = True
        >>> from sympy.physics.mechanics import inertia
        >>> Ixx, Iyy, Izz, Ixy, Iyz, Ixz = symbols('Ixx, Iyy, Izz, Ixy, Iyz, Ixz')
        >>> N = ReferenceFrame('N')
        >>> inertia_dyadic = inertia(N, Ixx, Iyy, Izz, Ixy, Iyz, Ixz)
        >>> inertia_dyadic.to_matrix(N)
        Matrix([
        [Ixx, Ixy, Ixz],
        [Ixy, Iyy, Iyz],
        [Ixz, Iyz, Izz]])
        >>> beta = symbols('beta')
        >>> A = N.orientnew('A', 'Axis', (beta, N.x))
        >>> inertia_dyadic.to_matrix(A)
        Matrix([
        [ Ixx, Ixy*cos(beta) + Ixz*sin(beta), -Ixy*sin(beta) + Ixz*cos(beta)],
        [ Ixy*cos(beta) + Ixz*sin(beta), Iyy*cos(2*beta)/2 + Iyy/2 + Iyz*sin(2*beta) - Izz*cos(2*beta)/2 + Izz/2, -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2],
        [-Ixy*sin(beta) + Ixz*cos(beta), -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2, -Iyy*cos(2*beta)/2 + Iyy/2 - Iyz*sin(2*beta) + Izz*cos(2*beta)/2 + Izz/2]])
        """
        if second_reference_frame is None:
            second_reference_frame = reference_frame
        # Entry (i, j) is i . self . j for the frames' basis vectors.
        return Matrix([i.dot(self).dot(j) for i in reference_frame for j in
                       second_reference_frame]).reshape(3, 3)
    def doit(self, **hints):
        """Calls .doit() on each term in the Dyadic"""
        return sum([Dyadic([(v[0].doit(**hints), v[1], v[2])])
                    for v in self.args], Dyadic(0))
    def dt(self, frame):
        """Take the time derivative of this Dyadic in a frame.
        This function calls the global time_derivative method
        Parameters
        ==========
        frame : ReferenceFrame
            The frame to take the time derivative in
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
        >>> N = ReferenceFrame('N')
        >>> q = dynamicsymbols('q')
        >>> B = N.orientnew('B', 'Axis', [q, N.z])
        >>> d = outer(N.x, N.x)
        >>> d.dt(B)
        - q'*(N.y|N.x) - q'*(N.x|N.y)
        """
        from sympy.physics.vector.functions import time_derivative
        return time_derivative(self, frame)
    def simplify(self):
        """Returns a simplified Dyadic."""
        out = Dyadic(0)
        for v in self.args:
            out += Dyadic([(v[0].simplify(), v[1], v[2])])
        return out
    def subs(self, *args, **kwargs):
        """Substituion on the Dyadic.
        Examples
        ========
        >>> from sympy.physics.vector import ReferenceFrame
        >>> from sympy import Symbol
        >>> N = ReferenceFrame('N')
        >>> s = Symbol('s')
        >>> a = s * (N.x|N.x)
        >>> a.subs({s: 2})
        2*(N.x|N.x)
        """
        return sum([Dyadic([(v[0].subs(*args, **kwargs), v[1], v[2])])
                    for v in self.args], Dyadic(0))
    def applyfunc(self, f):
        """Apply a function to each component of a Dyadic."""
        if not callable(f):
            raise TypeError("`f` must be callable.")
        out = Dyadic(0)
        for a, b, c in self.args:
            out += f(a) * (b|c)
        return out
    # Named aliases for the operator forms above.
    dot = __and__
    cross = __xor__
def _check_dyadic(other):
    """Return *other* unchanged, raising TypeError if it is not a Dyadic."""
    if isinstance(other, Dyadic):
        return other
    raise TypeError('A Dyadic must be supplied')
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.estimators.dynamic_rnn_estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tempfile
# TODO: #6568 Remove this hack that makes dlopen() not crash.
# NOTE(review): presumably RTLD_GLOBAL is needed so symbols from already
# loaded extension modules are visible to later ones (see issue #6568) —
# confirm against the referenced issue.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
  import ctypes
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import target_column as target_column_lib
from tensorflow.contrib.learn.python.learn.estimators import dynamic_rnn_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class IdentityRNNCell(rnn.RNNCell):
  """Test cell that echoes its input and returns an all-ones state."""
  def __init__(self, state_size, output_size):
    self._state_size = state_size
    self._output_size = output_size
  @property
  def state_size(self):
    return self._state_size
  @property
  def output_size(self):
    return self._output_size
  def __call__(self, inputs, state):
    # Output is the input unchanged; the next state is a
    # [batch, state_size] tensor of ones (incoming `state` is ignored).
    return array_ops.identity(inputs), array_ops.ones(
        [array_ops.shape(inputs)[0], self.state_size])
class MockTargetColumn(object):
  """Stand-in target column whose computational methods must not be hit.

  Only `num_label_columns` (with its setter) is functional; the other
  methods raise immediately so a test fails loudly if the code under
  test calls into the target column unexpectedly.
  """
  def __init__(self, num_label_columns=None):
    self._num_label_columns = num_label_columns
  def get_eval_ops(self, features, activations, labels, metrics):
    raise NotImplementedError(
        'MockTargetColumn.get_eval_ops called unexpectedly.')
  def logits_to_predictions(self, flattened_activations, proba=False):
    raise NotImplementedError(
        'MockTargetColumn.logits_to_predictions called unexpectedly.')
  def loss(self, activations, labels, features):
    raise NotImplementedError('MockTargetColumn.loss called unexpectedly.')
  @property
  def num_label_columns(self):
    """Configured number of label columns; raises if never set."""
    if self._num_label_columns is not None:
      return self._num_label_columns
    raise ValueError('MockTargetColumn.num_label_columns has not been set.')
  def set_num_label_columns(self, n):
    self._num_label_columns = n
def sequence_length_mask(values, lengths):
  """Zero out activations beyond each sequence's valid length.

  Args:
    values: numpy array of shape [batch_size, padded_length, k].
    lengths: iterable of batch_size valid sequence lengths.

  Returns:
    A new array equal to `values` except that, for every example i,
    entries at time steps >= lengths[i] are zero.  Unlike the previous
    implementation, the input array is not mutated in place.
  """
  masked = np.array(values, copy=True)
  for i, length in enumerate(lengths):
    masked[i, length:, :] = 0
  return masked
class DynamicRnnEstimatorTest(test.TestCase):
  """Unit tests for dynamic_rnn_estimator helpers and model functions."""
  # Dimensions shared by the fixtures built in setUp().
  NUM_RNN_CELL_UNITS = 8
  NUM_LABEL_COLUMNS = 6
  INPUTS_COLUMN = feature_column.real_valued_column(
      'inputs', dimension=NUM_LABEL_COLUMNS)
  def setUp(self):
    # Build a basic RNN cell, a mock target column, one context feature
    # column (one-hot location) and two sequence feature columns
    # (real-valued measurements plus an embedded sparse column).
    super(DynamicRnnEstimatorTest, self).setUp()
    self.rnn_cell = core_rnn_cell_impl.BasicRNNCell(self.NUM_RNN_CELL_UNITS)
    self.mock_target_column = MockTargetColumn(
        num_label_columns=self.NUM_LABEL_COLUMNS)
    location = feature_column.sparse_column_with_keys(
        'location', keys=['west_side', 'east_side', 'nyc'])
    location_onehot = feature_column.one_hot_column(location)
    self.context_feature_columns = [location_onehot]
    wire_cast = feature_column.sparse_column_with_keys(
        'wire_cast', ['marlo', 'omar', 'stringer'])
    wire_cast_embedded = feature_column.embedding_column(wire_cast, dimension=8)
    measurements = feature_column.real_valued_column(
        'measurements', dimension=2)
    self.sequence_feature_columns = [measurements, wire_cast_embedded]
  def GetColumnsToTensors(self):
    """Get columns_to_tensors matching setUp(), in the current default graph."""
    # Batch of 3 examples, padded sequence length 2.
    return {
        'location':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [1, 0], [2, 0]],
                values=['west_side', 'west_side', 'nyc'],
                dense_shape=[3, 1]),
        'wire_cast':
            sparse_tensor.SparseTensor(
                indices=[[0, 0, 0], [0, 1, 0],
                         [1, 0, 0], [1, 1, 0], [1, 1, 1],
                         [2, 0, 0]],
                values=[b'marlo', b'stringer',
                        b'omar', b'stringer', b'marlo',
                        b'marlo'],
                dense_shape=[3, 2, 2]),
        'measurements':
            random_ops.random_uniform(
                [3, 2, 2], seed=4711)
    }
  def GetClassificationTargetsOrNone(self, mode):
    """Get targets matching setUp() and mode, in the current default graph."""
    # No labels in inference mode; otherwise random binary labels.
    return (random_ops.random_uniform(
        [3, 2, 1], 0, 2, dtype=dtypes.int64, seed=1412) if
            mode != model_fn_lib.ModeKeys.INFER else None)
  def testBuildSequenceInputInput(self):
    """Sequence input has shape [batch, time, combined feature dim]."""
    sequence_input = dynamic_rnn_estimator.build_sequence_input(
        self.GetColumnsToTensors(), self.sequence_feature_columns,
        self.context_feature_columns)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(data_flow_ops.initialize_all_tables())
      sequence_input_val = sess.run(sequence_input)
    expected_shape = np.array([
        3,  # expected batch size
        2,  # padded sequence length
        3 + 8 + 2  # location keys + embedding dim + measurement dimension
    ])
    self.assertAllEqual(expected_shape, sequence_input_val.shape)
  def testConstructRNN(self):
    """construct_rnn yields activations and final state of expected shapes."""
    initial_state = None
    sequence_input = dynamic_rnn_estimator.build_sequence_input(
        self.GetColumnsToTensors(), self.sequence_feature_columns,
        self.context_feature_columns)
    activations_t, final_state_t = dynamic_rnn_estimator.construct_rnn(
        initial_state, sequence_input, self.rnn_cell,
        self.mock_target_column.num_label_columns)
    # Obtain values of activations and final state.
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(data_flow_ops.initialize_all_tables())
      activations, final_state = sess.run([activations_t, final_state_t])
    expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
    self.assertAllEqual(expected_activations_shape, activations.shape)
    expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
    self.assertAllEqual(expected_state_shape, final_state.shape)
  def testMaskActivationsAndLabels(self):
    """Test `mask_activations_and_labels`."""
    batch_size = 4
    padded_length = 6
    num_classes = 4
    np.random.seed(1234)
    sequence_length = np.random.randint(0, padded_length + 1, batch_size)
    activations = np.random.rand(batch_size, padded_length, num_classes)
    labels = np.random.randint(0, num_classes, [batch_size, padded_length])
    (activations_masked_t,
     labels_masked_t) = dynamic_rnn_estimator.mask_activations_and_labels(
         constant_op.constant(
             activations, dtype=dtypes.float32),
         constant_op.constant(
             labels, dtype=dtypes.int32),
         constant_op.constant(
             sequence_length, dtype=dtypes.int32))
    with session.Session() as sess:
      activations_masked, labels_masked = sess.run(
          [activations_masked_t, labels_masked_t])
    # Masking flattens the batch: one row per valid (example, step) pair.
    expected_activations_shape = [sum(sequence_length), num_classes]
    np.testing.assert_equal(
        expected_activations_shape, activations_masked.shape,
        'Wrong activations shape. Expected {}; got {}.'.format(
            expected_activations_shape, activations_masked.shape))
    expected_labels_shape = [sum(sequence_length)]
    np.testing.assert_equal(expected_labels_shape, labels_masked.shape,
                            'Wrong labels shape. Expected {}; got {}.'.format(
                                expected_labels_shape, labels_masked.shape))
    # Walk the flattened outputs and check each entry against its source.
    masked_index = 0
    for i in range(batch_size):
      for j in range(sequence_length[i]):
        actual_activations = activations_masked[masked_index]
        expected_activations = activations[i, j, :]
        np.testing.assert_almost_equal(
            expected_activations,
            actual_activations,
            err_msg='Unexpected logit value at index [{}, {}, :].'
            '  Expected {}; got {}.'.format(i, j, expected_activations,
                                            actual_activations))
        actual_labels = labels_masked[masked_index]
        expected_labels = labels[i, j]
        np.testing.assert_almost_equal(
            expected_labels,
            actual_labels,
            err_msg='Unexpected logit value at index [{}, {}].'
            ' Expected {}; got {}.'.format(i, j, expected_labels,
                                           actual_labels))
        masked_index += 1
  def testSelectLastActivations(self):
    """Test `select_last_activations`."""
    batch_size = 4
    padded_length = 6
    num_classes = 4
    np.random.seed(4444)
    sequence_length = np.random.randint(0, padded_length + 1, batch_size)
    activations = np.random.rand(batch_size, padded_length, num_classes)
    last_activations_t = dynamic_rnn_estimator.select_last_activations(
        constant_op.constant(
            activations, dtype=dtypes.float32),
        constant_op.constant(
            sequence_length, dtype=dtypes.int32))
    with session.Session() as sess:
      last_activations = sess.run(last_activations_t)
    expected_activations_shape = [batch_size, num_classes]
    np.testing.assert_equal(
        expected_activations_shape, last_activations.shape,
        'Wrong activations shape. Expected {}; got {}.'.format(
            expected_activations_shape, last_activations.shape))
    # Each row must equal the activation at that example's final valid step.
    for i in range(batch_size):
      actual_activations = last_activations[i, :]
      expected_activations = activations[i, sequence_length[i] - 1, :]
      np.testing.assert_almost_equal(
          expected_activations,
          actual_activations,
          err_msg='Unexpected logit value at index [{}, :].'
          '  Expected {}; got {}.'.format(i, expected_activations,
                                          actual_activations))
  # testGetDynamicRnnModelFn{Train,Eval,Infer}() test which fields
  # of ModelFnOps are set depending on mode.
  def testGetDynamicRnnModelFnTrain(self):
    model_fn_ops = self._GetModelFnOpsForMode(model_fn_lib.ModeKeys.TRAIN)
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    # None may get normalized to {}; we accept neither.
    self.assertNotEqual(len(model_fn_ops.eval_metric_ops), 0)
  def testGetDynamicRnnModelFnEval(self):
    model_fn_ops = self._GetModelFnOpsForMode(model_fn_lib.ModeKeys.EVAL)
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    # None may get normalized to {}; we accept neither.
    self.assertNotEqual(len(model_fn_ops.eval_metric_ops), 0)
  def testGetDynamicRnnModelFnInfer(self):
    model_fn_ops = self._GetModelFnOpsForMode(model_fn_lib.ModeKeys.INFER)
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    # None may get normalized to {}; we accept both.
    self.assertFalse(model_fn_ops.eval_metric_ops)
  def _GetModelFnOpsForMode(self, mode):
    """Helper for testGetDynamicRnnModelFn{Train,Eval,Infer}()."""
    model_fn = dynamic_rnn_estimator._get_dynamic_rnn_model_fn(
        self.rnn_cell,
        target_column=target_column_lib.multi_class_target(n_classes=2),
        # Only CLASSIFICATION yields eval metrics to test for.
        problem_type=dynamic_rnn_estimator.ProblemType.CLASSIFICATION,
        prediction_type=dynamic_rnn_estimator.PredictionType.MULTIPLE_VALUE,
        optimizer='SGD',
        sequence_feature_columns=self.sequence_feature_columns,
        context_feature_columns=self.context_feature_columns,
        learning_rate=0.1)
    labels = self.GetClassificationTargetsOrNone(mode)
    model_fn_ops = model_fn(
        features=self.GetColumnsToTensors(), labels=labels, mode=mode)
    return model_fn_ops
  def testExport(self):
    """An estimator can be exported from a fresh instance after training."""
    input_feature_key = 'magic_input_feature_key'
    def get_input_fn(mode):
      def input_fn():
        features = self.GetColumnsToTensors()
        if mode == model_fn_lib.ModeKeys.INFER:
          input_examples = array_ops.placeholder(dtypes.string)
          features[input_feature_key] = input_examples
          # Real code would now parse features out of input_examples,
          # but this test can just stick to the constants above.
        return features, self.GetClassificationTargetsOrNone(mode)
      return input_fn
    model_dir = tempfile.mkdtemp()
    def estimator_fn():
      return dynamic_rnn_estimator.multi_value_rnn_classifier(
          num_classes=2,
          num_units=self.NUM_RNN_CELL_UNITS,
          sequence_feature_columns=self.sequence_feature_columns,
          context_feature_columns=self.context_feature_columns,
          predict_probabilities=True,
          model_dir=model_dir)
    # Train a bit to create an exportable checkpoint.
    estimator_fn().fit(input_fn=get_input_fn(model_fn_lib.ModeKeys.TRAIN),
                       steps=100)
    # Now export, but from a fresh estimator instance, like you would
    # in an export binary. That means .export() has to work without
    # .fit() being called on the same object.
    export_dir = tempfile.mkdtemp()
    print('Exporting to', export_dir)
    estimator_fn().export(
        export_dir,
        input_fn=get_input_fn(model_fn_lib.ModeKeys.INFER),
        use_deprecated_input_fn=False,
        input_feature_key=input_feature_key)
  def testStateTupleDictConversion(self):
    """Test `state_tuple_to_dict` and `dict_to_state_tuple`."""
    cell_sizes = [5, 3, 7]
    # A MultiRNNCell of LSTMCells is both a common choice and an interesting
    # test case, because it has two levels of nesting, with an inner class that
    # is not a plain tuple.
    cell = core_rnn_cell_impl.MultiRNNCell(
        [core_rnn_cell_impl.LSTMCell(i) for i in cell_sizes])
    state_dict = {
        dynamic_rnn_estimator._get_state_name(i):
        array_ops.expand_dims(math_ops.range(cell_size), 0)
        for i, cell_size in enumerate([5, 5, 3, 3, 7, 7])
    }
    expected_state = (core_rnn_cell_impl.LSTMStateTuple(
        np.reshape(np.arange(5), [1, -1]), np.reshape(np.arange(5), [1, -1])),
                      core_rnn_cell_impl.LSTMStateTuple(
                          np.reshape(np.arange(3), [1, -1]),
                          np.reshape(np.arange(3), [1, -1])),
                      core_rnn_cell_impl.LSTMStateTuple(
                          np.reshape(np.arange(7), [1, -1]),
                          np.reshape(np.arange(7), [1, -1])))
    actual_state = dynamic_rnn_estimator.dict_to_state_tuple(state_dict, cell)
    flattened_state = dynamic_rnn_estimator.state_tuple_to_dict(actual_state)
    with self.test_session() as sess:
      (state_dict_val, actual_state_val, flattened_state_val) = sess.run(
          [state_dict, actual_state, flattened_state])
    # Compare nested structures element-wise, preserving structure types.
    def _recursive_assert_equal(x, y):
      self.assertEqual(type(x), type(y))
      if isinstance(x, (list, tuple)):
        self.assertEqual(len(x), len(y))
        for i, _ in enumerate(x):
          _recursive_assert_equal(x[i], y[i])
      elif isinstance(x, np.ndarray):
        np.testing.assert_array_equal(x, y)
      else:
        self.fail('Unexpected type: {}'.format(type(x)))
    for k in state_dict_val.keys():
      np.testing.assert_array_almost_equal(
          state_dict_val[k],
          flattened_state_val[k],
          err_msg='Wrong value for state component {}.'.format(k))
    _recursive_assert_equal(expected_state, actual_state_val)
  def testMultiRNNState(self):
    """Test that state flattening/reconstruction works for `MultiRNNCell`."""
    batch_size = 11
    sequence_length = 16
    train_steps = 5
    cell_sizes = [4, 8, 7]
    learning_rate = 0.1
    def get_shift_input_fn(batch_size, sequence_length, seed=None):
      def input_fn():
        # Labels are a random binary sequence; inputs are the same
        # sequence shifted by one step, plus a fed-in initial state.
        random_sequence = random_ops.random_uniform(
            [batch_size, sequence_length + 1],
            0,
            2,
            dtype=dtypes.int32,
            seed=seed)
        labels = array_ops.slice(random_sequence, [0, 0],
                                 [batch_size, sequence_length])
        inputs = array_ops.expand_dims(
            math_ops.to_float(
                array_ops.slice(random_sequence, [0, 1],
                                [batch_size, sequence_length])), 2)
        input_dict = {
            dynamic_rnn_estimator._get_state_name(i): random_ops.random_uniform(
                [batch_size, cell_size], seed=((i + 1) * seed))
            for i, cell_size in enumerate([4, 4, 8, 8, 7, 7])
        }
        input_dict['inputs'] = inputs
        return input_dict, labels
      return input_fn
    seq_columns = [feature_column.real_valued_column('inputs', dimension=1)]
    config = run_config.RunConfig(tf_random_seed=21212)
    cell = core_rnn_cell_impl.MultiRNNCell(
        [core_rnn_cell_impl.BasicLSTMCell(size) for size in cell_sizes])
    sequence_estimator = dynamic_rnn_estimator.multi_value_rnn_classifier(
        num_classes=2,
        num_units=None,
        sequence_feature_columns=seq_columns,
        cell_type=cell,
        learning_rate=learning_rate,
        config=config,
        predict_probabilities=True)
    train_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=12321)
    eval_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=32123)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    prediction_dict = sequence_estimator.predict(
        input_fn=eval_input_fn, as_iterable=False)
    # Each LSTM layer contributes two state pieces (c and h).
    for i, state_size in enumerate([4, 4, 8, 8, 7, 7]):
      state_piece = prediction_dict[dynamic_rnn_estimator._get_state_name(i)]
      self.assertListEqual(list(state_piece.shape), [batch_size, state_size])
  def testMultipleRuns(self):
    """Tests resuming training by feeding state."""
    cell_sizes = [4, 7]
    batch_size = 11
    learning_rate = 0.1
    train_sequence_length = 21
    train_steps = 121
    # Sequence lengths for each incremental `predict` call below.
    prediction_steps = [3, 2, 5, 11, 6]

    def get_input_fn(batch_size, sequence_length, state_dict, starting_step=0):
      # Builds an input_fn producing a deterministic alternating 0/1
      # sequence (offset by `starting_step`) plus any previously captured
      # RNN state from `state_dict`, so prediction can resume mid-stream.

      def input_fn():
        sequence = constant_op.constant(
            [[(starting_step + i + j) % 2 for j in range(sequence_length + 1)]
             for i in range(batch_size)],
            dtype=dtypes.int32)
        labels = array_ops.slice(sequence, [0, 0],
                                 [batch_size, sequence_length])
        inputs = array_ops.expand_dims(
            math_ops.to_float(
                array_ops.slice(sequence, [0, 1],
                                [batch_size, sequence_length])), 2)
        # NOTE: `state_dict` is used as the feature dict directly, so the
        # state tensors ride along with 'inputs'.
        input_dict = state_dict
        input_dict['inputs'] = inputs
        return input_dict, labels

      return input_fn

    seq_columns = [feature_column.real_valued_column('inputs', dimension=1)]
    config = run_config.RunConfig(tf_random_seed=21212)
    cell = core_rnn_cell_impl.MultiRNNCell(
        [core_rnn_cell_impl.BasicLSTMCell(size) for size in cell_sizes])
    # Persistent model_dir lets the repeated predict() calls reuse the
    # trained checkpoint.
    model_dir = tempfile.mkdtemp()
    sequence_estimator = dynamic_rnn_estimator.multi_value_rnn_classifier(
        num_classes=2,
        num_units=None,
        sequence_feature_columns=seq_columns,
        cell_type=cell,
        learning_rate=learning_rate,
        config=config,
        model_dir=model_dir)

    train_input_fn = get_input_fn(
        batch_size, train_sequence_length, state_dict={})

    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)

    def incremental_predict(estimator, increments):
      """Run `estimator.predict` for `i` steps for `i` in `increments`."""
      step = 0
      incremental_state_dict = {}
      for increment in increments:
        input_fn = get_input_fn(
            batch_size,
            increment,
            state_dict=incremental_state_dict,
            starting_step=step)
        prediction_dict = estimator.predict(
            input_fn=input_fn, as_iterable=False)
        step += increment
        # Capture the final RNN state so the next call resumes from it.
        incremental_state_dict = {
            k: v
            for (k, v) in prediction_dict.items()
            if k.startswith(dynamic_rnn_estimator.RNNKeys.STATE_PREFIX)
        }
      return prediction_dict

    pred_all_at_once = incremental_predict(sequence_estimator,
                                           [sum(prediction_steps)])
    pred_step_by_step = incremental_predict(sequence_estimator,
                                            prediction_steps)

    # Check that the last `prediction_steps[-1]` steps give the same
    # predictions.
    np.testing.assert_array_equal(
        pred_all_at_once['predictions'][:, -1 * prediction_steps[-1]:],
        pred_step_by_step['predictions'],
        err_msg='Mismatch on last {} predictions.'.format(prediction_steps[-1]))
    # Check that final states are identical.
    for k, v in pred_all_at_once.items():
      if k.startswith(dynamic_rnn_estimator.RNNKeys.STATE_PREFIX):
        np.testing.assert_array_equal(
            v, pred_step_by_step[k], err_msg='Mismatch on state {}.'.format(k))
# TODO(jamieas): move all tests below to a benchmark test.
class DynamicRNNEstimatorLearningTest(test.TestCase):
  """Learning tests for dynamic RNN Estimators.

  Each test trains an estimator on a synthetic, seeded task and asserts
  a loss/accuracy threshold, so failures indicate a learning regression.
  """

  def testLearnSineFunction(self):
    """Tests learning a sine function."""
    batch_size = 8
    sequence_length = 64
    train_steps = 200
    eval_steps = 20
    cell_size = 4
    learning_rate = 0.1
    loss_threshold = 0.02

    def get_sin_input_fn(batch_size, sequence_length, increment, seed=None):
      # Inputs are samples of a sine curve; labels are the same curve
      # shifted forward by one step (next-value prediction).

      def _sin_fn(x):
        ranger = math_ops.linspace(
            array_ops.reshape(x[0], []), (sequence_length - 1) * increment,
            sequence_length + 1)
        return math_ops.sin(ranger)

      def input_fn():
        # Random phase offsets in [0, 2*pi) give varied curves per batch.
        starts = random_ops.random_uniform(
            [batch_size], maxval=(2 * np.pi), seed=seed)
        sin_curves = functional_ops.map_fn(
            _sin_fn, (starts,), dtype=dtypes.float32)
        inputs = array_ops.expand_dims(
            array_ops.slice(sin_curves, [0, 0], [batch_size, sequence_length]),
            2)
        labels = array_ops.slice(sin_curves, [0, 1],
                                 [batch_size, sequence_length])
        return {'inputs': inputs}, labels

      return input_fn

    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=1234)
    sequence_estimator = dynamic_rnn_estimator.multi_value_rnn_regressor(
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        learning_rate=learning_rate,
        input_keep_probability=0.9,
        output_keep_probability=0.9,
        config=config)
    train_input_fn = get_sin_input_fn(
        batch_size, sequence_length, np.pi / 32, seed=1234)
    eval_input_fn = get_sin_input_fn(
        batch_size, sequence_length, np.pi / 32, seed=4321)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    loss = sequence_estimator.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)['loss']
    self.assertLess(loss, loss_threshold,
                    'Loss should be less than {}; got {}'.format(loss_threshold,
                                                                 loss))

  def testLearnShiftByOne(self):
    """Tests that learning a 'shift-by-one' example.

    Each label sequence consists of the input sequence 'shifted' by one place.
    The RNN must learn to 'remember' the previous input.
    """
    batch_size = 16
    sequence_length = 32
    train_steps = 200
    eval_steps = 20
    cell_size = 4
    learning_rate = 0.3
    accuracy_threshold = 0.9

    def get_shift_input_fn(batch_size, sequence_length, seed=None):

      def input_fn():
        # Random binary sequence; labels are the first `sequence_length`
        # entries, inputs are the same sequence shifted by one.
        random_sequence = random_ops.random_uniform(
            [batch_size, sequence_length + 1],
            0,
            2,
            dtype=dtypes.int32,
            seed=seed)
        labels = array_ops.slice(random_sequence, [0, 0],
                                 [batch_size, sequence_length])
        inputs = array_ops.expand_dims(
            math_ops.to_float(
                array_ops.slice(random_sequence, [0, 1],
                                [batch_size, sequence_length])), 2)
        return {'inputs': inputs}, labels

      return input_fn

    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=21212)
    sequence_estimator = dynamic_rnn_estimator.multi_value_rnn_classifier(
        num_classes=2,
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        learning_rate=learning_rate,
        config=config,
        predict_probabilities=True)
    train_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=12321)
    eval_input_fn = get_shift_input_fn(batch_size, sequence_length, seed=32123)
    sequence_estimator.fit(input_fn=train_input_fn, steps=train_steps)
    evaluation = sequence_estimator.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)
    accuracy = evaluation['accuracy']
    self.assertGreater(accuracy, accuracy_threshold,
                       'Accuracy should be higher than {}; got {}'.format(
                           accuracy_threshold, accuracy))

    # Testing `predict` when `predict_probabilities=True`.
    prediction_dict = sequence_estimator.predict(
        input_fn=eval_input_fn, as_iterable=False)
    self.assertListEqual(
        sorted(list(prediction_dict.keys())),
        sorted([
            dynamic_rnn_estimator.RNNKeys.PREDICTIONS_KEY,
            dynamic_rnn_estimator.RNNKeys.PROBABILITIES_KEY,
            dynamic_rnn_estimator._get_state_name(0)
        ]))
    predictions = prediction_dict[dynamic_rnn_estimator.RNNKeys.PREDICTIONS_KEY]
    probabilities = prediction_dict[
        dynamic_rnn_estimator.RNNKeys.PROBABILITIES_KEY]
    self.assertListEqual(list(predictions.shape), [batch_size, sequence_length])
    self.assertListEqual(
        list(probabilities.shape), [batch_size, sequence_length, 2])

  def testLearnMean(self):
    """Test learning to calculate a mean."""
    batch_size = 16
    sequence_length = 3
    train_steps = 200
    eval_steps = 20
    cell_type = 'basic_rnn'
    cell_size = 8
    optimizer_type = 'Momentum'
    learning_rate = 0.1
    momentum = 0.9
    loss_threshold = 0.1

    def get_mean_input_fn(batch_size, sequence_length, seed=None):

      def input_fn():
        # Create examples by choosing 'centers' and adding uniform noise.
        centers = math_ops.matmul(
            random_ops.random_uniform(
                [batch_size, 1], -0.75, 0.75, dtype=dtypes.float32, seed=seed),
            array_ops.ones([1, sequence_length]))
        noise = random_ops.random_uniform(
            [batch_size, sequence_length],
            -0.25,
            0.25,
            dtype=dtypes.float32,
            seed=seed)
        sequences = centers + noise
        inputs = array_ops.expand_dims(sequences, 2)
        labels = math_ops.reduce_mean(sequences, reduction_indices=[1])
        return {'inputs': inputs}, labels

      return input_fn

    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=6)
    sequence_regressor = dynamic_rnn_estimator.single_value_rnn_regressor(
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        cell_type=cell_type,
        optimizer_type=optimizer_type,
        learning_rate=learning_rate,
        momentum=momentum,
        config=config)
    train_input_fn = get_mean_input_fn(batch_size, sequence_length, 121)
    eval_input_fn = get_mean_input_fn(batch_size, sequence_length, 212)
    sequence_regressor.fit(input_fn=train_input_fn, steps=train_steps)
    evaluation = sequence_regressor.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)
    loss = evaluation['loss']
    self.assertLess(loss, loss_threshold,
                    'Loss should be less than {}; got {}'.format(loss_threshold,
                                                                 loss))

  def testLearnMajority(self):
    """Test learning the 'majority' function."""
    batch_size = 16
    sequence_length = 7
    train_steps = 200
    eval_steps = 20
    cell_type = 'lstm'
    cell_size = 4
    optimizer_type = 'Momentum'
    learning_rate = 2.0
    momentum = 0.9
    accuracy_threshold = 0.9

    def get_majority_input_fn(batch_size, sequence_length, seed=None):
      random_seed.set_random_seed(seed)

      def input_fn():
        # Label is 1 iff more than half the binary inputs are 1.
        random_sequence = random_ops.random_uniform(
            [batch_size, sequence_length], 0, 2, dtype=dtypes.int32, seed=seed)
        inputs = array_ops.expand_dims(math_ops.to_float(random_sequence), 2)
        labels = math_ops.to_int32(
            array_ops.squeeze(
                math_ops.reduce_sum(
                    inputs, reduction_indices=[1]) > (sequence_length / 2.0)))
        return {'inputs': inputs}, labels

      return input_fn

    seq_columns = [
        feature_column.real_valued_column(
            'inputs', dimension=cell_size)
    ]
    config = run_config.RunConfig(tf_random_seed=77)
    sequence_classifier = dynamic_rnn_estimator.single_value_rnn_classifier(
        num_classes=2,
        num_units=cell_size,
        sequence_feature_columns=seq_columns,
        cell_type=cell_type,
        optimizer_type=optimizer_type,
        learning_rate=learning_rate,
        momentum=momentum,
        config=config,
        predict_probabilities=True)
    train_input_fn = get_majority_input_fn(batch_size, sequence_length, 1111)
    eval_input_fn = get_majority_input_fn(batch_size, sequence_length, 2222)
    sequence_classifier.fit(input_fn=train_input_fn, steps=train_steps)
    evaluation = sequence_classifier.evaluate(
        input_fn=eval_input_fn, steps=eval_steps)
    accuracy = evaluation['accuracy']
    self.assertGreater(accuracy, accuracy_threshold,
                       'Accuracy should be higher than {}; got {}'.format(
                           accuracy_threshold, accuracy))

    # Testing `predict` when `predict_probabilities=True`.
    prediction_dict = sequence_classifier.predict(
        input_fn=eval_input_fn, as_iterable=False)
    self.assertListEqual(
        sorted(list(prediction_dict.keys())),
        sorted([
            dynamic_rnn_estimator.RNNKeys.PREDICTIONS_KEY,
            dynamic_rnn_estimator.RNNKeys.PROBABILITIES_KEY,
            dynamic_rnn_estimator._get_state_name(0),
            dynamic_rnn_estimator._get_state_name(1)
        ]))
    predictions = prediction_dict[dynamic_rnn_estimator.RNNKeys.PREDICTIONS_KEY]
    probabilities = prediction_dict[
        dynamic_rnn_estimator.RNNKeys.PROBABILITIES_KEY]
    self.assertListEqual(list(predictions.shape), [batch_size])
    self.assertListEqual(list(probabilities.shape), [batch_size, 2])
# Run the test suite when invoked directly as a script.
if __name__ == '__main__':
  test.main()
| |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell messaging module.
This module defines the different message types that are passed between
cells and the methods that they can call when the target cell has been
reached.
The interface into this module is the MessageRunner class.
"""
import sys
from eventlet import queue
from oslo.config import cfg
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import compute
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import utils
# Configuration options for cell messaging, registered under the
# [cells] group in nova.conf.
cell_messaging_opts = [
    cfg.IntOpt('max_hop_count',
               default=10,
               help='Maximum number of hops for cells routing.'),
    cfg.StrOpt('scheduler',
               default='nova.cells.scheduler.CellsScheduler',
               help='Cells scheduler to use')]

CONF = cfg.CONF
# These options are defined in nova.cells.opts; import them so we can
# read CONF.cells.name and CONF.cells.call_timeout here.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_messaging_opts, group='cells')

LOG = logging.getLogger(__name__)

# Separator used between cell names for the 'full cell name' and routing
# path.
_PATH_CELL_SEP = cells_utils._PATH_CELL_SEP
def _reverse_path(path):
    """Reverse a routing path.  Used for sending responses upstream.

    E.g. 'api!region!child' becomes 'child!region!api'.
    """
    cells = path.split(_PATH_CELL_SEP)
    return _PATH_CELL_SEP.join(reversed(cells))
def _response_cell_name_from_path(routing_path, neighbor_only=False):
    """Reverse the routing_path.  If we only want to send to our parent,
    set neighbor_only to True.
    """
    reversed_path = _reverse_path(routing_path)
    if neighbor_only and len(reversed_path) > 1:
        # Keep only ourselves plus the first (neighbor) cell of the
        # reversed path.
        return _PATH_CELL_SEP.join(
            reversed_path.split(_PATH_CELL_SEP)[:2])
    return reversed_path
#
# Message classes.
#
class _BaseMessage(object):
    """Base message class.  It defines data that is passed with every
    single message through every cell.

    Messages are JSON-ified before sending and turned back into a
    class instance when being received.

    Every message has a unique ID.  This is used to route responses
    back to callers.  In the future, this might be used to detect
    receiving the same message more than once.

    routing_path is updated on every hop through a cell.  The current
    cell name is appended to it (cells are separated by
    _PATH_CELL_SEP ('!')).  This is used to tell if we've reached the
    target cell and also to determine the source of a message for
    responses by reversing it.

    hop_count is incremented and compared against max_hop_count.  The
    only current usefulness of this is to break out of a routing loop
    if someone has a broken config.

    fanout means to send to all nova-cells services running in a cell.
    This is useful for capacity and capability broadcasting as well
    as making sure responses get back to the nova-cells service that
    is waiting.
    """
    # Override message_type in a subclass
    message_type = None

    # Attributes serialized by _to_dict()/to_json() when the message
    # crosses a cell boundary.
    base_attrs_to_json = ['message_type',
                          'ctxt',
                          'method_name',
                          'method_kwargs',
                          'direction',
                          'need_response',
                          'fanout',
                          'uuid',
                          'routing_path',
                          'hop_count',
                          'max_hop_count']

    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, need_response=False, fanout=False, uuid=None,
                 routing_path=None, hop_count=0, max_hop_count=None,
                 **kwargs):
        self.ctxt = ctxt
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        # Copy these so appends in subclasses don't mutate the class-level
        # list shared by all messages.
        self.base_attrs_to_json = self.base_attrs_to_json[:]
        # Normally this would just be CONF.cells.name, but going through
        # the msg_runner allows us to stub it more easily.
        self.our_path_part = self.msg_runner.our_name
        self.uuid = uuid
        if self.uuid is None:
            self.uuid = uuidutils.generate_uuid()
        self.method_name = method_name
        self.method_kwargs = method_kwargs
        self.direction = direction
        self.need_response = need_response
        self.fanout = fanout
        self.routing_path = routing_path
        self.hop_count = hop_count
        if max_hop_count is None:
            max_hop_count = CONF.cells.max_hop_count
        self.max_hop_count = max_hop_count
        self.is_broadcast = False
        # Record this cell in routing_path and bump hop_count.
        self._append_hop()
        # Each sub-class should set this when the message is inited
        self.next_hops = []
        # Set by _setup_response_queue() when a caller in this cell is
        # waiting for response(s).
        self.resp_queue = None

    def __repr__(self):
        # Drop method_kwargs from the repr: they can be large and may
        # contain sensitive data.
        _dict = self._to_dict()
        _dict.pop('method_kwargs')
        return "<%s: %s>" % (self.__class__.__name__, _dict)

    def _append_hop(self):
        """Add our hop to the routing_path."""
        routing_path = (self.routing_path and
                        self.routing_path + _PATH_CELL_SEP or '')
        self.routing_path = routing_path + self.our_path_part
        self.hop_count += 1

    def _at_max_hop_count(self, do_raise=True):
        """Check if we're at the max hop count.  If we are and do_raise is
        True, raise CellMaxHopCountReached.  If we are at the max and
        do_raise is False... return True, else False.
        """
        if self.hop_count >= self.max_hop_count:
            if do_raise:
                raise exception.CellMaxHopCountReached(
                        hop_count=self.hop_count)
            return True
        return False

    def _process_locally(self):
        """It's been determined that we should process this message in this
        cell.  Go through the MessageRunner to call the appropriate
        method for this message.  Catch the response and/or exception and
        encode it within a Response instance.  Return it so the caller
        can potentially return it to another cell... or return it to
        a caller waiting in this cell.
        """
        try:
            resp_value = self.msg_runner._process_message_locally(self)
            failure = False
        except Exception as exc:
            resp_value = sys.exc_info()
            failure = True
            LOG.exception(_("Error processing message locally: %(exc)s"),
                          locals())
        return Response(self.routing_path, resp_value, failure)

    def _setup_response_queue(self):
        """Shortcut to creating a response queue in the MessageRunner."""
        self.resp_queue = self.msg_runner._setup_response_queue(self)

    def _cleanup_response_queue(self):
        """Shortcut to deleting a response queue in the MessageRunner."""
        if self.resp_queue:
            self.msg_runner._cleanup_response_queue(self)
            self.resp_queue = None

    def _wait_for_json_responses(self, num_responses=1):
        """Wait for response(s) to be put into the eventlet queue.  Since
        each queue entry actually contains a list of JSON-ified responses,
        combine them all into a single list to return.

        Destroy the eventlet queue when done.

        :raises exception.CellTimeout: if a response doesn't arrive
            within CONF.cells.call_timeout seconds.
        """
        if not self.resp_queue:
            # Source is not actually expecting a response
            return
        responses = []
        wait_time = CONF.cells.call_timeout
        try:
            for x in xrange(num_responses):
                json_responses = self.resp_queue.get(timeout=wait_time)
                responses.extend(json_responses)
        except queue.Empty:
            raise exception.CellTimeout()
        finally:
            self._cleanup_response_queue()
        return responses

    def _send_json_responses(self, json_responses, neighbor_only=False,
                             fanout=False):
        """Send list of responses to this message.  Responses passed here
        are JSON-ified.  Targeted messages have a single response while
        Broadcast messages may have multiple responses.

        If this cell was the source of the message, these responses will
        be returned from self.process().

        Otherwise, we will route the response to the source of the
        request.  If 'neighbor_only' is True, the response will be sent
        to the neighbor cell, not the original requester.  Broadcast
        messages get aggregated at each hop, so neighbor_only will be
        True for those messages.
        """
        if not self.need_response:
            return
        if self.source_is_us():
            responses = []
            for json_response in json_responses:
                responses.append(Response.from_json(json_response))
            return responses
        # Responses travel opposite to the original message.
        direction = 'down' if self.direction == 'up' else 'up'
        response_kwargs = {'orig_message': self.to_json(),
                           'responses': json_responses}
        target_cell = _response_cell_name_from_path(self.routing_path,
                neighbor_only=neighbor_only)
        response = self.msg_runner._create_response_message(self.ctxt,
                direction, target_cell, self.uuid, response_kwargs,
                fanout=fanout)
        response.process()

    def _send_response(self, response, neighbor_only=False):
        """Send a response to this message.  If the source of the
        request was ourselves, just return the response.  It'll be
        passed back to the caller of self.process().  See DocString for
        _send_json_responses() as it handles most of the real work for
        this method.

        'response' is an instance of Response class.
        """
        if not self.need_response:
            return
        if self.source_is_us():
            return response
        self._send_json_responses([response.to_json()],
                                  neighbor_only=neighbor_only)

    def _send_response_from_exception(self, exc_info):
        """Take an exception as returned from sys.exc_info(), encode
        it in a Response, and send it.
        """
        response = Response(self.routing_path, exc_info, True)
        return self._send_response(response)

    def _to_dict(self):
        """Convert a message to a dictionary.  Only used internally."""
        _dict = {}
        for key in self.base_attrs_to_json:
            _dict[key] = getattr(self, key)
        return _dict

    def to_json(self):
        """Convert a message into JSON for sending to a sibling cell."""
        _dict = self._to_dict()
        # Convert context to dict.
        _dict['ctxt'] = _dict['ctxt'].to_dict()
        return jsonutils.dumps(_dict)

    def source_is_us(self):
        """Did this cell create this message?"""
        return self.routing_path == self.our_path_part

    def process(self):
        """Process a message.  Deal with it locally and/or forward it to a
        sibling cell.

        Override in a subclass.
        """
        raise NotImplementedError()
class _TargetedMessage(_BaseMessage):
    """A targeted message is a message that is destined for a specific
    single cell.

    'target_cell' can be a full cell name like 'api!child-cell' or it can
    be an instance of the CellState class if the target is a neighbor cell.
    """
    message_type = 'targeted'

    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, target_cell, **kwargs):
        super(_TargetedMessage, self).__init__(msg_runner, ctxt,
                method_name, method_kwargs, direction, **kwargs)
        if isinstance(target_cell, cells_state.CellState):
            # Neighbor cell or ourselves.  Convert it to a 'full path'.
            if target_cell.is_me:
                target_cell = self.our_path_part
            else:
                target_cell = '%s%s%s' % (self.our_path_part,
                                          _PATH_CELL_SEP,
                                          target_cell.name)
        self.target_cell = target_cell
        # The target must survive the JSON round-trip between cells.
        self.base_attrs_to_json.append('target_cell')

    def _get_next_hop(self):
        """Return the CellState for the next hop.  If the target is the
        local cell, return the local cell's own CellState.

        Raises CellRoutingInconsistency when the routing path is not a
        prefix of the target cell name or the next cell is unknown.
        """
        if self.target_cell == self.routing_path:
            return self.state_manager.my_cell_state
        target_cell = self.target_cell
        routing_path = self.routing_path
        current_hops = routing_path.count(_PATH_CELL_SEP)
        next_hop_num = current_hops + 1
        dest_hops = target_cell.count(_PATH_CELL_SEP)
        if dest_hops < current_hops:
            # We've already hopped through more cells than exist in the
            # target's name: routing has gone somewhere inconsistent.
            reason = _("destination is %(target_cell)s but routing_path "
                       "is %(routing_path)s") % locals()
            raise exception.CellRoutingInconsistency(reason=reason)
        dest_name_parts = target_cell.split(_PATH_CELL_SEP)
        if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) !=
                routing_path):
            # The path we've taken so far is not a prefix of the target
            # name, so we could never reach the target from here.
            reason = _("destination is %(target_cell)s but routing_path "
                       "is %(routing_path)s") % locals()
            raise exception.CellRoutingInconsistency(reason=reason)
        next_hop_name = dest_name_parts[next_hop_num]
        if self.direction == 'up':
            next_hop = self.state_manager.get_parent_cell(next_hop_name)
        else:
            next_hop = self.state_manager.get_child_cell(next_hop_name)
        if not next_hop:
            cell_type = 'parent' if self.direction == 'up' else 'child'
            reason = _("Unknown %(cell_type)s when routing to "
                       "%(target_cell)s") % locals()
            raise exception.CellRoutingInconsistency(reason=reason)
        return next_hop

    def process(self):
        """Process a targeted message.  This is called for all cells
        that touch this message.  If the local cell is the one that
        created this message, we reply directly with a Response instance.
        If the local cell is not the target, an eventlet queue is created
        and we wait for the response to show up via another thread
        receiving the Response back.

        Responses to targeted messages are routed directly back to the
        source.  No eventlet queues are created in intermediate hops.

        All exceptions for processing the message across the whole
        routing path are caught and encoded within the Response and
        returned to the caller.
        """
        try:
            next_hop = self._get_next_hop()
        except Exception as exc:
            exc_info = sys.exc_info()
            LOG.exception(_("Error locating next hop for message: %(exc)s"),
                          locals())
            return self._send_response_from_exception(exc_info)

        if next_hop.is_me:
            # Final destination.
            response = self._process_locally()
            return self._send_response(response)

        # Need to forward via neighbor cell.
        if self.need_response and self.source_is_us():
            # A response is needed and the source of the message is
            # this cell.  Create the eventlet queue.
            self._setup_response_queue()
            wait_for_response = True
        else:
            wait_for_response = False

        try:
            # This is inside the try block, so we can encode the
            # exception and return it to the caller.
            if self.hop_count >= self.max_hop_count:
                raise exception.CellMaxHopCountReached(
                        hop_count=self.hop_count)
            next_hop.send_message(self)
        except Exception as exc:
            exc_info = sys.exc_info()
            err_str = _("Failed to send message to cell: %(next_hop)s: "
                        "%(exc)s")
            LOG.exception(err_str, locals())
            self._cleanup_response_queue()
            return self._send_response_from_exception(exc_info)

        if wait_for_response:
            # Targeted messages only have 1 response.
            remote_response = self._wait_for_json_responses()[0]
            return Response.from_json(remote_response)
class _BroadcastMessage(_BaseMessage):
    """A broadcast message.  This means to call a method in every single
    cell going in a certain direction.
    """
    message_type = 'broadcast'

    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, run_locally=True, **kwargs):
        super(_BroadcastMessage, self).__init__(msg_runner, ctxt,
                method_name, method_kwargs, direction, **kwargs)
        # The local cell creating this message has the option
        # to be able to process the message locally or not.
        self.run_locally = run_locally
        self.is_broadcast = True

    def _get_next_hops(self):
        """Return the list of cells to forward this message to next.
        Empty when we've hit the hop limit; otherwise all children or
        all parents, depending on direction.
        """
        if self.hop_count >= self.max_hop_count:
            return []
        if self.direction == 'down':
            return self.state_manager.get_child_cells()
        else:
            return self.state_manager.get_parent_cells()

    def _send_to_cells(self, target_cells):
        """Send a message to multiple cells."""
        for cell in target_cells:
            cell.send_message(self)

    def _send_json_responses(self, json_responses):
        """Responses to broadcast messages always need to go to the
        neighbor cell from which we received this message.  That
        cell aggregates the responses and makes sure to forward them
        to the correct source.
        """
        return super(_BroadcastMessage, self)._send_json_responses(
                json_responses, neighbor_only=True, fanout=True)

    def process(self):
        """Process a broadcast message.  This is called for all cells
        that touch this message.

        The message is sent to all cells in the certain direction and
        the creator of this message has the option of whether or not
        to process it locally as well.

        If responses from all cells are required, each hop creates an
        eventlet queue and waits for responses from its immediate
        neighbor cells.  All responses are then aggregated into a
        single list and are returned to the neighbor cell until the
        source is reached.

        When the source is reached, a list of Response instances are
        returned to the caller.

        All exceptions for processing the message across the whole
        routing path are caught and encoded within the Response and
        returned to the caller.  It is possible to get a mix of
        successful responses and failure responses.  The caller is
        responsible for dealing with this.
        """
        try:
            next_hops = self._get_next_hops()
        except Exception as exc:
            exc_info = sys.exc_info()
            LOG.exception(_("Error locating next hops for message: %(exc)s"),
                          locals())
            return self._send_response_from_exception(exc_info)

        # Short circuit if we don't need to respond
        if not self.need_response:
            if self.run_locally:
                self._process_locally()
            self._send_to_cells(next_hops)
            return

        # We'll need to aggregate all of the responses (from ourself
        # and our sibling cells) into 1 response
        try:
            self._setup_response_queue()
            self._send_to_cells(next_hops)
        except Exception as exc:
            # Error just trying to send to cells.  Send a single response
            # with the failure.
            exc_info = sys.exc_info()
            LOG.exception(_("Error sending message to next hops: %(exc)s"),
                          locals())
            self._cleanup_response_queue()
            return self._send_response_from_exception(exc_info)

        if self.run_locally:
            # Run locally and store the Response.
            local_response = self._process_locally()
        else:
            local_response = None

        try:
            # One queued response is expected per neighbor; each entry is
            # itself a list of aggregated JSON responses.
            remote_responses = self._wait_for_json_responses(
                    num_responses=len(next_hops))
        except Exception as exc:
            # Error waiting for responses, most likely a timeout.
            # Send a single response back with the failure.
            exc_info = sys.exc_info()
            err_str = _("Error waiting for responses from neighbor cells: "
                        "%(exc)s")
            LOG.exception(err_str, locals())
            return self._send_response_from_exception(exc_info)

        if local_response:
            remote_responses.append(local_response.to_json())
        return self._send_json_responses(remote_responses)
class _ResponseMessage(_TargetedMessage):
    """A response message is really just a special targeted message,
    saying to call 'parse_responses' when we reach the source of a 'call'.

    The 'fanout' attribute on this message may be true if we're responding
    to a broadcast or if we're about to respond to the source of an
    original target message.  Because multiple nova-cells services may
    be running within a cell, we need to make sure the response gets
    back to the correct one, so we have to fanout.
    """
    message_type = 'response'

    def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
                 direction, target_cell, response_uuid, **kwargs):
        super(_ResponseMessage, self).__init__(msg_runner, ctxt,
                method_name, method_kwargs, direction, target_cell, **kwargs)
        # uuid of the original message this is a response to; used to
        # locate the response queue the caller is waiting on.
        self.response_uuid = response_uuid
        self.base_attrs_to_json.append('response_uuid')

    def process(self):
        """Process a response.  If the target is the local cell, process
        the response here.  Otherwise, forward it to where it needs to
        go.
        """
        next_hop = self._get_next_hop()
        if next_hop.is_me:
            self._process_locally()
            return
        if self.fanout is False:
            # Really there's 1 more hop on each of these below, but
            # it doesn't matter for this logic.
            target_hops = self.target_cell.count(_PATH_CELL_SEP)
            current_hops = self.routing_path.count(_PATH_CELL_SEP)
            if current_hops + 1 == target_hops:
                # Next hop is the target.. so we must fanout.  See
                # DocString above.
                self.fanout = True
        next_hop.send_message(self)
#
# Methods that may be called when processing messages after reaching
# a target cell.
#
class _BaseMessageMethods(base.Base):
    """Base class for defining methods by message types."""

    def __init__(self, msg_runner):
        super(_BaseMessageMethods, self).__init__()
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        self.compute_api = compute.API()

    def task_log_get_all(self, message, task_name, period_beginning,
                         period_ending, host, state):
        """Get task logs from the DB.  The message could have
        directly targeted this cell, or it could have been a broadcast
        message.

        If 'host' is not None, filter by host.
        If 'state' is not None, filter by state.
        """
        logs = self.db.task_log_get_all(
                message.ctxt, task_name, period_beginning, period_ending,
                host=host, state=state)
        # Primitive-ify so the result can be JSON-serialized for routing.
        return jsonutils.to_primitive(logs)
class _ResponseMessageMethods(_BaseMessageMethods):
    """Methods that are called from a ResponseMessage.  There's only
    1 method (parse_responses) and it is called when the message reaches
    the source of a 'call'.  All we do is stuff the response into the
    eventlet queue to signal the caller that's waiting.
    """

    def parse_responses(self, message, orig_message, responses):
        """Deliver the aggregated responses to the caller waiting on the
        queue keyed by the original message's uuid.
        """
        waiting_uuid = message.response_uuid
        self.msg_runner._put_response(waiting_uuid, responses)
class _TargetedMessageMethods(_BaseMessageMethods):
    """These are the methods that can be called when routing a message
    to a specific cell.
    """

    def __init__(self, *args, **kwargs):
        super(_TargetedMessageMethods, self).__init__(*args, **kwargs)

    def schedule_run_instance(self, message, host_sched_kwargs):
        """Parent cell told us to schedule new instance creation."""
        self.msg_runner.scheduler.run_instance(message, host_sched_kwargs)

    def run_compute_api_method(self, message, method_info):
        """Run a method in the compute api class.

        :param method_info: dict with 'method', 'method_args' and
            'method_kwargs' keys; method_args[0] must be an instance
            uuid, which is resolved to a DB instance before the call.
        """
        method = method_info['method']
        fn = getattr(self.compute_api, method, None)
        if not fn:
            detail = _("Unknown method '%(method)s' in compute API")
            raise exception.CellServiceAPIMethodNotFound(
                    detail=detail % locals())
        args = list(method_info['method_args'])
        # 1st arg is instance_uuid that we need to turn into the
        # instance object.
        instance_uuid = args[0]
        try:
            instance = self.db.instance_get_by_uuid(message.ctxt,
                                                    instance_uuid)
        except exception.InstanceNotFound:
            with excutils.save_and_reraise_exception():
                # Must be a race condition.  Let's try to resolve it by
                # telling the top level cells that this instance doesn't
                # exist.
                instance = {'uuid': instance_uuid}
                self.msg_runner.instance_destroy_at_top(message.ctxt,
                                                        instance)
        args[0] = instance
        return fn(message.ctxt, *args, **method_info['method_kwargs'])

    def update_capabilities(self, message, cell_name, capabilities):
        """A child cell told us about their capabilities."""
        LOG.debug(_("Received capabilities from child cell "
                    "%(cell_name)s: %(capabilities)s"), locals())
        self.state_manager.update_cell_capabilities(cell_name,
                capabilities)
        # Go ahead and update our parents now that a child updated us
        self.msg_runner.tell_parents_our_capabilities(message.ctxt)

    def update_capacities(self, message, cell_name, capacities):
        """A child cell told us about their capacity."""
        LOG.debug(_("Received capacities from child cell "
                    "%(cell_name)s: %(capacities)s"), locals())
        self.state_manager.update_cell_capacities(cell_name,
                capacities)
        # Go ahead and update our parents now that a child updated us
        self.msg_runner.tell_parents_our_capacities(message.ctxt)

    def announce_capabilities(self, message):
        """A parent cell has told us to send our capabilities, so let's
        do so.
        """
        self.msg_runner.tell_parents_our_capabilities(message.ctxt)

    def announce_capacities(self, message):
        """A parent cell has told us to send our capacity, so let's
        do so.
        """
        self.msg_runner.tell_parents_our_capacities(message.ctxt)

    def service_get_by_compute_host(self, message, host_name):
        """Return the service entry for a compute host."""
        service = self.db.service_get_by_compute_host(message.ctxt,
                                                      host_name)
        # Primitive-ify so the result can be JSON-serialized for routing.
        return jsonutils.to_primitive(service)

    def proxy_rpc_to_manager(self, message, host_name, rpc_message,
                             topic, timeout):
        """Proxy RPC to the given compute topic.

        Raises from service_get_by_compute_host if the host is unknown.
        """
        # Check that the host exists.
        self.db.service_get_by_compute_host(message.ctxt, host_name)
        if message.need_response:
            return rpc.call(message.ctxt, topic, rpc_message,
                            timeout=timeout)
        rpc.cast(message.ctxt, topic, rpc_message)

    def compute_node_get(self, message, compute_id):
        """Get compute node by ID."""
        compute_node = self.db.compute_node_get(message.ctxt,
                                                compute_id)
        # Primitive-ify so the result can be JSON-serialized for routing.
        return jsonutils.to_primitive(compute_node)
class _BroadcastMessageMethods(_BaseMessageMethods):
    """These are the methods that can be called as a part of a broadcast
    message.
    """
    def _at_the_top(self):
        """Are we the API level?"""
        return not self.state_manager.get_parent_cells()

    def instance_update_at_top(self, message, instance, **kwargs):
        """Update an instance in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance['uuid']
        # Remove things that we can't update in the top level cells.
        # 'metadata' is only updated in the API cell, so don't overwrite
        # it based on what child cells say.  Make sure to update
        # 'cell_name' based on the routing path.
        items_to_remove = ['id', 'security_groups', 'instance_type',
                'volumes', 'cell_name', 'name', 'metadata']
        for key in items_to_remove:
            instance.pop(key, None)
        instance['cell_name'] = _reverse_path(message.routing_path)
        # Fixup info_cache.  We'll have to update this separately if
        # it exists.
        info_cache = instance.pop('info_cache', None)
        if info_cache is not None:
            info_cache.pop('id', None)
            info_cache.pop('instance', None)
        # Fixup system_metadata (should be a dict for update, not a list)
        if ('system_metadata' in instance and
                isinstance(instance['system_metadata'], list)):
            sys_metadata = dict([(md['key'], md['value'])
                    for md in instance['system_metadata']])
            instance['system_metadata'] = sys_metadata
        LOG.debug(_("Got update for instance %(instance_uuid)s: "
                    "%(instance)s") % locals())
        # It's possible due to some weird condition that the instance
        # was already set as deleted... so we'll attempt to update
        # it with permissions that allows us to read deleted.
        with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
            try:
                self.db.instance_update(message.ctxt, instance_uuid,
                        instance, update_cells=False)
            except exception.NotFound:
                # FIXME(comstud): Strange.  Need to handle quotas here,
                # if we actually want this code to remain..
                self.db.instance_create(message.ctxt, instance)
        if info_cache:
            try:
                self.db.instance_info_cache_update(message.ctxt,
                        instance_uuid, info_cache, update_cells=False)
            except exception.InstanceInfoCacheNotFound:
                # Can happen if we try to update a deleted instance's
                # network information.
                pass

    def instance_destroy_at_top(self, message, instance, **kwargs):
        """Destroy an instance from the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance['uuid']
        LOG.debug(_("Got update to delete instance %(instance_uuid)s") %
                locals())
        try:
            self.db.instance_destroy(message.ctxt, instance_uuid,
                    update_cells=False)
        except exception.InstanceNotFound:
            # Already gone; nothing to do.
            pass

    def instance_delete_everywhere(self, message, instance, delete_type,
                                   **kwargs):
        """Call compute API delete() or soft_delete() in every cell.
        This is used when the API cell doesn't know what cell an instance
        belongs to but the instance was requested to be deleted or
        soft-deleted.  So, we'll run it everywhere.
        """
        LOG.debug(_("Got broadcast to %(delete_type)s delete instance"),
                locals(), instance=instance)
        if delete_type == 'soft':
            self.compute_api.soft_delete(message.ctxt, instance)
        else:
            self.compute_api.delete(message.ctxt, instance)

    def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
        """Create an instance fault in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        # The child cell's DB row id is meaningless at the top.
        items_to_remove = ['id']
        for key in items_to_remove:
            instance_fault.pop(key, None)
        log_str = _("Got message to create instance fault: "
                "%(instance_fault)s")
        LOG.debug(log_str, locals())
        self.db.instance_fault_create(message.ctxt, instance_fault)

    def bw_usage_update_at_top(self, message, bw_update_info, **kwargs):
        """Update Bandwidth usage in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        self.db.bw_usage_update(message.ctxt, **bw_update_info)

    def _sync_instance(self, ctxt, instance):
        # Deleted instances get pushed to the top as destroys; live
        # ones as updates.
        if instance['deleted']:
            self.msg_runner.instance_destroy_at_top(ctxt, instance)
        else:
            self.msg_runner.instance_update_at_top(ctxt, instance)

    def sync_instances(self, message, project_id, updated_since, deleted,
                       **kwargs):
        """Force a sync of our instances up to our parent cells."""
        # Py2 'and/or' conditional: substitute "<all>" for None in logs.
        projid_str = project_id is None and "<all>" or project_id
        since_str = updated_since is None and "<all>" or updated_since
        LOG.info(_("Forcing a sync of instances, project_id="
                   "%(projid_str)s, updated_since=%(since_str)s"), locals())
        if updated_since is not None:
            updated_since = timeutils.parse_isotime(updated_since)
        instances = cells_utils.get_instances_to_sync(message.ctxt,
                updated_since=updated_since, project_id=project_id,
                deleted=deleted)
        for instance in instances:
            self._sync_instance(message.ctxt, instance)

    def service_get_all(self, message, filters):
        """Return services from this cell, post-filtered by 'filters'."""
        if filters is None:
            filters = {}
        # 'disabled' is the only filter the DB API understands directly.
        disabled = filters.pop('disabled', None)
        services = self.db.service_get_all(message.ctxt, disabled=disabled)
        ret_services = []
        for service in services:
            service = jsonutils.to_primitive(service)
            for key, val in filters.iteritems():
                if service[key] != val:
                    break
            else:
                # All remaining filters matched.
                ret_services.append(service)
        return ret_services

    def compute_node_get_all(self, message, hypervisor_match):
        """Return compute nodes in this cell."""
        if hypervisor_match is not None:
            nodes = self.db.compute_node_search_by_hypervisor(message.ctxt,
                    hypervisor_match)
        else:
            nodes = self.db.compute_node_get_all(message.ctxt)
        return jsonutils.to_primitive(nodes)

    def compute_node_stats(self, message):
        """Return compute node stats from this cell."""
        return self.db.compute_node_statistics(message.ctxt)
# Maps the wire-level message type string (as it appears in JSON-ified
# messages) to the Message class used to route/process it.
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
                                     'broadcast': _BroadcastMessage,
                                     'response': _ResponseMessage}
# Maps the same message type string to the methods class that implements
# the actions run when a message of that type is processed locally.
_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods,
                                     'broadcast': _BroadcastMessageMethods,
                                     'response': _ResponseMessageMethods}
#
# Below are the public interfaces into this module.
#
class MessageRunner(object):
    """This class is the main interface into creating messages and
    processing them.

    Public methods in this class are typically called by the CellsManager
    to create a new message and process it with the exception of
    'message_from_json' which should be used by CellsDrivers to convert
    a JSONified message it has received back into the appropriate Message
    class.

    Private methods are used internally when we need to keep some
    'global' state.  For instance, eventlet queues used for responses are
    held in this class.  Also, when a Message is process()ed above and
    it's determined we should take action locally,
    _process_message_locally() will be called.

    When needing to add a new method to call in a Cell2Cell message,
    define the new method below and also add it to the appropriate
    MessageMethods class where the real work will be done.
    """

    def __init__(self, state_manager):
        self.state_manager = state_manager
        cells_scheduler_cls = importutils.import_class(
                CONF.cells.scheduler)
        self.scheduler = cells_scheduler_cls(self)
        # Maps message uuid -> eventlet queue awaiting a response.
        self.response_queues = {}
        # Maps message type -> methods object handling that type.
        self.methods_by_type = {}
        self.our_name = CONF.cells.name
        for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
            self.methods_by_type[msg_type] = cls(self)

    def _process_message_locally(self, message):
        """Message processing will call this when its determined that
        the message should be processed within this cell.  Find the
        method to call based on the message type, and call it.  The
        caller is responsible for catching exceptions and returning
        results to cells, if needed.
        """
        methods = self.methods_by_type[message.message_type]
        fn = getattr(methods, message.method_name)
        return fn(message, **message.method_kwargs)

    def _put_response(self, response_uuid, response):
        """Put a response into a response queue.  This is called when
        a _ResponseMessage is processed in the cell that initiated a
        'call' to another cell.
        """
        resp_queue = self.response_queues.get(response_uuid)
        if not resp_queue:
            # Response queue is gone.  We must have restarted or we
            # received a response after our timeout period.
            return
        resp_queue.put(response)

    def _setup_response_queue(self, message):
        """Set up an eventlet queue to use to wait for replies.
        Replies come back from the target cell as a _ResponseMessage
        being sent back to the source.
        """
        resp_queue = queue.Queue()
        self.response_queues[message.uuid] = resp_queue
        return resp_queue

    def _cleanup_response_queue(self, message):
        """Stop tracking the response queue either because we're
        done receiving responses, or we've timed out.
        """
        try:
            del self.response_queues[message.uuid]
        except KeyError:
            # Ignore if queue is gone already somehow.
            pass

    def _create_response_message(self, ctxt, direction, target_cell,
            response_uuid, response_kwargs, **kwargs):
        """Create a ResponseMessage.  This is used internally within
        the messaging module.
        """
        return _ResponseMessage(self, ctxt, 'parse_responses',
                                response_kwargs, direction, target_cell,
                                response_uuid, **kwargs)

    def message_from_json(self, json_message):
        """Turns a message in JSON format into an appropriate Message
        instance.  This is called when cells receive a message from
        another cell.
        """
        message_dict = jsonutils.loads(json_message)
        message_type = message_dict.pop('message_type')
        # Need to convert context back.
        ctxt = message_dict['ctxt']
        message_dict['ctxt'] = context.RequestContext.from_dict(ctxt)
        message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type]
        return message_cls(self, **message_dict)

    def ask_children_for_capabilities(self, ctxt):
        """Tell child cells to send us capabilities.  This is typically
        called on startup of the nova-cells service.
        """
        child_cells = self.state_manager.get_child_cells()
        for child_cell in child_cells:
            message = _TargetedMessage(self, ctxt,
                                       'announce_capabilities',
                                       dict(), 'down', child_cell)
            message.process()

    def ask_children_for_capacities(self, ctxt):
        """Tell child cells to send us capacities.  This is typically
        called on startup of the nova-cells service.
        """
        child_cells = self.state_manager.get_child_cells()
        for child_cell in child_cells:
            message = _TargetedMessage(self, ctxt, 'announce_capacities',
                                       dict(), 'down', child_cell)
            message.process()

    def tell_parents_our_capabilities(self, ctxt):
        """Send our capabilities to parent cells."""
        parent_cells = self.state_manager.get_parent_cells()
        if not parent_cells:
            return
        my_cell_info = self.state_manager.get_my_state()
        capabs = self.state_manager.get_our_capabilities()
        LOG.debug(_("Updating parents with our capabilities: %(capabs)s"),
                  locals())
        # We have to turn the sets into lists so they can potentially
        # be json encoded when the raw message is sent.
        for key, values in capabs.items():
            capabs[key] = list(values)
        method_kwargs = {'cell_name': my_cell_info.name,
                         'capabilities': capabs}
        for cell in parent_cells:
            message = _TargetedMessage(self, ctxt, 'update_capabilities',
                    method_kwargs, 'up', cell, fanout=True)
            message.process()

    def tell_parents_our_capacities(self, ctxt):
        """Send our capacities to parent cells."""
        parent_cells = self.state_manager.get_parent_cells()
        if not parent_cells:
            return
        my_cell_info = self.state_manager.get_my_state()
        capacities = self.state_manager.get_our_capacities()
        LOG.debug(_("Updating parents with our capacities: %(capacities)s"),
                  locals())
        method_kwargs = {'cell_name': my_cell_info.name,
                         'capacities': capacities}
        for cell in parent_cells:
            message = _TargetedMessage(self, ctxt, 'update_capacities',
                    method_kwargs, 'up', cell, fanout=True)
            message.process()

    def schedule_run_instance(self, ctxt, target_cell, host_sched_kwargs):
        """Called by the scheduler to tell a child cell to schedule
        a new instance for build.
        """
        method_kwargs = dict(host_sched_kwargs=host_sched_kwargs)
        message = _TargetedMessage(self, ctxt, 'schedule_run_instance',
                                   method_kwargs, 'down',
                                   target_cell)
        message.process()

    def run_compute_api_method(self, ctxt, cell_name, method_info, call):
        """Call a compute API method in a specific cell."""
        message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
                                   dict(method_info=method_info), 'down',
                                   cell_name, need_response=call)
        return message.process()

    def instance_update_at_top(self, ctxt, instance):
        """Update an instance at the top level cell."""
        message = _BroadcastMessage(self, ctxt, 'instance_update_at_top',
                                    dict(instance=instance), 'up',
                                    run_locally=False)
        message.process()

    def instance_destroy_at_top(self, ctxt, instance):
        """Destroy an instance at the top level cell."""
        message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top',
                                    dict(instance=instance), 'up',
                                    run_locally=False)
        message.process()

    def instance_delete_everywhere(self, ctxt, instance, delete_type):
        """This is used by API cell when it didn't know what cell
        an instance was in, but the instance was requested to be
        deleted or soft_deleted.  So, we'll broadcast this everywhere.
        """
        method_kwargs = dict(instance=instance, delete_type=delete_type)
        message = _BroadcastMessage(self, ctxt,
                                    'instance_delete_everywhere',
                                    method_kwargs, 'down',
                                    run_locally=False)
        message.process()

    def instance_fault_create_at_top(self, ctxt, instance_fault):
        """Create an instance fault at the top level cell."""
        message = _BroadcastMessage(self, ctxt,
                                    'instance_fault_create_at_top',
                                    dict(instance_fault=instance_fault),
                                    'up', run_locally=False)
        message.process()

    def bw_usage_update_at_top(self, ctxt, bw_update_info):
        """Update bandwidth usage at top level cell."""
        message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top',
                                    dict(bw_update_info=bw_update_info),
                                    'up', run_locally=False)
        message.process()

    def sync_instances(self, ctxt, project_id, updated_since, deleted):
        """Force a sync of all instances, potentially by project_id,
        and potentially since a certain date/time.
        """
        method_kwargs = dict(project_id=project_id,
                             updated_since=updated_since,
                             deleted=deleted)
        message = _BroadcastMessage(self, ctxt, 'sync_instances',
                                    method_kwargs, 'down',
                                    run_locally=False)
        message.process()

    def service_get_all(self, ctxt, filters=None):
        """Broadcast a request for all services (optionally filtered)
        to every cell, including our own, and return the responses.
        """
        method_kwargs = dict(filters=filters)
        message = _BroadcastMessage(self, ctxt, 'service_get_all',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()

    def service_get_by_compute_host(self, ctxt, cell_name, host_name):
        """Return the service entry for a compute host in a given cell."""
        method_kwargs = dict(host_name=host_name)
        message = _TargetedMessage(self, ctxt,
                                   'service_get_by_compute_host',
                                   method_kwargs, 'down', cell_name,
                                   need_response=True)
        return message.process()

    def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic,
                             rpc_message, call, timeout):
        """Proxy an RPC message to a manager topic in a specific cell,
        waiting for a response iff 'call' is True.
        """
        method_kwargs = {'host_name': host_name,
                         'topic': topic,
                         'rpc_message': rpc_message,
                         'timeout': timeout}
        message = _TargetedMessage(self, ctxt,
                                   'proxy_rpc_to_manager',
                                   method_kwargs, 'down', cell_name,
                                   need_response=call)
        return message.process()

    def task_log_get_all(self, ctxt, cell_name, task_name,
                         period_beginning, period_ending,
                         host=None, state=None):
        """Get task logs from the DB from all cells or a particular
        cell.

        If 'cell_name' is None or '', get responses from all cells.
        If 'host' is not None, filter by host.
        If 'state' is not None, filter by state.

        Return a list of Response objects.
        """
        method_kwargs = dict(task_name=task_name,
                             period_beginning=period_beginning,
                             period_ending=period_ending,
                             host=host, state=state)
        if cell_name:
            message = _TargetedMessage(self, ctxt, 'task_log_get_all',
                                    method_kwargs, 'down',
                                    cell_name, need_response=True)
            # Caller should get a list of Responses.
            return [message.process()]
        message = _BroadcastMessage(self, ctxt, 'task_log_get_all',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()

    def compute_node_get_all(self, ctxt, hypervisor_match=None):
        """Return list of compute nodes in all child cells."""
        method_kwargs = dict(hypervisor_match=hypervisor_match)
        message = _BroadcastMessage(self, ctxt, 'compute_node_get_all',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()

    def compute_node_stats(self, ctxt):
        """Return compute node stats from all child cells."""
        method_kwargs = dict()
        message = _BroadcastMessage(self, ctxt, 'compute_node_stats',
                                    method_kwargs, 'down',
                                    run_locally=True, need_response=True)
        return message.process()

    def compute_node_get(self, ctxt, cell_name, compute_id):
        """Return compute node entry from a specific cell by ID."""
        method_kwargs = dict(compute_id=compute_id)
        message = _TargetedMessage(self, ctxt, 'compute_node_get',
                                    method_kwargs, 'down',
                                    cell_name, need_response=True)
        return message.process()

    @staticmethod
    def get_message_types():
        """Return the list of known wire-level message type strings."""
        return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
class Response(object):
    """Holds a response from a cell.  If there was a failure, 'failure'
    will be True and 'response' will contain an encoded Exception.
    """
    def __init__(self, cell_name, value, failure):
        # failure: True when 'value' is a serialized/raw exception.
        self.failure = failure
        self.cell_name = cell_name
        self.value = value

    def to_json(self):
        """Serialize this response (including any exception) to JSON."""
        resp_value = self.value
        if self.failure:
            resp_value = rpc_common.serialize_remote_exception(resp_value,
                    log_failure=False)
        _dict = {'cell_name': self.cell_name,
                 'value': resp_value,
                 'failure': self.failure}
        return jsonutils.dumps(_dict)

    @classmethod
    def from_json(cls, json_message):
        """Alternate constructor: rebuild a Response from to_json() output."""
        _dict = jsonutils.loads(json_message)
        if _dict['failure']:
            resp_value = rpc_common.deserialize_remote_exception(
                    CONF, _dict['value'])
            _dict['value'] = resp_value
        return cls(**_dict)

    def value_or_raise(self):
        """Return the value, or re-raise the remote exception on failure."""
        if self.failure:
            if isinstance(self.value, (tuple, list)):
                # Python 2 three-expression raise: re-raise with the
                # original (type, value, traceback) triple.
                raise self.value[0], self.value[1], self.value[2]
            else:
                raise self.value
        return self.value
# (file boundary: end of cells messaging module)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cloudfiles
import httplib
import json
import mox
from django import http
from django.conf import settings
from django_openstack import api
from glance import client as glance_client
from mox import IsA
from novaclient import service_catalog, client as base_client
from novaclient.keystone import client as keystone_client
from novaclient.v1_1 import client as nova_client
from openstack import compute as OSCompute
from openstackx import admin as OSAdmin
from openstackx import auth as OSAuth
from openstackx import extras as OSExtras
from django_openstack import test
from django_openstack.middleware import keystone
# Canned fixture values shared by the test cases in this module.
TEST_CONSOLE_KIND = 'vnc'
TEST_EMAIL = 'test@test.com'
TEST_HOSTNAME = 'hostname'
TEST_INSTANCE_ID = '2'
TEST_PASSWORD = '12345'
TEST_PORT = 8000
TEST_RETURN = 'retValue'
TEST_TENANT_DESCRIPTION = 'tenantDescription'
TEST_TENANT_ID = '1234'
TEST_TENANT_NAME = 'foo'
TEST_TOKEN = 'aToken'
TEST_TOKEN_ID = 'userId'
TEST_URL = 'http://%s:%s/something/v1.0' % (TEST_HOSTNAME, TEST_PORT)
TEST_USERNAME = 'testUser'
class Server(object):
    """More or less fakes what the api is looking for.

    Fake server object for the mox-based tests: carries an id, an
    image dict, and optionally an 'attrs' mapping of extra attributes.
    """
    def __init__(self, id, image, attrs=None):
        self.id = id
        self.image = image
        if attrs is not None:
            self.attrs = attrs

    def __eq__(self, other):
        """Compare by id, image id, and every attribute in self.attrs.

        BUGFIX: the original loop was 'for k in self.attrs: if
        other.attrs.__getattr__(k) != v' where 'v' was never defined,
        raising NameError on any comparison with non-empty attrs.
        Iterate over items() so both key and expected value are bound.
        """
        if self.id != other.id or \
            self.image['id'] != other.image['id']:
            return False
        for k, v in self.attrs.items():
            # 'other' is expected to wrap attrs in an object exposing
            # __getattr__ (e.g. the real api wrapper's attrs object).
            if other.attrs.__getattr__(k) != v:
                return False
        return True

    def __ne__(self, other):
        return not self == other
class Tenant(object):
    """Minimal stand-in for the tenant objects the api layer returns."""

    def __init__(self, id, description, enabled):
        self.id = id
        self.description = description
        self.enabled = enabled

    def __eq__(self, other):
        # Two fake tenants are equal when all three fields agree.
        same = (self.id == other.id
                and self.description == other.description
                and self.enabled == other.enabled)
        return same

    def __ne__(self, other):
        return not self.__eq__(other)
class Token(object):
    """More or less fakes what the api is looking for.

    Fake auth token: stores the user and tenant as dicts, matching the
    shape of real keystone token objects.
    """
    def __init__(self, id, username, tenant_id, tenant_name,
                 serviceCatalog=None):
        self.id = id
        self.user = {'name': username}
        self.tenant = {'id': tenant_id, 'name': tenant_name}
        self.serviceCatalog = serviceCatalog

    def __eq__(self, other):
        # BUGFIX: the original compared 'self.tenant_id', an attribute
        # that is never set (__init__ stores the id inside self.tenant),
        # so every comparison raised AttributeError.  Compare the stored
        # tenant ids instead.
        return self.id == other.id and \
            self.user['name'] == other.user['name'] and \
            self.tenant['id'] == other.tenant['id'] and \
            self.serviceCatalog == other.serviceCatalog

    def __ne__(self, other):
        return not self == other
class APIResource(api.APIResourceWrapper):
    """ Simple APIResource for testing """
    # Attributes the wrapper exposes; 'baz' is deliberately never set
    # on the default inner object so tests can exercise the
    # missing-attribute path.
    _attrs = ['foo', 'bar', 'baz']

    @staticmethod
    def get_instance(innerObject=None):
        """Build an APIResource around innerObject (or a default stub)."""
        if innerObject is None:
            class InnerAPIResource(object):
                pass
            innerObject = InnerAPIResource()
            innerObject.foo = 'foo'
            innerObject.bar = 'bar'
        return APIResource(innerObject)
class APIDict(api.APIDictWrapper):
    """ Simple APIDict for testing """
    # 'baz' is declared but absent from the default dict so tests can
    # exercise the missing-key/attribute path.
    _attrs = ['foo', 'bar', 'baz']

    @staticmethod
    def get_instance(innerDict=None):
        """Build an APIDict around innerDict (or a default two-key dict)."""
        if innerDict is None:
            innerDict = {'foo': 'foo',
                         'bar': 'bar'}
        return APIDict(innerDict)
class APITestCase(test.TestCase):
    """Base test case that replaces the api module's client factories
    with mox mocks so no real keystone/nova connections are attempted.
    """
    def setUp(self):
        def fake_keystoneclient(request, username=None, password=None,
                                tenant_id=None, token_id=None, endpoint=None):
            return self.stub_keystoneclient()

        super(APITestCase, self).setUp()
        # Remember the real factories so tearDown can restore them.
        self._original_keystoneclient = api.keystoneclient
        self._original_novaclient = api.novaclient
        api.keystoneclient = fake_keystoneclient
        api.novaclient = lambda request: self.stub_novaclient()

    def stub_novaclient(self):
        """Return a mock nova client, creating it lazily on first use."""
        if not hasattr(self, "novaclient"):
            self.mox.StubOutWithMock(nova_client, 'Client')
            self.novaclient = self.mox.CreateMock(nova_client.Client)
        return self.novaclient

    def stub_keystoneclient(self):
        """Return a mock keystone client, creating it lazily on first use."""
        if not hasattr(self, "keystoneclient"):
            self.mox.StubOutWithMock(keystone_client, 'Client')
            self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
        return self.keystoneclient

    def tearDown(self):
        super(APITestCase, self).tearDown()
        # Restore the real client factories.
        api.novaclient = self._original_novaclient
        api.keystoneclient = self._original_keystoneclient
class APIResourceWrapperTests(test.TestCase):
    """Attribute-access behavior of api.APIResourceWrapper."""
    def test_get_attribute(self):
        resource = APIResource.get_instance()
        self.assertEqual(resource.foo, 'foo')

    def test_get_invalid_attribute(self):
        # Attributes not listed in _attrs must raise AttributeError.
        resource = APIResource.get_instance()
        self.assertNotIn('missing', resource._attrs,
                msg="Test assumption broken.  Find new missing attribute")
        with self.assertRaises(AttributeError):
            resource.missing

    def test_get_inner_missing_attribute(self):
        # 'baz' is in _attrs but not set on the inner object.
        resource = APIResource.get_instance()
        with self.assertRaises(AttributeError):
            resource.baz
class APIDictWrapperTests(test.TestCase):
    # APIDict allows for both attribute access and dictionary style [element]
    # style access.  Test both
    def test_get_item(self):
        resource = APIDict.get_instance()
        self.assertEqual(resource.foo, 'foo')
        self.assertEqual(resource['foo'], 'foo')

    def test_get_invalid_item(self):
        # Keys not in _attrs fail via both access styles.
        resource = APIDict.get_instance()
        self.assertNotIn('missing', resource._attrs,
                msg="Test assumption broken.  Find new missing attribute")
        with self.assertRaises(AttributeError):
            resource.missing
        with self.assertRaises(KeyError):
            resource['missing']

    def test_get_inner_missing_attribute(self):
        # 'baz' is declared in _attrs but absent from the inner dict.
        resource = APIDict.get_instance()
        with self.assertRaises(AttributeError):
            resource.baz
        with self.assertRaises(KeyError):
            resource['baz']

    def test_get_with_default(self):
        resource = APIDict.get_instance()
        self.assertEqual(resource.get('foo'), 'foo')
        self.assertIsNone(resource.get('baz'))
        self.assertEqual('retValue', resource.get('baz', 'retValue'))
# Wrapper classes that only define _attrs don't need extra testing.
# Wrapper classes that have other attributes or methods need testing
class ImageWrapperTests(test.TestCase):
    """Behavior of api.Image, including its 'properties' sub-wrapper."""
    dict_with_properties = {
            'properties':
                {'image_state': 'running'},
            'size': 100,
    }
    dict_without_properties = {
            'size': 100,
    }

    def test_get_properties(self):
        image = api.Image(self.dict_with_properties)
        image_props = image.properties
        self.assertIsInstance(image_props, api.ImageProperties)
        self.assertEqual(image_props.image_state, 'running')

    def test_get_other(self):
        image = api.Image(self.dict_with_properties)
        self.assertEqual(image.size, 100)

    def test_get_properties_missing(self):
        image = api.Image(self.dict_without_properties)
        with self.assertRaises(AttributeError):
            image.properties

    def test_get_other_missing(self):
        image = api.Image(self.dict_without_properties)
        with self.assertRaises(AttributeError):
            # The assertNotIn guard runs inside the context manager; it
            # passes silently, then image.missing raises as expected.
            self.assertNotIn('missing', image._attrs,
                msg="Test assumption broken.  Find new missing attribute")
            image.missing
class ServerWrapperTests(test.TestCase):
    """Behavior of api.Server, including lazy image_name lookup."""
    HOST = 'hostname'
    ID = '1'
    IMAGE_NAME = 'imageName'
    IMAGE_OBJ = {'id': '3', 'links': [{'href': '3', u'rel': u'bookmark'}]}

    def setUp(self):
        super(ServerWrapperTests, self).setUp()

        # these are all objects "fetched" from the api
        self.inner_attrs = {'host': self.HOST}

        self.inner_server = Server(self.ID, self.IMAGE_OBJ, self.inner_attrs)
        self.inner_server_no_attrs = Server(self.ID, self.IMAGE_OBJ)

        #self.request = self.mox.CreateMock(http.HttpRequest)

    def test_get_attrs(self):
        server = api.Server(self.inner_server, self.request)
        attrs = server.attrs
        # for every attribute in the "inner" object passed to the api wrapper,
        # see if it can be accessed through the api.ServerAttribute instance
        for k in self.inner_attrs:
            self.assertEqual(attrs.__getattr__(k), self.inner_attrs[k])

    def test_get_other(self):
        server = api.Server(self.inner_server, self.request)
        self.assertEqual(server.id, self.ID)

    def test_get_attrs_missing(self):
        # Inner server was built without attrs at all.
        server = api.Server(self.inner_server_no_attrs, self.request)
        with self.assertRaises(AttributeError):
            server.attrs

    def test_get_other_missing(self):
        server = api.Server(self.inner_server, self.request)
        with self.assertRaises(AttributeError):
            self.assertNotIn('missing', server._attrs,
                msg="Test assumption broken.  Find new missing attribute")
            server.missing

    def test_image_name(self):
        # image_name triggers an api.image_get() call; stub it out.
        self.mox.StubOutWithMock(api, 'image_get')
        api.image_get(IsA(http.HttpRequest),
                      self.IMAGE_OBJ['id']
                      ).AndReturn(api.Image({'name': self.IMAGE_NAME}))

        server = api.Server(self.inner_server, self.request)

        self.mox.ReplayAll()

        image_name = server.image_name

        self.assertEqual(image_name, self.IMAGE_NAME)

        self.mox.VerifyAll()
class ApiHelperTests(test.TestCase):
    """ Tests for functions that don't use one of the api objects """

    def test_url_for(self):
        # Expected endpoints come from the test service catalog on
        # self.request (provided by the test.TestCase base).
        GLANCE_URL = 'http://glance/glanceapi/'
        NOVA_URL = 'http://nova/novapi/'

        url = api.url_for(self.request, 'image')
        self.assertEqual(url, GLANCE_URL + 'internal')

        url = api.url_for(self.request, 'image', admin=False)
        self.assertEqual(url, GLANCE_URL + 'internal')

        url = api.url_for(self.request, 'image', admin=True)
        self.assertEqual(url, GLANCE_URL + 'admin')

        url = api.url_for(self.request, 'compute')
        self.assertEqual(url, NOVA_URL + 'internal')

        url = api.url_for(self.request, 'compute', admin=False)
        self.assertEqual(url, NOVA_URL + 'internal')

        url = api.url_for(self.request, 'compute', admin=True)
        self.assertEqual(url, NOVA_URL + 'admin')

        # Unknown service names must raise ServiceCatalogException.
        self.assertNotIn('notAnApi', self.request.user.service_catalog,
                         'Select a new nonexistent service catalog key')
        with self.assertRaises(api.ServiceCatalogException):
            url = api.url_for(self.request, 'notAnApi')
class TenantAPITests(APITestCase):
    """mox-driven tests for the tenant CRUD helpers in the api module."""
    def test_tenant_create(self):
        DESCRIPTION = 'aDescription'
        ENABLED = True

        keystoneclient = self.stub_keystoneclient()

        keystoneclient.tenants = self.mox.CreateMockAnything()
        keystoneclient.tenants.create(TEST_TENANT_ID, DESCRIPTION,
                                      ENABLED).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.tenant_create(self.request, TEST_TENANT_ID,
                                    DESCRIPTION, ENABLED)

        # The raw client return value is wrapped in api.Tenant.
        self.assertIsInstance(ret_val, api.Tenant)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()

    def test_tenant_get(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.tenants = self.mox.CreateMockAnything()
        keystoneclient.tenants.get(TEST_TENANT_ID).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.tenant_get(self.request, TEST_TENANT_ID)

        self.assertIsInstance(ret_val, api.Tenant)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()

    def test_tenant_list(self):
        tenants = (TEST_RETURN, TEST_RETURN + '2')

        keystoneclient = self.stub_keystoneclient()

        keystoneclient.tenants = self.mox.CreateMockAnything()
        keystoneclient.tenants.list().AndReturn(tenants)

        self.mox.ReplayAll()

        ret_val = api.tenant_list(self.request)

        # Every returned element is wrapped in api.Tenant.
        self.assertEqual(len(ret_val), len(tenants))
        for tenant in ret_val:
            self.assertIsInstance(tenant, api.Tenant)
            self.assertIn(tenant._apiresource, tenants)

        self.mox.VerifyAll()

    def test_tenant_update(self):
        DESCRIPTION = 'aDescription'
        ENABLED = True

        keystoneclient = self.stub_keystoneclient()

        keystoneclient.tenants = self.mox.CreateMockAnything()
        keystoneclient.tenants.update(TEST_TENANT_ID, TEST_TENANT_NAME,
                                      DESCRIPTION, ENABLED).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.tenant_update(self.request, TEST_TENANT_ID,
                                    TEST_TENANT_NAME, DESCRIPTION, ENABLED)

        self.assertIsInstance(ret_val, api.Tenant)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()
class UserAPITests(APITestCase):
    """mox-driven tests for the user CRUD helpers in the api module."""
    def test_user_create(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.create(TEST_USERNAME, TEST_PASSWORD, TEST_EMAIL,
                                    TEST_TENANT_ID, True).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.user_create(self.request, TEST_USERNAME, TEST_EMAIL,
                                  TEST_PASSWORD, TEST_TENANT_ID, True)

        self.assertIsInstance(ret_val, api.User)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()

    def test_user_delete(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.delete(TEST_USERNAME).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.user_delete(self.request, TEST_USERNAME)

        # delete returns nothing; the client return value is discarded.
        self.assertIsNone(ret_val)

        self.mox.VerifyAll()

    def test_user_get(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.get(TEST_USERNAME).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.user_get(self.request, TEST_USERNAME)

        self.assertIsInstance(ret_val, api.User)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()

    def test_user_list(self):
        users = (TEST_USERNAME, TEST_USERNAME + '2')

        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.list(tenant_id=None).AndReturn(users)

        self.mox.ReplayAll()

        ret_val = api.user_list(self.request)

        # Every returned element is wrapped in api.User.
        self.assertEqual(len(ret_val), len(users))
        for user in ret_val:
            self.assertIsInstance(user, api.User)
            self.assertIn(user._apiresource, users)

        self.mox.VerifyAll()

    def test_user_update_email(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.update_email(TEST_USERNAME,
                                          TEST_EMAIL).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.user_update_email(self.request, TEST_USERNAME,
                                        TEST_EMAIL)

        self.assertIsInstance(ret_val, api.User)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()

    def test_user_update_password(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.update_password(TEST_USERNAME,
                                             TEST_PASSWORD).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.user_update_password(self.request, TEST_USERNAME,
                                           TEST_PASSWORD)

        self.assertIsInstance(ret_val, api.User)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()

    def test_user_update_tenant(self):
        keystoneclient = self.stub_keystoneclient()

        keystoneclient.users = self.mox.CreateMockAnything()
        keystoneclient.users.update_tenant(TEST_USERNAME,
                                           TEST_TENANT_ID).AndReturn(TEST_RETURN)

        self.mox.ReplayAll()

        ret_val = api.user_update_tenant(self.request, TEST_USERNAME,
                                         TEST_TENANT_ID)

        self.assertIsInstance(ret_val, api.User)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)

        self.mox.VerifyAll()
class RoleAPITests(APITestCase):
    """Tests for the role wrappers around python-keystoneclient."""

    def test_role_add_for_tenant_user(self):
        """role_add_for_tenant_user resolves the role then adds it."""
        keystoneclient = self.stub_keystoneclient()
        # Build a concrete Role to be returned from both stubbed calls.
        role = api.Role(APIResource.get_instance())
        role.id = TEST_RETURN
        role.name = TEST_RETURN
        keystoneclient.roles = self.mox.CreateMockAnything()
        keystoneclient.roles.add_user_to_tenant(TEST_TENANT_ID,
                                                TEST_USERNAME,
                                                TEST_RETURN).AndReturn(role)
        # _get_role is stubbed directly because it performs its own lookup.
        api._get_role = self.mox.CreateMockAnything()
        api._get_role(IsA(http.HttpRequest), IsA(str)).AndReturn(role)
        self.mox.ReplayAll()
        ret_val = api.role_add_for_tenant_user(self.request,
                                               TEST_TENANT_ID,
                                               TEST_USERNAME,
                                               TEST_RETURN)
        self.assertEqual(ret_val, role)
        self.mox.VerifyAll()
class AdminApiTests(APITestCase):
    """Tests for the admin (OSAdmin) API wrappers."""

    def stub_admin_api(self, count=1):
        """Stub api.admin_api to return a mock Admin `count` times."""
        self.mox.StubOutWithMock(api, 'admin_api')
        admin_api = self.mox.CreateMock(OSAdmin.Admin)
        for i in range(count):
            api.admin_api(IsA(http.HttpRequest)).AndReturn(admin_api)
        return admin_api

    def test_get_admin_api(self):
        """admin_api should build an Admin client from the compute URL."""
        self.mox.StubOutClassWithMocks(OSAdmin, 'Admin')
        OSAdmin.Admin(auth_token=TEST_TOKEN, management_url=TEST_URL)
        self.mox.StubOutWithMock(api, 'url_for')
        # url_for is expected to be called twice by api.admin_api.
        api.url_for(IsA(http.HttpRequest), 'compute', True).AndReturn(TEST_URL)
        api.url_for(IsA(http.HttpRequest), 'compute', True).AndReturn(TEST_URL)
        self.mox.ReplayAll()
        self.assertIsNotNone(api.admin_api(self.request))
        self.mox.VerifyAll()

    def test_flavor_create(self):
        """flavor_create converts string sizes and wraps the result."""
        FLAVOR_DISK = 1000
        FLAVOR_ID = 6
        FLAVOR_MEMORY = 1024
        FLAVOR_NAME = 'newFlavor'
        FLAVOR_VCPU = 2
        admin_api = self.stub_admin_api()
        admin_api.flavors = self.mox.CreateMockAnything()
        # The backend is expected to receive the numeric values even though
        # the API wrapper is handed strings below.
        admin_api.flavors.create(FLAVOR_NAME, FLAVOR_MEMORY, FLAVOR_VCPU,
                                 FLAVOR_DISK, FLAVOR_ID).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.flavor_create(self.request, FLAVOR_NAME,
                                    str(FLAVOR_MEMORY), str(FLAVOR_VCPU),
                                    str(FLAVOR_DISK), FLAVOR_ID)
        self.assertIsInstance(ret_val, api.Flavor)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()

    def test_flavor_delete(self):
        """flavor_delete supports both plain delete and purge."""
        FLAVOR_ID = 6
        admin_api = self.stub_admin_api(count=2)
        admin_api.flavors = self.mox.CreateMockAnything()
        admin_api.flavors.delete(FLAVOR_ID, False).AndReturn(TEST_RETURN)
        admin_api.flavors.delete(FLAVOR_ID, True).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.flavor_delete(self.request, FLAVOR_ID)
        self.assertIsNone(ret_val)
        ret_val = api.flavor_delete(self.request, FLAVOR_ID, purge=True)
        self.assertIsNone(ret_val)
        # NOTE(review): unlike its siblings this test never calls
        # self.mox.VerifyAll(), so unmet expectations would go unnoticed.

    def test_service_get(self):
        """service_get should wrap the backend record in api.Services."""
        NAME = 'serviceName'
        admin_api = self.stub_admin_api()
        admin_api.services = self.mox.CreateMockAnything()
        admin_api.services.get(NAME).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.service_get(self.request, NAME)
        self.assertIsInstance(ret_val, api.Services)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()

    def test_service_list(self):
        """service_list should wrap every record in api.Services."""
        services = (TEST_RETURN, TEST_RETURN + '2')
        admin_api = self.stub_admin_api()
        admin_api.services = self.mox.CreateMockAnything()
        admin_api.services.list().AndReturn(services)
        self.mox.ReplayAll()
        ret_val = api.service_list(self.request)
        for service in ret_val:
            self.assertIsInstance(service, api.Services)
            self.assertIn(service._apiresource, services)
        self.mox.VerifyAll()

    def test_service_update(self):
        """service_update should wrap the updated record in api.Services."""
        ENABLED = True
        NAME = 'serviceName'
        admin_api = self.stub_admin_api()
        admin_api.services = self.mox.CreateMockAnything()
        admin_api.services.update(NAME, ENABLED).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.service_update(self.request, NAME, ENABLED)
        self.assertIsInstance(ret_val, api.Services)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()
class TokenApiTests(APITestCase):
    """Tests for token creation against keystone."""

    def setUp(self):
        """Point OPENSTACK_KEYSTONE_URL at the test URL, saving the old value."""
        super(TokenApiTests, self).setUp()
        self._prev_OPENSTACK_KEYSTONE_URL = getattr(settings,
                                                    'OPENSTACK_KEYSTONE_URL',
                                                    None)
        settings.OPENSTACK_KEYSTONE_URL = TEST_URL

    def tearDown(self):
        """Restore the original OPENSTACK_KEYSTONE_URL setting."""
        super(TokenApiTests, self).tearDown()
        settings.OPENSTACK_KEYSTONE_URL = self._prev_OPENSTACK_KEYSTONE_URL

    def test_token_create(self):
        """token_create should authenticate and return the keystone token."""
        # NOTE(review): `catalog` is never used below — it appears to be
        # leftover fixture data from an earlier version of this test.
        catalog = {
            'access': {
                'token': {
                    'id': TEST_TOKEN_ID,
                },
                'user': {
                    'roles': [],
                }
            }
        }
        test_token = Token(TEST_TOKEN_ID, TEST_USERNAME,
                           TEST_TENANT_ID, TEST_TENANT_NAME)
        keystoneclient = self.stub_keystoneclient()
        keystoneclient.tokens = self.mox.CreateMockAnything()
        keystoneclient.tokens.authenticate(username=TEST_USERNAME,
                                           password=TEST_PASSWORD,
                                           tenant=TEST_TENANT_ID
                                           ).AndReturn(test_token)
        self.mox.ReplayAll()
        ret_val = api.token_create(self.request, TEST_TENANT_ID,
                                   TEST_USERNAME, TEST_PASSWORD)
        self.assertEqual(test_token.tenant['id'], ret_val.tenant['id'])
        self.mox.VerifyAll()
class ComputeApiTests(APITestCase):
    """Tests for the compute (OSCompute) API wrappers."""

    def stub_compute_api(self, count=1):
        """Stub api.compute_api to return a mock Compute `count` times."""
        self.mox.StubOutWithMock(api, 'compute_api')
        compute_api = self.mox.CreateMock(OSCompute.Compute)
        for i in range(count):
            api.compute_api(IsA(http.HttpRequest)).AndReturn(compute_api)
        return compute_api

    def test_get_compute_api(self):
        """compute_api should configure its client with token and URL."""
        # Minimal stand-in for the real client object so attribute
        # assignment inside api.compute_api can be observed.
        class ComputeClient(object):
            __slots__ = ['auth_token', 'management_url']
        self.mox.StubOutClassWithMocks(OSCompute, 'Compute')
        compute_api = OSCompute.Compute(auth_token=TEST_TOKEN,
                                        management_url=TEST_URL)
        compute_api.client = ComputeClient()
        self.mox.StubOutWithMock(api, 'url_for')
        # called three times? Looks like a good place for optimization
        api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
        api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
        api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
        self.mox.ReplayAll()
        compute_api = api.compute_api(self.request)
        self.assertIsNotNone(compute_api)
        self.assertEqual(compute_api.client.auth_token, TEST_TOKEN)
        self.assertEqual(compute_api.client.management_url, TEST_URL)
        self.mox.VerifyAll()

    def test_flavor_get(self):
        """flavor_get should wrap the nova flavor in api.Flavor."""
        FLAVOR_ID = 6
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.get(FLAVOR_ID).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.flavor_get(self.request, FLAVOR_ID)
        self.assertIsInstance(ret_val, api.Flavor)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()

    def test_server_delete(self):
        """server_delete passes through to the backend and returns None."""
        INSTANCE = 'anInstance'
        compute_api = self.stub_compute_api()
        compute_api.servers = self.mox.CreateMockAnything()
        compute_api.servers.delete(INSTANCE).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.server_delete(self.request, INSTANCE)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_server_reboot(self):
        """server_reboot defaults to a hard reboot; hardness is forwarded."""
        INSTANCE_ID = '2'
        HARDNESS = 'diamond'
        self.mox.StubOutWithMock(api, 'server_get')
        # First call: no hardness argument -> REBOOT_HARD expected.
        server = self.mox.CreateMock(OSCompute.Server)
        server.reboot(OSCompute.servers.REBOOT_HARD).AndReturn(TEST_RETURN)
        api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
        # Second call: explicit hardness is passed straight through.
        server = self.mox.CreateMock(OSCompute.Server)
        server.reboot(HARDNESS).AndReturn(TEST_RETURN)
        api.server_get(IsA(http.HttpRequest), INSTANCE_ID).AndReturn(server)
        self.mox.ReplayAll()
        ret_val = api.server_reboot(self.request, INSTANCE_ID)
        self.assertIsNone(ret_val)
        ret_val = api.server_reboot(self.request, INSTANCE_ID,
                                    hardness=HARDNESS)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_server_create(self):
        """server_create forwards all options and wraps the new server."""
        NAME = 'server'
        IMAGE = 'anImage'
        FLAVOR = 'cherry'
        USER_DATA = {'nuts': 'berries'}
        KEY = 'user'
        SECGROUP = self.mox.CreateMock(api.SecurityGroup)
        server = self.mox.CreateMock(OSCompute.Server)
        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.create(NAME, IMAGE, FLAVOR, userdata=USER_DATA,
                                  security_groups=[SECGROUP], key_name=KEY)\
                                  .AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.server_create(self.request, NAME, IMAGE, FLAVOR,
                                    KEY, USER_DATA, [SECGROUP])
        self.assertIsInstance(ret_val, api.Server)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()
class ExtrasApiTests(APITestCase):
    """Tests for the extras (OSExtras) API wrappers."""

    def stub_extras_api(self, count=1):
        """Stub api.extras_api to return a mock Extras `count` times."""
        self.mox.StubOutWithMock(api, 'extras_api')
        extras_api = self.mox.CreateMock(OSExtras.Extras)
        for i in range(count):
            api.extras_api(IsA(http.HttpRequest)).AndReturn(extras_api)
        return extras_api

    def test_get_extras_api(self):
        """extras_api should build an Extras client from the compute URL."""
        self.mox.StubOutClassWithMocks(OSExtras, 'Extras')
        OSExtras.Extras(auth_token=TEST_TOKEN, management_url=TEST_URL)
        self.mox.StubOutWithMock(api, 'url_for')
        # url_for is expected to be called twice by api.extras_api.
        api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
        api.url_for(IsA(http.HttpRequest), 'compute').AndReturn(TEST_URL)
        self.mox.ReplayAll()
        self.assertIsNotNone(api.extras_api(self.request))
        self.mox.VerifyAll()

    def test_console_create(self):
        """console_create defaults to a 'text' console when kind is omitted."""
        extras_api = self.stub_extras_api(count=2)
        extras_api.consoles = self.mox.CreateMockAnything()
        extras_api.consoles.create(
            TEST_INSTANCE_ID, TEST_CONSOLE_KIND).AndReturn(TEST_RETURN)
        extras_api.consoles.create(
            TEST_INSTANCE_ID, 'text').AndReturn(TEST_RETURN + '2')
        self.mox.ReplayAll()
        ret_val = api.console_create(self.request,
                                     TEST_INSTANCE_ID,
                                     TEST_CONSOLE_KIND)
        self.assertIsInstance(ret_val, api.Console)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        ret_val = api.console_create(self.request, TEST_INSTANCE_ID)
        self.assertIsInstance(ret_val, api.Console)
        self.assertEqual(ret_val._apiresource, TEST_RETURN + '2')
        self.mox.VerifyAll()

    def test_flavor_list(self):
        """flavor_list should wrap every flavor in api.Flavor."""
        flavors = (TEST_RETURN, TEST_RETURN + '2')
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.list().AndReturn(flavors)
        self.mox.ReplayAll()
        ret_val = api.flavor_list(self.request)
        self.assertEqual(len(ret_val), len(flavors))
        for flavor in ret_val:
            self.assertIsInstance(flavor, api.Flavor)
            self.assertIn(flavor._apiresource, flavors)
        self.mox.VerifyAll()

    def test_server_list(self):
        """server_list should wrap every server in api.Server."""
        servers = (TEST_RETURN, TEST_RETURN + '2')
        extras_api = self.stub_extras_api()
        extras_api.servers = self.mox.CreateMockAnything()
        extras_api.servers.list().AndReturn(servers)
        self.mox.ReplayAll()
        ret_val = api.server_list(self.request)
        self.assertEqual(len(ret_val), len(servers))
        for server in ret_val:
            self.assertIsInstance(server, api.Server)
            self.assertIn(server._apiresource, servers)
        self.mox.VerifyAll()

    def test_usage_get(self):
        """usage_get should wrap the backend result in api.Usage."""
        extras_api = self.stub_extras_api()
        extras_api.usage = self.mox.CreateMockAnything()
        extras_api.usage.get(TEST_TENANT_ID, 'start',
                             'end').AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.usage_get(self.request, TEST_TENANT_ID, 'start', 'end')
        self.assertIsInstance(ret_val, api.Usage)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()

    def test_usage_list(self):
        """usage_list should wrap every record in api.Usage."""
        usages = (TEST_RETURN, TEST_RETURN + '2')
        extras_api = self.stub_extras_api()
        extras_api.usage = self.mox.CreateMockAnything()
        extras_api.usage.list('start', 'end').AndReturn(usages)
        self.mox.ReplayAll()
        ret_val = api.usage_list(self.request, 'start', 'end')
        self.assertEqual(len(ret_val), len(usages))
        for usage in ret_val:
            self.assertIsInstance(usage, api.Usage)
            self.assertIn(usage._apiresource, usages)
        self.mox.VerifyAll()

    def test_server_get(self):
        """server_get should wrap the backend server in api.Server."""
        INSTANCE_ID = '2'
        extras_api = self.stub_extras_api()
        extras_api.servers = self.mox.CreateMockAnything()
        extras_api.servers.get(INSTANCE_ID).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.server_get(self.request, INSTANCE_ID)
        self.assertIsInstance(ret_val, api.Server)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()
class VolumeTests(APITestCase):
    """Tests for the volume wrappers around novaclient."""

    def setUp(self):
        """Build a canned volume with one attachment and stub novaclient."""
        super(VolumeTests, self).setUp()
        volume = api.Volume(APIResource.get_instance())
        volume.id = 1
        volume.displayName = "displayName"
        volume.attachments = [{"device": "/dev/vdb",
                               "serverId": 1,
                               "id": 1,
                               "volumeId": 1}]
        self.volume = volume
        self.volumes = [volume, ]
        self.novaclient = self.stub_novaclient()
        self.novaclient.volumes = self.mox.CreateMockAnything()

    def test_volume_list(self):
        """volume_list should wrap every volume in api.Volume."""
        self.novaclient.volumes.list().AndReturn(self.volumes)
        self.mox.ReplayAll()
        volumes = api.volume_list(self.request)
        self.assertIsInstance(volumes[0], api.Volume)
        self.mox.VerifyAll()

    def test_volume_get(self):
        """volume_get should wrap the volume in api.Volume."""
        self.novaclient.volumes.get(IsA(int)).AndReturn(self.volume)
        self.mox.ReplayAll()
        volume = api.volume_get(self.request, 1)
        self.assertIsInstance(volume, api.Volume)
        self.mox.VerifyAll()

    def test_volume_instance_list(self):
        """volume_instance_list returns attachments untouched."""
        self.novaclient.volumes.get_server_volumes(IsA(int)).AndReturn(
            self.volume.attachments)
        self.mox.ReplayAll()
        attachments = api.volume_instance_list(self.request, 1)
        self.assertEqual(attachments, self.volume.attachments)
        self.mox.VerifyAll()

    def test_volume_create(self):
        """volume_create forwards size/name/description and wraps the result."""
        self.novaclient.volumes.create(IsA(int), IsA(str), IsA(str)).AndReturn(
            self.volume)
        self.mox.ReplayAll()
        new_volume = api.volume_create(self.request,
                                       10,
                                       "new volume",
                                       "new description")
        self.assertIsInstance(new_volume, api.Volume)
        self.mox.VerifyAll()

    def test_volume_delete(self):
        """volume_delete passes through and returns None."""
        self.novaclient.volumes.delete(IsA(int))
        self.mox.ReplayAll()
        ret_val = api.volume_delete(self.request, 1)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_volume_attach(self):
        """volume_attach passes through and returns None."""
        self.novaclient.volumes.create_server_volume(
            IsA(int), IsA(int), IsA(str))
        self.mox.ReplayAll()
        ret_val = api.volume_attach(self.request, 1, 1, "/dev/vdb")
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_volume_detach(self):
        """volume_detach passes through and returns None."""
        self.novaclient.volumes.delete_server_volume(IsA(int), IsA(int))
        self.mox.ReplayAll()
        ret_val = api.volume_detach(self.request, 1, 1)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()
class APIExtensionTests(APITestCase):
    """Tests for nova extension wrappers: keypairs, floating IPs, snapshots."""

    def setUp(self):
        """Build canned keypair, floating IP, and server fixtures."""
        super(APIExtensionTests, self).setUp()
        keypair = api.KeyPair(APIResource.get_instance())
        keypair.id = 1
        keypair.name = TEST_RETURN
        self.keypair = keypair
        self.keypairs = [keypair, ]
        floating_ip = api.FloatingIp(APIResource.get_instance())
        floating_ip.id = 1
        floating_ip.fixed_ip = '10.0.0.4'
        floating_ip.instance_id = 1
        floating_ip.ip = '58.58.58.58'
        self.floating_ip = floating_ip
        self.floating_ips = [floating_ip, ]
        server = api.Server(APIResource.get_instance(), self.request)
        server.id = 1
        self.server = server
        self.servers = [server, ]

    def test_server_snapshot_create(self):
        """snapshot_create should wrap the snapshot result in api.Server."""
        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.create_image(IsA(int), IsA(str)).\
            AndReturn(self.server)
        self.mox.ReplayAll()
        server = api.snapshot_create(self.request, 1, 'test-snapshot')
        self.assertIsInstance(server, api.Server)
        self.mox.VerifyAll()

    def test_tenant_floating_ip_list(self):
        """tenant_floating_ip_list wraps every IP in api.FloatingIp."""
        novaclient = self.stub_novaclient()
        novaclient.floating_ips = self.mox.CreateMockAnything()
        novaclient.floating_ips.list().AndReturn(self.floating_ips)
        self.mox.ReplayAll()
        floating_ips = api.tenant_floating_ip_list(self.request)
        self.assertEqual(len(floating_ips), len(self.floating_ips))
        self.assertIsInstance(floating_ips[0], api.FloatingIp)
        self.mox.VerifyAll()

    def test_tenant_floating_ip_get(self):
        """tenant_floating_ip_get wraps the IP in api.FloatingIp."""
        novaclient = self.stub_novaclient()
        novaclient.floating_ips = self.mox.CreateMockAnything()
        novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
        self.mox.ReplayAll()
        floating_ip = api.tenant_floating_ip_get(self.request, 1)
        self.assertIsInstance(floating_ip, api.FloatingIp)
        self.mox.VerifyAll()

    def test_tenant_floating_ip_allocate(self):
        """tenant_floating_ip_allocate wraps the new IP in api.FloatingIp."""
        novaclient = self.stub_novaclient()
        novaclient.floating_ips = self.mox.CreateMockAnything()
        novaclient.floating_ips.create().AndReturn(self.floating_ip)
        self.mox.ReplayAll()
        floating_ip = api.tenant_floating_ip_allocate(self.request)
        self.assertIsInstance(floating_ip, api.FloatingIp)
        self.mox.VerifyAll()

    def test_tenant_floating_ip_release(self):
        """tenant_floating_ip_release wraps the released IP."""
        novaclient = self.stub_novaclient()
        novaclient.floating_ips = self.mox.CreateMockAnything()
        novaclient.floating_ips.delete(1).AndReturn(self.floating_ip)
        self.mox.ReplayAll()
        floating_ip = api.tenant_floating_ip_release(self.request, 1)
        self.assertIsInstance(floating_ip, api.FloatingIp)
        self.mox.VerifyAll()

    def test_server_remove_floating_ip(self):
        """server_remove_floating_ip resolves both objects then detaches."""
        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.floating_ips = self.mox.CreateMockAnything()
        novaclient.servers.get(IsA(int)).AndReturn(self.server)
        novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
        novaclient.servers.remove_floating_ip(IsA(self.server.__class__),
                                              IsA(self.floating_ip.__class__)) \
                                              .AndReturn(self.server)
        self.mox.ReplayAll()
        server = api.server_remove_floating_ip(self.request, 1, 1)
        self.assertIsInstance(server, api.Server)
        self.mox.VerifyAll()

    def test_server_add_floating_ip(self):
        """server_add_floating_ip resolves both objects then attaches."""
        novaclient = self.stub_novaclient()
        novaclient.floating_ips = self.mox.CreateMockAnything()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get(IsA(int)).AndReturn(self.server)
        novaclient.floating_ips.get(IsA(int)).AndReturn(self.floating_ip)
        novaclient.servers.add_floating_ip(IsA(self.server.__class__),
                                           IsA(self.floating_ip.__class__)) \
                                           .AndReturn(self.server)
        self.mox.ReplayAll()
        server = api.server_add_floating_ip(self.request, 1, 1)
        self.assertIsInstance(server, api.Server)
        self.mox.VerifyAll()

    def test_keypair_create(self):
        """keypair_create wraps the new keypair in api.KeyPair."""
        novaclient = self.stub_novaclient()
        novaclient.keypairs = self.mox.CreateMockAnything()
        novaclient.keypairs.create(IsA(str)).AndReturn(self.keypair)
        self.mox.ReplayAll()
        ret_val = api.keypair_create(self.request, TEST_RETURN)
        self.assertIsInstance(ret_val, api.KeyPair)
        self.assertEqual(ret_val.name, self.keypair.name)
        self.mox.VerifyAll()

    def test_keypair_import(self):
        """keypair_import (create with public key) wraps the keypair."""
        novaclient = self.stub_novaclient()
        novaclient.keypairs = self.mox.CreateMockAnything()
        novaclient.keypairs.create(IsA(str), IsA(str)).AndReturn(self.keypair)
        self.mox.ReplayAll()
        ret_val = api.keypair_import(self.request, TEST_RETURN, TEST_RETURN)
        self.assertIsInstance(ret_val, api.KeyPair)
        self.assertEqual(ret_val.name, self.keypair.name)
        self.mox.VerifyAll()

    def test_keypair_delete(self):
        """keypair_delete passes through and returns None."""
        novaclient = self.stub_novaclient()
        novaclient.keypairs = self.mox.CreateMockAnything()
        novaclient.keypairs.delete(IsA(int))
        self.mox.ReplayAll()
        ret_val = api.keypair_delete(self.request, self.keypair.id)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_keypair_list(self):
        """keypair_list wraps every keypair in api.KeyPair."""
        novaclient = self.stub_novaclient()
        novaclient.keypairs = self.mox.CreateMockAnything()
        novaclient.keypairs.list().AndReturn(self.keypairs)
        self.mox.ReplayAll()
        ret_val = api.keypair_list(self.request)
        self.assertEqual(len(ret_val), len(self.keypairs))
        for keypair in ret_val:
            self.assertIsInstance(keypair, api.KeyPair)
        self.mox.VerifyAll()
class GlanceApiTests(APITestCase):
    """Tests for the image wrappers around the glance client."""

    def stub_glance_api(self, count=1):
        """Stub api.glance_api to return a mock glance Client `count` times."""
        self.mox.StubOutWithMock(api, 'glance_api')
        glance_api = self.mox.CreateMock(glance_client.Client)
        glance_api.token = TEST_TOKEN
        for i in range(count):
            api.glance_api(IsA(http.HttpRequest)).AndReturn(glance_api)
        return glance_api

    def test_get_glance_api(self):
        """glance_api should build a Client with the image-service token."""
        self.mox.StubOutClassWithMocks(glance_client, 'Client')
        client_instance = glance_client.Client(TEST_HOSTNAME, TEST_PORT,
                                               auth_tok=TEST_TOKEN)
        # Normally ``auth_tok`` is set in ``Client.__init__``, but mox doesn't
        # duplicate that behavior so we set it manually.
        client_instance.auth_tok = TEST_TOKEN
        self.mox.StubOutWithMock(api, 'url_for')
        api.url_for(IsA(http.HttpRequest), 'image').AndReturn(TEST_URL)
        self.mox.ReplayAll()
        ret_val = api.glance_api(self.request)
        self.assertIsNotNone(ret_val)
        self.assertEqual(ret_val.auth_tok, TEST_TOKEN)
        self.mox.VerifyAll()

    def test_image_create(self):
        """image_create should wrap the new image dict in api.Image."""
        IMAGE_FILE = 'someData'
        IMAGE_META = {'metadata': 'foo'}
        glance_api = self.stub_glance_api()
        glance_api.add_image(IMAGE_META, IMAGE_FILE).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.image_create(self.request, IMAGE_META, IMAGE_FILE)
        self.assertIsInstance(ret_val, api.Image)
        self.assertEqual(ret_val._apidict, TEST_RETURN)
        self.mox.VerifyAll()

    def test_image_delete(self):
        """image_delete returns the raw backend result."""
        IMAGE_ID = '1'
        glance_api = self.stub_glance_api()
        glance_api.delete_image(IMAGE_ID).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.image_delete(self.request, IMAGE_ID)
        self.assertEqual(ret_val, TEST_RETURN)
        self.mox.VerifyAll()

    def test_image_get(self):
        """image_get unwraps glance's one-element result list."""
        IMAGE_ID = '1'
        glance_api = self.stub_glance_api()
        # get_image returns a sequence; image_get takes the first element.
        glance_api.get_image(IMAGE_ID).AndReturn([TEST_RETURN])
        self.mox.ReplayAll()
        ret_val = api.image_get(self.request, IMAGE_ID)
        self.assertIsInstance(ret_val, api.Image)
        self.assertEqual(ret_val._apidict, TEST_RETURN)
        # NOTE(review): unlike the other tests here this one never calls
        # self.mox.VerifyAll().

    def test_image_list_detailed(self):
        """image_list_detailed wraps every image dict in api.Image."""
        images = (TEST_RETURN, TEST_RETURN + '2')
        glance_api = self.stub_glance_api()
        glance_api.get_images_detailed().AndReturn(images)
        self.mox.ReplayAll()
        ret_val = api.image_list_detailed(self.request)
        self.assertEqual(len(ret_val), len(images))
        for image in ret_val:
            self.assertIsInstance(image, api.Image)
            self.assertIn(image._apidict, images)
        self.mox.VerifyAll()

    def test_image_update(self):
        """image_update defaults image_meta to {} when omitted."""
        IMAGE_ID = '1'
        IMAGE_META = {'metadata': 'foobar'}
        glance_api = self.stub_glance_api(count=2)
        glance_api.update_image(IMAGE_ID, image_meta={}).AndReturn(TEST_RETURN)
        glance_api.update_image(IMAGE_ID,
                                image_meta=IMAGE_META).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.image_update(self.request, IMAGE_ID)
        self.assertIsInstance(ret_val, api.Image)
        self.assertEqual(ret_val._apidict, TEST_RETURN)
        ret_val = api.image_update(self.request,
                                   IMAGE_ID,
                                   image_meta=IMAGE_META)
        self.assertIsInstance(ret_val, api.Image)
        self.assertEqual(ret_val._apidict, TEST_RETURN)
        self.mox.VerifyAll()
class SwiftApiTests(APITestCase):
    """Tests for the object-storage wrappers around cloudfiles."""

    def setUp(self):
        """Build a minimal request with a session token.

        NOTE(review): this deliberately does NOT call the parent setUp —
        it creates its own mox instance and a bare HttpRequest instead of
        the fixtures APITestCase would provide.
        """
        self.mox = mox.Mox()
        self.request = http.HttpRequest()
        self.request.session = dict()
        self.request.session['token'] = TEST_TOKEN

    def tearDown(self):
        """Undo any stubs installed during the test."""
        self.mox.UnsetStubs()

    def stub_swift_api(self, count=1):
        """Stub api.swift_api to return a mock Connection `count` times."""
        self.mox.StubOutWithMock(api, 'swift_api')
        swift_api = self.mox.CreateMock(cloudfiles.connection.Connection)
        for i in range(count):
            api.swift_api(IsA(http.HttpRequest)).AndReturn(swift_api)
        return swift_api

    def test_swift_get_containers(self):
        """swift_get_containers wraps each container in api.Container."""
        containers = (TEST_RETURN, TEST_RETURN + '2')
        swift_api = self.stub_swift_api()
        # The wrapper pages with a fixed limit and no marker by default.
        swift_api.get_all_containers(limit=10000,
                                     marker=None).AndReturn(containers)
        self.mox.ReplayAll()
        ret_val = api.swift_get_containers(self.request)
        self.assertEqual(len(ret_val), len(containers))
        for container in ret_val:
            self.assertIsInstance(container, api.Container)
            self.assertIn(container._apiresource, containers)
        self.mox.VerifyAll()

    def test_swift_create_container(self):
        """swift_create_container checks for duplicates before creating."""
        NAME = 'containerName'
        swift_api = self.stub_swift_api()
        self.mox.StubOutWithMock(api, 'swift_container_exists')
        api.swift_container_exists(self.request,
                                   NAME).AndReturn(False)
        swift_api.create_container(NAME).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.swift_create_container(self.request, NAME)
        self.assertIsInstance(ret_val, api.Container)
        self.assertEqual(ret_val._apiresource, TEST_RETURN)
        self.mox.VerifyAll()

    def test_swift_delete_container(self):
        """swift_delete_container passes through and returns None."""
        NAME = 'containerName'
        swift_api = self.stub_swift_api()
        swift_api.delete_container(NAME).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.swift_delete_container(self.request, NAME)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_swift_get_objects(self):
        """swift_get_objects wraps each object in api.SwiftObject."""
        NAME = 'containerName'
        swift_objects = (TEST_RETURN, TEST_RETURN + '2')
        container = self.mox.CreateMock(cloudfiles.container.Container)
        container.get_objects(limit=10000,
                              marker=None,
                              prefix=None).AndReturn(swift_objects)
        swift_api = self.stub_swift_api()
        swift_api.get_container(NAME).AndReturn(container)
        self.mox.ReplayAll()
        ret_val = api.swift_get_objects(self.request, NAME)
        self.assertEqual(len(ret_val), len(swift_objects))
        for swift_object in ret_val:
            self.assertIsInstance(swift_object, api.SwiftObject)
            self.assertIn(swift_object._apiresource, swift_objects)
        self.mox.VerifyAll()

    def test_swift_get_objects_with_prefix(self):
        """swift_get_objects forwards an explicit prefix filter."""
        NAME = 'containerName'
        PREFIX = 'prefacedWith'
        swift_objects = (TEST_RETURN, TEST_RETURN + '2')
        container = self.mox.CreateMock(cloudfiles.container.Container)
        container.get_objects(limit=10000,
                              marker=None,
                              prefix=PREFIX).AndReturn(swift_objects)
        swift_api = self.stub_swift_api()
        swift_api.get_container(NAME).AndReturn(container)
        self.mox.ReplayAll()
        ret_val = api.swift_get_objects(self.request,
                                        NAME,
                                        prefix=PREFIX)
        self.assertEqual(len(ret_val), len(swift_objects))
        for swift_object in ret_val:
            self.assertIsInstance(swift_object, api.SwiftObject)
            self.assertIn(swift_object._apiresource, swift_objects)
        self.mox.VerifyAll()

    def test_swift_upload_object(self):
        """swift_upload_object creates the object then writes the data."""
        CONTAINER_NAME = 'containerName'
        OBJECT_NAME = 'objectName'
        OBJECT_DATA = 'someData'
        swift_api = self.stub_swift_api()
        container = self.mox.CreateMock(cloudfiles.container.Container)
        swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
        swift_api.get_container(CONTAINER_NAME).AndReturn(container)
        container.create_object(OBJECT_NAME).AndReturn(swift_object)
        swift_object.write(OBJECT_DATA).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.swift_upload_object(self.request,
                                          CONTAINER_NAME,
                                          OBJECT_NAME,
                                          OBJECT_DATA)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_swift_delete_object(self):
        """swift_delete_object passes through and returns None."""
        CONTAINER_NAME = 'containerName'
        OBJECT_NAME = 'objectName'
        swift_api = self.stub_swift_api()
        container = self.mox.CreateMock(cloudfiles.container.Container)
        swift_api.get_container(CONTAINER_NAME).AndReturn(container)
        container.delete_object(OBJECT_NAME).AndReturn(TEST_RETURN)
        self.mox.ReplayAll()
        ret_val = api.swift_delete_object(self.request,
                                          CONTAINER_NAME,
                                          OBJECT_NAME)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()

    def test_swift_get_object_data(self):
        """swift_get_object_data streams the object's contents."""
        CONTAINER_NAME = 'containerName'
        OBJECT_NAME = 'objectName'
        OBJECT_DATA = 'objectData'
        swift_api = self.stub_swift_api()
        container = self.mox.CreateMock(cloudfiles.container.Container)
        swift_object = self.mox.CreateMock(cloudfiles.storage_object.Object)
        swift_api.get_container(CONTAINER_NAME).AndReturn(container)
        container.get_object(OBJECT_NAME).AndReturn(swift_object)
        swift_object.stream().AndReturn(OBJECT_DATA)
        self.mox.ReplayAll()
        ret_val = api.swift_get_object_data(self.request,
                                            CONTAINER_NAME,
                                            OBJECT_NAME)
        self.assertEqual(ret_val, OBJECT_DATA)
        self.mox.VerifyAll()

    def test_swift_object_exists(self):
        """swift_object_exists is True when get_object succeeds."""
        CONTAINER_NAME = 'containerName'
        OBJECT_NAME = 'objectName'
        swift_api = self.stub_swift_api()
        container = self.mox.CreateMock(cloudfiles.container.Container)
        swift_object = self.mox.CreateMock(cloudfiles.Object)
        swift_api.get_container(CONTAINER_NAME).AndReturn(container)
        container.get_object(OBJECT_NAME).AndReturn(swift_object)
        self.mox.ReplayAll()
        ret_val = api.swift_object_exists(self.request,
                                          CONTAINER_NAME,
                                          OBJECT_NAME)
        self.assertTrue(ret_val)
        self.mox.VerifyAll()

    def test_swift_copy_object(self):
        """swift_copy_object refuses overwrite, then copies the object."""
        CONTAINER_NAME = 'containerName'
        OBJECT_NAME = 'objectName'
        swift_api = self.stub_swift_api()
        container = self.mox.CreateMock(cloudfiles.container.Container)
        self.mox.StubOutWithMock(api, 'swift_object_exists')
        swift_object = self.mox.CreateMock(cloudfiles.Object)
        swift_api.get_container(CONTAINER_NAME).AndReturn(container)
        # Destination must not already exist.
        api.swift_object_exists(self.request,
                                CONTAINER_NAME,
                                OBJECT_NAME).AndReturn(False)
        container.get_object(OBJECT_NAME).AndReturn(swift_object)
        swift_object.copy_to(CONTAINER_NAME, OBJECT_NAME)
        self.mox.ReplayAll()
        ret_val = api.swift_copy_object(self.request, CONTAINER_NAME,
                                        OBJECT_NAME, CONTAINER_NAME,
                                        OBJECT_NAME)
        self.assertIsNone(ret_val)
        self.mox.VerifyAll()
| |
""" Connection pooling for Cassandra connections. """
from __future__ import with_statement
import time
import threading
import random
import socket
import sys
if 'gevent.monkey' in sys.modules:
from gevent import queue as Queue
else:
import Queue # noqa
from thrift import Thrift
from thrift.transport.TTransport import TTransportException
from connection import (Connection, default_socket_factory,
default_transport_factory)
from logging.pool_logger import PoolLogger
from util import as_interface
from cassandra.ttypes import TimedOutException, UnavailableException
_BASE_BACKOFF = 0.01
__all__ = ['QueuePool', 'ConnectionPool', 'PoolListener',
'ConnectionWrapper', 'AllServersUnavailable',
'MaximumRetryException', 'NoConnectionAvailable',
'InvalidRequestError']
class ConnectionWrapper(Connection):
"""
Creates a wrapper for a :class:`~.pycassa.connection.Connection`
object, adding pooling related functionality while still allowing
access to the thrift API calls.
These should not be created directly, only obtained through
Pool's :meth:`~.ConnectionPool.get()` method.
"""
# These mark the state of the connection so that we can
# check to see that they are not returned, checked out,
# or disposed twice (or from the wrong state).
_IN_QUEUE = 0
_CHECKED_OUT = 1
_DISPOSED = 2
def __init__(self, pool, max_retries, *args, **kwargs):
    """Open a pooled connection.

    :param pool: the owning ConnectionPool; it is notified on connect,
        dispose, and failure events.
    :param max_retries: how many times a failed operation is retried
        before MaximumRetryException is raised (-1 disables the cap,
        per the check in :meth:`_retry`).
    Remaining positional/keyword args are passed to ``Connection``.
    """
    self._pool = pool
    self._retry_count = 0
    self.max_retries = max_retries
    self.info = {}  # per-connection scratch data (last request is stored here)
    self.starttime = time.time()
    self.operation_count = 0
    # State must be CHECKED_OUT before Connection.__init__ runs, since
    # the pool is notified immediately after the transport opens.
    self._state = ConnectionWrapper._CHECKED_OUT
    Connection.__init__(self, *args, **kwargs)
    self._pool._notify_on_connect(self)
    # For testing purposes only
    self._should_fail = False
    self._original_meth = self.send_batch_mutate
def return_to_pool(self):
    """Hand this connection back to its owning pool.

    Equivalent to calling :meth:`ConnectionPool.put()` with this
    wrapper as the argument.
    """
    self._pool.put(self)
def _checkin(self):
    """Mark this wrapper as queued, rejecting invalid transitions."""
    current = self._state
    if current == ConnectionWrapper._DISPOSED:
        raise InvalidRequestError("A disposed connection has been returned "
                                  "to the connection pool.")
    if current == ConnectionWrapper._IN_QUEUE:
        raise InvalidRequestError("A connection has been returned to "
                                  "the connection pool twice.")
    self._state = ConnectionWrapper._IN_QUEUE
def _checkout(self):
    """Mark this wrapper as checked out; only valid from the queued state."""
    if self._state == ConnectionWrapper._IN_QUEUE:
        self._state = ConnectionWrapper._CHECKED_OUT
    else:
        raise InvalidRequestError("A connection has been checked "
                                  "out twice.")
def _is_in_queue_or_disposed(self):
    """Return True when the wrapper is either queued or disposed."""
    return self._state in (ConnectionWrapper._IN_QUEUE,
                           ConnectionWrapper._DISPOSED)
def _dispose_wrapper(self, reason=None):
    """Permanently close this connection and tell the pool why."""
    already_disposed = self._state == ConnectionWrapper._DISPOSED
    if already_disposed:
        raise InvalidRequestError("A connection has been disposed twice.")
    self._state = ConnectionWrapper._DISPOSED
    self.close()
    self._pool._notify_on_dispose(self, msg=reason)
def _replace(self, new_conn_wrapper):
    """
    Adopt the live state of another wrapper obtained from the pool,
    replacing our own contents with its contents.
    """
    # Copy every attribute that describes the underlying connection.
    for name in ('server', 'transport', '_iprot', '_oprot',
                 'info', 'starttime', 'operation_count', '_should_fail'):
        setattr(self, name, getattr(new_conn_wrapper, name))
    self._state = ConnectionWrapper._CHECKED_OUT
@classmethod
def _retry(cls, f):
    """Decorator: wrap thrift method `f` with automatic retry.

    On recoverable failures (timeouts, unavailability, transport or
    socket errors) the connection is closed, pool bookkeeping is
    updated, and the call is retried on a fresh connection after an
    exponential backoff, up to ``self.max_retries`` attempts
    (-1 means unlimited).  Server-side application exceptions are
    never retried.
    """
    def new_f(self, *args, **kwargs):
        self.operation_count += 1
        # Record the request so failures can be inspected via self.info.
        self.info['request'] = {'method': f.__name__, 'args': args, 'kwargs': kwargs}
        try:
            allow_retries = kwargs.pop('allow_retries', True)
            if kwargs.pop('reset', False):
                self._pool._replace_wrapper() # puts a new wrapper in the queue
                self._replace(self._pool.get()) # swaps out transport
            result = f(self, *args, **kwargs)
            self._retry_count = 0 # reset the count after a success
            return result
        except Thrift.TApplicationException:
            # Application-level errors are not transient; close and re-raise.
            self.close()
            self._pool._decrement_overflow()
            self._pool._clear_current()
            raise
        except (TimedOutException, UnavailableException,
                TTransportException,
                socket.error, IOError, EOFError), exc:
            self._pool._notify_on_failure(exc, server=self.server, connection=self)
            self.close()
            self._pool._decrement_overflow()
            self._pool._clear_current()
            self._retry_count += 1
            if (not allow_retries or
                (self.max_retries != -1 and self._retry_count > self.max_retries)):
                raise MaximumRetryException('Retried %d times. Last failure was %s: %s' %
                                            (self._retry_count, exc.__class__.__name__, exc))
            # Exponential backoff
            time.sleep(_BASE_BACKOFF * (2 ** self._retry_count))
            # Force a fresh connection on the recursive retry.
            kwargs['reset'] = True
            return new_f(self, *args, **kwargs)
    new_f.__name__ = f.__name__
    return new_f
def _fail_once(self, *args, **kwargs):
    """Test hook: raise TimedOutException exactly once, then behave
    normally by delegating to the original wrapped method.
    """
    if not self._should_fail:
        return self._original_meth(*args, **kwargs)
    # Arm-once semantics: clear the flag before raising.
    self._should_fail = False
    raise TimedOutException
def get_keyspace_description(self, keyspace=None, use_dict_for_col_metadata=False):
    """
    Describes the given keyspace.

    Returns a dictionary of the form ``{column_family_name: CfDef}``.

    If `use_dict_for_col_metadata` is ``True``, each CfDef's column
    metadata is converted in place from a list to a dictionary keyed
    by column name.
    """
    if keyspace is None:
        keyspace = self.keyspace

    ks_def = self.describe_keyspace(keyspace)
    cf_defs = dict((cf_def.name, cf_def) for cf_def in ks_def.cf_defs)
    if use_dict_for_col_metadata:
        for cf_def in cf_defs.values():
            cf_def.column_metadata = dict(
                (datum.name, datum) for datum in cf_def.column_metadata)
    return cf_defs
def __str__(self):
    """Concise identity for logging: keyspace and server of this wrapper."""
    return "<ConnectionWrapper {0}@{1}>".format(self.keyspace, self.server)
# Every Thrift operation that is safe to re-issue after a node failure is
# wrapped with ConnectionWrapper._retry so it transparently fails over.
retryable = ('get', 'get_slice', 'multiget_slice', 'get_count', 'multiget_count',
             'get_range_slices', 'get_indexed_slices', 'batch_mutate', 'add',
             'insert', 'remove', 'remove_counter', 'truncate', 'describe_keyspace',
             'atomic_batch_mutate')
for fname in retryable:
    # Wrap the inherited Connection method and install the wrapped version
    # directly on ConnectionWrapper, shadowing the original.
    new_f = ConnectionWrapper._retry(getattr(Connection, fname))
    setattr(ConnectionWrapper, fname, new_f)
class ConnectionPool(object):
    """A pool that maintains a queue of open connections."""

    # Backing attribute for the `max_overflow` property below.
    _max_overflow = 0

    def _get_max_overflow(self):
        # Property getter for `max_overflow`.
        return self._max_overflow

    def _set_max_overflow(self, max_overflow):
        # Property setter for `max_overflow`; recomputes the hard cap on
        # simultaneous connections under the pool lock.
        with self._pool_lock:
            self._max_overflow = max_overflow
            self._overflow_enabled = max_overflow > 0 or max_overflow == -1
            if max_overflow == -1:
                # -1 means "no limit": use a practically-infinite cap.
                self._max_conns = (2 ** 31) - 1
            else:
                self._max_conns = self._pool_size + max_overflow

    max_overflow = property(_get_max_overflow, _set_max_overflow)
    """ Whether or not a new connection may be opened when the
    pool is empty is controlled by `max_overflow`. This specifies how many
    additional connections may be opened after the pool has reached `pool_size`;
    keep in mind that these extra connections will be discarded upon checkin
    until the pool is below `pool_size`. This may be set to -1 to indicate no
    overflow limit. The default value is 0, which does not allow for overflow. """

    pool_timeout = 30
    """ If ``pool_size + max_overflow`` connections have already been checked
    out, an attempt to retrieve a new connection from the pool will wait
    up to `pool_timeout` seconds for a connection to be returned to the
    pool before giving up. Note that this setting is only meaningful when you
    are accessing the pool concurrently, such as with multiple threads.
    This may be set to 0 to fail immediately or -1 to wait forever.
    The default value is 30. """

    recycle = 10000
    """ After performing `recycle` number of operations, connections will
    be replaced when checked back in to the pool. This may be set to
    -1 to disable connection recycling. The default value is 10,000. """

    max_retries = 5
    """ When an operation on a connection fails due to an :exc:`~.TimedOutException`
    or :exc:`~.UnavailableException`, which tend to indicate single or
    multiple node failure, the operation will be retried on different nodes
    up to `max_retries` times before an :exc:`~.MaximumRetryException` is raised.
    Setting this to 0 disables retries and setting to -1 allows unlimited retries.
    The default value is 5. """

    logging_name = None
    """ By default, each pool identifies itself in the logs using ``id(self)``.
    If multiple pools are in use for different purposes, setting `logging_name` will
    help individual pools to be identified in the logs. """

    socket_factory = default_socket_factory
    """ A function that creates the socket for each connection in the pool.
    This function should take two arguments: `host`, the host the connection is
    being made to, and `port`, the destination port.
    By default, this is function is :func:`~connection.default_socket_factory`.
    """

    transport_factory = default_transport_factory
    """ A function that creates the transport for each connection in the pool.
    This function should take three arguments: `tsocket`, a TSocket object for the
    transport, `host`, the host the connection is being made to, and `port`,
    the destination port.
    By default, this is function is :func:`~connection.default_transport_factory`.
    """

    # NOTE(review): the mutable default for `server_list` is shared across
    # calls, but set_server_list() always copies it with list(), so the
    # sharing is harmless here.
    def __init__(self, keyspace,
                 server_list=['localhost:9160'],
                 credentials=None,
                 timeout=0.5,
                 use_threadlocal=True,
                 pool_size=5,
                 prefill=True,
                 socket_factory=default_socket_factory,
                 transport_factory=default_transport_factory,
                 **kwargs):
        """
        All connections in the pool will be opened to `keyspace`.

        `server_list` is a sequence of servers in the form ``"host:port"`` that
        the pool will connect to. The port defaults to 9160 if excluded.
        The list will be randomly shuffled before being drawn from sequentially.
        `server_list` may also be a function that returns the sequence of servers.

        If authentication or authorization is required, `credentials` must
        be supplied. This should be a dictionary containing 'username' and
        'password' keys with appropriate string values.

        `timeout` specifies in seconds how long individual connections will
        block before timing out. If set to ``None``, connections will never
        timeout.

        If `use_threadlocal` is set to ``True``, repeated calls to
        :meth:`get()` within the same application thread will
        return the same :class:`ConnectionWrapper` object if one is
        already checked out from the pool. Be careful when setting `use_threadlocal`
        to ``False`` in a multithreaded application, especially with retries enabled.
        Synchronization may be required to prevent the connection from changing while
        another thread is using it.

        The pool will keep up to `pool_size` open connections in the pool
        at any time. When a connection is returned to the pool, the
        connection will be discarded if the pool already contains `pool_size`
        connections. The total number of simultaneous connections the pool will
        allow is ``pool_size + max_overflow``,
        and the number of "sleeping" connections the pool will allow is ``pool_size``.

        A good choice for `pool_size` is a multiple of the number of servers
        passed to the Pool constructor. If a size less than this is chosen,
        the last ``(len(server_list) - pool_size)`` servers may not be used until
        either overflow occurs, a connection is recycled, or a connection
        fails. Similarly, if a multiple of ``len(server_list)`` is not chosen,
        those same servers would have a decreased load. By default, overflow
        is disabled.

        If `prefill` is set to ``True``, `pool_size` connections will be opened
        when the pool is created.

        Example Usage:

        .. code-block:: python

            >>> pool = pycassa.ConnectionPool(keyspace='Keyspace1', server_list=['10.0.0.4:9160', '10.0.0.5:9160'], prefill=False)
            >>> cf = pycassa.ColumnFamily(pool, 'Standard1')
            >>> cf.insert('key', {'col': 'val'})
            1287785685530679

        """
        self._pool_threadlocal = use_threadlocal
        self.keyspace = keyspace
        self.credentials = credentials
        self.timeout = timeout
        self.socket_factory = socket_factory
        self.transport_factory = transport_factory
        if use_threadlocal:
            self._tlocal = threading.local()
        self._pool_size = pool_size
        # Bounded FIFO of checked-in ConnectionWrappers.
        self._q = Queue.Queue(pool_size)
        self._pool_lock = threading.Lock()
        # Count of every open connection, checked in or out.
        self._current_conns = 0
        # Listener groups
        self.listeners = []
        self._on_connect = []
        self._on_checkout = []
        self._on_checkin = []
        self._on_dispose = []
        self._on_recycle = []
        self._on_failure = []
        self._on_server_list = []
        self._on_pool_dispose = []
        self._on_pool_max = []
        self.add_listener(PoolLogger())
        if "listeners" in kwargs:
            listeners = kwargs["listeners"]
            for l in listeners:
                self.add_listener(l)
        self.logging_name = kwargs.get("logging_name", None)
        if not self.logging_name:
            self.logging_name = id(self)
        # Ensure _max_conns/_overflow_enabled are initialized even when the
        # caller does not pass max_overflow explicitly.
        if "max_overflow" not in kwargs:
            self._set_max_overflow(0)
        recognized_kwargs = ["pool_timeout", "recycle", "max_retries", "max_overflow"]
        for kw in recognized_kwargs:
            if kw in kwargs:
                setattr(self, kw, kwargs[kw])
        self.set_server_list(server_list)
        self._prefill = prefill
        if self._prefill:
            self.fill()

    def set_server_list(self, server_list):
        """
        Sets the server list that the pool will make connections to.

        `server_list` should be sequence of servers in the form ``"host:port"`` that
        the pool will connect to. The list will be randomly permuted before
        being used. `server_list` may also be a function that returns the
        sequence of servers.
        """
        if callable(server_list):
            self.server_list = list(server_list())
        else:
            self.server_list = list(server_list)
        random.shuffle(self.server_list)
        self._list_position = 0
        self._notify_on_server_list(self.server_list)

    def _get_next_server(self):
        """
        Gets the next 'localhost:port' combination from the list of
        servers and increments the position. This is not thread-safe,
        but client-side load-balancing isn't so important that this is
        a problem.
        """
        if self._list_position >= len(self.server_list):
            self._list_position = 0
        server = self.server_list[self._list_position]
        self._list_position += 1
        return server

    def _create_connection(self):
        """Creates a ConnectionWrapper, which opens a
        pycassa.connection.Connection."""
        if not self.server_list:
            raise AllServersUnavailable('Cannot connect to any servers as server list is empty!')
        failure_count = 0
        # Give each server two chances before giving up entirely.
        while failure_count < 2 * len(self.server_list):
            try:
                server = self._get_next_server()
                wrapper = self._get_new_wrapper(server)
                return wrapper
            except (TTransportException, socket.error, IOError, EOFError), exc:
                self._notify_on_failure(exc, server)
                failure_count += 1
        raise AllServersUnavailable('An attempt was made to connect to each of the servers ' +
                                    'twice, but none of the attempts succeeded. The last failure was %s: %s' %
                                    (exc.__class__.__name__, exc))

    def fill(self):
        """
        Adds connections to the pool until at least ``pool_size`` connections
        exist, whether they are currently checked out from the pool or not.

        .. versionadded:: 1.2.0
        """
        with self._pool_lock:
            while self._current_conns < self._pool_size:
                conn = self._create_connection()
                conn._checkin()
                self._q.put(conn, False)
                self._current_conns += 1

    def _get_new_wrapper(self, server):
        # Construct a wrapper bound to this pool with the pool's settings.
        return ConnectionWrapper(self, self.max_retries,
                                 self.keyspace, server,
                                 timeout=self.timeout,
                                 credentials=self.credentials,
                                 socket_factory=self.socket_factory,
                                 transport_factory=self.transport_factory)

    def _replace_wrapper(self):
        """Try to replace the connection."""
        if not self._q.full():
            conn = self._create_connection()
            conn._checkin()
            try:
                self._q.put(conn, False)
            except Queue.Full:
                conn._dispose_wrapper(reason="pool is already full")
            else:
                # Only count the connection once it is safely queued.
                with self._pool_lock:
                    self._current_conns += 1

    def _clear_current(self):
        """ If using threadlocal, clear our threadlocal current conn. """
        if self._pool_threadlocal:
            self._tlocal.current = None

    def put(self, conn):
        """ Returns a connection to the pool. """
        # A closed connection has already been accounted for elsewhere.
        if not conn.transport.isOpen():
            return
        if self._pool_threadlocal:
            # In threadlocal mode, only the thread's current connection may
            # be returned; otherwise this call is a no-op.
            if hasattr(self._tlocal, 'current') and self._tlocal.current:
                conn = self._tlocal.current
                self._tlocal.current = None
            else:
                conn = None
        if conn:
            conn._retry_count = 0
            if conn._is_in_queue_or_disposed():
                raise InvalidRequestError("Connection was already checked in or disposed")
            # Replace well-used connections instead of re-queueing them.
            if self.recycle > -1 and conn.operation_count > self.recycle:
                new_conn = self._create_connection()
                self._notify_on_recycle(conn, new_conn)
                # NOTE(review): "recyling" typo is preserved in the message.
                conn._dispose_wrapper(reason="recyling connection")
                conn = new_conn
            conn._checkin()
            self._notify_on_checkin(conn)
            try:
                self._q.put_nowait(conn)
            except Queue.Full:
                # Pool already holds pool_size connections; this one was
                # overflow and is discarded.
                conn._dispose_wrapper(reason="pool is already full")
                self._decrement_overflow()

    # Backwards-compatible alias for put().
    return_conn = put

    def _decrement_overflow(self):
        # One open connection fewer, under the pool lock.
        with self._pool_lock:
            self._current_conns -= 1

    def _new_if_required(self, max_conns, check_empty_queue=False):
        """ Creates new connection if there is room """
        with self._pool_lock:
            # Reserve a slot while holding the lock; actual connection
            # creation happens outside the lock.
            if (not check_empty_queue or self._q.empty()) and self._current_conns < max_conns:
                new_conn = True
                self._current_conns += 1
            else:
                new_conn = False
        if new_conn:
            try:
                return self._create_connection()
            except:
                # Creation failed: give back the reserved slot.
                # NOTE(review): bare except is deliberate here so the slot is
                # always released before re-raising.
                with self._pool_lock:
                    self._current_conns -= 1
                raise
        return None

    def get(self):
        """ Gets a connection from the pool. """
        conn = None
        if self._pool_threadlocal:
            # Reuse this thread's already-checked-out connection, if any.
            try:
                if self._tlocal.current:
                    conn = self._tlocal.current
                if conn:
                    return conn
            except AttributeError:
                pass
        # First, open a new connection if we are below pool_size.
        conn = self._new_if_required(self._pool_size)
        if not conn:
            # if queue is empty and max_overflow is not reached, create new conn
            conn = self._new_if_required(self._max_conns, check_empty_queue=True)
            if not conn:
                # We will have to fetch from the queue, and maybe block
                timeout = self.pool_timeout
                if timeout == -1:
                    timeout = None
                try:
                    conn = self._q.get(timeout=timeout)
                except Queue.Empty:
                    self._notify_on_pool_max(pool_max=self._max_conns)
                    size_msg = "size %d" % (self._pool_size, )
                    if self._overflow_enabled:
                        size_msg += "overflow %d" % (self._max_overflow)
                    message = "ConnectionPool limit of %s reached, unable to obtain connection after %d seconds" \
                              % (size_msg, self.pool_timeout)
                    raise NoConnectionAvailable(message)
                else:
                    conn._checkout()
        if self._pool_threadlocal:
            self._tlocal.current = conn
        self._notify_on_checkout(conn)
        return conn

    def execute(self, f, *args, **kwargs):
        """
        Get a connection from the pool, execute
        `f` on it with `*args` and `**kwargs`, return the
        connection to the pool, and return the result of `f`.
        """
        conn = None
        try:
            conn = self.get()
            return getattr(conn, f)(*args, **kwargs)
        finally:
            if conn:
                conn.return_to_pool()

    def dispose(self):
        """ Closes all checked in connections in the pool. """
        while True:
            try:
                conn = self._q.get(False)
                conn._dispose_wrapper(
                    reason="Pool %s is being disposed" % id(self))
                self._decrement_overflow()
            except Queue.Empty:
                break
        self._notify_on_pool_dispose()

    def size(self):
        """ Returns the capacity of the pool. """
        return self._pool_size

    def checkedin(self):
        """ Returns the number of connections currently in the pool. """
        return self._q.qsize()

    def overflow(self):
        """ Returns the number of overflow connections that are currently open. """
        return max(self._current_conns - self._pool_size, 0)

    def checkedout(self):
        """ Returns the number of connections currently checked out from the pool. """
        return self._current_conns - self.checkedin()

    def add_listener(self, listener):
        """
        Add a :class:`PoolListener`-like object to this pool.

        `listener` may be an object that implements some or all of
        :class:`PoolListener`, or a dictionary of callables containing implementations
        of some or all of the named methods in :class:`PoolListener`.
        """
        listener = as_interface(listener,
                                methods=('connection_created', 'connection_checked_out',
                                         'connection_checked_in', 'connection_disposed',
                                         'connection_recycled', 'connection_failed',
                                         'obtained_server_list', 'pool_disposed',
                                         'pool_at_max'))
        self.listeners.append(listener)
        # Register the listener only for the hooks it actually implements.
        if hasattr(listener, 'connection_created'):
            self._on_connect.append(listener)
        if hasattr(listener, 'connection_checked_out'):
            self._on_checkout.append(listener)
        if hasattr(listener, 'connection_checked_in'):
            self._on_checkin.append(listener)
        if hasattr(listener, 'connection_disposed'):
            self._on_dispose.append(listener)
        if hasattr(listener, 'connection_recycled'):
            self._on_recycle.append(listener)
        if hasattr(listener, 'connection_failed'):
            self._on_failure.append(listener)
        if hasattr(listener, 'obtained_server_list'):
            self._on_server_list.append(listener)
        if hasattr(listener, 'pool_disposed'):
            self._on_pool_dispose.append(listener)
        if hasattr(listener, 'pool_at_max'):
            self._on_pool_max.append(listener)

    def _notify_on_pool_dispose(self):
        # Fire the pool_disposed hook on each registered listener.
        if self._on_pool_dispose:
            dic = {'pool_id': self.logging_name,
                   'level': 'info'}
            for l in self._on_pool_dispose:
                l.pool_disposed(dic)

    def _notify_on_pool_max(self, pool_max):
        # Fire the pool_at_max hook on each registered listener.
        if self._on_pool_max:
            dic = {'pool_id': self.logging_name,
                   'level': 'info',
                   'pool_max': pool_max}
            for l in self._on_pool_max:
                l.pool_at_max(dic)

    def _notify_on_dispose(self, conn_record, msg=""):
        # Fire the connection_disposed hook on each registered listener.
        if self._on_dispose:
            dic = {'pool_id': self.logging_name,
                   'level': 'debug',
                   'connection': conn_record}
            if msg:
                dic['message'] = msg
            for l in self._on_dispose:
                l.connection_disposed(dic)

    def _notify_on_server_list(self, server_list):
        # Fire the obtained_server_list hook on each registered listener.
        dic = {'pool_id': self.logging_name,
               'level': 'debug',
               'server_list': server_list}
        if self._on_server_list:
            for l in self._on_server_list:
                l.obtained_server_list(dic)

    def _notify_on_recycle(self, old_conn, new_conn):
        # Fire the connection_recycled hook on each registered listener.
        if self._on_recycle:
            dic = {'pool_id': self.logging_name,
                   'level': 'debug',
                   'old_conn': old_conn,
                   'new_conn': new_conn}
            for l in self._on_recycle:
                l.connection_recycled(dic)

    def _notify_on_connect(self, conn_record, msg="", error=None):
        # Fire the connection_created hook on each registered listener.
        if self._on_connect:
            dic = {'pool_id': self.logging_name,
                   'level': 'debug',
                   'connection': conn_record}
            if msg:
                dic['message'] = msg
            if error:
                # A failed connect is reported at warn level with the error.
                dic['error'] = error
                dic['level'] = 'warn'
            for l in self._on_connect:
                l.connection_created(dic)

    def _notify_on_checkin(self, conn_record):
        # Fire the connection_checked_in hook on each registered listener.
        if self._on_checkin:
            dic = {'pool_id': self.logging_name,
                   'level': 'debug',
                   'connection': conn_record}
            for l in self._on_checkin:
                l.connection_checked_in(dic)

    def _notify_on_checkout(self, conn_record):
        # Fire the connection_checked_out hook on each registered listener.
        if self._on_checkout:
            dic = {'pool_id': self.logging_name,
                   'level': 'debug',
                   'connection': conn_record}
            for l in self._on_checkout:
                l.connection_checked_out(dic)

    def _notify_on_failure(self, error, server, connection=None):
        # Fire the connection_failed hook on each registered listener.
        if self._on_failure:
            dic = {'pool_id': self.logging_name,
                   'level': 'info',
                   'error': error,
                   'server': server,
                   'connection': connection}
            for l in self._on_failure:
                l.connection_failed(dic)
# Backwards-compatible alias: the pool was previously exposed as QueuePool.
QueuePool = ConnectionPool
class PoolListener(object):
    """Hooks into the lifecycle of connections in a :class:`ConnectionPool`.

    Usage::

        class MyListener(PoolListener):
            def connection_created(self, dic):
                '''perform connect operations'''
            # etc.

        # create a new pool with a listener
        p = ConnectionPool(..., listeners=[MyListener()])

        # or add a listener after the fact
        p.add_listener(MyListener())

    Listeners receive a dictionary that contains event information and
    is indexed by a string describing that piece of info. For example,
    all event dictionaries include 'level', so dic['level'] will return
    the prescribed logging level.

    There is no need to subclass :class:`PoolListener` to handle events.
    Any class that implements one or more of these methods can be used
    as a pool listener. The :class:`ConnectionPool` will inspect the methods
    provided by a listener object and add the listener to one or more
    internal event queues based on its capabilities. In terms of
    efficiency and function call overhead, you're much better off only
    providing implementations for the hooks you'll be using.

    Each of the :class:`PoolListener` methods will be called with a
    :class:`dict` as the single parameter. This :class:`dict` may
    contain the following fields:

    * `connection`: The :class:`ConnectionWrapper` object that persistently
      manages the connection
    * `message`: The reason this event happened
    * `error`: The :class:`Exception` that caused this event
    * `pool_id`: The id of the :class:`ConnectionPool` that this event came from
    * `level`: The prescribed logging level for this event. Can be 'debug', 'info',
      'warn', 'error', or 'critical'

    Entries in the :class:`dict` that are specific to only one event type are
    detailed with each method.
    """

    def connection_created(self, dic):
        """Called once for each new Cassandra connection.

        Fields: `pool_id`, `level`, and `connection`.
        """

    def connection_checked_out(self, dic):
        """Called when a connection is retrieved from the Pool.

        Fields: `pool_id`, `level`, and `connection`.
        """

    def connection_checked_in(self, dic):
        """Called when a connection returns to the pool.

        Fields: `pool_id`, `level`, and `connection`.
        """

    def connection_disposed(self, dic):
        """Called when a connection is closed.

        ``dic['message']``: A reason for closing the connection, if any.

        Fields: `pool_id`, `level`, `connection`, and `message`.
        """

    def connection_recycled(self, dic):
        """Called when a connection is recycled.

        ``dic['old_conn']``: The :class:`ConnectionWrapper` that is being recycled

        ``dic['new_conn']``: The :class:`ConnectionWrapper` that is replacing it

        Fields: `pool_id`, `level`, `old_conn`, and `new_conn`.
        """

    def connection_failed(self, dic):
        """Called when a connection to a single server fails.

        ``dic['server']``: The server the connection was made to.

        Fields: `pool_id`, `level`, `error`, `server`, and `connection`.
        """

    # BUG FIX: this hook was previously named ``server_list_obtained``, but
    # ConnectionPool.add_listener() and _notify_on_server_list() dispatch on
    # the name ``obtained_server_list``, so subclasses overriding the old
    # name never received the event. The method now uses the dispatched
    # name; the old name is kept as an alias for backward compatibility.
    def obtained_server_list(self, dic):
        """Called when the pool finalizes its server list.

        ``dic['server_list']``: The randomly permuted list of servers that the
        pool will choose from.

        Fields: `pool_id`, `level`, and `server_list`.
        """

    # Deprecated alias retained for code that referenced the old name.
    server_list_obtained = obtained_server_list

    def pool_disposed(self, dic):
        """Called when a pool is disposed.

        Fields: `pool_id`, and `level`.
        """

    def pool_at_max(self, dic):
        """
        Called when an attempt is made to get a new connection from the
        pool, but the pool is already at its max size.

        ``dic['pool_max']``: The max number of connections the pool will
        keep open at one time.

        Fields: `pool_id`, `pool_max`, and `level`.
        """
class AllServersUnavailable(Exception):
    """Raised when a pool cannot open a connection to any server in its list."""
class NoConnectionAvailable(Exception):
    """Raised when a checkout times out because the pool has no connections left."""
class MaximumRetryException(Exception):
    """
    Raised once a :class:`ConnectionWrapper` exhausts its allowed retries
    before being returned to the pool. Note that the retries counted need
    not all come from the same operation.
    """
class InvalidRequestError(Exception):
    """
    Raised when pycassa is asked to do something it cannot do.

    Generally corresponds to runtime state errors (e.g. double check-in).
    """
| |
'''
Created on Apr 23, 2017
@author: jamie
'''
from PyQt5.QtCore import Qt
from PyQt5.QtGui import (QDoubleValidator, QTextBlock)
from PyQt5.QtWidgets import (QWidget, QGridLayout, QLabel, QLineEdit,
                             QPushButton, QGroupBox, QSplitter, QCheckBox,
                             QHBoxLayout, QVBoxLayout, QMessageBox,
                             QTextEdit)
# ==============================================================================
class PrMainWidget(QWidget):
    """Main application widget.

    Lays out two parameter panes (observer and target) side by side above a
    bottom pane with a log area and a Run button, all separated by
    user-adjustable splitters.
    """

    # Widget handles are declared at class level so the complete attribute
    # set is visible in one place; each is assigned in initUI()/setup*().
    hbox = None
    obsGroupBox = None
    tgtGroupBox = None
    bottomGroupBox = None
    splitter1 = None
    splitter2 = None
    obsGrid = None
    obsAltitudeLabel = None
    obsHeadingLabel = None
    obsMachLabel = None
    obsLoftAngleLabel = None
    obsAltitudeEdit = None
    obsHeadingEdit = None
    obsMachEdit = None
    obsLoftCheckBox = None
    obsLoftAngleEdit = None
    bottomVbox = None
    bottomHbox = None
    bottomRunButton = None
    runMessageBox = None
    logBox = None

    # ==========================================================================
    def __init__(self):
        super().__init__()
        self.initUI()

    # ==========================================================================
    def initUI(self):
        """Create all child widgets and arrange them with splitters."""
        self.hbox = QHBoxLayout(self)

        self.setupObserverWidgets()
        self.setupTargetWidgets()
        self.setupBottomWidgets()

        # Observer and target panes side by side...
        self.splitter1 = QSplitter(Qt.Horizontal)
        self.splitter1.addWidget(self.obsGroupBox)
        self.splitter1.addWidget(self.tgtGroupBox)

        # ...stacked above the run/log pane.
        self.splitter2 = QSplitter(Qt.Vertical)
        self.splitter2.addWidget(self.splitter1)
        self.splitter2.addWidget(self.bottomGroupBox)

        self.hbox.addWidget(self.splitter2)
        self.setLayout(self.hbox)

    # ==========================================================================
    def setupObserverWidgets(self):
        """Build the observer pane: altitude, heading, Mach and loft inputs."""
        self.obsGroupBox = QGroupBox('Observer:', self)

        self.obsAltitudeLabel = QLabel('Altitude (m)')
        self.obsHeadingLabel = QLabel('Heading (0<sup>o</sup> = North) <br /> '
                                      '+=cw (deg)')
        self.obsMachLabel = QLabel('Initial Mach #')
        self.obsLoftAngleLabel = QLabel('Loft Angle (deg)')
        self.obsLoftAngleLabel.setEnabled(False)

        self.obsAltitudeEdit = QLineEdit()
        self.obsAltitudeEdit.setValidator(QDoubleValidator(100.0, 75000.0, 2))
        self.obsAltitudeEdit.setText('6000.0')
        self.obsHeadingEdit = QLineEdit()
        self.obsHeadingEdit.setValidator(QDoubleValidator(-180.0, 180.0, 2))
        self.obsHeadingEdit.setText('0.0')
        self.obsMachEdit = QLineEdit()
        self.obsMachEdit.setValidator(QDoubleValidator(0.5, 2.0, 2))
        self.obsMachEdit.setText('0.9')

        self.obsLoftCheckBox = QCheckBox()
        self.obsLoftCheckBox.setText('Do Loft?')
        self.obsLoftCheckBox.stateChanged.connect(self.doObsLoftCheckHandler)
        # Loft-angle entry starts disabled; the checkbox handler enables it.
        self.obsLoftAngleEdit = QLineEdit()
        self.obsLoftAngleEdit.setValidator(QDoubleValidator(0, 75, 2))
        self.obsLoftAngleEdit.setText('0.0')
        self.obsLoftAngleEdit.setEnabled(False)

        self.obsGrid = QGridLayout()
        self.obsGrid.setSpacing(10)
        self.obsGrid.addWidget(self.obsAltitudeLabel, 1, 0)
        self.obsGrid.addWidget(self.obsAltitudeEdit, 1, 1)
        self.obsGrid.addWidget(self.obsHeadingLabel, 2, 0)
        self.obsGrid.addWidget(self.obsHeadingEdit, 2, 1)
        self.obsGrid.addWidget(self.obsMachLabel, 3, 0)
        self.obsGrid.addWidget(self.obsMachEdit, 3, 1)
        self.obsGrid.addWidget(self.obsLoftCheckBox, 4, 0, 1, 2)
        self.obsGrid.addWidget(self.obsLoftAngleLabel, 5, 0)
        self.obsGrid.addWidget(self.obsLoftAngleEdit, 5, 1)
        self.obsGroupBox.setLayout(self.obsGrid)

    # ==========================================================================
    def setupTargetWidgets(self):
        """Build the target pane: altitude, heading, Mach and loft inputs."""
        self.tgtGroupBox = QGroupBox('Target:', self)

        self.tgtAltitudeLabel = QLabel('Altitude (m)')
        self.tgtHeadingLabel = QLabel('Heading (0<sup>o</sup> = North) <br /> '
                                      '+=cw (deg)')
        self.tgtMachLabel = QLabel('Initial Mach #')
        self.tgtLoftAngleLabel = QLabel('Loft Angle (deg)')
        self.tgtLoftAngleLabel.setEnabled(False)

        self.tgtAltitudeEdit = QLineEdit()
        self.tgtAltitudeEdit.setValidator(QDoubleValidator(100.0, 75000.0, 2))
        self.tgtAltitudeEdit.setText('6000.0')
        self.tgtHeadingEdit = QLineEdit()
        self.tgtHeadingEdit.setValidator(QDoubleValidator(-180.0, 180.0, 2))
        self.tgtHeadingEdit.setText('0.0')
        self.tgtMachEdit = QLineEdit()
        self.tgtMachEdit.setValidator(QDoubleValidator(0.5, 2.0, 2))
        self.tgtMachEdit.setText('0.9')

        self.tgtLoftCheckBox = QCheckBox()
        self.tgtLoftCheckBox.setText('Do Loft?')
        self.tgtLoftCheckBox.stateChanged.connect(self.doTgtLoftCheckHandler)
        # Loft-angle entry starts disabled; the checkbox handler enables it.
        self.tgtLoftAngleEdit = QLineEdit()
        self.tgtLoftAngleEdit.setValidator(QDoubleValidator(0, 75, 2))
        self.tgtLoftAngleEdit.setText('0.0')
        self.tgtLoftAngleEdit.setEnabled(False)

        self.tgtGrid = QGridLayout()
        self.tgtGrid.setSpacing(10)
        self.tgtGrid.addWidget(self.tgtAltitudeLabel, 1, 0)
        self.tgtGrid.addWidget(self.tgtAltitudeEdit, 1, 1)
        self.tgtGrid.addWidget(self.tgtHeadingLabel, 2, 0)
        self.tgtGrid.addWidget(self.tgtHeadingEdit, 2, 1)
        self.tgtGrid.addWidget(self.tgtMachLabel, 3, 0)
        self.tgtGrid.addWidget(self.tgtMachEdit, 3, 1)
        self.tgtGrid.addWidget(self.tgtLoftCheckBox, 4, 0, 1, 2)
        self.tgtGrid.addWidget(self.tgtLoftAngleLabel, 5, 0)
        self.tgtGrid.addWidget(self.tgtLoftAngleEdit, 5, 1)
        self.tgtGroupBox.setLayout(self.tgtGrid)

    # ==========================================================================
    def setupBottomWidgets(self):
        """Build the bottom pane: a read-only log area and a Run button."""
        self.bottomGroupBox = QGroupBox(self)

        self.bottomRunButton = QPushButton("Run")
        self.bottomRunButton.clicked.connect(self.doBottomRunButtonHandler)

        # BUG FIX: this was previously a QTextBlock, which is a QtGui
        # document-structure class, not a widget, and was never added to a
        # layout. A read-only QTextEdit is the proper widget for a log view.
        self.logBox = QTextEdit()
        self.logBox.setReadOnly(True)

        self.bottomHbox = QHBoxLayout()
        self.bottomHbox.addStretch(1)
        self.bottomHbox.addWidget(self.bottomRunButton)

        self.bottomVbox = QVBoxLayout()
        self.bottomVbox.addStretch(1)
        self.bottomVbox.addWidget(self.logBox)
        self.bottomVbox.addLayout(self.bottomHbox)
        self.bottomGroupBox.setLayout(self.bottomVbox)

    # ==========================================================================
    def doObsLoftCheckHandler(self, state):
        """Enable the observer loft-angle inputs only while lofting is requested."""
        enabled = (state == Qt.Checked)
        self.obsLoftAngleEdit.setEnabled(enabled)
        self.obsLoftAngleLabel.setEnabled(enabled)

    # ==========================================================================
    def doTgtLoftCheckHandler(self, state):
        """Enable the target loft-angle inputs only while lofting is requested."""
        enabled = (state == Qt.Checked)
        self.tgtLoftAngleEdit.setEnabled(enabled)
        self.tgtLoftAngleLabel.setEnabled(enabled)

    # ==========================================================================
    def doBottomRunButtonHandler(self, state):
        """Pop up the running-log message box when the Run button is clicked."""
        self.runMessageBox = QMessageBox(self)
        self.runMessageBox.setWindowTitle('Running Log')
        self.runMessageBox.show()
| |
from sqlalchemy.testing import eq_
from sqlalchemy.orm import mapper, relationship, create_session, \
clear_mappers, sessionmaker, aliased,\
Session, subqueryload
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.orm.session import _sessions
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy import MetaData, Integer, String, ForeignKey, \
Unicode, select
import sqlalchemy as sa
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.sql import column
from sqlalchemy.processors import to_decimal_processor_factory, \
to_unicode_processor_factory
from sqlalchemy.testing.util import gc_collect
import decimal
import gc
from sqlalchemy.testing import fixtures
import weakref
class A(fixtures.ComparableEntity):
    """Parent entity mapped ad hoc inside the memory-usage tests."""
    pass
class B(fixtures.ComparableEntity):
    """Child entity mapped ad hoc inside the memory-usage tests."""
    pass
class ASub(A):
    """Subclass of A used for inheritance-related memory tests."""
    pass
def profile_memory(maxtimes=50):
    """Decorator factory: run the wrapped callable repeatedly and fail the
    test if the number of live GC-tracked objects keeps growing.

    The wrapped function is executed in rounds of 5 calls (up to `maxtimes`
    total calls), sampling ``len(gc.get_objects())`` after each call. The
    test passes as soon as the sample series flatlines or declines.
    """
    def decorate(func):
        # run the test N times. if length of gc.get_objects()
        # keeps growing, assert false

        def get_objects_skipping_sqlite_issue():
            # pysqlite keeps adding weakref objects which only
            # get reset after 220 iterations.  We'd like to keep these
            # tests under 50 iterations and ideally about ten, so
            # just filter them out so that we get a "flatline" more quickly.
            if testing.against("sqlite+pysqlite"):
                return [o for o in gc.get_objects()
                        if not isinstance(o, weakref.ref)]
            else:
                return gc.get_objects()

        def profile(*args):
            gc_collect()
            samples = []

            success = False
            for y in range(maxtimes // 5):
                for x in range(5):
                    func(*args)
                    gc_collect()
                    samples.append(len(get_objects_skipping_sqlite_issue()))

                print("sample gc sizes:", samples)

                # No sessions may survive a run; that is a leak by itself.
                assert len(_sessions) == 0

                # check for "flatline" - size is constant for
                # 5 iterations
                for x in samples[-4:]:
                    if x != samples[-5]:
                        break
                else:
                    success = True

                if not success:
                    # object count is bigger than when it started
                    if samples[-1] > samples[0]:
                        for x in samples[1:-2]:
                            # see if a spike bigger than the endpoint exists
                            if x > samples[-1]:
                                success = True
                                break
                    else:
                        success = True

                # if we saw count go down or flatline,
                # we're done
                if success:
                    break

                # else keep trying until maxtimes
            else:
                # Loop exhausted without ever flatlining: report the samples.
                assert False, repr(samples)

        return profile
    return decorate
def assert_no_mappers():
    """Clear all mappers, force a GC pass, and verify the registry is empty."""
    clear_mappers()
    gc_collect()
    assert not len(_mapper_registry)
class EnsureZeroed(fixtures.ORMTest):
    """ORM test base that begins each test from a clean slate.

    Empties the global session and mapper registries so object counts
    measured by the memory tests are meaningful, and provides a fresh
    reaper-less engine on ``self.engine``.
    """
    def setup(self):
        _mapper_registry.clear()
        _sessions.clear()
        self.engine = engines.testing_engine(options={"use_reaper": False})
class MemUsageTest(EnsureZeroed):
__requires__ = 'cpython',
__backend__ = True
# A deliberately leaky body proves that profile_memory's growth
# assertion actually fires; the test is therefore expected to fail.
@testing.fails_if(lambda: True)
def test_fixture(self):
    class Leaky(object):
        pass

    holder = []

    @profile_memory(maxtimes=10)
    def go():
        # Replace the tail of the list with six fresh objects each call,
        # producing monotonic object growth.
        holder[-1:] = [Leaky() for _ in range(6)]
    go()
def test_session(self):
    """Exercise a full ORM round trip (insert, flush, query, delete) in a
    loop and verify no objects leak between iterations.
    """
    metadata = MetaData(self.engine)

    table1 = Table("mytable", metadata,
                   Column('col1', Integer, primary_key=True,
                          test_needs_autoincrement=True),
                   Column('col2', String(30)))

    table2 = Table("mytable2", metadata,
                   Column('col1', Integer, primary_key=True,
                          test_needs_autoincrement=True),
                   Column('col2', String(30)),
                   Column('col3', Integer, ForeignKey("mytable.col1")))

    metadata.create_all()

    m1 = mapper(A, table1, properties={
        "bs": relationship(B, cascade="all, delete",
                           order_by=table2.c.col1)},
        order_by=table1.c.col1)
    m2 = mapper(B, table2)

    # Non-primary mapper included to verify it doesn't pin objects.
    m3 = mapper(A, table1, non_primary=True)

    @profile_memory()
    def go():
        sess = create_session()
        a1 = A(col2="a1")
        a2 = A(col2="a2")
        a3 = A(col2="a3")
        a1.bs.append(B(col2="b1"))
        a1.bs.append(B(col2="b2"))
        a3.bs.append(B(col2="b3"))
        for x in [a1, a2, a3]:
            sess.add(x)
        sess.flush()
        sess.expunge_all()

        alist = sess.query(A).all()
        eq_(
            [
                A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                A(col2="a2", bs=[]),
                A(col2="a3", bs=[B(col2="b3")])
            ],
            alist)

        for a in alist:
            sess.delete(a)
        sess.flush()
    go()

    metadata.drop_all()
    # Drop the mapper references before asserting the registry is empty.
    del m1, m2, m3
    assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select([1]))
r.close()
sess.close()
del sess
del sessmaker
go()
    @testing.crashes('sqlite', ':memory: connection not suitable here')
    def test_orm_many_engines(self):
        """Sessions bound to many short-lived engines must not leak."""
        metadata = MetaData(self.engine)
        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)))
        table2 = Table("mytable2", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)),
            Column('col3', Integer, ForeignKey("mytable.col1")))
        metadata.create_all()
        # small compiled-cache sizes keep per-mapper caches bounded
        m1 = mapper(A, table1, properties={
            "bs":relationship(B, cascade="all, delete",
                              order_by=table2.c.col1)},
            order_by=table1.c.col1,
            _compiled_cache_size=10
        )
        m2 = mapper(B, table2,
            _compiled_cache_size=10
        )
        m3 = mapper(A, table1, non_primary=True)
        @profile_memory()
        def go():
            # a brand-new engine (with explicit logging names) per iteration
            engine = engines.testing_engine(
                options={'logging_name':'FOO',
                         'pool_logging_name':'BAR',
                         'use_reaper':False}
            )
            sess = create_session(bind=engine)
            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1,a2,a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()
            alist = sess.query(A).all()
            eq_(
                [
                    A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                    A(col2="a2", bs=[]),
                    A(col2="a3", bs=[B(col2="b3")])
                ],
                alist)
            for a in alist:
                sess.delete(a)
            sess.flush()
            sess.close()
            engine.dispose()
        go()
        metadata.drop_all()
        del m1, m2, m3
        assert_no_mappers()
    def test_ad_hoc_types(self):
        """test storage of bind processors, result processors
        in dialect-wide registry.

        After exercising a variety of types, the dialect's _type_memos
        cache must be empty (entries must not pin the ad-hoc types alive).
        """
        from sqlalchemy.dialects import mysql, postgresql, sqlite
        from sqlalchemy import types
        eng = engines.testing_engine()
        for args in (
            (types.Integer, ),
            (types.String, ),
            (types.PickleType, ),
            (types.Enum, 'a', 'b', 'c'),
            (sqlite.DATETIME, ),
            (postgresql.ENUM, 'a', 'b', 'c'),
            (types.Interval, ),
            (postgresql.INTERVAL, ),
            (mysql.VARCHAR, ),
        ):
            @profile_memory()
            def go():
                # construct the type fresh and hit both processor caches
                type_ = args[0](*args[1:])
                bp = type_._cached_bind_processor(eng.dialect)
                rp = type_._cached_result_processor(eng.dialect, 0)
            go()
        assert not eng.dialect._type_memos
    def test_many_updates(self):
        """UPDATEs touching varying column subsets must not grow memory.

        Each iteration updates a different combination of the 10 columns
        (driven by the binary digits of a counter), exercising many distinct
        compiled UPDATE statements against a bounded compiled cache.
        """
        metadata = MetaData(self.engine)
        wide_table = Table('t', metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            *[Column('col%d' % i, Integer) for i in range(10)]
        )
        class Wide(object):
            pass
        mapper(Wide, wide_table, _compiled_cache_size=10)
        metadata.create_all()
        session = create_session()
        w1 = Wide()
        session.add(w1)
        session.flush()
        session.close()
        del session
        # mutable cell so the nested function can increment across calls
        counter = [1]
        @profile_memory()
        def go():
            session = create_session()
            w1 = session.query(Wide).first()
            x = counter[0]
            dec = 10
            while dec > 0:
                # trying to count in binary here,
                # works enough to trip the test case
                if pow(2, dec) < x:
                    setattr(w1, 'col%d' % dec, counter[0])
                    x -= pow(2, dec)
                dec -= 1
            session.flush()
            session.close()
            counter[0] += 1
        try:
            go()
        finally:
            metadata.drop_all()
    @testing.crashes('mysql+cymysql', 'blocking')
    def test_unicode_warnings(self):
        """Warnings emitted for non-unicode binds must not accumulate."""
        metadata = MetaData(self.engine)
        table1 = Table('mytable', metadata, Column('col1', Integer,
                       primary_key=True,
                       test_needs_autoincrement=True), Column('col2',
                       Unicode(30)))
        metadata.create_all()
        i = [1]
        # the times here is cranked way up so that we can see
        # pysqlite clearing out its internal buffer and allow
        # the test to pass
        @testing.emits_warning()
        @profile_memory()
        def go():
            # execute with a non-unicode object. a warning is emitted,
            # this warning shouldn't clog up memory.
            self.engine.execute(table1.select().where(table1.c.col2
                                == 'foo%d' % i[0]))
            i[0] += 1
        try:
            go()
        finally:
            metadata.drop_all()
    def test_mapper_reset(self):
        """Configuring and clearing mappers every iteration must not leak."""
        metadata = MetaData(self.engine)
        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)))
        table2 = Table("mytable2", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)),
            Column('col3', Integer, ForeignKey("mytable.col1")))
        @profile_memory()
        def go():
            # mappers are created fresh and torn down on every iteration
            m1 = mapper(A, table1, properties={
                "bs":relationship(B, order_by=table2.c.col1)
            })
            m2 = mapper(B, table2)
            m3 = mapper(A, table1, non_primary=True)
            sess = create_session()
            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1,a2,a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()
            alist = sess.query(A).order_by(A.col1).all()
            eq_(
                [
                    A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                    A(col2="a2", bs=[]),
                    A(col2="a3", bs=[B(col2="b3")])
                ],
                alist)
            for a in alist:
                sess.delete(a)
            sess.flush()
            sess.close()
            clear_mappers()
        metadata.create_all()
        try:
            go()
        finally:
            metadata.drop_all()
            assert_no_mappers()
    def test_alias_pathing(self):
        """subqueryload with of_type() aliases must not grow path caches."""
        metadata = MetaData(self.engine)
        a = Table("a", metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('bid', Integer, ForeignKey('b.id')),
            Column('type', String(30))
        )
        asub = Table("asub", metadata,
            Column('id', Integer, ForeignKey('a.id'),
                   primary_key=True),
            Column('data', String(30)))
        b = Table("b", metadata,
            Column('id', Integer, primary_key=True,
                   test_needs_autoincrement=True),
        )
        mapper(A, a, polymorphic_identity='a',
               polymorphic_on=a.c.type)
        mapper(ASub, asub, inherits=A,polymorphic_identity='asub')
        m1 = mapper(B, b, properties={
            'as_':relationship(A)
        })
        metadata.create_all()
        sess = Session()
        a1 = ASub(data="a1")
        a2 = ASub(data="a2")
        a3 = ASub(data="a3")
        b1 = B(as_=[a1, a2, a3])
        sess.add(b1)
        sess.commit()
        del sess
        # sqlite has a slow enough growth here
        # that we have to run it more times to see the
        # "dip" again
        @profile_memory(maxtimes=120)
        def go():
            sess = Session()
            sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
            sess.close()
        try:
            go()
        finally:
            metadata.drop_all()
        clear_mappers()
    def test_path_registry(self):
        """Repeated PathRegistry lookups through aliases must not leak."""
        metadata = MetaData()
        a = Table("a", metadata,
            Column('id', Integer, primary_key=True),
            Column('foo', Integer),
            Column('bar', Integer)
        )
        m1 = mapper(A, a)
        @profile_memory()
        def go():
            # a fresh alias per iteration; the registry must not retain it
            ma = sa.inspect(aliased(A))
            m1._path_registry[m1.attrs.foo][ma][m1.attrs.bar]
        go()
        clear_mappers()
    def test_with_inheritance(self):
        """Joined-table inheritance mappings created per-iteration must not leak."""
        metadata = MetaData(self.engine)
        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30))
        )
        table2 = Table("mytable2", metadata,
            Column('col1', Integer, ForeignKey('mytable.col1'),
                   primary_key=True, test_needs_autoincrement=True),
            Column('col3', String(30)),
        )
        @profile_memory()
        def go():
            # classes and mappers are local; dropping the class references
            # must be enough to release the whole configuration
            class A(fixtures.ComparableEntity):
                pass
            class B(A):
                pass
            mapper(A, table1,
                   polymorphic_on=table1.c.col2,
                   polymorphic_identity='a')
            mapper(B, table2,
                   inherits=A,
                   polymorphic_identity='b')
            sess = create_session()
            a1 = A()
            a2 = A()
            b1 = B(col3='b1')
            b2 = B(col3='b2')
            for x in [a1,a2,b1, b2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()
            alist = sess.query(A).order_by(A.col1).all()
            eq_(
                [
                    A(), A(), B(col3='b1'), B(col3='b2')
                ],
                alist)
            for a in alist:
                sess.delete(a)
            sess.flush()
            # don't need to clear_mappers()
            del B
            del A
        metadata.create_all()
        try:
            go()
        finally:
            metadata.drop_all()
            assert_no_mappers()
    def test_with_manytomany(self):
        """Many-to-many mappings created per-iteration must not leak."""
        metadata = MetaData(self.engine)
        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30))
        )
        table2 = Table("mytable2", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)),
        )
        table3 = Table('t1tot2', metadata,
            Column('t1', Integer, ForeignKey('mytable.col1')),
            Column('t2', Integer, ForeignKey('mytable2.col1')),
        )
        @profile_memory()
        def go():
            class A(fixtures.ComparableEntity):
                pass
            class B(fixtures.ComparableEntity):
                pass
            mapper(A, table1, properties={
                'bs':relationship(B, secondary=table3,
                                  backref='as', order_by=table3.c.t1)
            })
            mapper(B, table2)
            sess = create_session()
            a1 = A(col2='a1')
            a2 = A(col2='a2')
            b1 = B(col2='b1')
            b2 = B(col2='b2')
            a1.bs.append(b1)
            a2.bs.append(b2)
            for x in [a1,a2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()
            alist = sess.query(A).order_by(A.col1).all()
            eq_(
                [
                    A(bs=[B(col2='b1')]), A(bs=[B(col2='b2')])
                ],
                alist)
            for a in alist:
                sess.delete(a)
            sess.flush()
            # don't need to clear_mappers()
            del B
            del A
        metadata.create_all()
        try:
            go()
        finally:
            metadata.drop_all()
            assert_no_mappers()
    @testing.provide_metadata
    def test_key_fallback_result(self):
        """Row lookups keyed by a non-matching Column must not leak.

        row[t.c.x] against an aliased select forces the result proxy's
        key-fallback path; repeated use must not grow memory.
        """
        e = self.engine
        m = self.metadata
        t = Table('t', m, Column('x', Integer), Column('y', Integer))
        m.create_all(e)
        e.execute(t.insert(), {"x":1, "y":1})
        @profile_memory()
        def go():
            r = e.execute(t.alias().select())
            for row in r:
                row[t.c.x]
        go()
    def test_many_discarded_relationships(self):
        """a use case that really isn't supported, nonetheless we can
        guard against memleaks here so why not

        Repeatedly adding relationships to a long-lived mapper, each
        pointing at a throwaway mapper, must not accumulate memory.
        """
        m1 = MetaData()
        t1 = Table('t1', m1, Column('id', Integer, primary_key=True))
        t2 = Table(
            't2', m1, Column('id', Integer, primary_key=True),
            Column('t1id', ForeignKey('t1.id')))
        class T1(object):
            pass
        t1_mapper = mapper(T1, t1)
        @testing.emits_warning()
        @profile_memory()
        def go():
            class T2(object):
                pass
            t2_mapper = mapper(T2, t2)
            t1_mapper.add_property("bar", relationship(t2_mapper))
            s1 = Session()
            # this causes the path_registry to be invoked
            s1.query(t1_mapper)._compile_context()
        go()
    # fails on newer versions of pysqlite due to unusual memory behvior
    # in pysqlite itself. background at:
    # http://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
    @testing.crashes('mysql+cymysql', 'blocking')
    def test_join_cache(self):
        """Joining against a fresh selectable each iteration must not leak."""
        metadata = MetaData(self.engine)
        table1 = Table('table1', metadata, Column('id', Integer,
                       primary_key=True,
                       test_needs_autoincrement=True), Column('data',
                       String(30)))
        table2 = Table('table2', metadata, Column('id', Integer,
                       primary_key=True,
                       test_needs_autoincrement=True), Column('data',
                       String(30)), Column('t1id', Integer,
                       ForeignKey('table1.id')))
        class Foo(object):
            pass
        class Bar(object):
            pass
        mapper(Foo, table1, properties={'bars'
               : relationship(mapper(Bar, table2))})
        metadata.create_all()
        session = sessionmaker()
        @profile_memory()
        def go():
            # a new select() per iteration defeats any join-condition cache
            s = table2.select()
            sess = session()
            sess.query(Foo).join((s, Foo.bars)).all()
            sess.rollback()
        try:
            go()
        finally:
            metadata.drop_all()
def test_type_compile(self):
from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect
cast = sa.cast(column('x'), sa.Integer)
@profile_memory()
def go():
dialect = SQLiteDialect()
cast.compile(dialect=dialect)
go()
    @testing.requires.cextensions
    def test_DecimalResultProcessor_init(self):
        """Constructing the C DecimalResultProcessor must not leak."""
        @profile_memory()
        def go():
            to_decimal_processor_factory({}, 10)
        go()
    @testing.requires.cextensions
    def test_DecimalResultProcessor_process(self):
        """Invoking the C decimal processor on a float must not leak."""
        @profile_memory()
        def go():
            to_decimal_processor_factory(decimal.Decimal, 10)(1.2)
        go()
    @testing.requires.cextensions
    def test_UnicodeResultProcessor_init(self):
        """Constructing the C UnicodeResultProcessor must not leak."""
        @profile_memory()
        def go():
            to_unicode_processor_factory('utf8')
        go()
| |
#!/usr/bin/env python
'''
Use matplotlib to generate performance charts
Copyright 2011 Joel Martin
Licensed under MPL-2.0 (see docs/LICENSE.MPL-2.0)
'''
# a bar plot with errorbars
import sys, json, pprint
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def usage():
    """Print command-line help plus the expected JSON schema, then exit(2).

    Fix: Python-2-only `print` statements (with stray C-style semicolons)
    replaced by single-argument print() calls, which behave identically on
    Python 2 and are valid Python 3.
    """
    print("%s json_file level1 level2 level3 [legend_height]\n\n" % sys.argv[0])
    print("Description:\n")
    print("level1, level2, and level3 are one each of the following:\n")
    print("  select=ITEM - select only ITEM at this level")
    print("  bar         - each item on this level becomes a graph bar")
    print("  group       - items on this level become groups of bars")
    print("\n")
    print("json_file is a file containing json data in the following format:\n")
    print('  {')
    print('    "conf": {')
    print('        "order_l1": [')
    print('            "level1_label1",')
    print('            "level1_label2",')
    print('            ...')
    print('        ],')
    print('        "order_l2": [')
    print('            "level2_label1",')
    print('            "level2_label2",')
    print('            ...')
    print('        ],')
    print('        "order_l3": [')
    print('            "level3_label1",')
    print('            "level3_label2",')
    print('            ...')
    print('        ]')
    print('    },')
    print('    "stats": {')
    print('        "level1_label1": {')
    print('            "level2_label1": {')
    print('                "level3_label1": [val1, val2, val3],')
    print('                "level3_label2": [val1, val2, val3],')
    print('                ...')
    print('            },')
    print('            "level2_label2": {')
    print('                ...')
    print('            },')
    print('        },')
    print('        "level1_label2": {')
    print('            ...')
    print('        },')
    print('        ...')
    print('    },')
    print('  }')
    sys.exit(2)
def error(msg):
    """Print *msg* and exit with status 1 (data/configuration error).

    Fix: Python-2-only `print` statement replaced by a print() call that
    works identically on Python 2 and 3.
    """
    print(msg)
    sys.exit(1)
#colors = ['#ff0000', '#0863e9', '#00f200', '#ffa100',
#          '#800000', '#805100', '#013075', '#007900']
# bar colors, cycled in order as bars are added
colors = ['#ff0000', '#00ff00', '#0000ff',
          '#dddd00', '#dd00dd', '#00dddd',
          '#dd6622', '#dd2266', '#66dd22',
          '#8844dd', '#44dd88', '#4488dd']

# positional args: json_file level1 level2 level3 [legend_height]
if len(sys.argv) < 5:
    usage()

filename = sys.argv[1]
L1 = sys.argv[2]
L2 = sys.argv[3]
L3 = sys.argv[4]

# optional vertical anchor for the legend (figure fraction)
if len(sys.argv) > 5:
    legendHeight = float(sys.argv[5])
else:
    legendHeight = 0.75
# Load the JSON data from the file.
# Fix: `json.loads(file(filename).read())` used the Python-2-only file()
# builtin and never closed the handle; `with open(...)` + json.load is
# equivalent, closes the file, and works on Python 2 and 3.
with open(filename) as json_file:
    data = json.load(json_file)

conf = data['conf']
stats = data['stats']

# Sanity check data hierarchy: every stats key at each level must be listed
# in the corresponding conf.order_lN array, and the counts must match.
if len(conf['order_l1']) != len(stats.keys()):
    error("conf.order_l1 does not match stats level 1")
for l1 in stats.keys():
    if len(conf['order_l2']) != len(stats[l1].keys()):
        error("conf.order_l2 does not match stats level 2 for %s" % l1)
    if conf['order_l1'].count(l1) < 1:
        error("%s not found in conf.order_l1" % l1)
    for l2 in stats[l1].keys():
        if len(conf['order_l3']) != len(stats[l1][l2].keys()):
            error("conf.order_l3 does not match stats level 3")
        if conf['order_l2'].count(l2) < 1:
            error("%s not found in conf.order_l2" % l2)
        for l3 in stats[l1][l2].keys():
            if conf['order_l3'].count(l3) < 1:
                error("%s not found in conf.order_l3" % l3)
#
# Generate the data based on the level specifications
#
bar_labels = None
group_labels = None
bar_vals = []
bar_sdvs = []
# Exactly one of the three levels must carry a "select=..." filter; the
# other two levels become the bar and group dimensions.  Each branch pivots
# stats accordingly and reduces each sample list to mean / std-dev.
if L3.startswith("select="):
    select_label = l3 = L3.split("=")[1]
    bar_labels = conf['order_l1']
    group_labels = conf['order_l2']
    bar_vals = [[0]*len(group_labels) for i in bar_labels]
    bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
    for b in range(len(bar_labels)):
        l1 = bar_labels[b]
        for g in range(len(group_labels)):
            l2 = group_labels[g]
            bar_vals[b][g] = np.mean(stats[l1][l2][l3])
            bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L2.startswith("select="):
    select_label = l2 = L2.split("=")[1]
    bar_labels = conf['order_l1']
    group_labels = conf['order_l3']
    bar_vals = [[0]*len(group_labels) for i in bar_labels]
    bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
    for b in range(len(bar_labels)):
        l1 = bar_labels[b]
        for g in range(len(group_labels)):
            l3 = group_labels[g]
            bar_vals[b][g] = np.mean(stats[l1][l2][l3])
            bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L1.startswith("select="):
    select_label = l1 = L1.split("=")[1]
    bar_labels = conf['order_l2']
    group_labels = conf['order_l3']
    bar_vals = [[0]*len(group_labels) for i in bar_labels]
    bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
    for b in range(len(bar_labels)):
        l2 = bar_labels[b]
        for g in range(len(group_labels)):
            l3 = group_labels[g]
            bar_vals[b][g] = np.mean(stats[l1][l2][l3])
            bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
else:
    usage()

# If group is before bar then flip (zip) the data
# NOTE(review): relies on Python 2 zip() returning an indexable list;
# under Python 3 this would need list(zip(...)).
if [L1, L2, L3].index("group") < [L1, L2, L3].index("bar"):
    bar_labels, group_labels = group_labels, bar_labels
    bar_vals = zip(*bar_vals)
    bar_sdvs = zip(*bar_sdvs)

print "bar_vals:", bar_vals
#
# Now render the bar graph
#
ind = np.arange(len(group_labels))  # the x locations for the groups
width = 0.8 * (1.0/len(bar_labels)) # the width of the bars

fig = plt.figure(figsize=(10,6), dpi=80)
plot = fig.add_subplot(1, 1, 1)

# one bar series per bar label, offset within each group, with error bars
rects = []
for i in range(len(bar_vals)):
    rects.append(plot.bar(ind+width*i, bar_vals[i], width, color=colors[i],
                 yerr=bar_sdvs[i], align='center'))

# add axis labels, title, tick labels and legend
plot.set_ylabel('Milliseconds (less is better)')
plot.set_title("Javascript array test: %s" % select_label)
plot.set_xticks(ind+width)
plot.set_xticklabels( group_labels )

fontP = FontProperties()
fontP.set_size('small')
plot.legend( [r[0] for r in rects], bar_labels, prop=fontP,
            loc = 'center right', bbox_to_anchor = (1.0, legendHeight))
def autolabel(rects):
    """Attach a numeric text label just above each bar in *rects*."""
    for rect in rects:
        height = rect.get_height()
        if np.isnan(height):
            # NaN means no data for this bar; label it as zero
            height = 0.0
        plot.text(rect.get_x()+rect.get_width()/2., height+20, '%d'%int(height),
                  ha='center', va='bottom', size='7')
# label every bar series
for rect in rects:
    autolabel(rect)

# Adjust axis sizes
axis = list(plot.axis())
axis[0] = -width     # Make sure left side has enough for bar
#axis[1] = axis[1] * 1.20 # Add 20% to the right to make sure it fits
axis[2] = 0          # Make y-axis start at 0
axis[3] = axis[3] * 1.10 # Add 10% to the top
plot.axis(axis)

plt.show()
| |
#!/usr/bin/python
# Nicolas Seriot
# 2011-01-06 -> 2011-12-16
# https://github.com/nst/objc_dep/
"""
Input: path of an Objective-C project
Output: import dependencies Graphviz format
Typical usage: $ python objc_dep.py /path/to/project [-x regex] [-i subfolder [subfolder ...]] > graph.dot
The .dot file can be opened with Graphviz or OmniGraffle.
- red arrows: .pch imports
- blue arrows: two ways imports
"""
import sys
import os
from sets import Set
import re
from os.path import basename
import argparse
# matches `#import "X.h"` / `#include "X.h"` lines; captures the header basename
regex_import = re.compile("^#(import|include) \"(?P<filename>\S*)\.h")
def gen_filenames_imported_in_file(path, regex_exclude):
    """Yield the header basenames imported by the file at *path*.

    Names matched by *regex_exclude* (if not None) are skipped.

    Fix: the file handle from open() was never closed; a with-block closes
    it deterministically when the generator is exhausted or discarded.
    """
    with open(path) as source:
        for line in source:
            results = re.search(regex_import, line)
            if results:
                filename = results.group('filename')
                if regex_exclude is not None and regex_exclude.search(filename):
                    continue
                yield filename
def dependencies_in_project(path, ext, exclude, ignore):
    """Walk *path* and map each *ext* file's basename to the Set of basenames
    it imports.

    exclude -- optional regex source; matching module names are dropped
    ignore  -- optional list of subfolder names pruned from the walk
    Imports to/from Objective-C categories ('+' in the name) are skipped.
    """
    d = {}
    regex_exclude = None
    if exclude:
        regex_exclude = re.compile(exclude)
    for root, dirs, files in os.walk(path):
        if ignore:
            for subfolder in ignore:
                if subfolder in dirs:
                    # pruning dirs in-place prevents os.walk from descending
                    dirs.remove(subfolder)
        objc_files = (f for f in files if f.endswith(ext))
        for f in objc_files:
            filename = os.path.splitext(f)[0]
            if regex_exclude is not None and regex_exclude.search(filename):
                continue
            if filename not in d:
                d[filename] = Set()
            # NOTE: rebinds the `path` parameter; harmless since os.walk
            # has already captured the original value
            path = os.path.join(root, f)
            for imported_filename in gen_filenames_imported_in_file(path, regex_exclude):
                if imported_filename != filename and '+' not in imported_filename and '+' not in filename:
                    d[filename].add(imported_filename)
    return d
def dependencies_in_project_with_file_extensions(path, exts, exclude, ignore):
    """Merge per-extension dependency dicts for every extension in *exts*.

    Returns one dict mapping file basename -> set of imported basenames.

    Fix: Python-2-only d2.iteritems() replaced by .items(), the deprecated
    sets.Set by the builtin set (interchangeable for union/membership), and
    `not k in d` by the idiomatic `k not in d`.
    """
    d = {}
    for ext in exts:
        d2 = dependencies_in_project(path, ext, exclude, ignore)
        for (k, v) in d2.items():
            if k not in d:
                d[k] = set()
            d[k] = d[k].union(v)
    return d
def two_ways_dependencies(d):
    """Return the set of (a, b) pairs where a imports b AND b imports a.

    d maps a name to the collection of names it imports, e.g.
    {'a1': ['b1', 'b2'], 'a2': ['b1', 'b3', 'b4'], ...}.  Each mutual pair
    is reported exactly once, in whichever orientation is seen first.

    Fix: Python-2-only .iteritems() replaced by .items() and the deprecated
    sets.Set by the builtin set; behavior is unchanged.
    """
    two_ways = set()
    for a, imported in d.items():
        for b in imported:
            if b in d and a in d[b] and a != b:
                # record each mutual pair only once
                if (a, b) not in two_ways and (b, a) not in two_ways:
                    two_ways.add((a, b))
    return two_ways
def untraversed_files(d):
    """Return names that are imported somewhere but have no entry in *d*.

    These are headers referenced by scanned files but never parsed
    themselves (e.g. system or third-party headers outside the tree).

    Fix: Python-2-only .iteritems() replaced by .values() (only the
    dependency collections are used) and sets.Set by the builtin set.
    """
    dead_ends = set()
    for dependencies in d.values():
        for name in dependencies:
            if name not in d:
                dead_ends.add(name)
    return dead_ends
def category_files(d):
    """Split Objective-C categories out of a dependency dict.

    Keys containing '+' that have no recorded dependencies are treated as
    categories.  Returns (category_names, remaining_dict).

    Fix: Python-2-only .iteritems() replaced by .items(); behavior
    unchanged.
    """
    d2 = {}
    l = []
    for k, v in d.items():
        if not v and '+' in k:
            l.append(k)
        else:
            d2[k] = v
    return l, d2
def referenced_classes_from_dict(d):
    """Invert a dependency dict: map each imported name to its importers.

    Fix: Python-2-only .iteritems() replaced by .items() and the deprecated
    sets.Set by the builtin set; setdefault().add() fused into one line.
    """
    d2 = {}
    for k, deps in d.items():
        for x in deps:
            d2.setdefault(x, set()).add(k)
    return d2
def print_frequencies_chart(d):
    """Write a two-part histogram of import counts to stderr.

    Part one draws an asterisk bar per import count; part two lists the
    sorted names having each count.  No output for an empty dict.

    Fix: Python-2-only map(lambda ...) over .itervalues() replaced by a
    list comprehension over .values(), .iteritems() by .items(), and
    sets.Set by the builtin set; output is byte-identical.
    """
    lengths = [len(v) for v in d.values()]
    if not lengths:
        return
    max_length = max(lengths)
    for i in range(0, max_length+1):
        s = "%2d | %s\n" % (i, '*'*lengths.count(i))
        sys.stderr.write(s)
    sys.stderr.write("\n")
    # bucket the names by how many imports each has
    l = [set() for i in range(max_length+1)]
    for k, v in d.items():
        l[len(v)].add(k)
    for i in range(0, max_length+1):
        s = "%2d | %s\n" % (i, ", ".join(sorted(list(l[i]))))
        sys.stderr.write(s)
def dependencies_in_dot_format(path, exclude, ignore):
    """Build the Graphviz .dot text for the project at *path*.

    Also writes two frequency histograms to stderr.  Red edges mark .pch
    imports, blue double-headed edges mark mutual imports, gray dashed
    nodes mark headers that were referenced but never parsed.

    Fix: Python-2-only .iteritems() calls replaced by .items(); behavior
    unchanged.
    """
    d = dependencies_in_project_with_file_extensions(path, ['.h', '.hpp', '.m', '.mm', '.c', '.cc', '.cpp'], exclude, ignore)

    two_ways_set = two_ways_dependencies(d)
    untraversed_set = untraversed_files(d)

    category_list, d = category_files(d)

    pch_set = dependencies_in_project(path, '.pch', exclude, ignore)

    #
    sys.stderr.write("# number of imports\n\n")
    print_frequencies_chart(d)

    sys.stderr.write("\n# times the class is imported\n\n")
    d2 = referenced_classes_from_dict(d)
    print_frequencies_chart(d2)

    #
    l = []
    l.append("digraph G {")
    l.append("\tnode [shape=box];")

    # one-way edges (mutual pairs are emitted separately in blue)
    for k, deps in d.items():
        if deps:
            deps.discard(k)
        if len(deps) == 0:
            l.append("\t\"%s\" -> {};" % (k))
        for k2 in deps:
            if not ((k, k2) in two_ways_set or (k2, k) in two_ways_set):
                l.append("\t\"%s\" -> \"%s\";" % (k, k2))

    l.append("\t")
    # .pch imports in red
    for (k, v) in pch_set.items():
        l.append("\t\"%s\" [color=red];" % k)
        for x in v:
            l.append("\t\"%s\" -> \"%s\" [color=red];" % (k, x))

    l.append("\t")
    l.append("\tedge [color=blue, dir=both];")
    # mutual imports in blue, double-headed
    for (k, k2) in two_ways_set:
        l.append("\t\"%s\" -> \"%s\";" % (k, k2))

    # referenced-but-unparsed headers, grayed out
    for k in untraversed_set:
        l.append("\t\"%s\" [color=gray, style=dashed, fontcolor=gray]" % k)

    if category_list:
        l.append("\t")
        l.append("\tedge [color=black];")
        l.append("\tnode [shape=plaintext];")
        l.append("\t\"Categories\" [label=\"%s\"];" % "\\n".join(category_list))

    if ignore:
        l.append("\t")
        l.append("\tnode [shape=box, color=blue];")
        l.append("\t\"Ignored\" [label=\"%s\"];" % "\\n".join(ignore))

    l.append("}\n")

    return '\n'.join(l)
def main():
    """Parse CLI arguments and print the dependency graph in DOT format.

    Fix: Python-2-only `print` statement replaced by a single-argument
    print() call, valid on Python 2 and 3 alike.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("project_path", help="path to folder hierarchy containing Objective-C files")
    parser.add_argument("-x", "--exclude", nargs='?', default='', help="regular expression of substrings to exclude from module names")
    parser.add_argument("-i", "--ignore", nargs='*', help="list of subfolder names to ignore")
    args = parser.parse_args()
    print(dependencies_in_dot_format(args.project_path, args.exclude, args.ignore))

if __name__=='__main__':
    main()
| |
import sqlite3
import time
import errno
from json import dumps
import logging
from abc import ABCMeta, abstractmethod
from genericpath import exists
from os import makedirs, sep
logger = logging.getLogger(__name__)
_POSTS_CREATE_TABLE = '''CREATE TABLE IF NOT EXISTS posts(
no INTEGER PRIMARY KEY,
resto INTEGER,
sticky INTEGER,
closed INTEGER,
archived INTEGER,
now TEXT,
time INTEGER,
name TEXT,
trip TEXT,
id TEXT,
capcode TEXT,
country TEXT,
country_name TEXT,
sub TEXT,
com TEXT,
tim INTEGER,
filename TEXT,
ext TEXT,
fsize INTEGER,
md5 TEXT,
w INTEGER,
h INTEGER,
tn_w INTEGER,
tn_h INTEGER,
filedeleted INTEGER,
spoiler INTEGER,
custom_spoiler INTEGER,
omitted_posts INTEGER,
omitted_images INTEGER,
replies INTEGER,
images INTEGER,
bumplimit INTEGER,
imagelimit INTEGER,
capcode_replies TEXT,
last_modified INTEGER,
tag TEXT,
semantic_url TEXT
);'''
_POSTS_INSERT = 'INSERT INTO posts VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
def _get_date_string():
return time.strftime('%Y%m%d')
class Handler(object):
    """Abstract base class for consumers of scraped 4chan thread events."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def post(self, thread_id, new_post):
        """Handle a new post.

        thread_id -- the thread's ID
        new_post -- the new post dict that has just arrived. Check 4chan API
            (https://github.com/4chan/4chan-API) for full possible contents,
            but most notable keys are
                no : post number
                resto : number of post that this post is a response to
                time : a UNIX timestamp of post's time
                com : the text, contains escaped HTML
        """
        raise NotImplementedError('Implement post(thread_id, new_post) in subclass')

    @abstractmethod
    def pruned(self, thread_id):
        """Handles thread pruning.

        thread_id -- the thread ID that was pruned from 4chan
        """
        raise NotImplementedError('Implement pruned(thread_id) in subclass')

    @abstractmethod
    def img(self, thread_id, filename, data):
        """Handles image downloads.

        thread_id -- ID of thread in which image was posted
        filename -- the image's filename with extensions
        data -- bytes, image content
        """
        raise NotImplementedError('Implement img(thread_id, filename, data) in subclass')

    @abstractmethod
    def download_img(self, thread_id, filename):
        """Checks whether the image needs to be downloaded.

        Needed to avoid downloading the same image multiple times.
        Prime use-case is resumed operation.

        thread_id -- the thread ID for thread containing the image
        filename -- the image's file name
        """
        raise NotImplementedError('Implement download_img(thread_id, filename) in subclass')
class SQLitePersister(object):
    """Persists 4chan post dicts into a single SQLite database file."""

    def __init__(self, db_path):
        # one connection/cursor pair per persister; not shared across threads
        self.connection = sqlite3.connect(db_path)
        self.cursor = self.connection.cursor()
        self._init_db()

    def _init_db(self):
        """Create the posts table if this is a fresh database."""
        self.cursor.execute(_POSTS_CREATE_TABLE)
        self.connection.commit()

    @staticmethod
    def _post_to_data_tuple(post):
        """Map a post dict onto the column order of the posts table.

        Missing keys become NULL; capcode_replies is serialized to JSON text
        when present.
        """
        return (
            post.get('no'),
            post.get('resto'),
            post.get('sticky'),
            post.get('closed'),
            post.get('archived'),
            post.get('now'),
            post.get('time'),
            post.get('name'),
            post.get('trip'),
            post.get('id'),
            post.get('capcode'),
            post.get('country'),
            post.get('country_name'),
            post.get('sub'),
            post.get('com'),
            post.get('tim'),
            post.get('filename'),
            post.get('ext'),
            post.get('fsize'),
            post.get('md5'),
            post.get('w'),
            post.get('h'),
            post.get('tn_w'),
            post.get('tn_h'),
            post.get('filedeleted'),
            post.get('spoiler'),
            post.get('custom_spoiler'),
            post.get('omitted_posts'),
            post.get('omitted_images'),
            post.get('replies'),
            post.get('images'),
            post.get('bumplimit'),
            post.get('imagelimit'),
            dumps(post['capcode_replies']) if post.get('capcode_replies') else None,
            post.get('last_modified'),
            post.get('tag'),
            post.get('semantic_url'),
        )

    def persist(self, posts):
        """Insert a single post dict, or any iterable of post dicts.

        Duplicate post numbers (IntegrityError on the primary key) are
        deliberately ignored so re-scraping a thread is a no-op; other
        SQLite errors are logged and swallowed.
        """
        if isinstance(posts, dict):
            # a bare dict is one post, not an iterable of posts
            _posts = [posts]
        else:
            try:
                iter(posts)
            except TypeError:
                _posts = [posts]
            else:
                _posts = posts
        # list comprehension instead of map(): identical result, and
        # eagerly materialized on Python 3 as well
        data = [self._post_to_data_tuple(post) for post in _posts]
        try:
            self.cursor.executemany(_POSTS_INSERT, data)
            self.connection.commit()
        except sqlite3.IntegrityError:
            # post(s) already archived -- best effort, skip silently
            pass
        except sqlite3.Error:
            logger.exception('SQLite error')

    def close(self):
        """Flush pending writes and close the connection."""
        self.connection.commit()
        self.connection.close()
class RotatingSQLitePostPersister(object):
    """Persists posts via SQLitePersister, rotating to a new DB file daily."""

    def __init__(self, root_dir):
        if root_dir.endswith(sep):
            # BUGFIX: was `root_dir[:len(sep)]`, which kept only the FIRST
            # len(sep) characters of the path instead of stripping the
            # trailing separator.
            root_dir = root_dir[:-len(sep)]
        try:
            makedirs(root_dir)
        except OSError as e:
            # an already-existing directory is fine; anything else is fatal
            if e.errno != errno.EEXIST:
                raise
        self.curr_date_str = _get_date_string()
        self.root_dir = root_dir
        self.db = SQLitePersister(self._make_db_path())

    def _make_db_path(self):
        """Return the DB file path for the current date, e.g. root/20200101.db."""
        return '{root}{sep}{date_str}.db'.format(root=self.root_dir, sep=sep, date_str=self.curr_date_str)

    def _rotate_db_if_new_day(self):
        # compare against the cached date; on change, close the old DB and
        # open today's file
        date_str = _get_date_string()
        if self.curr_date_str != date_str:
            self.curr_date_str = date_str
            self.db.close()
            self.db = SQLitePersister(self._make_db_path())

    def persist_post(self, post):
        """Persist one post, rotating the backing DB at day boundaries."""
        self._rotate_db_if_new_day()
        self.db.persist(post)
class SQLiteHandler(Handler):
    """Handler that writes posts to an SQLite DB. Doesn't handle images.

    Posts are persisted as they come in.
    """
    def __init__(self, root_dir):
        # storage and daily file rotation are delegated to the persister
        self.persister = RotatingSQLitePostPersister(root_dir)

    def pruned(self, thread_id):
        # nothing to flush: posts were already persisted on arrival
        pass

    def post(self, thread_id, new_post):
        self.persister.persist_post(new_post)

    def download_img(self, thread_id, filename):
        raise TypeError('SQLiteHandler does not support images')

    def img(self, thread_id, filename, data):
        raise TypeError('SQLiteHandler does not support images')
class FileHandler(Handler):
    """Handler that writes things to the file system.

    Threads are held in-memory and purged to disk (as JSON) when they get
    pruned from 4chan. Directory layout:
    root/date/thread_id/[thread_id.json | thread images].
    """

    def __init__(self, file_root):
        """Makes a new FileHandler.

        file_root -- the directory to which data will be purged. If it does
        not exist, it will be created.
        """
        if file_root.endswith(sep):
            # BUGFIX: was `file_root[:len(sep)]`, which truncated the whole
            # path to its first character(s) instead of stripping the
            # trailing separator.
            file_root = file_root[:-len(sep)]
        self._file_root = file_root
        try:
            makedirs(self._file_root)
        except OSError as e:
            # already existing is fine; re-raise anything else
            if e.errno != errno.EEXIST:
                raise e
        self._active_threads = dict()   # thread_id -> list of buffered post dicts
        self._thread_roots = dict()     # thread_id -> directory for that thread

    def _get_thread_root(self, thread_id):
        """Return the directory for a thread's artifacts.

        The date component is computed on first sight; post() caches the
        result in _thread_roots so a thread spanning midnight keeps a
        single directory.
        """
        if thread_id not in self._thread_roots:
            return '{file_root}{sep}{date}{sep}{thread_id}'.format(
                file_root=self._file_root,
                thread_id=thread_id,
                date=_get_date_string(),
                sep=sep
            )
        else:
            return self._thread_roots[thread_id]

    def post(self, thread_id, new_post):
        """Buffer a post in memory, creating the thread directory on first sight."""
        if thread_id not in self._active_threads:
            thread_file_root = self._get_thread_root(thread_id)
            try:
                makedirs(thread_file_root)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise e
            self._active_threads[thread_id] = []
            self._thread_roots[thread_id] = thread_file_root
        self._active_threads[thread_id].append(new_post)

    def pruned(self, thread_id):
        """Handles thread pruning, writes content to JSON on disk.

        thread_id -- the thread ID that was pruned from 4chan
        """
        # this is necessary for the edge-case when the thread was pruned
        # between seeing it in the main list and fetching its content json
        if thread_id in self._active_threads:
            filename = '{thread_root}{sep}{thread_id}.json'.format(
                thread_root=self._get_thread_root(thread_id),
                thread_id=thread_id,
                sep=sep
            )
            with open(filename, 'w') as f:
                f.write(dumps({'posts': self._active_threads[thread_id]}))
            del self._active_threads[thread_id]
            del self._thread_roots[thread_id]

    def img(self, thread_id, filename, data):
        """Handles image downloads, writes content to disk in thread_id directory.

        thread_id -- ID of thread in which image was posted
        filename -- the image's filename with extensions
        data -- bytes, image content
        """
        full_filename = '{thread_root}{sep}{img_filename}'.format(
            thread_root=self._get_thread_root(thread_id),
            img_filename=filename,
            sep=sep
        )
        # BUGFIX: open in binary mode -- 'w' breaks for bytes content on
        # Python 3 and mangles binary data on Windows.
        with open(full_filename, 'wb') as f:
            f.write(data)

    def download_img(self, thread_id, filename):
        """Checks whether the image already exists.

        Needed to avoid downloading the same image multiple times.
        Prime use-case is resumed operation.

        thread_id -- the thread ID for thread containing the image
        filename -- the image's file name
        """
        # BUGFIX: interpolate the actual file name into the path; the
        # template previously ignored the `filename` keyword it was passed.
        # NOTE(review): this path omits the date directory that img() writes
        # under -- confirm whether resumed runs should check the dated path
        # via _get_thread_root() instead.
        return not exists('{file_root}{sep}{thread_id}{sep}{filename}'.format(
            file_root=self._file_root,
            thread_id=thread_id,
            filename=filename,
            sep=sep
        ))
| |
import isodate
from lxml import etree
from ulmo import util
def parse_site_values(content_io, namespace, query_isodate=None):
    """parses values out of a waterml file; content_io should be a file-like
    object

    Returns a dict keyed by variable code (plus ':statistic' suffix when
    present), each entry holding the site info, parsed values, the variable
    dict, and any per-timeseries metadata collections found.

    Fix: `len(filter(lambda x: len(x), collection))` replaced by
    `any(collection)` -- identical truthiness (true iff some element dict is
    non-empty) and it also works on Python 3, where filter() is lazy and
    has no len().
    """
    data_dict = {}
    metadata_elements = [
        # (element name, name of collection,
        #  key from element dict to use as for a key in the collections dict)
        ('censorCode', 'censor_codes', 'censor_code'),
        ('method', 'methods', 'id'),
        ('offset', 'offsets', 'id'),
        ('qualifier', 'qualifiers', 'id'),
        ('qualityControlLevel', 'quality_control_levels', 'id'),
        ('source', 'sources', 'id')
    ]
    for (event, ele) in etree.iterparse(content_io):
        if ele.tag == namespace + "timeSeries":
            source_info_element = ele.find(namespace + 'sourceInfo')
            site_info = _parse_site_info(source_info_element, namespace)
            values_element = ele.find(namespace + 'values')
            values = _parse_values(values_element, namespace)
            var_element = ele.find(namespace + 'variable')
            variable = _parse_variable(var_element, namespace)
            code = variable['code']
            if 'statistic' in variable:
                code += ":" + variable['statistic']['code']
            data_dict[code] = {
                'site': site_info,
                'values': values,
                'variable': variable,
            }
            for tag, collection_name, key in metadata_elements:
                underscored_tag = util.camel_to_underscore(tag)
                collection = [
                    _scrub_prefix(_element_dict(element, namespace),
                                  underscored_tag)
                    for element in values_element.findall(namespace + tag)
                ]
                # only attach the collection if some element is non-empty
                if any(collection):
                    collection_dict = dict([
                        (item[key], item)
                        for item in collection
                    ])
                    data_dict[code][collection_name] = collection_dict
            if query_isodate:
                data_dict[code]['last_refresh'] = query_isodate
    return data_dict
def parse_site_infos(content_io, namespace, site_info_names):
    """Collect site information from each site info element in a
    WaterML file; content_io should be a file-like object.

    Returns a dict keyed by site code; later duplicates overwrite
    earlier ones.
    """
    site_infos = {}
    for info_name in site_info_names:
        # iterparse consumes the stream, so rewind for each element name
        content_io.seek(0)
        for _event, element in etree.iterparse(content_io):
            if element.tag != namespace + info_name:
                continue
            info = _parse_site_info(element, namespace)
            site_infos[info['code']] = info
    return site_infos
def parse_sites(content_io, namespace):
    """Parse site elements (including seriesCatalogs) out of a WaterML
    file; content_io should be a file-like object.

    Returns a dict keyed by site code.
    """
    content_io.seek(0)
    sites = {}
    for _event, element in etree.iterparse(content_io):
        if element.tag == namespace + 'site':
            parsed = _parse_site(element, namespace)
            sites[parsed['code']] = parsed
    return sites
def parse_variables(content_io, namespace):
    """Parse variable elements out of a WaterML file; content_io should
    be a file-like object.

    Returns a dict keyed by variable code.
    """
    content_io.seek(0)
    variables = {}
    for _event, element in etree.iterparse(content_io):
        if element.tag == namespace + 'variable':
            parsed = _parse_variable(element, namespace)
            variables[parsed['code']] = parsed
    return variables
def _element_dict(element, exclude_children=None, prepend_attributes=True):
    """converts an element to a dict representation with camelCase tag
    names and attributes converted to underscores; this is a generic
    converter for cases where special parsing isn't necessary. In most
    cases you will want to update with this dict. If prepend_attributes
    is True (default), then attribute keys will be prepended with the
    parent element's underscored tag name.

    element -- an lxml etree element (or None, yielding {})
    exclude_children -- child tag names (namespace stripped) to skip

    Note: does not handle sibling elements
    """
    if element is None:
        return {}
    if exclude_children is None:
        exclude_children = []
    element_dict = {}
    element_name = util.camel_to_underscore(element.tag.split('}')[-1])
    # leaf elements contribute their text under their own name
    if len(element) == 0 and not element.text is None:
        element_dict[element_name] = element.text
    # .items() instead of .iteritems(): the latter does not exist on
    # Python 3 dicts; schema bookkeeping attributes (xsd/xsi) are skipped
    element_dict.update(dict([
        (_element_dict_attribute_name(key, element_name,
                                      prepend_element_name=prepend_attributes), value)
        for key, value in element.attrib.items()
        if value.split(':')[0] not in ['xsd', 'xsi']
    ]))
    # recurse into children, merging their dicts into this one
    for child in element.iterchildren():
        if not child.tag.split('}')[-1] in exclude_children:
            element_dict.update(_element_dict(child))
    return element_dict
def _element_dict_attribute_name(attribute_name, element_name,
                                 prepend_element_name=True):
    """Build the dict key for an attribute: the underscored attribute
    name, prefixed with the element name unless it already starts with
    it or prefixing is disabled.
    """
    attr = util.camel_to_underscore(attribute_name.split('}')[-1])
    if not prepend_element_name or attr.startswith(element_name):
        return attr
    return element_name + '_' + attr
def _find_unit(element, namespace):
unit_element = element.find(namespace + 'unit')
if unit_element is None:
unit_element = element.find(namespace + 'units')
return unit_element
def _parse_datetime(datetime_str):
    """Return an ISO 8601 datetime string with fractional seconds
    dropped; USGS returns fractions of a second which are usually all
    0s. ISO 8601 does not limit the number of decimal places but we
    have to cut them off at some point.
    """
    #XXX: this could be sped up if need be
    #XXX: also, we need to document that we are throwing away fractions
    #     of seconds
    parsed = isodate.parse_datetime(datetime_str)
    return isodate.datetime_isoformat(parsed)
def _parse_geog_location(geog_location, namespace):
"""returns a dict representation of a geogLocation etree element"""
return_dict = {
'latitude': geog_location.find(namespace + 'latitude').text,
'longitude': geog_location.find(namespace + 'longitude').text,
}
srs = geog_location.attrib.get('srs')
if not srs is None:
return_dict['srs'] = srs
return return_dict
def _parse_method(method, namespace):
    """Return a dict representation of a method element.

    Note: _element_dict's signature is (element, exclude_children,
    prepend_attributes); the old code passed `namespace` positionally
    into exclude_children, which silently performed substring membership
    tests against the namespace URI. Pass prepend_attributes by keyword
    and drop the bogus argument.
    """
    return _element_dict(method, prepend_attributes=False)
def _parse_series(series, namespace):
    """Return a dict representation of a series element: its parsed
    variable plus any method/source/quality-control/time-interval
    metadata children (both WaterML 1.0 and 1.1 capitalizations).
    """
    include_elements = [
        'method',
        'Method',
        'source',
        'Source',
        'QualityControlLevel',
        'qualityControlLevel',
        'variableTimeInterval',
        'valueCount',
    ]
    variable_element = series.find(namespace + 'variable')
    series_dict = {
        'variable': _parse_variable(variable_element, namespace),
    }
    for tag in include_elements:
        child = series.find(namespace + tag)
        if child is None:
            continue
        # key is derived from the full tag (namespace included), matching
        # the prefix that _scrub_prefix strips from the child's dict
        name = util.camel_to_underscore(child.tag)
        series_dict[name] = _scrub_prefix(_element_dict(child), name)
    return series_dict
def _parse_site(site, namespace):
    """Return a dict representation of a site element: its parsed
    siteInfo plus a 'series' list built from all series descendants.
    """
    info_element = site.find(namespace + 'siteInfo')
    site_dict = _parse_site_info(info_element, namespace)
    site_dict['series'] = [
        _parse_series(series_element, namespace)
        for series_element in site.iter(namespace + 'series')
    ]
    return site_dict
def _parse_site_info(site_info, namespace):
    """Return a dict representation of a siteInfo etree element,
    covering both WaterML 1.0 (notes) and 1.1 (siteProperty) metadata.
    """
    code_element = site_info.find(namespace + "siteCode")
    info = {
        'code': code_element.text,
        'name': site_info.find(namespace + "siteName").text,
        'network': code_element.attrib.get('network'),
    }
    agency = code_element.attrib.get('agencyCode')
    if agency:
        info['agency'] = agency
    # geogLocation is nested inside geoLocation
    geog_path = namespace.join(["", "geoLocation/", "geogLocation"])
    geog_location = site_info.find(geog_path)
    if geog_location is not None:
        info['location'] = _parse_geog_location(geog_location, namespace)
    timezone_info = site_info.find(namespace + "timeZoneInfo")
    if timezone_info is not None:
        info['timezone_info'] = _parse_timezone_info(timezone_info, namespace)
    elevation = site_info.find(namespace + 'elevation_m')
    if elevation is not None:
        info['elevation_m'] = elevation.text
    # WaterML 1.0 notes: keyed by the note title, camelCase collapsed
    notes = {}
    for note in site_info.findall(namespace + 'note'):
        note_key = util.camel_to_underscore(note.attrib['title'].replace(' ', ''))
        notes[note_key] = note.text
    if notes:
        info['notes'] = notes
    # WaterML 1.1 siteProperties: keyed by the property name
    properties = {}
    for prop in site_info.findall(namespace + 'siteProperty'):
        prop_key = util.camel_to_underscore(prop.attrib['name'].replace(' ', ''))
        properties[prop_key] = prop.text
    if properties:
        info['site_property'] = properties
    return info
def _parse_timezone_element(timezone_element):
"""returns a dict representation of a timezone etree element (either
defaultTimeZone or daylightSavingsTimeZone)
"""
return {
'abbreviation': timezone_element.attrib.get('zoneAbbreviation'),
'offset': timezone_element.attrib.get('zoneOffset'),
}
def _parse_timezone_info(timezone_info, namespace):
    """Return a dict representation of a timeZoneInfo etree element:
    'uses_dst' flag, 'default_tz' and (when present) 'dst_tz'.
    """
    uses_dst = timezone_info.attrib.get('siteUsesDaylightSavingsTime', "false")
    info = {'uses_dst': uses_dst == "true"}
    dst_element = timezone_info.find(namespace + 'daylightSavingsTimeZone')
    if dst_element is not None:
        info['dst_tz'] = _parse_timezone_element(dst_element)
    default_element = timezone_info.find(namespace + 'defaultTimeZone')
    info['default_tz'] = _parse_timezone_element(default_element)
    return info
def _parse_time_info(time_info_element, namespace):
    """returns a dict that represents a parsed WOF 1.0 timeSupport or
    WOF 1.1 timeScale element

    Keys: 'is_regular' (bool when recognizably true/false, else the raw
    attribute value), 'interval' and 'units' when present.
    """
    return_dict = {}
    is_regular = time_info_element.attrib.get('isRegular')
    if is_regular is not None:
        if is_regular.lower() == 'true':
            is_regular = True
        elif is_regular.lower() == 'false':
            is_regular = False
        return_dict['is_regular'] = is_regular
    # the interval element is named differently in WaterML 1.0 and 1.1
    if '1.0' in namespace:
        interval_tag = 'timeInterval'
    elif '1.1' in namespace:
        interval_tag = 'timeSupport'
    else:
        # unknown schema version: the old code left interval_tag unbound
        # and raised NameError on the find() below; skip the lookup instead
        interval_tag = None
    if interval_tag is not None:
        interval_element = time_info_element.find(namespace + interval_tag)
        if interval_element is not None:
            return_dict['interval'] = interval_element.text
    unit_element = _find_unit(time_info_element, namespace)
    if unit_element is not None:
        return_dict['units'] = _parse_unit(unit_element, namespace)
    return return_dict
def _parse_unit(unit_element, namespace):
    """Return a dict of the values for a given unit or units element,
    stripping the tag-name prefix from the keys of interest.
    """
    unit_dict = _element_dict(unit_element)
    tag_name = unit_element.tag.split('}')[-1]
    return_dict = {}
    # WaterML 1.0 carries the unit name in the element text
    if '1.0' in namespace:
        return_dict['name'] = unit_element.text
    for key in ('abbreviation', 'code', 'name', 'type'):
        prefixed = '{}_{}'.format(tag_name, key)
        if prefixed in unit_dict:
            return_dict[key] = unit_dict[prefixed]
    return return_dict
def _parse_value(value_element, namespace):
    """Return a dict for a single value element, normalizing its
    dateTime attribute into an ISO 8601 'datetime' key.
    """
    value_dict = _element_dict(value_element, prepend_attributes=False)
    value_dict['datetime'] = _parse_datetime(value_dict.pop('date_time'))
    return value_dict
def _parse_values(values_element, namespace):
    """Return a list of dicts, one per value child of the given etree
    values element.
    """
    parsed = []
    for value in values_element.findall(namespace + 'value'):
        parsed.append(_parse_value(value, namespace))
    return parsed
def _parse_variable(variable_element, namespace):
    """returns a dict that represents a variable for a given etree
    variable element

    Always sets 'code', 'id', 'name', 'vocabulary'; adds 'network',
    'statistic', 'time', 'units' and 'description' when present.
    """
    return_dict = _element_dict(
        variable_element,
        exclude_children=['options', 'timeScale', 'timeSupport', 'unit', 'units',
                          'variableCode', 'variableDescription', 'variableName'])
    variable_code = variable_element.find(namespace + 'variableCode')
    return_dict.update({
        'code': variable_code.text,
        'id': variable_code.attrib.get('variableID'),
        'name': variable_element.find(namespace + 'variableName').text,
        'vocabulary': variable_code.attrib.get('vocabulary'),
    })
    network = variable_code.attrib.get('network')
    if network:
        return_dict['network'] = network
    statistic = variable_element.find(namespace + 'options/' + namespace + "option[@name='Statistic']")
    if statistic is not None:
        return_dict['statistic'] = {
            'code': statistic.attrib.get('optionCode'),
            'name': statistic.text,
        }
    # the time metadata element is named differently in WaterML 1.0/1.1
    if '1.0' in namespace:
        time_info_name = 'timeSupport'
    elif '1.1' in namespace:
        time_info_name = 'timeScale'
    else:
        # unknown schema version: the old code left time_info_name unbound
        # and raised NameError on the find() below; skip the lookup instead
        time_info_name = None
    if time_info_name is not None:
        time_info_element = variable_element.find(namespace + time_info_name)
        if time_info_element is not None:
            return_dict['time'] = _parse_time_info(time_info_element, namespace)
    unit_element = _find_unit(variable_element, namespace)
    if unit_element is not None:
        return_dict['units'] = _parse_unit(unit_element, namespace)
    variable_description = variable_element.find(
        namespace + 'variableDescription')
    if variable_description is not None:
        return_dict['description'] = variable_description.text
    return return_dict
def _scrub_prefix(element_dict, prefix):
"returns a dict with prefix scrubbed from the keys"
return dict([
(k.split(prefix + '_')[-1], v)
for k, v in element_dict.items()
])
| |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from ..elements import Attribute
from ..elements.elementbase import LogicElement
from ..tags.context import ContextElementBase, DataSetter
from .. import logic
from ..urlmapper import URLMapper, MissingURLParameter, RouteError
from ..context.expressiontime import ExpressionDateTime
from ..render import render_object
from .. import http
from ..http import StatusCode, standard_response, RespondWith
from .. import errors
from ..template.errors import MissingTemplateError
from ..template.rendercontainer import RenderContainer
from .. import trace
from .. import __version__
from ..content import Content
from ..tags.content import ContentElementMixin
from ..tools import get_return
from .. import syntax
from ..timezone import Timezone
from ..context.tools import to_expression, set_dynamic
from ..sites import LocaleProxy
from ..compat import text_type, itervalues, py2bytes, iteritems
from .. import db
from ..response import MoyaResponse
from ..request import ReplaceRequest
from ..urltools import urlencode as moya_urlencode
from .. import tools
from .. import pilot
from .. import namespaces
from webob import Response
from fs.path import splitext
from fs.errors import NoSysPath
import pytz
import sys
import logging
log = logging.getLogger("moya.runtime")
startup_log = logging.getLogger("moya.startup")
class Mountpoint(LogicElement):
    """
    A [i]mountpoint[/i] defines a collection of URL *routes* which map incoming requests on to moya code.
    An app will typically have at least one mountpoint with [c]name="main"[/c] (the default) which is used when the app is mounted. Moya will check each enclosed <url> in turn until it finds a route which matches.
    An app may contain multiple mountpoints, which can be [i]mounted[/i] separately.
    """

    class Help:
        synopsis = "define a collection of url routes"
        example = """
        <mountpoint name="main">
            <!-- should contain <url> tags -->
        </mountpoint>
        """

    # mapped to _name so it doesn't clash with the instance attribute
    # assigned in post_build
    name = Attribute(
        "Mountpoint name unique to the application", default="main", map_to="_name"
    )
    preserve_attributes = ["urlmapper", "middleware", "name"]

    def post_build(self, context):
        # one mapper for the routes proper, plus separate mappers for the
        # request/response middleware stages
        self.urlmapper = URLMapper(self.libid)
        self.middleware = dict(request=URLMapper(), response=URLMapper())
        self.name = self._name(context)
class URL(LogicElement):
    """
    Add a URL route to a [tag]mountpoint[/tag].
    """

    class Help:
        synopsis = """add a url to a mountpoint"""

    mountpoint = Attribute("Name of the parent mount point", required=False)
    mount = Attribute("Mountpoint to mount on this url", required=False, default=None)
    route = Attribute("URL route", required=True)
    view = Attribute("View element", required=False, map_to="target", example="#post")
    methods = Attribute(
        "A list of comma separated HTTP methods",
        type="commalist",
        evaldefault=True,
        required=False,
        default="GET,POST",
        example="GET,POST",
        map_to="_methods",
    )
    handler = Attribute(
        "A list of comma separated http status codes",
        type="commalist",
        evaldefault=False,
        required=False,
        default=[],
        example="404",
        map_to="_handlers",
    )
    name = Attribute("An optional name", required=False, default=None)
    final = Attribute(
        "Ignore further URLs if this route matches?", type="boolean", default=False
    )

    def lib_finalize(self, context):
        # register this route with its mountpoint once the library is built
        if not self.check(context):
            return
        defaults = self.get_let_map(context)
        params = self.get_parameters(context)
        methods = params._methods
        # convert status-code strings (e.g. "404") into StatusCode instances
        handlers = []
        for h in params._handlers:
            try:
                handlers.append(StatusCode(h))
            except KeyError:
                raise errors.ElementError(
                    """"{}" is not a valid http status code""".format(h), element=self
                )
        target = params.target
        url_target = self.document.lib.qualify_libname(self.libname)
        try:
            # target tuple: this element's qualified libname plus
            # (optionally) the fully-qualified view reference
            if target is None:
                target = (url_target,)
            else:
                target = (
                    url_target,
                    self.document.qualify_element_ref(target, lib=self.lib),
                )
        except errors.ElementNotFoundError:
            raise errors.ElementError(
                "No view called '{}' in the project".format(target), element=self
            )
        if params.mountpoint is None:
            mount_point = self.get_ancestor("mountpoint")
        else:
            _, mount_point = self.get_element(params.mountpoint)
        if params.mount:
            # a sub-mountpoint is mounted beneath this route rather than a view
            try:
                _, element = self.archive.get_element(params.mount, lib=self.lib)
                if not hasattr(element, "urlmapper"):
                    raise ValueError("element {} is not mountable".format(element))
                # wildcard route so requests under this prefix reach the sub-mapper
                mount_point.urlmapper.map(
                    params.route.rstrip("/") + "/*",
                    [url_target],
                    methods=methods,
                    handlers=handlers or None,
                    defaults=defaults,
                )
                mount_point.urlmapper.mount(
                    params.route, element.urlmapper, name=params.name, defaults=defaults
                )
            except Exception as e:
                raise errors.ElementError(
                    text_type(e), element=self, diagnosis=getattr(e, "diagnosis", None)
                )
        else:
            try:
                mount_point.urlmapper.map(
                    params.route,
                    target,
                    methods=methods,
                    handlers=handlers or None,
                    name=params.name,
                    defaults=defaults,
                    final=params.final,
                )
            except ValueError as e:
                raise errors.ElementError(text_type(e), element=self)
class Middleware(LogicElement):
    """Add middleware to a mountpoint"""

    class Help:
        synopsis = "add middleware to a mountpoint"

    route = Attribute("Route", required=True)
    methods = Attribute(
        "A list of comma separated HTTP methods",
        required=False,
        type="commalist",
        evaldefault=True,
        default="*",
        example="GET,POST",
        map_to="_methods",
    )
    mountpoint = Attribute("Mount point", required=False)
    stage = Attribute(
        "Stage in request handling",
        required=False,
        default="request",
        metavar="STAGE",
        choices=["request", "response"],
    )
    macro = Attribute("Macro to call", required=False, default=None)
    name = Attribute("An optional name", required=False, default=None)

    def lib_finalize(self, context):
        # register the middleware macro on the mountpoint's mapper for the
        # requested stage ("request" or "response")
        if not self.check(context):
            return
        params = self.get_parameters(context)
        methods = params._methods
        target = params.macro
        url_target = self.document.lib.qualify_libname(self.libname)
        # target tuple: this element plus (optionally) the macro to call
        if target is None:
            target = (url_target,)
        else:
            target = (url_target, self.document.qualify_element_ref(target))
        if params.mountpoint is None:
            mount_point = self.get_ancestor("mountpoint")
        else:
            _, mount_point = self.get_element(params.mountpoint)
        mapper = mount_point.middleware[params.stage]
        _route = mapper.map(params.route, target, methods=methods, name=params.name)
class Mount(LogicElement):
    """Mount a library."""

    class Help:
        synopsis = "mount a library on a given URL"

    app = Attribute("Application", required=True)
    url = Attribute("Url", required=True)
    mountpoint = Attribute("Mount point", required=False, default="main")
    priority = Attribute(
        "Priority (highest priority is checked first)",
        type="integer",
        required=False,
        default=0,
    )

    def logic(self, context):
        # mounting is skipped entirely for test builds
        if self.archive.test_build:
            return
        self.archive.build_libs()
        params = self.get_parameters(context)
        app = self.archive.find_app(params.app)
        server = self.get_ancestor("server")
        # let-map values become URL defaults; the app name is always set
        url_params = self.get_let_map(context, check_missing=False)
        url_params["app"] = app.name
        mountpoint = app.lib.get_element_by_type_and_attribute(
            "mountpoint", "name", params.mountpoint
        )
        app.mounts.append((params.mountpoint, params.url))
        server.urlmapper.mount(
            params.url,
            mountpoint.urlmapper,
            defaults=url_params,
            name=app.name,
            priority=params.priority,
        )
        # mount the request/response middleware mappers alongside the routes
        for stage, urlmapper in server.middleware.items():
            urlmapper.mount(
                params.url,
                mountpoint.middleware[stage],
                defaults=url_params,
                name=app.name,
                priority=params.priority,
            )
        startup_log.debug(
            "%s (%s) mounted on %s",
            app,
            params.mountpoint,
            tools.normalize_url_path(params.url),
        )
class GetURL(DataSetter):
    """Get a named URL."""

    class Help:
        synopsis = "get a named URL"

    name = Attribute("URL name", required=True)
    _from = Attribute("Application", type="application", default=None, evaldefault=True)
    query = Attribute(
        "Mapping expression to use as a query string",
        metavar="EXPRESSION",
        required=False,
        default=None,
        type="expression",
        missing=False,
    )
    _with = Attribute(
        "Extract URL values from this object",
        type="expression",
        required=False,
        default=None,
    )
    base = Attribute("Base (protocol and domain) of the URL", default=None)

    def get_value(self, context):
        params = self.get_parameters(context)
        query = params.query
        app = self.get_app(context)
        try:
            # URL parameters come from the let map, optionally merged with
            # the 'with' object; otherwise all values are stringified
            if self.has_parameter("with"):
                url_params = self.get_let_map(context)
                url_params.update(params["with"])
            else:
                url_params = {
                    k: text_type(v) for k, v in iteritems(self.get_let_map(context))
                }
            # blank parameters would silently produce broken URLs
            for k, v in iteritems(url_params):
                if not v:
                    self.throw(
                        "bad-value.parameter",
                        "URL parameter '{}' must not be blank or missing (it is {})".format(
                            k, to_expression(context, v)
                        ),
                    )
            url = context[".server"].get_url(app.name, params.name, url_params)
        except MissingURLParameter as e:
            self.throw("get-url.missing-parameter", text_type(e))
        except RouteError as e:
            self.throw("get-url.no-route", text_type(e))
        # append the query string only for mapping-like query values
        if query and hasattr(query, "items"):
            qs = moya_urlencode(query)
            if qs:
                url += "?" + qs
        url = self.qualify(context, url)
        return url

    def qualify(self, context, url):
        # hook overridden by GetFqURL to prepend protocol/domain
        base = self.base(context)
        if base is not None:
            url = base.rstrip("/") + "/" + url.lstrip("/")
        return url
class GetFqURL(GetURL):
    """Get a [i]fully qualified[/i] (including domain name and scheme) named URL."""

    base = Attribute("Base (protocol and domain) of the URL", default=None)

    class Help:
        synopsis = "get a fully qualified URL"

    def qualify(self, context, url):
        base = self.base(context)
        if base is None:
            # fall back to the configured site host, then the request host
            base = context[".sys.site.host"] or context[".request.host_url"]
        url = base + url
        return url
class Trace(DataSetter):
    """
    Extract route information from a URL path.
    Returns route matches in a list of dictionaries. Route matches have three keys;
    [c]data[/c] is the url data (as returned in [c].url[/c]), [c]targets[/c] is a list of element references,
    [c]name[/c] is the name of the matching URL.
    If [c]app[/c] or [c]name[/c] is provided, this tag will return the first url route matching the given app / named url.
    """

    class Help:
        synopsis = "extract routing information from mounted URL paths"
        example = """
        <trace path=".request.path" dst="matches"/>
        """

    server = Attribute(
        "Server containing URL routes",
        type="expression",
        default=".server",
        evaldefault=True,
    )
    path = Attribute(
        "URL path to parse", type="expression", required=True, missing=False
    )
    method = Attribute("HTTP method", type="text", default="GET")
    app = Attribute("Application name", required=False, default=None, type="text")
    name = Attribute(
        "Route name to find", required=False, type="commalist", default=None
    )

    def get_value(self, context):
        server, path, method, app, name = self.get_parameters(
            context, "server", "path", "method", "app", "name"
        )
        # strip any scheme and guarantee a leading slash
        if "://" in path:
            _, _, path = path.partition("://")
        if not path.startswith("/"):
            path = "/" + path
        if app is None and name is None:
            # unfiltered: collect every matching route
            routes = []
            for route_match in server.urlmapper.iter_routes(path, method):
                if route_match is not None:
                    data, targets, name = route_match
                    routes.append({"data": data, "targets": targets, "name": name})
            return routes
        else:
            # filtered: return the first route matching the app and/or name(s)
            for route_match in server.urlmapper.iter_routes(path, method):
                data, targets, _name = route_match
                if app is not None:
                    if data.get("app", None) != app:
                        continue
                if name is not None:
                    if _name not in name:
                        continue
                return {"data": data, "targets": targets, "name": _name}
            else:
                # no route matched the filter
                return None
def wrap_element_error(f):
    """Decorator for generator logic methods: element errors and logic
    flow-control exceptions propagate untouched; any other exception is
    re-raised as an ElementError attributed to this element (keeping any
    diagnosis the original exception carried).
    """
    # local import keeps the decorator self-contained; functools.wraps
    # preserves the wrapped function's name/docstring for debugging
    import functools

    @functools.wraps(f)
    def deco(self, context):
        try:
            for node in f(self, context):
                yield node
        except (errors.ElementError, logic.LogicFlowException):
            raise
        except Exception as e:
            raise errors.ElementError(
                text_type(e), self, diagnosis=getattr(e, "diagnosis", None)
            )

    return deco
class View(ContextElementBase, ContentElementMixin):
    """Define a view to handle a URL"""

    class Help:
        synopsis = "define a view to handle a URL"

    content = Attribute("Content", type="elementref", required=False, default=None)
    template = Attribute("Template", type="templates", required=False, default=None)
    requires = Attribute(
        "Permission expression", type="expression", required=False, default=None
    )
    withscope = Attribute(
        "Use scope as template / content data?",
        type="boolean",
        required=False,
        default=True,
    )

    def extend_context(self, context):
        """Hook to extend the context."""

    @wrap_element_error
    def run(self, context):
        (content, templates, requires, withscope) = self.get_parameters(
            context, "content", "template", "requires", "withscope"
        )
        # permission gate: a falsy 'requires' expression means 403
        if self.has_parameter("requires"):
            if not requires:
                raise logic.EndLogic(http.RespondForbidden())
        self.extend_context(context)
        # run the view body first; it may set _return
        yield logic.DeferNodeContents(self)
        if "_return" in context:
            scope = get_return(context.get("_return"))
        else:
            if withscope:
                scope = context[".call"]
            else:
                scope = {}
        # a Content return (or None) is passed through untouched
        if scope is not None and not isinstance(scope, Content):
            app = self.get_app(context)
            template = self.resolve_templates(app, templates)
            # if content is None and self.younger_sibling.check_type(namespaces.default, 'content'):
            #     content = self.younger_sibling
            if content is not None:
                if not hasattr(scope, "items"):
                    # fix: to_expression takes (context, obj) — the old call
                    # passed only the scope, so building this error message
                    # itself raised a TypeError (cf. usage in GetURL above)
                    self.throw(
                        "view.bad-return",
                        "View should return a dict or other mapping object (not {})".format(
                            to_expression(context, scope)
                        ),
                    )
                for defer in self.generate_content(context, content, app, td=scope):
                    yield defer
                context.copy("_content", "_return")
            elif template is not None:
                # no explicit content: render the scope through the template
                render_container = RenderContainer.create(app, template=template)
                render_container.update(scope)
                context["_return"] = render_container
class AppUrlsProxy(object):
    """Lazy context object that resolves to the URL collection of the
    application currently in scope (``.app``).
    """

    def __moyacontext__(self, context):
        url_collections = context.get(".urls")
        current_app = context[".app"]
        return url_collections[current_app.name]
class Trace(object):
    """Record of a single dispatch step: the target element reference,
    its app, the matched route data, and — only when the result was a
    RespondWith — its text form.

    NOTE(review): this shadows the Trace tag class defined above at
    module level; presumably the tag remains registered with the element
    system independently of the module name — confirm.
    """

    def __init__(self, target, app=None, route_data=None, response=None):
        self.target = target
        self.app = app
        self.route_data = route_data
        is_respond_with = isinstance(response, http.RespondWith)
        self.response = text_type(response) if is_respond_with else None

    def __moyarepr__(self, context):
        return "<trace>"

    @property
    def target_html(self):
        return syntax.highlight("target", self.target, line_numbers=False)
class GetLocale(DataSetter):
    """Get an object containing locale information"""

    class Help:
        synopsis = "get locale information"

    locale = Attribute("Locale name")

    def logic(self, context):
        _locale = self.locale(context)
        try:
            locale = LocaleProxy(_locale)
        except Exception:
            # narrowed from a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt
            self.throw(
                "get-locale.unknown-locale",
                '''Couldn't get locale information for "{}"'''.format(_locale),
            )
        self.set_context(context, self.dst(context), locale)
class SetLocale(LogicElement):
    """Switches the current locale"""

    class Help:
        synopsis = "switch the current locale"

    locale = Attribute("Locale name")

    def logic(self, context):
        _locale = self.locale(context)
        try:
            locale = LocaleProxy(_locale)
        except Exception:
            # narrowed from a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt
            self.throw(
                "change-locale.unknown-locale",
                '''Couldn't get locale information for "{}"'''.format(_locale),
            )
        context[".locale"] = locale
class SetLanguage(LogicElement):
    """Set the current language"""

    class Help:
        synopsis = "set the current language"

    language = Attribute("Language code")

    def logic(self, context):
        value = self.language(context)
        # normalize a single language code to a one-element list
        context[".languages"] = value if isinstance(value, list) else [value]
class Server(LogicElement):
"""Defines a server"""
class Help:
synopsis = "define a server"
def post_build(self, context):
self.urlmapper = URLMapper()
self.middleware = {"request": URLMapper(), "response": URLMapper()}
self.fs = None
super(Server, self).post_build(context)
def startup(self, archive, context, fs, breakpoint=False):
self.fs = fs
archive.build_libs()
try:
if breakpoint:
logic.debug(archive, context, logic.DeferNodeContents(self))
else:
logic.run_logic(archive, context, logic.DeferNodeContents(self))
except Exception as e:
# import traceback
# traceback.print_exc(e)
raise
archive.build_libs()
    def get_url(self, app_name, url_name, params=None):
        """Resolve a named URL for an app.

        Multiple routes may exist for the name: each but the last is
        tried in order (RouteError moves on to the next); the last is
        tried outside the loop so its RouteError, if any, propagates to
        the caller.
        """
        app_routes = self.urlmapper.get_routes(app_name)
        url = None
        # Could be multiple routes for this name
        # Try each one and return the url that doesn't fail
        for route in app_routes[:-1]:
            try:
                url = route.target.get_url(url_name, params, base_route=route)
            except RouteError:
                continue
            else:
                break
        else:
            # Last one, if this throws an exception, we want it to propagate
            route = app_routes[-1]
            url = route.target.get_url(url_name, params, base_route=route)
        return url
def trace(self, archive, url, method="GET"):
for route_match in self.urlmapper.iter_routes(url, method):
route_data = route_match.data
target = route_match.target
if target:
for element_ref in target:
app = archive.get_app(route_data.get("app", None))
yield (route_data, archive.get_element(element_ref, app))
def process_response(self, context, response):
cookies = context.root.get("cookiejar", {})
for cookie in itervalues(cookies):
cookie.set(response)
for cookie_name in cookies.deleted_cookies:
response.delete_cookie(cookie_name)
try:
if not response.date and "now" in context.root:
response.date = context.root["now"]._dt
except:
# Don't want to discard the response here, so log exception
log.exception("error setting response date")
return response
def render_response(self, archive, context, obj, status=StatusCode.ok):
response = Response(
charset=py2bytes("utf8"), status=int(getattr(obj, "http_status", status))
)
result = render_object(obj, archive, context, "html")
response.text = text_type(result)
return self.process_response(context, response)
    def _dispatch_result(self, archive, context, request, result, status=StatusCode.ok):
        """Convert a dispatch result to a Response: RespondWith results
        re-dispatch to a status handler, ReplaceRequest passes through for
        the caller to act on, and anything that is not already a Response
        is rendered to HTML.
        """
        if result is None:
            return None
        if isinstance(result, ReplaceRequest):
            return result
        if isinstance(result, RespondWith):
            return self.dispatch_handler(
                archive, context, request, status=result.status, headers=result.headers
            )
        if not isinstance(result, Response):
            # the result's own http_status (when set) wins over the default
            status = int(getattr(result, "http_status", None) or status)
            response = MoyaResponse(charset=py2bytes("utf8"), status=status)
            html = render_object(result, archive, context, "html")
            response.text = html
        else:
            response = result
        return self.process_response(context, response)
def handle_error(self, archive, context, request, error, exc_info):
context.safe_delete("._callstack")
context.safe_delete(".call")
return self.dispatch_handler(
archive,
context,
request,
status=StatusCode.internal_error,
error=error,
exc_info=exc_info,
)
    def _dispatch_mapper(
        self, archive, context, mapper, url, method="GET", status=None, breakpoint=False
    ):
        """Loop to call targets for a url/method/status combination"""
        dispatch_trace = context.root.get("_urltrace", [])
        # debug_call steps through the element under the debugger
        if breakpoint:
            call = archive.debug_call
        else:
            call = archive.call
        root = context.root
        for route_data, target, name in mapper.iter_routes(url, method, status):
            # reset per-route state: matched url name and response headers
            root.update(urlname=name, headers={})
            if target:
                for element_ref in target:
                    app, element = archive.get_element(element_ref)
                    if element:
                        app = app or archive.get_app(route_data.get("app", None))
                        context.root.update(url=route_data)
                        result = call(element_ref, context, app, url=route_data)
                        dispatch_trace.append(
                            Trace(element_ref, app, route_data, result)
                        )
                        # only non-None results are yielded to the dispatcher
                        if result is not None:
                            yield result
                    else:
                        dispatch_trace.append(Trace(element_ref))
            else:
                # NOTE(review): element_ref here is left over from a previous
                # iteration; if the very first route has no target this raises
                # NameError — confirm intended behavior
                dispatch_trace.append(Trace(element_ref))
    @classmethod
    def set_site(cls, archive, context, request):
        """Set site data for a request"""
        # match on the bare domain, ignoring any :port suffix
        domain = request.host
        if ":" in domain:
            domain = domain.split(":", 1)[0]
        site_instance = archive.sites.match(domain, context=context)
        if site_instance is None:
            log.error(
                'no site matching domain "{domain}", consider adding [site:{domain}] to settings'.format(
                    domain=domain
                )
            )
            return None
        context.root["sys"]["site"] = site_instance
        try:
            context.root["sys"]["base"] = archive.project_fs.getsyspath("/")
        except NoSysPath:
            # project fs has no real filesystem path (e.g. an in-memory fs)
            context.root["sys"]["base"] = None
        context.root["site"] = site_instance._data
        return site_instance
@classmethod
def _get_tz(self, context, default_timezone="UTC", user_timezone=False):
"""lazy insertion of .tz"""
if context is None:
context = pilot.context
tz = None
if user_timezone:
tz = context.get(".user.timezone", None)
if not tz:
tz = context.get(".sys.site.timezone", None)
if not tz:
tz = default_timezone
if not tz:
return None
try:
return Timezone(tz)
except pytz.UnknownTimeZoneError:
log.error("invalid value for timezone '%s', defaulting to UTC", tz)
return Timezone("UTC")
def run_middleware(self, stage, archive, context, request, url, method):
middleware = self.middleware[stage]
try:
for result in self._dispatch_mapper(
archive, context, middleware, url, method
):
response = self._dispatch_result(archive, context, request, result)
if response:
return response
except Exception as e:
return self.handle_error(archive, context, request, e, sys.exc_info())
def _populate_context(self, archive, context, request):
"""Add standard values to context."""
populate_context = {
"permissions": {},
"libs": archive.libs,
"apps": archive.apps,
"debug": archive.debug,
"develop": archive.develop,
"sys": {},
"server": self,
"urls": self.urlmapper,
"now": ExpressionDateTime.moya_utcnow(),
"appurls": AppUrlsProxy(),
"moya": {"version": __version__},
"enum": archive.enum,
"accept_language": list(request.accept_language),
"media_url": archive.media_url,
"filters": archive.filters,
"secret": archive.secret,
}
context.root.update(populate_context)
set_dynamic(context)
def dispatch(self, archive, context, request, breakpoint=False):
    """Dispatch a request to the server and return a response object.

    Order of operations: populate the context, match a site, run "request"
    middleware, run the main URL-mapped views (wrapping any response in
    "response" middleware and committing/rolling back DB sessions), then
    fall back to an append-slash redirect or a 404/405 handler.
    """
    url = request.path_info
    method = request.method
    self._populate_context(archive, context, request)
    site = self.set_site(archive, context, request)
    if site is None:
        # No site match, return a 404
        return self.dispatch_handler(
            archive, context, request, StatusCode.not_found
        )
    root = context.root
    if site.head_as_get and method == "HEAD":
        # Treat HEAD requests as GET requests
        request = request.copy()
        request.method = "GET"
        root["request"] = request
        method = "GET"
    root["locale"] = site.locale
    # .tz is resolved lazily on first access (user/site timezone lookup).
    context.set_lazy(
        ".tz",
        self._get_tz,
        None,
        user_timezone=site.user_timezone,
        default_timezone=site.timezone,
    )
    # Request middleware
    response = self.run_middleware(
        "request", archive, context, request, url, method
    )
    if response is not None:
        return response

    def response_middleware(response):
        # Give "response" stage middleware a chance to replace the response.
        context.safe_delete("._callstack", ".call")
        context.root["response"] = response
        new_response = self.run_middleware(
            "response", archive, context, request, url, method
        )
        return new_response or response

    # Run main views
    root["urltrace"] = root["_urltrace"] = []
    context.safe_delete("._callstack", ".call")
    response = None
    try:
        for result in self._dispatch_mapper(
            archive, context, self.urlmapper, url, method, breakpoint=breakpoint
        ):
            response = self._dispatch_result(archive, context, request, result)
            if response:
                response = response_middleware(response)
                # DB sessions are committed whether or not a view responded;
                # rollback happens only on exception (below).
                db.commit_sessions(context)
                return response
            else:
                db.commit_sessions(context)
    except Exception as e:
        db.rollback_sessions(context, close=False)
        return self.handle_error(archive, context, request, e, sys.exc_info())
    finally:
        # Wait for any threads spawned by the request before closing sessions.
        for thread in context.get("._threads", []):
            thread.wait()
        context.safe_delete("._threads")
        db.close_sessions(context)
    root["_urltrace"] = []
    # Append slash and redirect if url doesn't end in a slash
    if not url.endswith("/") and site.append_slash:
        # Check in advance if the url ending with / actually maps to anything
        if method in ("HEAD", "GET") and self.urlmapper.has_route(
            url + "/", method, None
        ):
            _, ext = splitext(url)
            # Don't redirect when the filename has an extension
            if not ext:
                response = MoyaResponse(
                    status=StatusCode.temporary_redirect, location=url + "/"
                )
                return response
    if request.method in ["GET", "POST", "HEAD"]:
        status_code = StatusCode.not_found
    else:
        status_code = StatusCode.method_not_allowed
    # No response returned, handle 404
    return self.dispatch_handler(archive, context, request, status=status_code)
def dispatch_handler(
    self,
    archive,
    context,
    request,
    status=404,
    headers=None,
    error=None,
    exc_info=None,
):
    """Respond to a status code.

    Tries, in order: a URL-mapped handler for *status*, a template named
    ``<status>.html``, and finally a minimal built-in standard response.

    Args:
        status: Status code being handled (int or StatusCode).
        headers: Optional headers to expose on the context root.
        error: The exception (if any) that triggered this handler.
        exc_info: ``sys.exc_info()`` tuple matching *error*.
    """
    # Clear per-request state left over from the failed dispatch.
    context.safe_delete(
        "._callstack",
        ".call",
        ".td",
        "._td",
        ".contentstack",
        ".content",
        ".headers",
    )
    if headers is not None:
        context.root["headers"] = headers
    moya_trace = None
    error2 = None          # a *second* error raised by the handler itself
    moya_trace2 = None
    if error is not None:
        moya_trace = getattr(error, "moya_trace", None)
        if moya_trace is None:
            try:
                moya_trace = trace.build(
                    context, None, None, error, exc_info, request
                )
            except Exception:
                raise
    try:
        url = request.path_info
        method = request.method
        for result in self._dispatch_mapper(
            archive, context, self.urlmapper, url, method, status
        ):
            if not isinstance(result, RespondWith):
                return self._dispatch_result(
                    archive, context, request, result, status=status
                )
    except Exception as e:
        log.exception("error in dispatch_handler")
        if status != StatusCode.internal_error:
            # Not already handling a 500 — escalate through handle_error.
            # (If we *are* handling a 500, fall through to avoid recursion.)
            return self.handle_error(archive, context, request, e, sys.exc_info())
        error2 = e
        moya_trace2 = getattr(error2, "moya_trace", None)
        if moya_trace2 is None:
            moya_trace2 = trace.build(
                context, None, None, error2, sys.exc_info(), request
            )
    if error is not None:
        log.error("unhandled exception ({})".format(text_type(error).lstrip()))
        try:
            context[".console"].obj(context, moya_trace)
        except Exception:
            # Writing the trace to the console is best-effort only.
            pass
    context.reset()
    context.safe_delete(
        "._callstack",
        ".call",
        ".td",
        "._td",
        ".contentstack",
        ".content",
        "_funccalls",
        "._for",
        "_for_stack",
    )
    # No handlers have been defined for this status code.
    # We'll look for a template named <status code>.html and render that.
    template_filename = "{}.html".format(int(status))
    try:
        response = MoyaResponse(charset=py2bytes("utf8"), status=status)
        rc = RenderContainer.create(None, template=template_filename)
        rc["request"] = request
        rc["status"] = status
        rc["error"] = error
        rc["trace"] = moya_trace
        # BUG FIX: was `rc["error2"] = error`, which paired the original
        # exception with the secondary trace in `trace2`.
        rc["error2"] = error2
        rc["trace2"] = moya_trace2
        rc["moya_error"] = (
            getattr(moya_trace.exception, "type", None) if moya_trace else None
        )
        if status == 500:
            archive.fire(context, "sys.unhandled-exception", data=rc)
        response.text = render_object(rc, archive, context, "html")
        return response
    except MissingTemplateError:
        pass
    except Exception as e:
        log.error("unable to render %s (%s)", template_filename, text_type(e))
    # Render a very basic response
    response = Response(charset=py2bytes("utf8"), status=status)
    url = request.path_info
    try:
        response.text = standard_response(
            status, url, error, moya_trace, debug=archive.debug
        )
    except Exception as e:
        log.exception("error generating standard response")
    return response
| |
import datetime
import cassiopeia.riotapi
import cassiopeia.type.core.common
import cassiopeia.type.dto.game
try:
from future.builtins.misc import super
except ImportError:
pass
@cassiopeia.type.core.common.inheritdocs
class Stats(cassiopeia.type.core.common.CassiopeiaObject):
    """End-of-game statistics for the participant a Game was pulled for."""
    # Raw DTO class that backs this wrapper.
    dto_type = cassiopeia.type.dto.game.RawStats

    def __str__(self):
        return "Stats"

    @property
    def kda(self):
        """float the participant's kda: (kills + assists) / deaths, with zero deaths counted as one"""
        return (self.kills + self.assists) / (self.deaths if self.deaths else 1)

    @property
    def assists(self):
        """int the total number of assists this participant had"""
        return self.data.assists

    @property
    def inhibitor_kills(self):
        """int the total number of inhibitors this participant killed"""
        return self.data.barracksKilled

    @property
    def kills(self):
        """int the total number of kills this participant had"""
        return self.data.championsKilled

    @property
    def combat_score(self):
        """int dominion only. the part of the participant's score that came from combat-related activities"""
        return self.data.combatPlayerScore

    @property
    def consumables_bought(self):
        """list<Item> the consumables that the participant bought (careful, they might have just sold them back or hit undo?)"""
        return self.data.consumablesPurchased

    @property
    def damage_dealt_player(self):
        """int well, we don't know what this one is. let us know if you figure it out."""
        return self.data.damageDealtPlayer

    @property
    def double_kills(self):
        """int the number of double kills this participant had"""
        return self.data.doubleKills

    @property
    def first_blood(self):
        """bool whether this participant got first blood"""
        return self.data.firstBlood

    @property
    def gold(self):
        """int the participant's current gold"""
        return self.data.gold

    @property
    def gold_earned(self):
        """int the participant's total gold"""
        return self.data.goldEarned

    @property
    def gold_spent(self):
        """int the participant's spent gold"""
        return self.data.goldSpent

    @property
    def item0(self):
        """Item the participant's first item"""
        # A falsy item id means an empty slot, so None is returned.
        return cassiopeia.riotapi.get_item(self.data.item0) if self.data.item0 else None

    @property
    def item1(self):
        """Item the participant's second item"""
        return cassiopeia.riotapi.get_item(self.data.item1) if self.data.item1 else None

    @property
    def item2(self):
        """Item the participant's third item"""
        return cassiopeia.riotapi.get_item(self.data.item2) if self.data.item2 else None

    @property
    def item3(self):
        """Item the participant's fourth item"""
        return cassiopeia.riotapi.get_item(self.data.item3) if self.data.item3 else None

    @property
    def item4(self):
        """Item the participant's fifth item"""
        return cassiopeia.riotapi.get_item(self.data.item4) if self.data.item4 else None

    @property
    def item5(self):
        """Item the participant's sixth item"""
        return cassiopeia.riotapi.get_item(self.data.item5) if self.data.item5 else None

    @property
    def item6(self):
        """Item the participant's seventh item (i.e. their ward)"""
        return cassiopeia.riotapi.get_item(self.data.item6) if self.data.item6 else None

    @property
    def items(self):
        """list<Item> the participant's items (empty slots are None)"""
        return [self.item0, self.item1, self.item2, self.item3, self.item4, self.item5, self.item6]

    @property
    def killing_sprees(self):
        """int the number of killing sprees this participant had"""
        return self.data.killingSprees

    @property
    def largest_critical_strike(self):
        """int the largest critical strike this participant had"""
        return self.data.largestCriticalStrike

    @property
    def largest_killing_spree(self):
        """int the largest killing spree this participant had"""
        return self.data.largestKillingSpree

    @property
    def largest_multi_kill(self):
        """int the largest multikill this participant had"""
        return self.data.largestMultiKill

    @property
    def tier_3_items_bought(self):
        """int the number of tier 3 items built"""
        return self.data.legendaryItemsCreated

    @property
    def level(self):
        """int the participant's champion level"""
        return self.data.level

    @property
    def magic_damage_dealt(self):
        """int the total magic damage this participant dealt"""
        return self.data.magicDamageDealtPlayer

    @property
    def magic_damage_dealt_to_champions(self):
        """int the total magic damage this participant dealt to champions"""
        return self.data.magicDamageDealtToChampions

    @property
    def magic_damage_taken(self):
        """int the total magic damage this participant received"""
        return self.data.magicDamageTaken

    @property
    def minion_denies(self):
        """int the number of minions this participant denied to the enemy. let us know if you figure out what this actually is"""
        return self.data.minionsDenied

    @property
    def minion_kills(self):
        """int the number of minions this participant killed"""
        return self.data.minionsKilled

    @property
    def monster_kills(self):
        """int the number of neutral minions this participant killed"""
        return self.data.neutralMinionsKilled

    @property
    def enemy_monster_kills(self):
        """int the number of neutral enemy minions this participant killed"""
        return self.data.neutralMinionsKilledEnemyJungle

    @property
    def ally_monster_kills(self):
        """int the number of neutral ally minions this participant killed"""
        return self.data.neutralMinionsKilledYourJungle

    @property
    def nexus_killed(self):
        """int the number of nexuses this participant killed"""
        return self.data.nexusKilled

    @property
    def node_captured(self):
        """int dominion only. the number of nodes this participant captured"""
        return self.data.nodeCapture

    @property
    def node_capture_assists(self):
        """int dominion only. the number of nodes this participant assisted in capturing"""
        return self.data.nodeCaptureAssist

    @property
    def node_neutralizations(self):
        """int dominion only. the number of nodes this participant neutralized"""
        return self.data.nodeNeutralize

    @property
    def node_neutralization_assists(self):
        """int dominion only. the number of nodes this participant assisted in neutralizing"""
        return self.data.nodeNeutralizeAssist

    @property
    def deaths(self):
        """int the number of deaths this participant had"""
        return self.data.numDeaths

    @property
    def items_bought(self):
        """int the number of items this participant bought"""
        return self.data.numItemsBought

    @property
    def objective_score(self):
        """int dominion only. the part of the participant's score that came from objective-related activities"""
        return self.data.objectivePlayerScore

    @property
    def penta_kills(self):
        """int the number of penta kills this participant had"""
        return self.data.pentaKills

    @property
    def physical_damage_dealt(self):
        """int the total physical damage this participant dealt"""
        return self.data.physicalDamageDealtPlayer

    @property
    def physical_damage_dealt_to_champions(self):
        """int the total physical damage this participant dealt to champions"""
        return self.data.physicalDamageDealtToChampions

    @property
    def physical_damage_taken(self):
        """int the total physical damage this participant received"""
        return self.data.physicalDamageTaken

    @property
    def lane(self):
        """Lane the lane this participant was in"""
        return cassiopeia.type.core.common.Lane.for_id(self.data.playerPosition) if self.data.playerPosition else None

    @property
    def role(self):
        """Role the role of this participant"""
        return cassiopeia.type.core.common.Role.for_id(self.data.playerRole) if self.data.playerRole else None

    @property
    def quadra_kills(self):
        """int the number of quadra kills this participant had"""
        return self.data.quadraKills

    @property
    def sight_wards_bought(self):
        """int the number of sight wards this participant bought"""
        return self.data.sightWardsBought

    @property
    def q_casts(self):
        """int the number of times this participant cast his Q"""
        return self.data.spell1Cast

    @property
    def w_casts(self):
        """int the number of times this participant cast his W"""
        return self.data.spell2Cast

    @property
    def e_casts(self):
        """int the number of times this participant cast his E"""
        return self.data.spell3Cast

    @property
    def r_casts(self):
        """int the number of times this participant cast his R"""
        return self.data.spell4Cast

    @property
    def d_casts(self):
        """int the number of times this participant cast his D summoner spell"""
        return self.data.summonSpell1Cast

    @property
    def f_casts(self):
        """int the number of times this participant cast his F summoner spell"""
        return self.data.summonSpell2Cast

    @property
    def elite_monsters_kills(self):
        """int the number of elite monsters this participant killed"""
        return self.data.superMonsterKilled

    @property
    def side(self):
        """Side the side the participant was on"""
        return cassiopeia.type.core.common.Side(self.data.team) if self.data.team else None

    @property
    def objectives(self):
        """int well, we don't know what this one is. let us know if you figure it out."""
        return self.data.teamObjective

    @property
    def time_played(self):
        """int the amount of time this participant played"""
        return self.data.timePlayed

    @property
    def damage_dealt(self):
        """int the total damage this participant dealt"""
        return self.data.totalDamageDealt

    @property
    def damage_dealt_to_champions(self):
        """int the total damage this participant dealt to champions"""
        return self.data.totalDamageDealtToChampions

    @property
    def damage_taken(self):
        """int the total damage this participant received"""
        return self.data.totalDamageTaken

    @property
    def healing_done(self):
        """int the amount of healing this participant did"""
        return self.data.totalHeal

    @property
    def score(self):
        """int the score for this participant"""
        return self.data.totalPlayerScore

    @property
    def score_rank(self):
        """int if game was a dominion game, team rank of the player's total score (e.g., 1-5)"""
        return self.data.totalScoreRank

    @property
    def crowd_control_dealt(self):
        """int the total amount of crowd control this participant dealt (in seconds)"""
        return self.data.totalTimeCrowdControlDealt

    @property
    def units_healed(self):
        """int the number of units this participant healed"""
        return self.data.totalUnitsHealed

    @property
    def triple_kills(self):
        """int the number of triple kills this participant had"""
        return self.data.tripleKills

    @property
    def true_damage_dealt(self):
        """int the total true damage this participant dealt"""
        return self.data.trueDamageDealtPlayer

    @property
    def true_damage_dealt_to_champions(self):
        """int the total true damage this participant dealt to champions"""
        return self.data.trueDamageDealtToChampions

    @property
    def true_damage_taken(self):
        """int the total true damage this participant received"""
        return self.data.trueDamageTaken

    @property
    def turret_kills(self):
        """int the number of turret kills this participant had"""
        return self.data.turretsKilled

    @property
    def unreal_kills(self):
        """int the number of unreal kills this participant had"""
        return self.data.unrealKills

    @property
    def victory_points(self):
        """int the number of victory points this participant gained from winning or losing this game"""
        return self.data.victoryPointTotal

    @property
    def vision_wards_bought(self):
        """int the number of vision wards this participant bought"""
        return self.data.visionWardsBought

    @property
    def ward_kills(self):
        """int the number of wards this participant killed"""
        return self.data.wardKilled

    @property
    def wards_placed(self):
        """int the number of wards this participant placed"""
        return self.data.wardPlaced

    @property
    def win(self):
        """bool whether the participant won the game or not"""
        return self.data.win
@cassiopeia.type.core.common.inheritdocs
class Participant(cassiopeia.type.core.common.CassiopeiaObject):
    """A single player taking part in a game."""
    # Raw DTO class that backs this wrapper.
    dto_type = cassiopeia.type.dto.game.Player

    def __str__(self):
        return "{player} ({champ})".format(player=self.summoner, champ=self.champion)

    @property
    def champion(self):
        """Champion the champion for this participant"""
        champion_id = self.data.championId
        if not champion_id:
            return None
        return cassiopeia.riotapi.get_champion_by_id(champion_id)

    @property
    def summoner(self):
        """Summoner the summoner for this participant"""
        summoner_id = self.data.summonerId
        if not summoner_id:
            return None
        return cassiopeia.riotapi.get_summoner_by_id(summoner_id)

    @property
    def side(self):
        """Side the side this participant was on"""
        team_id = self.data.teamId
        if not team_id:
            return None
        return cassiopeia.type.core.common.Side(team_id)
@cassiopeia.type.core.common.inheritdocs
class Game(cassiopeia.type.core.common.CassiopeiaObject):
    """A game, always pulled relative to one specific summoner."""
    # Raw DTO class that backs this wrapper.
    dto_type = cassiopeia.type.dto.game.Game

    def __init__(self, data, summoner_id):
        # summoner_id identifies the summoner whose history this game was pulled from.
        super().__init__(data)
        self.__summoner_id = summoner_id

    def __str__(self):
        return "Game #{id}".format(id=self.id)

    def __iter__(self):
        # Iterating a Game iterates its participants.
        return iter(self.participants)

    def __len__(self):
        return len(self.participants)

    def __getitem__(self, index):
        return self.participants[index]

    @property
    def summoner(self):
        """Summoner the summoner that this game was pulled using"""
        return cassiopeia.riotapi.get_summoner_by_id(self.__summoner_id) if self.__summoner_id else None

    @property
    def champion(self):
        """Champion the champion for the participant that this game was pulled using"""
        return cassiopeia.riotapi.get_champion_by_id(self.data.championId) if self.data.championId else None

    @cassiopeia.type.core.common.lazyproperty
    def creation(self):
        """datetime the time when this game was created"""
        # createDate is in milliseconds since the epoch.
        return datetime.datetime.utcfromtimestamp(self.data.createDate / 1000) if self.data.createDate else None

    @cassiopeia.type.core.common.lazyproperty
    def participants(self):
        """list<Participant> the participants in this game"""
        # fellowPlayers does not include the pulling summoner, so a Player DTO
        # is synthesized for them from the game-level fields and appended.
        parts = [Participant(player) for player in self.data.fellowPlayers]
        parts.append(Participant(cassiopeia.type.dto.game.Player({
            "championId": self.data.championId,
            "summonerId": self.__summoner_id,
            "teamId": self.data.teamId
        })))
        return parts

    @property
    def id(self):
        """int the game ID"""
        return self.data.gameId

    @property
    def mode(self):
        """GameMode the game mode"""
        return cassiopeia.type.core.common.GameMode(self.data.gameMode) if self.data.gameMode else None

    @property
    def type(self):
        """GameType the game type"""
        return cassiopeia.type.core.common.GameType(self.data.gameType) if self.data.gameType else None

    @property
    def invalid(self):
        """bool well, we don't know what this one is. let us know if you figure it out."""
        return self.data.invalid

    @property
    def ip(self):
        """int the amount of IP the participant gained for this game (the one that this game was pulled using)"""
        return self.data.ipEarned

    @property
    def level(self):
        """int the level of the participant (the one that this game was pulled using)"""
        return self.data.level

    @property
    def map(self):
        """Map the map this game was played on"""
        return cassiopeia.type.core.common.Map(self.data.mapId) if self.data.mapId else None

    @property
    def summoner_spell_d(self):
        """SummonerSpell the participant's first summoner spell (the one that this game was pulled using)"""
        return cassiopeia.riotapi.get_summoner_spell(self.data.spell1) if self.data.spell1 else None

    @property
    def summoner_spell_f(self):
        """SummonerSpell the participant's second summoner spell (the one that this game was pulled using)"""
        return cassiopeia.riotapi.get_summoner_spell(self.data.spell2) if self.data.spell2 else None

    @cassiopeia.type.core.common.lazyproperty
    def stats(self):
        """Stats the participant's stats (the one that this game was pulled using)"""
        return Stats(self.data.stats) if self.data.stats else None

    @property
    def sub_type(self):
        """SubType the game's sub-type"""
        return cassiopeia.type.core.common.SubType(self.data.subType) if self.data.subType else None

    @property
    def side(self):
        """Side the side that the participant was on (the one that this game was pulled using)"""
        return cassiopeia.type.core.common.Side(self.data.teamId) if self.data.teamId else None
###############################
# Dynamic SQLAlchemy bindings #
###############################
def _sa_rebind_all():
    """Re-point each core type at its game DTO class (dynamic SQLAlchemy binding hook)."""
    bindings = (
        (Stats, cassiopeia.type.dto.game.RawStats),
        (Participant, cassiopeia.type.dto.game.Player),
        (Game, cassiopeia.type.dto.game.Game),
    )
    for core_type, dto_type in bindings:
        core_type.dto_type = dto_type
| |
# coding=utf-8
# Copyright 2010, The T5 Authors and HuggingFace Inc.
# Copyright 2020 Google LLC
# Modified from the original HuggingFace version.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import datasets
import json
import logging
import os
import numpy as np
from pathlib import Path
from third_party.models import T5Config, T5ForConditionalGeneration
from third_party.trainers import T5Trainer
from third_party.utils import (
check_output_dir
)
from transformers import AutoTokenizer, HfArgumentParser, set_seed
from transformers.trainer_utils import EvaluationStrategy
from seq2seq.adapters import AdapterController, AutoAdapterConfig
from seq2seq.data import AutoTask
from seq2seq.third_party.utils import TaskCollator
from seq2seq.metrics import build_compute_metrics_fn
from seq2seq.training_args import Seq2SeqTrainingArguments, ModelArguments, DataTrainingArguments, \
AdapterTrainingArguments
from seq2seq.utils import T5CheckpointCallback, freezing_params, get_last_checkpoint_path, create_dir, \
handle_metrics, get_training_args
logger = logging.getLogger(__name__)
def main():
    """Fine-tune and/or evaluate a T5 model (optionally with adapters) over multiple tasks.

    Parses arguments (from CLI, a JSON config file, or a torch.distributed
    launch), builds the model/tokenizer/datasets, trains, then optionally
    reloads the last checkpoint for validation/test evaluation.
    Returns a dict of all collected metrics.
    """
    # See all possible arguments in src/transformers/training_args.py or by passing
    # the --help flag to this script. We now keep distinct sets of args, for a cleaner
    # separation of concerns.
    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments, AdapterTrainingArguments))
    # For running on multiple gpus with torch.distributed.launch, it adds a local_rank parameter. To allow the
    # parser to still use the config file, we add the local_rank to the config file.
    if len(sys.argv) == 3 and sys.argv[1].startswith("--local_rank") and sys.argv[2].endswith(".json"):
        args_dict = json.loads(Path(sys.argv[2]).read_text())
        args_dict.update({'local_rank': int(sys.argv[1].split('=')[-1])})
        model_args, data_args, training_args, adapter_args = parser.parse_dict(args_dict)
    elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args, adapter_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args, adapter_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = T5Config.from_pretrained(
        model_args.config_name if model_args.config_name else \
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    # Mirror any truthy training-arg overrides onto the model config.
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout",
                          "attention_dropout", "fixed_length_emb",
                          "encoder_projection", "encoder_pooling",
                          "projection_length", "only_projection_bottleneck",
                          "concat_projection_token", "train_adapters")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    # Gets the adapter config and updates the specified parameters.
    if training_args.train_adapters:
        adapter_config = AutoAdapterConfig.get(adapter_args.adapter_config_name)
        adapter_config.input_dim = config.d_model
        adapter_config.tasks = data_args.tasks
        extra_adapter_params = ("task_embedding_dir",
                                "task_embedding_dim",
                                "add_layer_norm_before_adapter",
                                "add_layer_norm_after_adapter",
                                "reduction_factor",
                                "hidden_dim",
                                "non_linearity",
                                "train_task_embeddings",
                                "projected_task_embedding_dim",
                                "add_adapters_in_decoder",
                                "add_adapter_in_feed_forward",
                                "add_adapter_in_self_attention",
                                "task_hidden_dim",
                                "conditional_layer_norm",
                                "one_layer_adapter_hyper_net",
                                "adapter_hyper_net_with_bias",
                                "one_layer_adapter_hyper_net_with_linear",
                                "parametric_task_embedding",
                                "conditional_layer_norm_for_T5",
                                "train_adapters_blocks",
                                "remove_original_layer_norms",
                                "unique_hyper_net",
                                "unique_hyper_net_layer_norm")
        for p in extra_adapter_params:
            if hasattr(adapter_args, p) and hasattr(adapter_config, p):
                setattr(adapter_config, p, getattr(adapter_args, p))
            else:
                logger.warning(f"({adapter_config.__class__.__name__}) doesn't have a `{p}` attribute")
        adapter_config.device = training_args.device
    else:
        adapter_config = None
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else \
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    if model_args.not_load_t5_checkpoint:
        # Fresh (randomly initialized) model from the config only.
        model = T5ForConditionalGeneration(config=config, adapter_config=adapter_config)
    else:
        model = T5ForConditionalGeneration.from_pretrained(
            model_args.model_name_or_path,
            from_tf=".ckpt" in model_args.model_name_or_path,
            config=config,
            cache_dir=model_args.cache_dir,
            adapter_config=adapter_config
        )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # freezing the parameters.
    if training_args.do_train:
        freezing_params(model, training_args, model_args, adapter_args)
    if training_args.print_num_parameters:
        logger.info(model)
        for name, param in model.named_parameters():
            if param.requires_grad:
                logger.info("Parameter name %s", name)
        total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        logger.info("Total trainable parameters %s", total_trainable_params)
    # Gets the training/test/validation datasets.
    dataset_class = AutoTask
    if training_args.do_train:
        # Task prefixes are omitted when training adapters (the adapter
        # controller routes per task instead).
        train_datasets = [dataset_class.get(task, seed=data_args.data_seed).get_dataset(
            split="train", n_obs=data_args.n_train, add_prefix=False if training_args.train_adapters else True)
            for task in data_args.tasks]
        dataset_sizes = [len(train_dataset) for train_dataset in train_datasets]
        train_dataset = datasets.concatenate_datasets(train_datasets)
    training_args.remove_unused_columns = False
    eval_datasets = ({task: dataset_class.get(task, seed=data_args.data_seed).get_dataset(
        split="validation", n_obs=data_args.n_val,
        add_prefix=False if training_args.train_adapters else True,
        split_validation_test=training_args.split_validation_test)
        for task in data_args.eval_tasks}
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        {task: dataset_class.get(task, seed=data_args.data_seed).get_dataset(
            split="test", n_obs=data_args.n_test,
            add_prefix=False if training_args.train_adapters else True,
            split_validation_test=training_args.split_validation_test)
            for task in data_args.eval_tasks} if training_args.do_test else None
    )
    # Defines the metrics for evaluation.
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.eval_tasks, tokenizer) if training_args.predict_with_generate else None
    )
    # Defines the trainer.
    trainer = T5Trainer(
        model=model,
        config=config,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_datasets,
        data_collator=TaskCollator(tokenizer, data_args, tpu_num_cores=training_args.tpu_num_cores),
        compute_metrics=None,
        multi_task_compute_metrics=compute_metrics_fn,
        data_args=data_args,
        dataset_sizes=dataset_sizes if training_args.do_train else None,
        callbacks=[T5CheckpointCallback()],
        adapter_config=adapter_config
    )
    if trainer.is_world_process_zero():
        arguments = get_training_args([model_args, data_args, training_args, adapter_args])
        handle_metrics("arguments", arguments, training_args.output_dir, training_args.gcs_bucket)
    # Trains the model.
    if training_args.do_train:
        trainer.train(
            model_path=get_last_checkpoint_path(training_args.output_dir) \
                if (os.path.isdir(training_args.output_dir) and not training_args.optimize_from_scratch) else None,
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            tokenizer.save_pretrained(training_args.output_dir)
    if training_args.save_task_embeddings:
        for task, task_embedding in model.task_embedding_controller.task_to_embeddings.items():
            create_dir(training_args.save_task_embeddings_dir)
            np.save(os.path.join(training_args.save_task_embeddings_dir,
                                 '{}.npy'.format(task)), task_embedding.data.detach().cpu().numpy())
    # Evaluation
    all_metrics = {}
    if training_args.do_eval or training_args.do_test:
        if trainer.is_world_process_zero():
            # By default we load the model from last checkpoint path,
            # in case of saving the model with the best metrics, make sure to
            # set save_total = 1 so the best model is loaded here.
            # if not exists returns the path to the output_dir.
            last_checkpoint_path = get_last_checkpoint_path(training_args.output_dir)
            config = T5Config.from_pretrained(
                last_checkpoint_path,
                cache_dir=model_args.cache_dir)
            model = T5ForConditionalGeneration.from_pretrained(
                last_checkpoint_path,
                # NOTE(review): this checks output_dir rather than
                # last_checkpoint_path for ".ckpt" — presumably intentional,
                # but worth confirming.
                from_tf=".ckpt" in training_args.output_dir,
                config=config,
                cache_dir=model_args.cache_dir,
                adapter_config=adapter_config
            )
            # NOTE: the trainer must be re-defined here; otherwise the
            # HuggingFace code would not use the reloaded (best) checkpoint.
            trainer = T5Trainer(
                model=model,
                config=config,
                args=training_args,
                train_dataset=train_dataset if training_args.do_train else None,
                eval_dataset=eval_datasets,
                data_collator=TaskCollator(tokenizer, data_args, tpu_num_cores=training_args.tpu_num_cores),
                compute_metrics=None,
                multi_task_compute_metrics=compute_metrics_fn,
                data_args=data_args,
                dataset_sizes=dataset_sizes if training_args.do_train else None,
                callbacks=[T5CheckpointCallback()],
                adapter_config=adapter_config
            )
            if training_args.train_adapters:
                if adapter_args.adapter_config_name == "adapter" and data_args.adapters is not None:
                    # Map each evaluation task onto the adapter it should use.
                    for name, sub_module in model.named_modules():
                        task_to_adapter = {eval_task: adapter for eval_task, adapter in
                                           zip(data_args.eval_tasks, data_args.adapters)}
                        if isinstance(sub_module, AdapterController):
                            sub_module.set_task_to_adapter_map(task_to_adapter)
                if adapter_args.adapter_config_name in ["meta-adapter"]:
                    # If this is parametric, then the evaluation task should be part of tasks
                    # and the embeddings needs to be trained.
                    if not adapter_args.parametric_task_embedding:
                        model.task_embedding_controller.set_task_embeddings(eval_datasets.keys(),
                                                                            parametric=adapter_args.parametric_task_embedding)
            if training_args.do_eval:
                metrics = trainer.evaluate(metric_key_prefix="val")
                if trainer.is_world_process_zero():
                    handle_metrics("val", metrics, training_args.output_dir, training_args.gcs_bucket)
                    all_metrics.update(metrics)
            if training_args.do_test:
                metrics = trainer.evaluate(test_dataset, metric_key_prefix="test")
                if trainer.is_world_process_zero():
                    handle_metrics("test", metrics, training_args.output_dir, training_args.gcs_bucket)
                    all_metrics.update(metrics)
    return all_metrics
def _mp_fn(index):
    """Per-process entry point used by ``xla_spawn`` when training on TPUs."""
    # For xla_spawn (TPUs): ``index`` is the per-core ordinal supplied by the
    # launcher; it is ignored because ``main`` reads all of its configuration
    # itself (from argv / environment).
    main()
# Standard script guard: run training/evaluation only when executed directly,
# not when this module is imported (e.g. by xla_spawn, which calls _mp_fn).
if __name__ == "__main__":
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._subscription_client_enums import *
class AcceptOwnershipRequest(msrest.serialization.Model):
    """Request payload for accepting ownership of a subscription.

    :ivar properties: Accept subscription ownership request properties.
    :vartype properties: ~azure.mgmt.subscription.models.AcceptOwnershipRequestProperties
    """

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'AcceptOwnershipRequestProperties'},
    }

    def __init__(self, *, properties: Optional["AcceptOwnershipRequestProperties"] = None, **kwargs):
        """
        :keyword properties: Accept subscription ownership request properties.
        :paramtype properties: ~azure.mgmt.subscription.models.AcceptOwnershipRequestProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
class AcceptOwnershipRequestProperties(msrest.serialization.Model):
    """Properties carried by an accept-subscription-ownership request.

    All required parameters must be populated in order to send to Azure.

    :ivar display_name: Required. The friendly name of the subscription.
    :vartype display_name: str
    :ivar management_group_id: Management group Id for the subscription.
    :vartype management_group_id: str
    :ivar tags: A set of tags. Tags for the subscription.
    :vartype tags: dict[str, str]
    """

    _validation = {
        'display_name': {'required': True},
    }

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'management_group_id': {'key': 'managementGroupId', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        display_name: str,
        management_group_id: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword display_name: Required. The friendly name of the subscription.
        :paramtype display_name: str
        :keyword management_group_id: Management group Id for the subscription.
        :paramtype management_group_id: str
        :keyword tags: A set of tags. Tags for the subscription.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        self.display_name = display_name
        self.management_group_id = management_group_id
        self.tags = tags
class AcceptOwnershipStatusResponse(msrest.serialization.Model):
    """Response describing the accept-ownership status of a subscription.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar subscription_id: Newly created subscription Id.
    :vartype subscription_id: str
    :ivar accept_ownership_state: The accept ownership state of the resource. Possible values
     include: "Pending", "Completed", "Expired".
    :vartype accept_ownership_state: str or ~azure.mgmt.subscription.models.AcceptOwnership
    :ivar billing_owner: UPN of the billing owner.
    :vartype billing_owner: str
    :ivar subscription_tenant_id: Tenant Id of the subscription.
    :vartype subscription_tenant_id: str
    :ivar display_name: The display name of the subscription.
    :vartype display_name: str
    :ivar tags: A set of tags. Tags for the subscription.
    :vartype tags: dict[str, str]
    """

    _validation = {
        'subscription_id': {'readonly': True},
        'accept_ownership_state': {'readonly': True},
        'billing_owner': {'readonly': True},
    }

    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'accept_ownership_state': {'key': 'acceptOwnershipState', 'type': 'str'},
        'billing_owner': {'key': 'billingOwner', 'type': 'str'},
        'subscription_tenant_id': {'key': 'subscriptionTenantId', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        subscription_tenant_id: Optional[str] = None,
        display_name: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword subscription_tenant_id: Tenant Id of the subscription.
        :paramtype subscription_tenant_id: str
        :keyword display_name: The display name of the subscription.
        :paramtype display_name: str
        :keyword tags: A set of tags. Tags for the subscription.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        # Server-populated (readonly) attributes always start as None.
        self.subscription_id = None
        self.accept_ownership_state = None
        self.billing_owner = None
        self.subscription_tenant_id = subscription_tenant_id
        self.display_name = display_name
        self.tags = tags
class BillingAccountPoliciesResponse(msrest.serialization.Model):
    """Policy information for a billing account.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified ID for the policy.
    :vartype id: str
    :ivar name: Policy name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar properties: Billing account policies response properties.
    :vartype properties: ~azure.mgmt.subscription.models.BillingAccountPoliciesResponseProperties
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.subscription.models.SystemData
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'BillingAccountPoliciesResponseProperties'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(self, *, properties: Optional["BillingAccountPoliciesResponseProperties"] = None, **kwargs):
        """
        :keyword properties: Billing account policies response properties.
        :paramtype properties: ~azure.mgmt.subscription.models.BillingAccountPoliciesResponseProperties
        """
        super().__init__(**kwargs)
        # Server-populated (readonly) attributes always start as None.
        self.id = None
        self.name = None
        self.type = None
        self.properties = properties
        self.system_data = None
class BillingAccountPoliciesResponseProperties(msrest.serialization.Model):
    """Properties returned when putting billing account policies.

    :ivar service_tenants: Service tenant for the billing account.
    :vartype service_tenants: list[~azure.mgmt.subscription.models.ServiceTenantResponse]
    :ivar allow_transfers: Determine if the transfers are allowed for the billing account.
    :vartype allow_transfers: bool
    """

    _attribute_map = {
        'service_tenants': {'key': 'serviceTenants', 'type': '[ServiceTenantResponse]'},
        'allow_transfers': {'key': 'allowTransfers', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        service_tenants: Optional[List["ServiceTenantResponse"]] = None,
        allow_transfers: Optional[bool] = None,
        **kwargs
    ):
        """
        :keyword service_tenants: Service tenant for the billing account.
        :paramtype service_tenants: list[~azure.mgmt.subscription.models.ServiceTenantResponse]
        :keyword allow_transfers: Determine if the transfers are allowed for the billing account.
        :paramtype allow_transfers: bool
        """
        super().__init__(**kwargs)
        self.service_tenants = service_tenants
        self.allow_transfers = allow_transfers
class CanceledSubscriptionId(msrest.serialization.Model):
    """Wrapper for the ID of a canceled subscription.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar subscription_id: The ID of the canceled subscription.
    :vartype subscription_id: str
    """

    _validation = {
        'subscription_id': {'readonly': True},
    }

    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated (readonly); always starts as None on the client.
        self.subscription_id = None
class EnabledSubscriptionId(msrest.serialization.Model):
    """Wrapper for the ID of a subscription that is being enabled.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar subscription_id: The ID of the subscriptions that is being enabled.
    :vartype subscription_id: str
    """

    _validation = {
        'subscription_id': {'readonly': True},
    }

    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated (readonly); always starts as None on the client.
        self.subscription_id = None
class ErrorResponse(msrest.serialization.Model):
    """Shape of an error response returned by the service.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs):
        """
        :keyword code: Error code.
        :paramtype code: str
        :keyword message: Error message indicating why the operation failed.
        :paramtype message: str
        """
        super().__init__(**kwargs)
        self.code = code
        self.message = message
class ErrorResponseBody(msrest.serialization.Model):
    """Error body indicating the service could not process the incoming request; the reason is in the error message.

    :ivar error: The details of the error.
    :vartype error: ~azure.mgmt.subscription.models.ErrorResponse
    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorResponse'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        error: Optional["ErrorResponse"] = None,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword error: The details of the error.
        :paramtype error: ~azure.mgmt.subscription.models.ErrorResponse
        :keyword code: Error code.
        :paramtype code: str
        :keyword message: Error message indicating why the operation failed.
        :paramtype message: str
        """
        super().__init__(**kwargs)
        self.error = error
        self.code = code
        self.message = message
class GetTenantPolicyListResponse(msrest.serialization.Model):
    """Paged list of tenant policies.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of tenant policies.
    :vartype value: list[~azure.mgmt.subscription.models.GetTenantPolicyResponse]
    :ivar next_link: The link (url) to the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[GetTenantPolicyResponse]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated (readonly).
        self.value = None
        self.next_link = None
class GetTenantPolicyResponse(msrest.serialization.Model):
    """A single tenant policy resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Policy Id.
    :vartype id: str
    :ivar name: Policy name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar properties: Tenant policy properties.
    :vartype properties: ~azure.mgmt.subscription.models.TenantPolicy
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.subscription.models.SystemData
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'TenantPolicy'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(self, *, properties: Optional["TenantPolicy"] = None, **kwargs):
        """
        :keyword properties: Tenant policy properties.
        :paramtype properties: ~azure.mgmt.subscription.models.TenantPolicy
        """
        super().__init__(**kwargs)
        # Server-populated (readonly) attributes always start as None.
        self.id = None
        self.name = None
        self.type = None
        self.properties = properties
        self.system_data = None
class Location(msrest.serialization.Model):
    """Information about an Azure location.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The fully qualified ID of the location. For example,
     /subscriptions/00000000-0000-0000-0000-000000000000/locations/westus.
    :vartype id: str
    :ivar subscription_id: The subscription ID.
    :vartype subscription_id: str
    :ivar name: The location name.
    :vartype name: str
    :ivar display_name: The display name of the location.
    :vartype display_name: str
    :ivar latitude: The latitude of the location.
    :vartype latitude: str
    :ivar longitude: The longitude of the location.
    :vartype longitude: str
    """

    _validation = {
        'id': {'readonly': True},
        'subscription_id': {'readonly': True},
        'name': {'readonly': True},
        'display_name': {'readonly': True},
        'latitude': {'readonly': True},
        'longitude': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'latitude': {'key': 'latitude', 'type': 'str'},
        'longitude': {'key': 'longitude', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is server-populated (readonly); initialize all to None.
        self.id = None
        self.subscription_id = None
        self.name = None
        self.display_name = None
        self.latitude = None
        self.longitude = None
class LocationListResult(msrest.serialization.Model):
    """Response for a location list operation.

    :ivar value: An array of locations.
    :vartype value: list[~azure.mgmt.subscription.models.Location]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Location]'},
    }

    def __init__(self, *, value: Optional[List["Location"]] = None, **kwargs):
        """
        :keyword value: An array of locations.
        :paramtype value: list[~azure.mgmt.subscription.models.Location]
        """
        super().__init__(**kwargs)
        self.value = value
class Operation(msrest.serialization.Model):
    """A REST API operation exposed by the provider.

    :ivar name: Operation name: {provider}/{resource}/{operation}.
    :vartype name: str
    :ivar is_data_action: Indicates whether the operation is a data action.
    :vartype is_data_action: bool
    :ivar display: The object that represents the operation.
    :vartype display: ~azure.mgmt.subscription.models.OperationDisplay
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        is_data_action: Optional[bool] = None,
        display: Optional["OperationDisplay"] = None,
        **kwargs
    ):
        """
        :keyword name: Operation name: {provider}/{resource}/{operation}.
        :paramtype name: str
        :keyword is_data_action: Indicates whether the operation is a data action.
        :paramtype is_data_action: bool
        :keyword display: The object that represents the operation.
        :paramtype display: ~azure.mgmt.subscription.models.OperationDisplay
        """
        super().__init__(**kwargs)
        self.name = name
        self.is_data_action = is_data_action
        self.display = display
class OperationDisplay(msrest.serialization.Model):
    """Display metadata describing an operation.

    :ivar provider: Service provider: Microsoft.Subscription.
    :vartype provider: str
    :ivar resource: Resource on which the operation is performed: Profile, endpoint, etc.
    :vartype resource: str
    :ivar operation: Operation type: Read, write, delete, etc.
    :vartype operation: str
    :ivar description: Localized friendly description for the operation.
    :vartype description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword provider: Service provider: Microsoft.Subscription.
        :paramtype provider: str
        :keyword resource: Resource on which the operation is performed: Profile, endpoint, etc.
        :paramtype resource: str
        :keyword operation: Operation type: Read, write, delete, etc.
        :paramtype operation: str
        :keyword description: Localized friendly description for the operation.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationListResult(msrest.serialization.Model):
    """Result of listing operations: a page of operations plus a link to the next page.

    :ivar value: List of operations.
    :vartype value: list[~azure.mgmt.subscription.models.Operation]
    :ivar next_link: URL to get the next set of operation list results if there are any.
    :vartype next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["Operation"]] = None, next_link: Optional[str] = None, **kwargs):
        """
        :keyword value: List of operations.
        :paramtype value: list[~azure.mgmt.subscription.models.Operation]
        :keyword next_link: URL to get the next set of operation list results if there are any.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class PutAliasRequest(msrest.serialization.Model):
    """Request payload for creating a new subscription alias.

    :ivar properties: Put alias request properties.
    :vartype properties: ~azure.mgmt.subscription.models.PutAliasRequestProperties
    """

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'PutAliasRequestProperties'},
    }

    def __init__(self, *, properties: Optional["PutAliasRequestProperties"] = None, **kwargs):
        """
        :keyword properties: Put alias request properties.
        :paramtype properties: ~azure.mgmt.subscription.models.PutAliasRequestProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
class PutAliasRequestAdditionalProperties(msrest.serialization.Model):
    """Additional properties for a put-subscription (alias) request.

    :ivar management_group_id: Management group Id for the subscription.
    :vartype management_group_id: str
    :ivar subscription_tenant_id: Tenant Id of the subscription.
    :vartype subscription_tenant_id: str
    :ivar subscription_owner_id: Owner Id of the subscription.
    :vartype subscription_owner_id: str
    :ivar tags: A set of tags. Tags for the subscription.
    :vartype tags: dict[str, str]
    """

    _attribute_map = {
        'management_group_id': {'key': 'managementGroupId', 'type': 'str'},
        'subscription_tenant_id': {'key': 'subscriptionTenantId', 'type': 'str'},
        'subscription_owner_id': {'key': 'subscriptionOwnerId', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        management_group_id: Optional[str] = None,
        subscription_tenant_id: Optional[str] = None,
        subscription_owner_id: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword management_group_id: Management group Id for the subscription.
        :paramtype management_group_id: str
        :keyword subscription_tenant_id: Tenant Id of the subscription.
        :paramtype subscription_tenant_id: str
        :keyword subscription_owner_id: Owner Id of the subscription.
        :paramtype subscription_owner_id: str
        :keyword tags: A set of tags. Tags for the subscription.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        self.management_group_id = management_group_id
        self.subscription_tenant_id = subscription_tenant_id
        self.subscription_owner_id = subscription_owner_id
        self.tags = tags
class PutAliasRequestProperties(msrest.serialization.Model):
    """Properties for a put-subscription (alias) request.

    :ivar display_name: The friendly name of the subscription.
    :vartype display_name: str
    :ivar workload: The workload type of the subscription. It can be either Production or DevTest.
     Possible values include: "Production", "DevTest".
    :vartype workload: str or ~azure.mgmt.subscription.models.Workload
    :ivar billing_scope: Billing scope of the subscription.
     For CustomerLed and FieldLed -
     /billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}
     For PartnerLed - /billingAccounts/{billingAccountName}/customers/{customerName}
     For Legacy EA -
     /billingAccounts/{billingAccountName}/enrollmentAccounts/{enrollmentAccountName}.
    :vartype billing_scope: str
    :ivar subscription_id: This parameter can be used to create alias for existing subscription Id.
    :vartype subscription_id: str
    :ivar reseller_id: Reseller Id.
    :vartype reseller_id: str
    :ivar additional_properties: Put alias request additional properties.
    :vartype additional_properties:
     ~azure.mgmt.subscription.models.PutAliasRequestAdditionalProperties
    """

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'workload': {'key': 'workload', 'type': 'str'},
        'billing_scope': {'key': 'billingScope', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'reseller_id': {'key': 'resellerId', 'type': 'str'},
        'additional_properties': {'key': 'additionalProperties', 'type': 'PutAliasRequestAdditionalProperties'},
    }

    def __init__(
        self,
        *,
        display_name: Optional[str] = None,
        workload: Optional[Union[str, "Workload"]] = None,
        billing_scope: Optional[str] = None,
        subscription_id: Optional[str] = None,
        reseller_id: Optional[str] = None,
        additional_properties: Optional["PutAliasRequestAdditionalProperties"] = None,
        **kwargs
    ):
        """
        :keyword display_name: The friendly name of the subscription.
        :paramtype display_name: str
        :keyword workload: The workload type of the subscription. It can be either Production or
         DevTest. Possible values include: "Production", "DevTest".
        :paramtype workload: str or ~azure.mgmt.subscription.models.Workload
        :keyword billing_scope: Billing scope of the subscription.
         For CustomerLed and FieldLed -
         /billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}
         For PartnerLed - /billingAccounts/{billingAccountName}/customers/{customerName}
         For Legacy EA -
         /billingAccounts/{billingAccountName}/enrollmentAccounts/{enrollmentAccountName}.
        :paramtype billing_scope: str
        :keyword subscription_id: This parameter can be used to create alias for existing
         subscription Id.
        :paramtype subscription_id: str
        :keyword reseller_id: Reseller Id.
        :paramtype reseller_id: str
        :keyword additional_properties: Put alias request additional properties.
        :paramtype additional_properties:
         ~azure.mgmt.subscription.models.PutAliasRequestAdditionalProperties
        """
        super().__init__(**kwargs)
        self.display_name = display_name
        self.workload = workload
        self.billing_scope = billing_scope
        self.subscription_id = subscription_id
        self.reseller_id = reseller_id
        self.additional_properties = additional_properties
class PutTenantPolicyRequestProperties(msrest.serialization.Model):
    """Properties for a put-tenant-policy request.

    :ivar block_subscriptions_leaving_tenant: Blocks the leaving of subscriptions from user's
     tenant.
    :vartype block_subscriptions_leaving_tenant: bool
    :ivar block_subscriptions_into_tenant: Blocks the entering of subscriptions into user's tenant.
    :vartype block_subscriptions_into_tenant: bool
    :ivar exempted_principals: List of user objectIds that are exempted from the set subscription
     tenant policies for the user's tenant.
    :vartype exempted_principals: list[str]
    """

    _attribute_map = {
        'block_subscriptions_leaving_tenant': {'key': 'blockSubscriptionsLeavingTenant', 'type': 'bool'},
        'block_subscriptions_into_tenant': {'key': 'blockSubscriptionsIntoTenant', 'type': 'bool'},
        'exempted_principals': {'key': 'exemptedPrincipals', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        block_subscriptions_leaving_tenant: Optional[bool] = None,
        block_subscriptions_into_tenant: Optional[bool] = None,
        exempted_principals: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword block_subscriptions_leaving_tenant: Blocks the leaving of subscriptions from user's
         tenant.
        :paramtype block_subscriptions_leaving_tenant: bool
        :keyword block_subscriptions_into_tenant: Blocks the entering of subscriptions into user's
         tenant.
        :paramtype block_subscriptions_into_tenant: bool
        :keyword exempted_principals: List of user objectIds that are exempted from the set
         subscription tenant policies for the user's tenant.
        :paramtype exempted_principals: list[str]
        """
        super().__init__(**kwargs)
        self.block_subscriptions_leaving_tenant = block_subscriptions_leaving_tenant
        self.block_subscriptions_into_tenant = block_subscriptions_into_tenant
        self.exempted_principals = exempted_principals
class RenamedSubscriptionId(msrest.serialization.Model):
    """Wrapper for the ID of a subscription that is being renamed.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar subscription_id: The ID of the subscriptions that is being renamed.
    :vartype subscription_id: str
    """

    _validation = {
        'subscription_id': {'readonly': True},
    }

    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Server-populated (readonly); always starts as None on the client.
        self.subscription_id = None
class ServiceTenantResponse(msrest.serialization.Model):
    """Service tenant associated with a billing account.

    :ivar tenant_id: Service tenant id.
    :vartype tenant_id: str
    :ivar tenant_name: Service tenant name.
    :vartype tenant_name: str
    """

    _attribute_map = {
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'tenant_name': {'key': 'tenantName', 'type': 'str'},
    }

    def __init__(self, *, tenant_id: Optional[str] = None, tenant_name: Optional[str] = None, **kwargs):
        """
        :keyword tenant_id: Service tenant id.
        :paramtype tenant_id: str
        :keyword tenant_name: Service tenant name.
        :paramtype tenant_name: str
        """
        super().__init__(**kwargs)
        self.tenant_id = tenant_id
        self.tenant_name = tenant_name
class Subscription(msrest.serialization.Model):
    """Information about a single subscription.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The fully qualified ID for the subscription. For example,
     /subscriptions/00000000-0000-0000-0000-000000000000.
    :vartype id: str
    :ivar subscription_id: The subscription ID.
    :vartype subscription_id: str
    :ivar display_name: The subscription display name.
    :vartype display_name: str
    :ivar state: The subscription state. Possible values are Enabled, Warned, PastDue, Disabled,
     and Deleted. Possible values include: "Enabled", "Warned", "PastDue", "Disabled", "Deleted".
    :vartype state: str or ~azure.mgmt.subscription.models.SubscriptionState
    :ivar subscription_policies: The subscription policies.
    :vartype subscription_policies: ~azure.mgmt.subscription.models.SubscriptionPolicies
    :ivar authorization_source: The authorization source of the request. Valid values are one or
     more combinations of Legacy, RoleBased, Bypassed, Direct and Management. For example, 'Legacy,
     RoleBased'.
    :vartype authorization_source: str
    """

    _validation = {
        'id': {'readonly': True},
        'subscription_id': {'readonly': True},
        'display_name': {'readonly': True},
        'state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'subscription_policies': {'key': 'subscriptionPolicies', 'type': 'SubscriptionPolicies'},
        'authorization_source': {'key': 'authorizationSource', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        subscription_policies: Optional["SubscriptionPolicies"] = None,
        authorization_source: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword subscription_policies: The subscription policies.
        :paramtype subscription_policies: ~azure.mgmt.subscription.models.SubscriptionPolicies
        :keyword authorization_source: The authorization source of the request. Valid values are one or
         more combinations of Legacy, RoleBased, Bypassed, Direct and Management. For example, 'Legacy,
         RoleBased'.
        :paramtype authorization_source: str
        """
        super().__init__(**kwargs)
        # Server-populated (readonly) attributes always start as None.
        self.id = None
        self.subscription_id = None
        self.display_name = None
        self.state = None
        self.subscription_policies = subscription_policies
        self.authorization_source = authorization_source
class SubscriptionAliasListResult(msrest.serialization.Model):
    """Paged list of subscription aliases.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of alias.
    :vartype value: list[~azure.mgmt.subscription.models.SubscriptionAliasResponse]
    :ivar next_link: The link (url) to the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SubscriptionAliasResponse]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated (readonly).
        self.value = None
        self.next_link = None
class SubscriptionAliasResponse(msrest.serialization.Model):
    """Subscription information together with its alias.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified ID for the alias resource.
    :vartype id: str
    :ivar name: Alias ID.
    :vartype name: str
    :ivar type: Resource type, Microsoft.Subscription/aliases.
    :vartype type: str
    :ivar properties: Subscription Alias response properties.
    :vartype properties: ~azure.mgmt.subscription.models.SubscriptionAliasResponseProperties
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~azure.mgmt.subscription.models.SystemData
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'SubscriptionAliasResponseProperties'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(self, *, properties: Optional["SubscriptionAliasResponseProperties"] = None, **kwargs):
        """
        :keyword properties: Subscription Alias response properties.
        :paramtype properties: ~azure.mgmt.subscription.models.SubscriptionAliasResponseProperties
        """
        super().__init__(**kwargs)
        # Server-populated (readonly) attributes always start as None.
        self.id = None
        self.name = None
        self.type = None
        self.properties = properties
        self.system_data = None
class SubscriptionAliasResponseProperties(msrest.serialization.Model):
    """Put subscription creation result properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar subscription_id: Newly created subscription Id.
    :vartype subscription_id: str
    :ivar display_name: The display name of the subscription.
    :vartype display_name: str
    :ivar provisioning_state: The provisioning state of the resource. Possible values include:
     "Accepted", "Succeeded", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.subscription.models.ProvisioningState
    :ivar accept_ownership_url: Url to accept ownership of the subscription.
    :vartype accept_ownership_url: str
    :ivar accept_ownership_state: The accept ownership state of the resource. Possible values
     include: "Pending", "Completed", "Expired".
    :vartype accept_ownership_state: str or ~azure.mgmt.subscription.models.AcceptOwnership
    :ivar billing_scope: Billing scope of the subscription.
     For CustomerLed and FieldLed -
     /billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}
     For PartnerLed - /billingAccounts/{billingAccountName}/customers/{customerName}
     For Legacy EA -
     /billingAccounts/{billingAccountName}/enrollmentAccounts/{enrollmentAccountName}.
    :vartype billing_scope: str
    :ivar workload: The workload type of the subscription. It can be either Production or DevTest.
     Possible values include: "Production", "DevTest".
    :vartype workload: str or ~azure.mgmt.subscription.models.Workload
    :ivar reseller_id: Reseller Id.
    :vartype reseller_id: str
    :ivar subscription_owner_id: Owner Id of the subscription.
    :vartype subscription_owner_id: str
    :ivar management_group_id: The Management Group Id.
    :vartype management_group_id: str
    :ivar created_time: Created Time.
    :vartype created_time: str
    :ivar tags: A set of tags. Tags for the subscription.
    :vartype tags: dict[str, str]
    """

    # Server-assigned fields; never accepted from the caller.
    _validation = {
        'subscription_id': {'readonly': True},
        'accept_ownership_url': {'readonly': True},
        'accept_ownership_state': {'readonly': True},
    }

    # Python attribute -> JSON key / msrest type mapping.
    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'accept_ownership_url': {'key': 'acceptOwnershipUrl', 'type': 'str'},
        'accept_ownership_state': {'key': 'acceptOwnershipState', 'type': 'str'},
        'billing_scope': {'key': 'billingScope', 'type': 'str'},
        'workload': {'key': 'workload', 'type': 'str'},
        'reseller_id': {'key': 'resellerId', 'type': 'str'},
        'subscription_owner_id': {'key': 'subscriptionOwnerId', 'type': 'str'},
        'management_group_id': {'key': 'managementGroupId', 'type': 'str'},
        'created_time': {'key': 'createdTime', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self, *,
        display_name: Optional[str] = None,
        provisioning_state: Optional[Union[str, "ProvisioningState"]] = None,
        billing_scope: Optional[str] = None,
        workload: Optional[Union[str, "Workload"]] = None,
        reseller_id: Optional[str] = None,
        subscription_owner_id: Optional[str] = None,
        management_group_id: Optional[str] = None,
        created_time: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword display_name: The display name of the subscription.
        :paramtype display_name: str
        :keyword provisioning_state: The provisioning state of the resource. Possible values include:
         "Accepted", "Succeeded", "Failed".
        :paramtype provisioning_state: str or ~azure.mgmt.subscription.models.ProvisioningState
        :keyword billing_scope: Billing scope of the subscription.
         For CustomerLed and FieldLed -
         /billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoiceSections/{invoiceSectionName}
         For PartnerLed - /billingAccounts/{billingAccountName}/customers/{customerName}
         For Legacy EA -
         /billingAccounts/{billingAccountName}/enrollmentAccounts/{enrollmentAccountName}.
        :paramtype billing_scope: str
        :keyword workload: The workload type of the subscription. It can be either Production or
         DevTest. Possible values include: "Production", "DevTest".
        :paramtype workload: str or ~azure.mgmt.subscription.models.Workload
        :keyword reseller_id: Reseller Id.
        :paramtype reseller_id: str
        :keyword subscription_owner_id: Owner Id of the subscription.
        :paramtype subscription_owner_id: str
        :keyword management_group_id: The Management Group Id.
        :paramtype management_group_id: str
        :keyword created_time: Created Time.
        :paramtype created_time: str
        :keyword tags: A set of tags. Tags for the subscription.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        # Server-populated fields begin as None.
        self.subscription_id = None
        self.accept_ownership_url = None
        self.accept_ownership_state = None
        # Caller-supplied properties.
        self.display_name = display_name
        self.provisioning_state = provisioning_state
        self.billing_scope = billing_scope
        self.workload = workload
        self.reseller_id = reseller_id
        self.subscription_owner_id = subscription_owner_id
        self.management_group_id = management_group_id
        self.created_time = created_time
        self.tags = tags
class SubscriptionListResult(msrest.serialization.Model):
    """Subscription list operation response.

    All required parameters must be populated in order to send to Azure.

    :ivar value: An array of subscriptions.
    :vartype value: list[~azure.mgmt.subscription.models.Subscription]
    :ivar next_link: Required. The URL to get the next set of results.
    :vartype next_link: str
    """

    # next_link is mandatory on outgoing payloads.
    _validation = {
        'next_link': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Subscription]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, next_link: str, value: Optional[List["Subscription"]] = None, **kwargs):
        """
        :keyword value: An array of subscriptions.
        :paramtype value: list[~azure.mgmt.subscription.models.Subscription]
        :keyword next_link: Required. The URL to get the next set of results.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class SubscriptionName(msrest.serialization.Model):
    """The new name of the subscription.

    :ivar subscription_name: New subscription name.
    :vartype subscription_name: str
    """

    _attribute_map = {
        'subscription_name': {'key': 'subscriptionName', 'type': 'str'},
    }

    def __init__(self, *, subscription_name: Optional[str] = None, **kwargs):
        """
        :keyword subscription_name: New subscription name.
        :paramtype subscription_name: str
        """
        super().__init__(**kwargs)
        self.subscription_name = subscription_name
class SubscriptionPolicies(msrest.serialization.Model):
    """Subscription policies.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar location_placement_id: The subscription location placement ID. The ID indicates which
     regions are visible for a subscription. For example, a subscription with a location placement
     Id of Public_2014-09-01 has access to Azure public regions.
    :vartype location_placement_id: str
    :ivar quota_id: The subscription quota ID.
    :vartype quota_id: str
    :ivar spending_limit: The subscription spending limit. Possible values include: "On", "Off",
     "CurrentPeriodOff".
    :vartype spending_limit: str or ~azure.mgmt.subscription.models.SpendingLimit
    """

    # Every attribute on this model is server-populated.
    _validation = {
        'location_placement_id': {'readonly': True},
        'quota_id': {'readonly': True},
        'spending_limit': {'readonly': True},
    }

    _attribute_map = {
        'location_placement_id': {'key': 'locationPlacementId', 'type': 'str'},
        'quota_id': {'key': 'quotaId', 'type': 'str'},
        'spending_limit': {'key': 'spendingLimit', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        self.location_placement_id = None
        self.quota_id = None
        self.spending_limit = None
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :ivar created_by: The identity that created the resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the resource. Possible values include:
     "User", "Application", "ManagedIdentity", "Key".
    :vartype created_by_type: str or ~azure.mgmt.subscription.models.CreatedByType
    :ivar created_at: The timestamp of resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :vartype last_modified_by_type: str or ~azure.mgmt.subscription.models.CreatedByType
    :ivar last_modified_at: The timestamp of resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """

    # Datetimes serialize as ISO-8601 strings on the wire.
    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(
        self, *,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword created_by: The identity that created the resource.
        :paramtype created_by: str
        :keyword created_by_type: The type of identity that created the resource. Possible values
         include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype created_by_type: str or ~azure.mgmt.subscription.models.CreatedByType
        :keyword created_at: The timestamp of resource creation (UTC).
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_by: The identity that last modified the resource.
        :paramtype last_modified_by: str
        :keyword last_modified_by_type: The type of identity that last modified the resource.
         Possible values include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype last_modified_by_type: str or ~azure.mgmt.subscription.models.CreatedByType
        :keyword last_modified_at: The timestamp of resource last modification (UTC).
        :paramtype last_modified_at: ~datetime.datetime
        """
        super().__init__(**kwargs)
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at
class TenantIdDescription(msrest.serialization.Model):
    """Tenant Id information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The fully qualified ID of the tenant. For example,
     /tenants/00000000-0000-0000-0000-000000000000.
    :vartype id: str
    :ivar tenant_id: The tenant ID. For example, 00000000-0000-0000-0000-000000000000.
    :vartype tenant_id: str
    """

    # Both attributes are server-populated.
    _validation = {
        'id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """ """
        super().__init__(**kwargs)
        self.id = None
        self.tenant_id = None
class TenantListResult(msrest.serialization.Model):
    """Tenant Ids information.

    All required parameters must be populated in order to send to Azure.

    :ivar value: An array of tenants.
    :vartype value: list[~azure.mgmt.subscription.models.TenantIdDescription]
    :ivar next_link: Required. The URL to use for getting the next set of results.
    :vartype next_link: str
    """

    # next_link is mandatory on outgoing payloads.
    _validation = {
        'next_link': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TenantIdDescription]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, next_link: str, value: Optional[List["TenantIdDescription"]] = None, **kwargs):
        """
        :keyword value: An array of tenants.
        :paramtype value: list[~azure.mgmt.subscription.models.TenantIdDescription]
        :keyword next_link: Required. The URL to use for getting the next set of results.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class TenantPolicy(msrest.serialization.Model):
    """Tenant policy.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar policy_id: Policy Id.
    :vartype policy_id: str
    :ivar block_subscriptions_leaving_tenant: Blocks the leaving of subscriptions from user's
     tenant.
    :vartype block_subscriptions_leaving_tenant: bool
    :ivar block_subscriptions_into_tenant: Blocks the entering of subscriptions into user's tenant.
    :vartype block_subscriptions_into_tenant: bool
    :ivar exempted_principals: List of user objectIds that are exempted from the set subscription
     tenant policies for the user's tenant.
    :vartype exempted_principals: list[str]
    """

    # policy_id is assigned by the service.
    _validation = {
        'policy_id': {'readonly': True},
    }

    _attribute_map = {
        'policy_id': {'key': 'policyId', 'type': 'str'},
        'block_subscriptions_leaving_tenant': {'key': 'blockSubscriptionsLeavingTenant', 'type': 'bool'},
        'block_subscriptions_into_tenant': {'key': 'blockSubscriptionsIntoTenant', 'type': 'bool'},
        'exempted_principals': {'key': 'exemptedPrincipals', 'type': '[str]'},
    }

    def __init__(
        self, *,
        block_subscriptions_leaving_tenant: Optional[bool] = None,
        block_subscriptions_into_tenant: Optional[bool] = None,
        exempted_principals: Optional[List[str]] = None,
        **kwargs
    ):
        """
        :keyword block_subscriptions_leaving_tenant: Blocks the leaving of subscriptions from user's
         tenant.
        :paramtype block_subscriptions_leaving_tenant: bool
        :keyword block_subscriptions_into_tenant: Blocks the entering of subscriptions into user's
         tenant.
        :paramtype block_subscriptions_into_tenant: bool
        :keyword exempted_principals: List of user objectIds that are exempted from the set
         subscription tenant policies for the user's tenant.
        :paramtype exempted_principals: list[str]
        """
        super().__init__(**kwargs)
        self.policy_id = None
        self.block_subscriptions_leaving_tenant = block_subscriptions_leaving_tenant
        self.block_subscriptions_into_tenant = block_subscriptions_into_tenant
        self.exempted_principals = exempted_principals
| |
# Copyright 2016 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from datetime import timedelta
import mock
from neutron.tests.unit import testlib_api
from neutron_lib import context
from oslo_db import exception
from networking_mlnx.db import db
from networking_mlnx.db.models import sdn_journal_db
from networking_mlnx.db.models import sdn_maintenance_db
from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const
class DbTestCase(testlib_api.SqlTestCaseLight):
    """Unit tests for the SDN journal/maintenance DB helper functions in
    networking_mlnx.db.db.

    Each test runs against a real (lightweight) SQL session provided by
    SqlTestCaseLight; the journal table is wiped after every test.
    """

    # Canonical journal entry used by most tests:
    # (object_type, object_uuid, operation, data payload).
    UPDATE_ROW = [sdn_const.NETWORK, 'id', sdn_const.PUT,
                  {'test': 'data'}]

    def setUp(self):
        super(DbTestCase, self).setUp()
        self.db_context = context.get_admin_context()
        self.db_session = self.db_context.session
        self.addCleanup(self._db_cleanup)

    def _db_cleanup(self):
        # Remove all journal rows so tests stay isolated from one another.
        self.db_session.query(sdn_journal_db.SdnJournal).delete()

    def _update_row(self, row):
        # Persist in-place modifications made to a row object.
        self.db_session.merge(row)
        self.db_session.flush()

    def _test_validate_updates(self, rows, time_deltas, expected_validations):
        """Create rows, back-date them by time_deltas (hours), then check
        check_for_older_ops against the expected validity of each row."""
        for row in rows:
            db.create_pending_row(self.db_session, *row)
        # update row created_at
        rows = db.get_all_db_rows(self.db_session)
        now = datetime.now()
        for row, time_delta in zip(rows, time_deltas):
            row.created_at = now - timedelta(hours=time_delta)
            self._update_row(row)
        # validate if there are older rows
        for row, expected_valid in zip(rows, expected_validations):
            valid = not db.check_for_older_ops(self.db_session, row)
            self.assertEqual(expected_valid, valid)

    def _test_retry_count(self, retry_num, max_retry,
                          expected_retry_count, expected_state):
        """Drive update_pending_db_row_retry and verify the resulting
        state/retry_count against expectations."""
        # add new pending row
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        # update the row with the requested retry_num
        row = db.get_all_db_rows(self.db_session)[0]
        row.retry_count = retry_num - 1
        db.update_pending_db_row_retry(self.db_session, row, max_retry)
        # validate the state and the retry_count of the row
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(expected_state, row.state)
        self.assertEqual(expected_retry_count, row.retry_count)

    def _test_update_row_state(self, from_state, to_state):
        """Transition a row through from_state then to_state, verifying the
        persisted state after each update."""
        # add new pending row
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        row = db.get_all_db_rows(self.db_session)[0]
        for state in [from_state, to_state]:
            # update the row state
            db.update_db_row_state(self.db_session, row, state)
            # validate the new state
            row = db.get_all_db_rows(self.db_session)[0]
            self.assertEqual(state, row.state)

    def test_validate_updates_same_object_uuid(self):
        # An older pending op on the same object makes the newer one invalid.
        self._test_validate_updates(
            [self.UPDATE_ROW, self.UPDATE_ROW], [1, 0], [True, False])

    def test_validate_updates_same_created_time(self):
        # Identical timestamps: neither row is considered "older".
        self._test_validate_updates(
            [self.UPDATE_ROW, self.UPDATE_ROW], [0, 0], [True, True])

    def test_validate_updates_different_object_uuid(self):
        # Different object uuid: rows do not block each other.
        other_row = list(self.UPDATE_ROW)
        other_row[1] += 'a'
        self._test_validate_updates(
            [self.UPDATE_ROW, other_row], [1, 0], [True, True])

    def test_validate_updates_different_object_type(self):
        # Different object type (and uuid): rows do not block each other.
        other_row = list(self.UPDATE_ROW)
        other_row[0] = sdn_const.PORT
        other_row[1] += 'a'
        self._test_validate_updates(
            [self.UPDATE_ROW, other_row], [1, 0], [True, True])

    def test_get_oldest_pending_row_none_when_no_rows(self):
        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
        self.assertIsNone(row)

    def _test_get_oldest_pending_row_none(self, state):
        # A row in any non-PENDING state must not be returned.
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        row = db.get_all_db_rows(self.db_session)[0]
        row.state = state
        self._update_row(row)
        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
        self.assertIsNone(row)

    def test_get_oldest_pending_row_none_when_row_processing(self):
        self._test_get_oldest_pending_row_none(sdn_const.PROCESSING)

    def test_get_oldest_pending_row_none_when_row_failed(self):
        self._test_get_oldest_pending_row_none(sdn_const.FAILED)

    def test_get_oldest_pending_row_none_when_row_completed(self):
        self._test_get_oldest_pending_row_none(sdn_const.COMPLETED)

    def test_get_oldest_pending_row_none_when_row_monitoring(self):
        self._test_get_oldest_pending_row_none(sdn_const.MONITORING)

    def test_get_oldest_pending_row(self):
        # Fetching the oldest pending row also locks it (state -> PROCESSING).
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
        self.assertIsNotNone(row)
        self.assertEqual(sdn_const.PROCESSING, row.state)

    def test_get_oldest_pending_row_order(self):
        # The row with the earliest last_retried timestamp wins.
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        older_row = db.get_all_db_rows(self.db_session)[0]
        older_row.last_retried -= timedelta(minutes=1)
        self._update_row(older_row)
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
        self.assertEqual(older_row, row)

    def test_get_all_monitoring_db_row_by_oldest_order(self):
        # Monitoring rows are returned oldest-first by last_retried.
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        older_row = db.get_all_db_rows(self.db_session)[1]
        older_row.last_retried -= timedelta(minutes=1)
        older_row.state = sdn_const.MONITORING
        self._update_row(older_row)
        newer_row = db.get_all_db_rows(self.db_session)[0]
        newer_row.state = sdn_const.MONITORING
        self._update_row(newer_row)
        rows = db.get_all_monitoring_db_row_by_oldest(self.db_session)
        self.assertEqual(older_row, rows[0])
        self.assertEqual(newer_row, rows[1])

    def test_get_oldest_pending_row_when_deadlock(self):
        # A DBDeadlock on the first state update should be retried once.
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        update_mock = (
            mock.MagicMock(side_effect=(exception.DBDeadlock, mock.DEFAULT)))
        # Mocking is mandatory to achieve a deadlock regardless of the DB
        # backend being used when running the tests
        with mock.patch.object(db, 'update_db_row_state', new=update_mock):
            row = db.get_oldest_pending_db_row_with_lock(self.db_session)
            self.assertIsNotNone(row)
        self.assertEqual(2, update_mock.call_count)

    def _test_delete_rows_by_state_and_time(self, last_retried, row_retention,
                                            state, expected_rows):
        """Back-date a row by last_retried seconds, set its state, purge
        COMPLETED rows older than row_retention seconds, and verify how many
        rows survive."""
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        # update state and last retried
        row = db.get_all_db_rows(self.db_session)[0]
        row.state = state
        row.last_retried = row.last_retried - timedelta(seconds=last_retried)
        self._update_row(row)
        db.delete_rows_by_state_and_time(self.db_session,
                                         sdn_const.COMPLETED,
                                         timedelta(seconds=row_retention))
        # validate the number of rows in the journal
        rows = db.get_all_db_rows(self.db_session)
        self.assertEqual(expected_rows, len(rows))

    def test_delete_completed_rows_no_new_rows(self):
        # Row is younger than the retention window -> kept.
        self._test_delete_rows_by_state_and_time(0, 10, sdn_const.COMPLETED, 1)

    def test_delete_completed_rows_one_new_row(self):
        # Row is older than the retention window -> purged.
        self._test_delete_rows_by_state_and_time(6, 5, sdn_const.COMPLETED, 0)

    def test_delete_completed_rows_wrong_state(self):
        # Only COMPLETED rows are purged; a PENDING row is kept.
        self._test_delete_rows_by_state_and_time(10, 8, sdn_const.PENDING, 1)

    def test_valid_retry_count(self):
        self._test_retry_count(1, 1, 1, sdn_const.PENDING)

    def test_invalid_retry_count(self):
        # Exceeding max_retry flips the row to FAILED.
        self._test_retry_count(2, 1, 1, sdn_const.FAILED)

    def test_update_row_state_to_pending(self):
        self._test_update_row_state(sdn_const.PROCESSING, sdn_const.PENDING)

    def test_update_row_state_to_processing(self):
        self._test_update_row_state(sdn_const.PENDING, sdn_const.PROCESSING)

    def test_update_row_state_to_failed(self):
        self._test_update_row_state(sdn_const.PROCESSING, sdn_const.FAILED)

    def test_update_row_state_to_monitoring(self):
        self._test_update_row_state(sdn_const.PROCESSING, sdn_const.MONITORING)

    def test_update_row_state_to_completed(self):
        self._test_update_row_state(sdn_const.PROCESSING, sdn_const.COMPLETED)

    def test_update_row_job_id(self):
        # add new pending row
        expected_job_id = 'job_id'
        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
        row = db.get_all_db_rows(self.db_session)[0]
        db.update_db_row_job_id(self.db_session, row, expected_job_id)
        row = db.get_all_db_rows(self.db_session)[0]
        self.assertEqual(expected_job_id, row.job_id)

    def _test_maintenance_lock_unlock(self, db_func, existing_state,
                                      expected_state, expected_result):
        """Seed the maintenance table with existing_state, apply db_func
        (lock/unlock), and verify both its return value and the stored state."""
        row = sdn_maintenance_db.SdnMaintenance(id='test',
                                                state=existing_state)
        self.db_session.add(row)
        self.db_session.flush()
        self.assertEqual(expected_result, db_func(self.db_session))
        row = self.db_session.query(sdn_maintenance_db.SdnMaintenance).one()
        self.assertEqual(expected_state, row['state'])

    def test_lock_maintenance(self):
        self._test_maintenance_lock_unlock(db.lock_maintenance,
                                           sdn_const.PENDING,
                                           sdn_const.PROCESSING,
                                           True)

    def test_lock_maintenance_fails_when_processing(self):
        # Cannot lock when maintenance is already running.
        self._test_maintenance_lock_unlock(db.lock_maintenance,
                                           sdn_const.PROCESSING,
                                           sdn_const.PROCESSING,
                                           False)

    def test_unlock_maintenance(self):
        self._test_maintenance_lock_unlock(db.unlock_maintenance,
                                           sdn_const.PROCESSING,
                                           sdn_const.PENDING,
                                           True)

    def test_unlock_maintenance_fails_when_pending(self):
        # Cannot unlock when maintenance is not running.
        self._test_maintenance_lock_unlock(db.unlock_maintenance,
                                           sdn_const.PENDING,
                                           sdn_const.PENDING,
                                           False)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
import jsonpickle
from cairis.core.Vulnerability import Vulnerability
from cairis.core.VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Robin Quetin, Shamal Faily'
class VulnerabilityAPITests(CairisDaemonTestCase):
    """Integration tests for the CAIRIS /api/vulnerabilities REST endpoints
    (GET all, GET by name, POST, PUT, DELETE) against the NeuroGrid exemplar
    model."""

    @classmethod
    def setUpClass(cls):
        # Load the NeuroGrid exemplar model once for the whole test class.
        importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')

    def setUp(self):
        self.logger = logging.getLogger(__name__)
        # Fixture names that exist in the imported NeuroGrid model.
        self.existing_vulnerability_name = 'Replay vulnerability'
        self.existing_environment_name = 'Stroke'
        self.existing_asset_names = ['Clinical data', 'Data node']
        self.vulnerability_class = Vulnerability.__module__+'.'+Vulnerability.__name__

    def test_get_all(self):
        """GET /api/vulnerabilities returns a non-empty list."""
        method = 'test_get_all'
        rv = self.app.get('/api/vulnerabilities?session_id=test')
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        vulnerabilities = jsonpickle.decode(responseData)
        self.assertIsNotNone(vulnerabilities, 'No results after deserialization')
        self.assertIsInstance(vulnerabilities, list, 'The result is not a list as expected')
        self.assertGreater(len(vulnerabilities), 0, 'No vulnerabilities in the dictionary')
        self.logger.info('[%s] Vulnerabilities found: %d', method, len(vulnerabilities))
        vulnerability = vulnerabilities[0]
        self.logger.info('[%s] First vulnerability: %s\n', method, vulnerability['theName'])

    def test_get_by_name(self):
        """GET /api/vulnerabilities/name/<name> returns the named object."""
        method = 'test_get_by_name'
        url = '/api/vulnerabilities/name/%s?session_id=test' % quote(self.existing_vulnerability_name)
        rv = self.app.get(url)
        self.assertIsNotNone(rv.data, 'No response')
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        self.logger.debug('[%s] Response data: %s', method, responseData)
        vulnerability = jsonpickle.decode(responseData)
        self.assertIsNotNone(vulnerability, 'No results after deserialization')
        self.logger.info('[%s] Vulnerability: %s\n', method, vulnerability['theName'])

    def test_delete(self):
        """DELETE removes a vulnerability that was just created via POST."""
        method = 'test_delete'
        url = '/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theName)
        new_vulnerability_body = self.prepare_json()
        # Ensure a clean slate, create the object, then delete it.
        self.app.delete(url)
        self.logger.info('[%s] Object to delete: %s', method, new_vulnerability_body)
        self.app.post('/api/vulnerabilities', content_type='application/json', data=new_vulnerability_body)
        self.logger.info('[%s] URL: %s', method, url)
        rv = self.app.delete(url)
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        self.logger.info('[%s] Response data: %s', method, responseData)
        self.assertIsNotNone(responseData, 'No response')
        json_resp = jsonpickle.decode(responseData)
        self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
        message = json_resp.get('message', None)
        self.assertIsNotNone(message, 'No message in response')
        self.logger.info('[%s] Message: %s\n', method, message)

    def test_post(self):
        """POST creates a new vulnerability and returns a status message."""
        method = 'test_post'
        url = '/api/vulnerabilities'
        self.logger.info('[%s] URL: %s', method, url)
        new_vulnerability_body = self.prepare_json()
        # Delete any leftover object from a previous run before posting.
        self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theName))
        rv = self.app.post(url, content_type='application/json', data=new_vulnerability_body)
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        self.logger.debug('[%s] Response data: %s', method, responseData)
        json_resp = jsonpickle.decode(responseData)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        msg = json_resp.get('message', None)
        self.assertIsNotNone(msg, 'No message returned')
        self.logger.info('[%s] Message: %s\n', method, msg)
        # Clean up the object created by this test.
        rv = self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theName))

    def test_put(self):
        """POST a vulnerability, PUT a renamed version, then confirm the
        rename took effect via GET."""
        method = 'test_put'
        url = '/api/vulnerabilities'
        self.logger.info('[%s] URL: %s', method, url)
        new_vulnerability_body = self.prepare_json()
        rv = self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theName))
        rv = self.app.post(url, content_type='application/json', data=new_vulnerability_body)
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        self.logger.debug('[%s] Response data: %s', method, responseData)
        json_resp = jsonpickle.decode(responseData)
        self.assertIsNotNone(json_resp, 'No results after deserialization')
        msg = json_resp.get('message', None)
        self.assertIsNotNone(msg, 'No message returned')
        self.logger.info('[%s] Message: %s', method, msg)
        # Rename the object via PUT against the original name.
        vulnerability_to_update = self.prepare_new_vulnerability()
        vulnerability_to_update.theName = 'Edited test vulnerability'
        upd_env_body = self.prepare_json(vulnerability=vulnerability_to_update)
        rv = self.app.put('/api/vulnerabilities/name/%s?session_id=test' % quote(self.prepare_new_vulnerability().theName), data=upd_env_body, content_type='application/json')
        self.assertIsNotNone(rv.data, 'No response')
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        json_resp = jsonpickle.decode(responseData)
        self.assertIsNotNone(json_resp)
        self.assertIsInstance(json_resp, dict)
        message = json_resp.get('message', None)
        self.assertIsNotNone(message, 'No message in response')
        self.logger.info('[%s] Message: %s', method, message)
        self.assertGreater(message.find('updated'), -1, 'The vulnerability was not successfully updated')
        # Fetch under the new name to verify the update persisted.
        rv = self.app.get('/api/vulnerabilities/name/%s?session_id=test' % quote(vulnerability_to_update.theName))
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        upd_vulnerability = jsonpickle.decode(responseData)
        self.assertIsNotNone(upd_vulnerability, 'Unable to decode JSON data')
        self.logger.debug('[%s] Response data: %s', method, responseData)
        self.logger.info('[%s] Vulnerability: %s\n', method, upd_vulnerability['theName'])
        # Clean up the renamed object.
        rv = self.app.delete('/api/vulnerabilities/name/%s?session_id=test' % quote(vulnerability_to_update.theName))

    def prepare_new_vulnerability(self):
        """Build (but do not persist) a test Vulnerability bound to the
        existing environment and assets from the exemplar model."""
        new_vulnerability_prop = VulnerabilityEnvironmentProperties(
            environmentName=self.existing_environment_name,
            severity='Critical',
            assets=self.existing_asset_names
        )
        new_vulnerability = Vulnerability(
            vulId=-1,
            vulName='Test Vulnerability',
            vulDesc='This is a test vulnerability',
            vulType='Design',
            tags=[],
            cProps=[new_vulnerability_prop]
        )
        new_vulnerability.theEnvironmentDictionary = {}
        return new_vulnerability

    def prepare_dict(self, vulnerability=None):
        """Wrap a Vulnerability in the request envelope the API expects."""
        if vulnerability is None:
            vulnerability = self.prepare_new_vulnerability()
        else:
            assert isinstance(vulnerability, Vulnerability)
        return {
            'session_id': 'test',
            'object': vulnerability,
        }

    def prepare_json(self, data_dict=None, vulnerability=None):
        """Serialize the request envelope to JSON with jsonpickle."""
        if data_dict is None:
            data_dict = self.prepare_dict(vulnerability=vulnerability)
        else:
            assert isinstance(data_dict, dict)
        new_vulnerability_body = jsonpickle.encode(data_dict)
        self.logger.info('JSON data: %s', new_vulnerability_body)
        return new_vulnerability_body
| |
from PyQt4.QtGui import QGraphicsView, QPainter
from ..scene import CanvasScene
from .. import items
from ... import scheme
from ...gui.test import QAppTestCase
class TestScene(QAppTestCase):
    """Tests for CanvasScene: direct item add/remove, scheme-driven item
    management, and scheme construction (editing) through the scene."""

    def setUp(self):
        QAppTestCase.setUp(self)
        # A scene shown inside a live QGraphicsView so item geometry is real.
        self.scene = CanvasScene()
        self.view = QGraphicsView(self.scene)
        self.view.setRenderHints(QPainter.Antialiasing | \
                                 QPainter.TextAntialiasing)
        self.view.show()
        self.view.resize(400, 300)

    def test_scene(self):
        """Test basic scene functionality.
        """
        file_desc, disc_desc, bayes_desc = self.widget_desc()
        file_item = items.NodeItem(file_desc)
        disc_item = items.NodeItem(disc_desc)
        bayes_item = items.NodeItem(bayes_desc)
        # add_node_item returns the item it was given.
        file_item = self.scene.add_node_item(file_item)
        disc_item = self.scene.add_node_item(disc_item)
        bayes_item = self.scene.add_node_item(bayes_item)
        # Remove a node
        self.scene.remove_node_item(bayes_item)
        self.assertSequenceEqual(self.scene.node_items(),
                                 [file_item, disc_item])
        # And add it again
        self.scene.add_node_item(bayes_item)
        self.assertSequenceEqual(self.scene.node_items(),
                                 [file_item, disc_item, bayes_item])
        # Adding the same item again should raise an exception
        with self.assertRaises(ValueError):
            self.scene.add_node_item(bayes_item)
        # Add links
        link1 = self.scene.new_link_item(file_item, "Data", disc_item, "Data")
        link2 = self.scene.new_link_item(disc_item, "Data", bayes_item, "Data")
        link1a = self.scene.add_link_item(link1)
        link2a = self.scene.add_link_item(link2)
        self.assertEqual(link1, link1a)
        self.assertEqual(link2, link2a)
        self.assertSequenceEqual(self.scene.link_items(), [link1, link2])
        # Remove links
        self.scene.remove_link_item(link2)
        self.scene.remove_link_item(link1)
        self.assertSequenceEqual(self.scene.link_items(), [])
        # Removal must detach the link items from their end points.
        self.assertTrue(link1.sourceItem is None and link1.sinkItem is None)
        self.assertTrue(link2.sourceItem is None and link2.sinkItem is None)
        # All anchors created for the links must be gone as well.
        self.assertSequenceEqual(file_item.outputAnchors(), [])
        self.assertSequenceEqual(disc_item.inputAnchors(), [])
        self.assertSequenceEqual(disc_item.outputAnchors(), [])
        # NOTE(review): bayes_item.inputAnchors() is not checked here, only
        # its (never linked) output anchors — possibly an oversight; confirm.
        self.assertSequenceEqual(bayes_item.outputAnchors(), [])
        # And add one link again
        link1 = self.scene.new_link_item(file_item, "Data", disc_item, "Data")
        link1 = self.scene.add_link_item(link1)
        self.assertSequenceEqual(self.scene.link_items(), [link1])
        self.assertTrue(file_item.outputAnchors())
        self.assertTrue(disc_item.inputAnchors())
        # Enters the Qt event loop (interactive inspection of the window).
        self.app.exec_()

    def test_scene_with_scheme(self):
        """Test scene through modifying the scheme.
        """
        test_scheme = scheme.Scheme()
        self.scene.set_scheme(test_scheme)
        # Mirror the scene's item signals into plain lists so the test can
        # compare signal-driven state with the scene's reported items.
        node_items = []
        link_items = []
        self.scene.node_item_added.connect(node_items.append)
        self.scene.node_item_removed.connect(node_items.remove)
        self.scene.link_item_added.connect(link_items.append)
        self.scene.link_item_removed.connect(link_items.remove)
        file_desc, disc_desc, bayes_desc = self.widget_desc()
        file_node = scheme.SchemeNode(file_desc)
        disc_node = scheme.SchemeNode(disc_desc)
        bayes_node = scheme.SchemeNode(bayes_desc)
        nodes = [file_node, disc_node, bayes_node]
        test_scheme.add_node(file_node)
        test_scheme.add_node(disc_node)
        test_scheme.add_node(bayes_node)
        self.assertTrue(len(self.scene.node_items()) == 3)
        self.assertSequenceEqual(self.scene.node_items(), node_items)
        for node, item in zip(nodes, node_items):
            self.assertIs(item, self.scene.item_for_node(node))
        # Remove a widget
        test_scheme.remove_node(bayes_node)
        self.assertTrue(len(self.scene.node_items()) == 2)
        self.assertSequenceEqual(self.scene.node_items(), node_items)
        # And add it again
        test_scheme.add_node(bayes_node)
        self.assertTrue(len(self.scene.node_items()) == 3)
        self.assertSequenceEqual(self.scene.node_items(), node_items)
        # Add links
        link1 = test_scheme.new_link(file_node, "Data", disc_node, "Data")
        link2 = test_scheme.new_link(disc_node, "Data", bayes_node, "Data")
        self.assertTrue(len(self.scene.link_items()) == 2)
        self.assertSequenceEqual(self.scene.link_items(), link_items)
        # Remove links
        test_scheme.remove_link(link1)
        test_scheme.remove_link(link2)
        self.assertTrue(len(self.scene.link_items()) == 0)
        self.assertSequenceEqual(self.scene.link_items(), link_items)
        # And add one link again
        test_scheme.add_link(link1)
        self.assertTrue(len(self.scene.link_items()) == 1)
        self.assertSequenceEqual(self.scene.link_items(), link_items)
        self.app.exec_()

    def test_scheme_construction(self):
        """Test construction (editing) of the scheme through the scene.
        """
        test_scheme = scheme.Scheme()
        self.scene.set_scheme(test_scheme)
        node_items = []
        link_items = []
        self.scene.node_item_added.connect(node_items.append)
        self.scene.node_item_removed.connect(node_items.remove)
        self.scene.link_item_added.connect(link_items.append)
        self.scene.link_item_removed.connect(link_items.remove)
        file_desc, disc_desc, bayes_desc = self.widget_desc()
        file_node = scheme.SchemeNode(file_desc)
        # add_node only adds to the scene; commit_scheme_node pushes the
        # node into the underlying scheme.
        file_item = self.scene.add_node(file_node)
        self.scene.commit_scheme_node(file_node)
        self.assertSequenceEqual(self.scene.node_items(), [file_item])
        self.assertSequenceEqual(node_items, [file_item])
        self.assertSequenceEqual(test_scheme.nodes, [file_node])
        disc_node = scheme.SchemeNode(disc_desc)
        bayes_node = scheme.SchemeNode(bayes_desc)
        disc_item = self.scene.add_node(disc_node)
        bayes_item = self.scene.add_node(bayes_node)
        self.assertSequenceEqual(self.scene.node_items(),
                                 [file_item, disc_item, bayes_item])
        self.assertSequenceEqual(self.scene.node_items(), node_items)
        # The scheme is still the same.
        self.assertSequenceEqual(test_scheme.nodes, [file_node])
        # Remove items
        self.scene.remove_node(disc_node)
        self.scene.remove_node(bayes_node)
        self.assertSequenceEqual(self.scene.node_items(), [file_item])
        self.assertSequenceEqual(node_items, [file_item])
        self.assertSequenceEqual(test_scheme.nodes, [file_node])
        # Add them again this time also in the scheme.
        disc_item = self.scene.add_node(disc_node)
        bayes_item = self.scene.add_node(bayes_node)
        self.scene.commit_scheme_node(disc_node)
        self.scene.commit_scheme_node(bayes_node)
        self.assertSequenceEqual(self.scene.node_items(),
                                 [file_item, disc_item, bayes_item])
        self.assertSequenceEqual(self.scene.node_items(), node_items)
        self.assertSequenceEqual(test_scheme.nodes,
                                 [file_node, disc_node, bayes_node])
        link1 = scheme.SchemeLink(file_node, "Data", disc_node, "Data")
        link2 = scheme.SchemeLink(disc_node, "Data", bayes_node, "Data")
        link_item1 = self.scene.add_link(link1)
        link_item2 = self.scene.add_link(link2)
        self.assertSequenceEqual(self.scene.link_items(),
                                 [link_item1, link_item2])
        self.assertSequenceEqual(self.scene.link_items(), link_items)
        # Links exist in the scene but not yet in the scheme.
        self.assertSequenceEqual(test_scheme.links, [])
        # Commit the links
        self.scene.commit_scheme_link(link1)
        self.scene.commit_scheme_link(link2)
        self.assertSequenceEqual(self.scene.link_items(),
                                 [link_item1, link_item2])
        self.assertSequenceEqual(self.scene.link_items(), link_items)
        self.assertSequenceEqual(test_scheme.links,
                                 [link1, link2])
        self.app.exec_()

    def widget_desc(self):
        """Return three widget descriptions (File, Discretize, NaiveBayes)
        from the small testing registry."""
        from ...registry.tests import small_testing_registry
        reg = small_testing_registry()
        file_desc = reg.widget(
            "Orange.widgets.data.owfile.OWFile"
        )
        discretize_desc = reg.widget(
            "Orange.widgets.data.owdiscretize.OWDiscretize"
        )
        bayes_desc = reg.widget(
            "Orange.widgets.classify.ownaivebayes.OWNaiveBayes"
        )
        return file_desc, discretize_desc, bayes_desc
| |
# -*- coding: utf-8 -*-
"""
Handles the queries.
"""
# redisco imports
import redisco
from redisco.containers import SortedSet, Set, List
from redisco.models.exceptions import AttributeNotIndexed
# Model Set
class ModelSet(Set):
    """Lazy, chainable query set over instances of ``model_class``.

    Criteria (filters, exclusions, zfilters, ordering, limit) are
    accumulated on clones and only resolved against Redis when the
    member ids are actually needed (see the ``_set`` property).
    """

    def __init__(self, model_class):
        self.model_class = model_class
        # Key of the set holding all instance ids of the model.
        self.key = model_class._all_key
        # We access directly _meta as .db is a property and should be
        # accessed from an instance, not a Class
        self._db = model_class._meta['db'] or redisco.get_client()
        self._filters = {}           # attribute -> value equality filters
        self._exclusions = {}        # attribute -> value exclusion filters
        self._zfilters = []          # range filters (attr__op=value)
        self._ordering = []          # list of (field, alpha) ordering specs
        self._limit = None           # max number of results, or None
        self._offset = None          # result offset, or None
        self._return_values = False  # True -> return raw hashes, not instances

    #################
    # MAGIC METHODS #
    #################
def __getitem__(self, index):
    """
    Will look in _set to get the id and simply return the instance of the model.
    """
    if isinstance(index, slice):
        # Fetch all hashes for the slice in a single round trip.
        pipeline = self._db.pipeline()
        for id in self._set[index]:
            pipeline.hgetall(self.model_class.instance_key(id))
        rawdata = pipeline.execute()
        if self._return_values:
            # .values() mode: hand back the raw hash dicts.
            return rawdata
        else:
            instances = []
            for id, rd in zip(self._set[index], rawdata):
                instances.append(self.model_class.load_from_raw_data(id, rd))
            return instances
    else:
        id = self._set[index]
        if id:
            return self._get_item_with_id(id)
        else:
            raise IndexError
def __repr__(self):
    """Render at most the first 30 members of the collection."""
    shown = self._set[:30] if len(self._set) > 30 else self._set
    members = [self._get_item_with_id(member_id) for member_id in shown]
    return "%s" % members
def __iter__(self):
    """Yield a model instance for each id in the resolved set."""
    return (self._get_item_with_id(member_id) for member_id in self._set)
def __len__(self):
    # Size of the resolved id set.
    return len(self._set)

def __contains__(self, val):
    # Membership is tested by the model instance's id.
    return val.id in self._set
##########################################
# METHODS THAT RETURN A SET OF INSTANCES #
##########################################
def get_by_id(self, id):
    """
    Returns the object defined by ``id``.
    :param id: the ``id`` of the objects to lookup.
    :returns: The object instance or None if not found.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...
    >>> f = Foo(name="Einstein")
    >>> f.save()
    True
    >>> Foo.objects.get_by_id(f.id) == f
    True
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    # With active criteria, only return the object if it matches them.
    if (self._filters or self._exclusions or self._zfilters) and str(id) not in self._set:
        return
    return self._get_item_with_id(id)
def get(self, **kwargs):
    # Shorthand for filter(**kwargs).first(); returns None when no match.
    return self.filter(**kwargs).first()
def first(self):
    """
    Return the first object of a collection, or None when it is empty.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...
    >>> f = Foo(name="toto")
    >>> f.save()
    True
    >>> Foo.objects.filter(name="toto").first() # doctest: +ELLIPSIS
    <Foo:...>
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    try:
        return self.limit(1)[0]
    except IndexError:
        return None
# Alias so qs.count() behaves exactly like len(qs).
count = __len__

#####################################
# METHODS THAT MODIFY THE MODEL SET #
#####################################
def values(self):
    """Return a clone that yields raw hash dictionaries instead of
    model-instance objects."""
    copy = self._clone()
    copy._return_values = True
    return copy
def filter(self, **kwargs):
    """
    Filter a collection on criteria
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...
    >>> Foo(name="toto").save()
    True
    >>> Foo(name="toto").save()
    True
    >>> Foo.objects.filter() # doctest: +ELLIPSIS
    [<Foo:...>, <Foo:...>]
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    clone = self._clone()
    # Bug fix: _clone() shares the parent's filter dict by reference, so
    # updating it in place leaked new criteria into the parent ModelSet.
    # Copy before mutating.
    clone._filters = dict(clone._filters) if clone._filters else {}
    clone._filters.update(kwargs)
    return clone
def exclude(self, **kwargs):
    """
    Exclude a collection within a lookup.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...     exclude_me = models.BooleanField()
    ...
    >>> Foo(name="Einstein").save()
    True
    >>> Foo(name="Edison", exclude_me=True).save()
    True
    >>> Foo.objects.exclude(exclude_me=True).first().name
    'Einstein'
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    clone = self._clone()
    # Bug fix: _clone() shares the exclusion dict by reference; copy it
    # before mutating so the parent ModelSet is not modified.
    clone._exclusions = dict(clone._exclusions) if clone._exclusions else {}
    clone._exclusions.update(kwargs)
    return clone
def zfilter(self, **kwargs):
    """Range-filter on a zindexable attribute, e.g. ``zfilter(age__lt=20)``.

    The key must be of the form ``attr__op`` (see ``_add_zfilters`` for
    the supported operators).
    """
    clone = self._clone()
    # Bug fix: _clone() shares the zfilter list by reference; copy it
    # before appending so the parent ModelSet is not modified.
    clone._zfilters = list(clone._zfilters) if clone._zfilters else []
    clone._zfilters.append(kwargs)
    return clone
# this should only be called once
def order(self, field):
    """
    Enable ordering in collections when doing a lookup.
    .. Warning:: This should only be called once per lookup.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...     exclude_me = models.BooleanField()
    ...
    >>> Foo(name="Abba").save()
    True
    >>> Foo(name="Bbba").save()
    True
    >>> Foo(name="Zztop").save()
    True
    >>> Foo.objects.all().order("-name")[0].name
    'Zztop'
    >>> Foo.objects.all().order("-name").first().name
    'Zztop'
    >>> Foo.objects.all().order("name")[0].name
    'Abba'
    >>> Foo.objects.all().order("name").first().name
    'Abba'
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    fname = field.lstrip('-')
    if fname not in self.model_class._indices:
        raise ValueError("Order parameter should be an indexed attribute.")
    # Sort lexicographically unless the attribute is numeric (zindexable).
    alpha = True
    if fname in self.model_class._attributes:
        v = self.model_class._attributes[fname]
        alpha = not v.zindexable
    clone = self._clone()
    # Bug fix: _clone() shares the ordering list by reference; copy it
    # before appending so the parent ModelSet is not modified.
    clone._ordering = list(clone._ordering) if clone._ordering else []
    clone._ordering.append((field, alpha,))
    return clone
def limit(self, n, offset=0):
    """Restrict the lookup to *n* elements starting at *offset*."""
    clone = self._clone()
    clone._limit, clone._offset = n, offset
    return clone
def create(self, **kwargs):
    """
    Create, save and return an object of the class; None if saving fails.
    .. Note:: This is the same as creating an instance of the class and saving it.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...
    >>> Foo.objects.create(name="Obama") # doctest: +ELLIPSIS
    <Foo:...>
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    obj = self.model_class(**kwargs)
    return obj if obj.save() else None
def all(self):
    """
    Return all elements of the collection.
    """
    # A fresh clone carrying the same criteria; nothing extra is filtered.
    return self._clone()
def get_or_create(self, **kwargs):
    """
    Return an element of the collection or create it if necessary.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute()
    ...
    >>> new_obj = Foo.objects.get_or_create(name="Obama")
    >>> get_obj = Foo.objects.get_or_create(name="Obama")
    >>> new_obj == get_obj
    True
    >>> [f.delete() for f in Foo.objects.all()] # doctest: +ELLIPSIS
    [...]
    """
    # Only indexed attributes can be used for the lookup.
    indexed = {k: v for k, v in kwargs.items()
               if k in self.model_class._indices}
    found = self.filter(**indexed).first()
    return found if found else self.create(**kwargs)
def get_indexed_values(self, attribute_name):
    """
    Return indexed values for a model attribute in unsorted form.
    :raises AttributeNotIndexed: if *attribute_name* is not indexed.
    >>> from redisco import models
    >>> class Foo(models.Model):
    ...     name = models.Attribute(indexed=True)
    ...
    >>> c = Foo.objects.create(name="Obama")
    >>> c = Foo.objects.create(name="Hollande")
    >>> c = Foo.objects.create(name="Merkel")
    >>> c = Foo.objects.create(name="Merkel")
    >>> a = Foo.objects.get_indexed_values("name")
    >>> b = ["Hollande", "Merkel", "Obama"]
    >>> [i in b for i in a]
    [True, True, True]
    """
    if attribute_name not in self.model_class._indices:
        # Bug fix: this previously referenced an undefined name ``k``
        # (flagged by a TODO), raising NameError instead of the intended
        # AttributeNotIndexed.
        raise AttributeNotIndexed(
            "Attribute %s is not indexed in %s class." % (attribute_name, self.model_class.__name__))
    wildcard_search_key = "%s:%s:*" % (self.model_class._key, attribute_name)
    keys = self._db.keys(wildcard_search_key)
    attribute = self.model_class._attributes[attribute_name]
    # NOTE(review): taking segment [2] truncates values that themselves
    # contain ':' — confirm values are colon-free.
    values = [attribute.typecast_for_read(k.split(":")[2]) for k in keys]
    return values
@property
def db(self):
    # Redis client chosen in __init__ (model's _meta['db'] or the default).
    return self._db
###################
# PRIVATE METHODS #
###################
@property
def _set(self):
    """
    This contains the list of ids that have been looked-up,
    filtered and ordered. This set is built when we first access
    it and is cached for as long as the ModelSet exists.
    """
    # For performance reasons, only one zfilter is allowed.
    if hasattr(self, '_cached_set'):
        return self._cached_set
    # Start from the set of all instance ids, then narrow it down.
    s = Set(self.key, db=self.db)
    if self._zfilters:
        s = self._add_zfilters(s)
    if self._filters:
        s = self._add_set_filter(s)
    if self._exclusions:
        s = self._add_set_exclusions(s)
    # Ordering (or the default sort by id) produces the final list.
    n = self._order(s.key)
    self._cached_set = n
    return self._cached_set
def _add_set_filter(self, s):
    """
    This function is the internal of the `filter` function.
    It simply creates a new "intersection" of indexed keys (the filter) and
    the previous filtered keys (if any).
    .. Note:: This function uses the ``Set`` container class.
    :return: the new Set
    """
    indices = []
    for k, v in self._filters.items():
        index = self._build_key_from_filter_item(k, v)
        if k not in self.model_class._indices:
            raise AttributeNotIndexed("Attribute %s is not indexed in %s class." % (k, self.model_class.__name__))
        indices.append(index)
    # id(self) makes the temporary result key unique to this ModelSet.
    new_set_key = "~%s.%s" % ("+".join([self.key] + indices), id(self))
    s.intersection(new_set_key, *[Set(n, db=self.db) for n in indices])
    new_set = Set(new_set_key, db=self.db)
    # Temporary key: let Redis expire it.
    new_set.set_expire()
    return new_set
def _add_set_exclusions(self, s):
    """
    This function is the internals of the `exclude` function.
    It simply creates a new "difference" of indexed keys (the filter) and
    the previous filtered keys (if any).
    .. Note:: This function uses the ``Set`` container class.
    :return: the new Set
    """
    indices = []
    for k, v in self._exclusions.items():
        index = self._build_key_from_filter_item(k, v)
        if k not in self.model_class._indices:
            raise AttributeNotIndexed("Attribute %s is not indexed in %s class." % (k, self.model_class.__name__))
        indices.append(index)
    # id(self) makes the temporary result key unique to this ModelSet.
    new_set_key = "~%s.%s" % ("-".join([self.key] + indices), id(self))
    s.difference(new_set_key, *[Set(n, db=self.db) for n in indices])
    new_set = Set(new_set_key, db=self.db)
    # Temporary key: let Redis expire it.
    new_set.set_expire()
    return new_set
def _add_zfilters(self, s):
    """
    This function is the internals of the zfilter function.
    It will create a SortedSet and will compare the scores to
    the value provided.
    :return: a SortedSet with the ids.
    """
    # Only the first zfilter is honoured (see the note in _set).
    k, v = next(iter(self._zfilters[0].items()))
    try:
        att, op = k.split('__')
    except ValueError:
        raise ValueError("zfilter should have an operator.")
    index = "%s:%s" % (self.model_class._key, att)
    desc = self.model_class._attributes[att]
    zset = SortedSet(index, db=self.db)
    limit, offset = self._get_limit_and_offset()
    new_set_key = "~%s.%s" % ("+".join([self.key, att, op]), id(self))
    new_set_key_temp = "#%s.%s" % ("+".join([self.key, att, op]), id(self))
    members = []
    if isinstance(v, (tuple, list,)):
        # Range form, e.g. zfilter(field__in=(lo, hi)).
        min, max = v
        min = float(desc.typecast_for_storage(min))
        max = float(desc.typecast_for_storage(max))
    else:
        v = float(desc.typecast_for_storage(v))
    if op == 'lt':
        members = zset.lt(v, limit, offset)
    elif op == 'gt':
        members = zset.gt(v, limit, offset)
    elif op == 'gte':
        members = zset.ge(v, limit, offset)
    elif op == 'le':
        members = zset.le(v, limit, offset)
    elif op == 'lte':
        members = zset.le(v, limit, offset)
    elif op == 'in':
        # NOTE(review): 'in' with a scalar value would reach undefined
        # min/max here; callers must pass a (min, max) pair — confirm.
        members = zset.between(min, max, limit, offset)
    # NOTE(review): an unrecognised operator silently yields an empty result.
    temp_set = Set(new_set_key_temp, db=self.db)
    if members:
        temp_set.add(*members)
    temp_set.set_expire()
    s.intersection(new_set_key, temp_set)
    new_set = Set(new_set_key, db=self.db)
    new_set.set_expire()
    return new_set
def _order(self, skey):
    """Dispatch to the ordered or unordered finalisation step for *skey*."""
    if self._ordering:
        return self._set_with_ordering(skey)
    return self._set_without_ordering(skey)
def _set_with_ordering(self, skey):
    """
    Final call for finally ordering the looked-up collection.
    The ordering will be done by Redis itself and stored as a temporary set.
    :return: a Set of `id`
    """
    num, start = self._get_limit_and_offset()
    old_set_key = skey
    for ordering, alpha in self._ordering:
        # A leading '-' requests descending order.
        if ordering.startswith('-'):
            desc = True
            ordering = ordering.lstrip('-')
        else:
            desc = False
        new_set_key = "%s#%s.%s" % (old_set_key, ordering, id(self))
        # Redis SORT ... BY hash->field: sort ids by an instance hash field.
        by = "%s:*->%s" % (self.model_class._key, ordering)
        self.db.sort(old_set_key,
                     by=by,
                     store=new_set_key,
                     alpha=alpha,
                     start=start,
                     num=num,
                     desc=desc)
        if old_set_key != self.key:
            # Expire the intermediate key; keep the canonical all-key alive.
            Set(old_set_key, db=self.db).set_expire()
    new_list = List(new_set_key, db=self.db)
    new_list.set_expire()
    return new_list
def _set_without_ordering(self, skey):
    """
    Final call for "non-ordered" look-ups.
    We order by id anyway and this is done by redis (same as above).
    :returns: A Set of `id`
    """
    # sort by id
    num, start = self._get_limit_and_offset()
    old_set_key = skey
    new_set_key = "%s#.%s" % (old_set_key, id(self))
    self.db.sort(old_set_key,
                 store=new_set_key,
                 start=start,
                 num=num)
    if old_set_key != self.key:
        # Expire the intermediate key; keep the canonical all-key alive.
        Set(old_set_key, db=self.db).set_expire()
    new_list = List(new_set_key, db=self.db)
    new_list.set_expire()
    return new_list
def _get_limit_and_offset(self):
    """
    Return the (limit, offset) pair for the lookup.

    :returns: ``(None, None)`` when no limit was set.
    :raises ValueError: if only one of limit/offset is specified.
    """
    if (self._limit is not None and self._offset is None) or \
            (self._limit is None and self._offset is not None):
        # Bug fix: ``raise "..."`` raised a bare string, which is a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError("Limit and offset must be specified")
    if self._limit is None:
        return (None, None)
    return (self._limit, self._offset)
def _get_item_with_id(self, id):
    """
    Fetch an object and return the instance.
    """
    if self._return_values:
        # .values() mode: return the raw Redis hash for the instance.
        instance_key = self.model_class.instance_key(id)
        return self.db.hgetall(instance_key)
    else:
        return self.model_class.get_by_id(id)
def _build_key_from_filter_item(self, index, value):
    """
    Build an index key of the form 'Model:attr:value' for a filter pair.
    Example:
    Foo.objects.filter(name='bar')
    => 'Foo:name:bar'
    """
    attr_desc = self.model_class._attributes.get(index)
    if attr_desc:
        value = attr_desc.typecast_for_storage(value)
    return "%s:%s:%s" % (self.model_class._key, index, value)
def _clone(self):
    """
    This function allows the chaining of lookup calls.
    Example:
    Foo.objects.filter().filter().exclude()...
    :returns: a modelset instance with all the previous filters.
    """
    klass = self.__class__
    c = klass(self.model_class)
    # NOTE(review): the criteria containers below are shared by reference
    # (shallow), not copied — callers that mutate them must copy first.
    if self._filters:
        c._filters = self._filters
    if self._exclusions:
        c._exclusions = self._exclusions
    if self._zfilters:
        c._zfilters = self._zfilters
    if self._ordering:
        c._ordering = self._ordering
    c._limit = self._limit
    c._offset = self._offset
    c._return_values = self._return_values
    return c
| |
"""Component to integrate the Home Assistant cloud."""
import logging
from hass_nabucasa import Cloud
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.alexa import const as alexa_const
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_MODE,
CONF_NAME,
CONF_REGION,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import account_link, http_api
from .client import CloudClient
from .const import (
CONF_ACCOUNT_LINK_URL,
CONF_ACME_DIRECTORY_SERVER,
CONF_ALEXA,
CONF_ALEXA_ACCESS_TOKEN_URL,
CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL,
CONF_COGNITO_CLIENT_ID,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_GOOGLE_ACTIONS,
CONF_GOOGLE_ACTIONS_REPORT_STATE_URL,
CONF_GOOGLE_ACTIONS_SYNC_URL,
CONF_RELAYER,
CONF_REMOTE_API_URL,
CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID,
CONF_VOICE_API_URL,
DOMAIN,
MODE_DEV,
MODE_PROD,
)
from .prefs import CloudPreferences
# Module-level logger for the cloud integration.
_LOGGER = logging.getLogger(__name__)

# Connect to the production cloud unless configured otherwise.
DEFAULT_MODE = MODE_PROD

SERVICE_REMOTE_CONNECT = "remote_connect"
SERVICE_REMOTE_DISCONNECT = "remote_disconnect"

# Per-entity overrides for entities exposed to Alexa.
ALEXA_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(alexa_const.CONF_DESCRIPTION): cv.string,
        vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)

# Per-entity overrides for entities exposed to Google Assistant.
GOOGLE_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
    }
)

# Options common to both assistants: a filter deciding which entities
# are exposed.
ASSISTANT_SCHEMA = vol.Schema(
    {vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA}
)

ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend(
    {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}}
)

GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend(
    {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}}
)

# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In(
                    [MODE_DEV, MODE_PROD]
                ),
                # NOTE(review): the optional settings below appear to
                # override the built-in cloud endpoints (dev/testing use)
                # — confirm against hass_nabucasa.Cloud's parameters.
                vol.Optional(CONF_COGNITO_CLIENT_ID): str,
                vol.Optional(CONF_USER_POOL_ID): str,
                vol.Optional(CONF_REGION): str,
                vol.Optional(CONF_RELAYER): str,
                vol.Optional(CONF_GOOGLE_ACTIONS_SYNC_URL): vol.Url(),
                vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
                vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
                vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
                vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
                vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
                vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
                vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(),
                vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(),
                vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(),
                vol.Optional(CONF_VOICE_API_URL): vol.Url(),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Raised by the helpers in this module when the cloud is not set up,
# not logged in, or has no remote instance domain.
class CloudNotAvailable(HomeAssistantError):
    """Raised when an action requires the cloud but it's not available."""
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
    """Test if user is logged in."""
    cloud = hass.data.get(DOMAIN)
    return cloud is not None and cloud.is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
    """Test if user has an active subscription."""
    if not async_is_logged_in(hass):
        return False
    return not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
    """Create a cloudhook and return its public URL."""
    if not async_is_logged_in(hass):
        raise CloudNotAvailable
    created = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True)
    return created["cloudhook_url"]
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
    """Delete a cloudhook."""
    cloud = hass.data.get(DOMAIN)
    if cloud is None:
        raise CloudNotAvailable
    await cloud.cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
    """Get the remote UI URL."""
    if not async_is_logged_in(hass):
        raise CloudNotAvailable
    instance_domain = hass.data[DOMAIN].remote.instance_domain
    if not instance_domain:
        raise CloudNotAvailable
    return "https://" + instance_domain
def is_cloudhook_request(request):
    """Test if a request came from a cloudhook.
    Async friendly.
    """
    # Cloudhook-originated requests are delivered as MockRequest objects.
    return isinstance(request, MockRequest)
async def async_setup(hass, config):
    """Initialize the Home Assistant cloud."""
    # Process configs
    if DOMAIN in config:
        kwargs = dict(config[DOMAIN])
    else:
        kwargs = {CONF_MODE: DEFAULT_MODE}
    # Alexa/Google custom config: pop them out of kwargs so only Cloud()
    # constructor options remain; fall back to the schema defaults.
    alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
    google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})
    # Cloud settings
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()
    # Cloud user
    user = None
    if prefs.cloud_user:
        # Fetch the user. It can happen that the user no longer exists if
        # an image was restored without restoring the cloud prefs.
        user = await hass.auth.async_get_user(prefs.cloud_user)
    if user is None:
        user = await hass.auth.async_create_system_user(
            "Home Assistant Cloud", [GROUP_ID_ADMIN]
        )
        await prefs.async_update(cloud_user=user.id)
    # Initialize Cloud
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
    cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)

    # Tie the cloud connection lifecycle to Home Assistant start/stop.
    async def _startup(event):
        """Startup event."""
        await cloud.start()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _startup)

    async def _shutdown(event):
        """Shutdown event."""
        await cloud.stop()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)

    async def _service_handler(service):
        """Handle service for cloud."""
        # One handler for both services; persist the choice in prefs so
        # it survives restarts.
        if service.service == SERVICE_REMOTE_CONNECT:
            await cloud.remote.connect()
            await prefs.async_update(remote_enabled=True)
        elif service.service == SERVICE_REMOTE_DISCONNECT:
            await cloud.remote.disconnect()
            await prefs.async_update(remote_enabled=False)

    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler
    )
    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler
    )
    loaded = False

    async def _on_connect():
        """Discover RemoteUI binary sensor."""
        nonlocal loaded
        # Prevent multiple discovery
        if loaded:
            return
        loaded = True
        # Load the cloud-provided platforms once, on first connect.
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform(
                "binary_sensor", DOMAIN, {}, config
            )
        )
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config)
        )
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config)
        )

    cloud.iot.register_on_connect(_on_connect)
    await http_api.async_setup(hass)
    account_link.async_setup(hass)
    return True
| |
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
from pktverify.consts import MLE_CHILD_ID_RESPONSE, MGMT_ACTIVE_SET_URI, MGMT_ACTIVE_GET_URI
from pktverify.packet_verifier import PacketVerifier
from pktverify.bytes import Bytes
COMMISSIONER = 1
LEADER = 2
class Cert_9_2_04_ActiveDataset(thread_cert.TestCase):
    """Thread certification 9.2.4: Active Operational Dataset management
    via MGMT_ACTIVE_SET/GET between a Commissioner and the Leader."""

    SUPPORT_NCP = False

    # Two-node topology: both nodes share the same initial active dataset
    # and allowlist each other.
    TOPOLOGY = {
        COMMISSIONER: {
            'name': 'COMMISSIONER',
            'active_dataset': {
                'timestamp': 10,
                'panid': 0xface,
                'master_key': '00112233445566778899aabbccddeeff'
            },
            'mode': 'rdn',
            'router_selection_jitter': 1,
            'allowlist': [LEADER]
        },
        LEADER: {
            'name': 'LEADER',
            'active_dataset': {
                'timestamp': 10,
                'panid': 0xface,
                'master_key': '00112233445566778899aabbccddeeff'
            },
            'mode': 'rdn',
            'router_selection_jitter': 1,
            'allowlist': [COMMISSIONER]
        },
    }
def test(self):
    """Exercise MGMT_ACTIVE_SET with valid and invalid payloads and check
    the Leader accepts/rejects them (observed via the network name)."""
    self.nodes[LEADER].start()
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
    self.nodes[COMMISSIONER].start()
    self.simulator.go(5)
    self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
    self.nodes[COMMISSIONER].commissioner_start()
    self.simulator.go(5)
    self.collect_rlocs()
    self.collect_leader_aloc(LEADER)
    # Step 2
    # Valid set: accepted, network name becomes 'GRL'.
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=101,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        network_name='GRL',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    self.nodes[COMMISSIONER].send_mgmt_active_get()
    self.simulator.go(5)
    # Step 6
    # Attempt to set Channel TLV
    # (expected to be rejected: network name stays 'GRL')
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=102,
        channel=18,
        channel_mask=0x7fff800,
        extended_panid='000db70000000001',
        network_name='threadcert',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Step 8
    # Attempt to set Mesh Local Prefix TLV
    # (expected to be rejected: network name stays 'GRL')
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=103,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        mesh_local='fd00:0db7::',
        network_name='UL',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Step 10
    # Attempt to set Network Master Key TLV
    # (expected to be rejected)
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=104,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        master_key='ffeeddccbbaa99887766554433221100',
        mesh_local='fd00:0db7::',
        network_name='GRL',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Step 12
    # Attempt to set PAN ID TLV
    # (expected to be rejected)
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=105,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        master_key='00112233445566778899aabbccddeeff',
        mesh_local='fd00:0db7::',
        network_name='UL',
        panid=0xafce,
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Step 14
    # Invalid Commissioner Session ID
    # (expected to be rejected)
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=106,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        network_name='UL',
        binary='0b02abcd',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Step 16
    # Old Active Timestamp
    # (expected to be rejected: timestamp 101 already used)
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=101,
        channel_mask=0x01fff800,
        extended_panid='000db70000000000',
        network_name='UL',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Step 18
    # Unexpected Steering Data TLV
    # (expected to be accepted: network name changes to 'UL')
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=107,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        network_name='UL',
        binary='0806113320440000',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'UL')
    # Step 20
    # Undefined TLV
    # (expected to be accepted: network name changes back to 'GRL')
    self.nodes[COMMISSIONER].send_mgmt_active_set(
        active_timestamp=108,
        channel_mask=0x7fff800,
        extended_panid='000db70000000000',
        network_name='GRL',
        binary='8202aa55',
    )
    self.simulator.go(5)
    self.assertEqual(self.nodes[LEADER].get_network_name(), 'GRL')
    # Final connectivity check: the Leader can ping every Commissioner
    # address.
    ipaddrs = self.nodes[COMMISSIONER].get_addrs()
    for ipaddr in ipaddrs:
        self.assertTrue(self.nodes[LEADER].ping(ipaddr))
    def verify(self, pv):
        '''Verify the captured packet trace against the test plan.

        Walks the pcap in order and asserts, for each MGMT_ACTIVE_SET /
        MGMT_ACTIVE_GET exchange in the test, that the Commissioner's request
        carries the expected MeshCoP TLV values and that the Leader's response
        carries the expected State TLV: 1 = Accept, -1 = Reject.
        Each must_next() consumes packets until the filtered match is found,
        so the assertions are strictly ordered.

        :param pv: packet verifier wrapping the captured pcap and node vars
        '''
        pkts = pv.pkts
        pv.summary.show()
        LEADER = pv.vars['LEADER']
        LEADER_RLOC = pv.vars['LEADER_RLOC']
        LEADER_ALOC = pv.vars['LEADER_ALOC']
        COMMISSIONER = pv.vars['COMMISSIONER']
        COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']
        # Step 1: Ensure the topology is formed correctly
        pkts.filter_wpan_src64(LEADER).filter_wpan_dst64(COMMISSIONER).filter_mle_cmd(
            MLE_CHILD_ID_RESPONSE).must_next()
        # Step 2: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(LEADER_RLOC, LEADER_ALOC).filter_coap_request(
            MGMT_ACTIVE_SET_URI).filter(lambda p: p.thread_meshcop.tlv.xpan_id == '000db70000000000' and p.
                                        thread_meshcop.tlv.net_name == ['GRL'] and p.thread_meshcop.tlv.chan_mask_mask
                                        == '001fffe0' and p.thread_meshcop.tlv.active_tstamp == 101).must_next()
        # Step 3: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == 1).must_next()
        # Step 4: Commissioner sends MGMT_ACTIVE_GET.req to Leader
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_GET_URI).must_next()
        # Step 5: The Leader MUST send MGMT_ACTIVE_GET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(
            MGMT_ACTIVE_GET_URI).filter(lambda p: p.thread_meshcop.tlv.active_tstamp == 101 and p.thread_meshcop.tlv.
                                        xpan_id == '000db70000000000' and p.thread_meshcop.tlv.net_name == ['GRL'] and
                                        p.thread_meshcop.tlv.chan_mask_mask == '001fffe0').must_next()
        # Step 6: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 102 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000001' and p.thread_meshcop.tlv.net_name == ['threadcert'] and p.thread_meshcop.tlv.
                chan_mask_mask == '001fffe0' and p.thread_meshcop.tlv.channel == [18]).must_next()
        # Step 7: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == -1).must_next()
        # Step 8: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 103 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000000' and p.thread_meshcop.tlv.net_name == ['UL'] and p.thread_meshcop.tlv.chan_mask_mask
                == '001fffe0' and p.thread_meshcop.tlv.ml_prefix == 'fd000db700000000').must_next()
        # Step 9: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == -1).must_next()
        # Step 10: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 104 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000000' and p.thread_meshcop.tlv.net_name == ['GRL'] and p.thread_meshcop.tlv.master_key ==
                'ffeeddccbbaa99887766554433221100' and p.thread_meshcop.tlv.chan_mask_mask == '001fffe0' and p.
                thread_meshcop.tlv.ml_prefix == 'fd000db700000000').must_next()
        # Step 11: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == -1).must_next()
        # Step 12: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 105 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000000' and p.thread_meshcop.tlv.net_name == ['UL'] and p.thread_meshcop.tlv.master_key ==
                '00112233445566778899aabbccddeeff' and p.thread_meshcop.tlv.pan_id == [0xafce] and p.thread_meshcop.tlv
                .chan_mask_mask == '001fffe0' and p.thread_meshcop.tlv.ml_prefix == 'fd000db700000000').must_next()
        # Step 13: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == -1).must_next()
        # Step 14: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 106 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000000' and p.thread_meshcop.tlv.net_name == ['UL'] and p.thread_meshcop.tlv.
                commissioner_sess_id == 0xabcd and p.thread_meshcop.tlv.chan_mask_mask == '001fffe0').must_next()
        # Step 15: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == -1).must_next()
        # Step 16: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(LEADER_RLOC, LEADER_ALOC).filter_coap_request(
            MGMT_ACTIVE_SET_URI).filter(lambda p: p.thread_meshcop.tlv.active_tstamp == 101 and p.thread_meshcop.tlv.
                                        xpan_id == '000db70000000000' and p.thread_meshcop.tlv.net_name == ['UL'] and p
                                        .thread_meshcop.tlv.chan_mask_mask == '001fff80').must_next()
        # Step 17: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == -1).must_next()
        # Step 18: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 107 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000000' and p.thread_meshcop.tlv.net_name == ['UL'] and p.thread_meshcop.tlv.steering_data
                == Bytes('113320440000') and p.thread_meshcop.tlv.chan_mask_mask == '001fffe0').must_next()
        # Step 19: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == 1).must_next()
        # Step 20: Commissioner sends MGMT_ACTIVE_SET.req to Leader RLOC or Leader Anycast Locator
        pkts.filter_wpan_src64(COMMISSIONER).filter_ipv6_2dsts(
            LEADER_RLOC, LEADER_ALOC).filter_coap_request(MGMT_ACTIVE_SET_URI).filter(
                lambda p: p.thread_meshcop.tlv.active_tstamp == 108 and p.thread_meshcop.tlv.xpan_id ==
                '000db70000000000' and p.thread_meshcop.tlv.net_name == ['GRL'] and p.thread_meshcop.tlv.unknown ==
                'aa55' and p.thread_meshcop.tlv.chan_mask_mask == '001fffe0').must_next()
        # Step 21: Leader MUST send MGMT_ACTIVE_SET.rsp to the Commissioner
        pkts.filter_wpan_src64(LEADER).filter_ipv6_dst(COMMISSIONER_RLOC).filter_coap_ack(MGMT_ACTIVE_SET_URI).filter(
            lambda p: p.thread_meshcop.tlv.state == 1).must_next()
# Allow the certification test to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
# ---- file boundary (concatenation artifact): next section is an unrelated Ansible module ----
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/version -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_version
short_description: Return the current openshift version
description:
- Return the openshift installed version. `oc version`
options:
state:
description:
- Currently list is only supported state.
required: true
default: list
choices: ["list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
oc_version:
- name: get oc version
oc_version:
register: oc_version
'''
# -*- -*- -*- End included fragment: doc/version -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# noqa: E301,E302
class YeditException(Exception):
    '''Raised for any error encountered while loading or editing a yaml document.'''
# pylint: disable=too-many-public-methods
class Yedit(object):
    ''' Class to modify yaml files.

    Content is held in `yaml_dict` and addressed with dotted key notation
    (e.g. ``a.b.[0].c``); the separator character is configurable.
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator

        BUGFIX: previously declared as ``def separator(self)`` and returned
        the old value, so any assignment to ``obj.separator`` raised
        TypeError and never updated the separator.
        '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key into (array_index, dict_key) pairs, honoring the separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key; returns True on success, None otherwise '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # walk down to the parent of the entry to be removed
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set an item in a nested structure with key notation a.b.c,
            creating intermediate dicts as needed.
            d = {'a': {'b': 'c'}}
            add_entry(d, 'a.z', 'q') -> d == {'a': {'b': 'c', 'z': 'q'}}
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    return None

                # create missing intermediate dict
                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a nested structure with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # write to a sibling temp file first, then rename for atomic replace
        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # ruamel.yaml keeps formatting via RoundTripDumper; plain PyYAML does not
        if hasattr(yaml, 'RoundTripDumper'):
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                self.yaml_dict.fa.set_block_style()

            # pylint: disable=no-member
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        else:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)

    def read(self):
        ''' read from file; returns None when no file is configured or present '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True

        return False

    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # pylint: disable=no-member
                if hasattr(yaml, 'RoundTripLoader'):
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                else:
                    self.yaml_dict = yaml.safe_load(contents)

                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)

        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None

        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in ' +
                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work (ruamel round-trip objects need dump/reload)
        if hasattr(yaml, 'round_trip_dump'):
            # pylint: disable=no-member
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)

            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
        else:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work (ruamel round-trip objects need dump/reload)
            if hasattr(yaml, 'round_trip_dump'):
                # pylint: disable=no-member
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False),  # noqa: E501
                                     yaml.RoundTripLoader)

                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    tmp_copy.fa.set_block_style()
            else:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            # NOTE(security): yaml.load without an explicit safe loader can
            # construct arbitrary objects; only pass trusted input here.
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
                                     % (inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # If vtype is not str then go ahead and attempt to yaml load it.
        if isinstance(inc_value, str) and 'str' not in vtype:
            try:
                # NOTE(security): yaml.load without an explicit safe loader can
                # construct arbitrary objects; only pass trusted input here.
                inc_value = yaml.load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming ' +
                                     'value. value=[%s] vtype=[%s]'
                                     % (type(inc_value), vtype))

        return inc_value

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(module):
        '''perform the idempotent crud operations'''
        yamlfile = Yedit(filename=module.params['src'],
                         backup=module.params['backup'],
                         separator=module.params['separator'])

        if module.params['src']:
            rval = yamlfile.load()

            if yamlfile.yaml_dict is None and \
               module.params['state'] != 'present':
                # BUGFIX: the message previously contained a literal,
                # unformatted '%s' and a grammar error ("it is has").
                return {'failed': True,
                        'msg': ('Error opening file [%s]. Verify that the '
                                'file exists, that it has correct '
                                'permissions, and is valid yaml.') % module.params['src']}

        if module.params['state'] == 'list':
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                yamlfile.yaml_dict = content

            if module.params['key']:
                rval = yamlfile.get(module.params['key']) or {}

            return {'changed': False, 'result': rval, 'state': "list"}

        elif module.params['state'] == 'absent':
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                yamlfile.yaml_dict = content

            if module.params['update']:
                rval = yamlfile.pop(module.params['key'],
                                    module.params['value'])
            else:
                rval = yamlfile.delete(module.params['key'])

            if rval[0] and module.params['src']:
                yamlfile.write()

            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}

        elif module.params['state'] == 'present':
            # check if content is different than what is in the file
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])

                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   module.params['value'] is None:
                    return {'changed': False,
                            'result': yamlfile.yaml_dict,
                            'state': "present"}

                yamlfile.yaml_dict = content

            # we were passed a value; parse it
            if module.params['value']:
                value = Yedit.parse_value(module.params['value'],
                                          module.params['value_type'])
                key = module.params['key']
                if module.params['update']:
                    # pylint: disable=line-too-long
                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
                                                      module.params['curr_value_format'])  # noqa: E501

                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501

                elif module.params['append']:
                    rval = yamlfile.append(key, value)
                else:
                    rval = yamlfile.put(key, value)

                if rval[0] and module.params['src']:
                    yamlfile.write()

                return {'changed': rval[0],
                        'result': rval[1], 'state': "present"}

            # no edits to make
            if module.params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': "present"}

        # BUGFIX: 'Unkown' -> 'Unknown'
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised when an openshift command-line operation fails.'''
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # work on a private temp copy so the original kubeconfig is untouched
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')

        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, rname, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource, rname]
        if selector:
            cmd.append('--selector=%s' % selector)

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        elif rname:
            cmd.append(rname)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are returned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        cmd.append('--schedulable=%s' % schedulable)

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode(), stderr.decode()

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc '''
        cmds = []
        if oadm:
            cmds = ['oadm']
        else:
            cmds = ['oc']

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # BUGFIX: the placeholder namespace was only spelled 'emtpy' (typo);
        # accept the correct spelling too while keeping the old one so
        # existing callers are unaffected.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty', 'emtpy']:  # noqa: E501
            cmds.extend(['-n', self.namespace])

        cmds.extend(cmd)

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print(' '.join(cmds))

        returncode, stdout, stderr = self._run(cmds, input_data)

        rval = {"returncode": returncode,
                "results": results,
                "cmd": ' '.join(cmds)}

        if returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        # BUGFIX: this previously bound the exception to the
                        # outer `err` name; Python 3 deletes that name when the
                        # except block ends, so the later `if err:` raised
                        # NameError.  Bind to a local name instead.
                        if "No JSON object could be decoded" in verr.args:
                            err = verr.args
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print("STDOUT: {0}".format(stdout))
                print("STDERR: {0}".format(stderr))

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds})

        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {}})

        return rval
class Utils(object):
    ''' Utility helpers (temp files, result lookup, version parsing,
    and recursive definition comparison) for the openshiftcli modules. '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' Create a file in tmp with the given name prefix and contents.

        `data` is serialized according to `ftype` ('yaml', 'json', or
        anything else for verbatim text).  Returns the temp file path.
        '''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # pylint: disable=no-member
            # ruamel.yaml exposes RoundTripDumper; plain PyYAML does not,
            # so fall back to safe_dump in that case.
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            # Unknown type: write the data verbatim.
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''Create a temporary copy of a file; removed automatically at exit.'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False so the name stays valid after the handle closes.
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dicts {path, data} into an array of
        {name, path} entries backed by temp files.'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Remove the given files/directories; used as an atexit hook.'''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by metadata.name; None if absent. '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' Read and parse a resource file ('yaml' or 'json'). '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # pylint: disable=no-member
            # Prefer ruamel.yaml's round-trip loader when available.
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' Parse `oc version` output into a {tool: version} dict. '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    # The version is the last whitespace-separated token.
                    version_dict[term] = line.split()[-1]

        # Workaround for OpenShift 3.2: "oc version" there does not report
        # an "openshift" line, so mirror the "oc" version.
        # NOTE(review): this raises KeyError if "oc" is also missing.
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' Derive `<tech>_numeric` and `<tech>_short` version strings. '''
        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version (drop any pre-release suffix)
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                # Strip the leading "v" and any "+build" metadata.
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                # NOTE(review): a fixed [1:4] slice assumes one-digit
                # minor versions (e.g. "v3.11" yields "3.1").
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' Check whether the atomic-openshift RPM is installed (via yum). '''
        import yum

        yum_base = yum.YumBase()
        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
            return True

        return False

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results
        given back by our query.  Returns True when every non-skipped key
        in result_def matches user_def (recursing into dicts and lists of
        dicts).  `debug` prints the reason for each mismatch. '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                # Compare the lists pairwise; recurse on dict elements.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False

                    # NOTE(review): for non-dict elements this compares the
                    # whole lists (not the element pair) on every iteration.
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(value)
                            print(user_def[key])
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(api_values)
                        print(user_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for a resource's configuration options, able to
    render them as `--key=value` CLI parameters.'''

    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' Accessor for the raw options dictionary. '''
        return self._options

    def to_option_list(self):
        '''Render all options as CLI parameters (delegates to stringify).'''
        return self.stringify()

    def stringify(self):
        ''' Return the options hash as a list of `--key=value` strings.

        An option is emitted when its 'include' flag is set and its value
        is truthy, or is an int/bool (so 0 and False still make the cut).
        Underscores in option names become dashes.
        '''
        params = []
        for key, data in self.config_options.items():
            if not data['include']:
                continue
            value = data['value']
            if value or isinstance(value, int):
                params.append('--%s=%s' % (key.replace('_', '-'), value))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
    ''' Thin wrapper around `oc version` for the oc_version module. '''
    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self, config, debug):
        ''' Constructor for OCVersion; `config` is the kubeconfig path. '''
        super(OCVersion, self).__init__(None, config)
        self.debug = debug

    def get(self):
        '''Return the parsed version information as a flat dict.

        Raises OpenShiftCLIError when `oc version` exits nonzero.
        '''
        raw = self._version()
        if raw['returncode'] != 0:
            raise OpenShiftCLIError('Problem detecting openshift version.')

        filtered = Utils.filter_versions(raw['results'])
        combined = {'returncode': raw['returncode']}
        combined.update(filtered)
        combined.update(Utils.add_custom_versions(filtered))
        return combined

    @staticmethod
    def run_ansible(params):
        '''Run the idempotent ansible code for state=list.'''
        oc_version = OCVersion(params['kubeconfig'], params['debug'])

        if params['state'] == 'list':
            # pylint: disable=protected-access
            return {'state': params['state'],
                    'results': oc_version.get(),
                    'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_version.py -*- -*- -*-
def main():
    ''' Ansible entry point for the oc_version module. '''
    arg_spec = dict(
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        state=dict(default='list', type='str', choices=['list']),
        debug=dict(default=False, type='bool'),
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    rval = OCVersion.run_ansible(module.params)
    if 'failed' in rval:
        module.fail_json(**rval)
    module.exit_json(**rval)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_version.py -*- -*- -*-
# ---- file boundary: concatenation artifact (was "| |") ----
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import warnings
#from math import factorial
from skimage import io
from skimage.measure import profile_line
#from scipy.ndimage import gaussian_filter
from scipy import integrate
import scipy.ndimage
import matplotlib.patches as patches
import seaborn as sns
class Surface(object):
    '''A class for bare earth and snow surfaces.

    Each instance is initialized to mask out common NoData values and remove
    empty rows. The surface is also resampled by a factor of 2 (the comment
    below says to 1 by 1 meter data) to make profile and flux calculations
    more straightforward.
    '''

    def __init__(self, fpath):
        # Load the raster and convert NoData markers to NaN.
        self.arr = io.imread(fpath)
        self.arr[self.arr == -9999] = np.nan
        # Values below -10 are treated as spurious as well.
        self.arr[(self.arr < -10)] = np.nan
        # Drop rows that are entirely NaN.
        rowmask = np.all(np.isnan(self.arr), axis=1)
        self.arr = self.arr[~rowmask]
        # Bilinear (order=1) upsampling by a factor of 2.
        # NOTE(review): this yields 1 m cells only if the input raster is
        # 2 m resolution — confirm against the source data.
        self.arr = scipy.ndimage.zoom(self.arr, 2, order = 1)

    def subset_surf(self, n_divs, names):
        '''Subset the surface and remove empty columns. Subsets are stored
        in a dict with user defined keys.

        Parameters
        ----------
        n_divs : int
            the number of vertical subsets.
        names: list of strings for keys
        '''
        ysize = round(self.arr.shape[0] / n_divs)
        self.subdict = dict.fromkeys(names)
        for nam, num in zip(names, range(0, n_divs)):
            # Horizontal band for this subset.
            self.subdict[nam] = self.arr[(ysize * num):(ysize * (num+1))]
            # Columns whose nansum is 0 are considered empty (NoData).
            colmask = np.nansum(self.subdict[nam], axis=0) == 0
            emptycols = np.where(colmask)
            # Gaps between consecutive empty-column indices.  All gaps == 1
            # means the empty columns form a single contiguous run.
            nd_boundaries = [j-i for i, j in zip(emptycols[0][:-1], emptycols[0][1:])]
            if max(nd_boundaries, default='no elements') == 1:
                if emptycols[0][0] == 0:
                    # Contiguous empty run starting at the left edge:
                    # trim everything up to its last column.
                    self.subdict[nam] = self.subdict[nam][::, emptycols[0].max():]
                else:
                    # Otherwise trim from the first empty column rightward.
                    self.subdict[nam] = self.subdict[nam][::, :emptycols[0].min()]
            elif max(nd_boundaries, default='no elements') == 'no elements':
                # Fewer than two empty columns: nothing to trim.
                pass
            else:
                # Empty columns on both sides: keep the span covered by the
                # largest gap between empty columns.
                # NOTE(review): l_edge/r_edge index into nd_boundaries, not
                # the band's columns — verify this trims as intended.
                l_edge = np.where(np.array(nd_boundaries) > 1)[0][0]
                r_edge = l_edge + max(nd_boundaries)
                self.subdict[nam] = self.subdict[nam][::, l_edge:r_edge]
class Transect(object):
    '''A class for transects along which we can measure flux and generate
    Tabler profiles. We initialize a Transect instance by providing the bare
    earth and snow depth surfaces and the beginning and end indices for
    the profile we want. We can initialize Tabler assuming the user is choosing
    the start and end of the drift as the profile.

    Parameters
    ----------
    bare_earth : array_like, shape (N,)
        elevation values of snow free surface
    years : list[str]
        years for which snow depth surfaces are available
    snow_depths : list[arr]
        snow depth surfaces
    angle_deg: int
        angle in degrees TN from which we take the transect (i.e. the
        the wind direction)
    x1, y1: int
        start indices of transect
    length: int
        length of transect in meters
    '''

    def __init__(self, bare_earth, years, snow_depths, angle_deg, x1, y1, length):
        self.bare_earth = bare_earth  # bare earth surface
        self.angle_deg = angle_deg  # where does the wind come from?
        self.x1 = x1
        self.y1 = y1

        # Compute end of transect from the wind direction.
        cosa = np.cos((np.deg2rad(90 + angle_deg)))
        cosb = np.cos((np.deg2rad(angle_deg)))
        self.x2, self.y2 = (x1 + (length * cosa), y1 + (length * cosb))

        # Construct DEM profile with a width of 1 m.
        # profile_line takes (row, col) coordinates, hence (y, x) order.
        self.dem_profile = profile_line(bare_earth,
                                        ((y1, x1)), ((self.y2, self.x2)), 1)

        # Check DEM profile for No Data values and where they occur.
        nan_idx = np.argwhere(np.isnan(self.dem_profile))

        # Truncate DEM Profile at the first No Data instance.
        if len(nan_idx) > 0:
            self.dem_profile = self.dem_profile[:nan_idx[0]-1]

        # The DEM profile is a template for the snow depth profiles:
        # recompute the end point from the (possibly truncated) length.
        dem_pro_len = len(self.dem_profile)
        self.x3, self.y3 = (x1 + (dem_pro_len * cosa), y1 + (dem_pro_len * cosb))

        self.snowdict = dict.fromkeys(years)

        # For each year generate profiles and store in the dictionary.
        for yr, depth in zip(years, snow_depths):
            self.snowdict[yr] = {}
            self.snowdict[yr]['snow depth surface'] = depth
            self.snowdict[yr]['winter surface'] = depth + self.bare_earth
            self.snowdict[yr]['winter surface profile'] = \
                profile_line(self.snowdict[yr]['winter surface'],
                             ((y1, x1)), ((self.y3, self.x3)), 1)[:-1]
            self.snowdict[yr]['depth profile'] = profile_line(depth, ((y1, x1)),
                                                              ((self.y3, self.x3)), 1)[:-1]

    def TablerProfile(self):
        '''
        Here we generate a Tabler Equilibrium Profile for the drift.
        Parameters include the lip of the trap and the drift end, because
        we need to have more data (e.g. upwind) in the transect. Basically the
        user decides where the drift starts and ends. I have ways to automate
        this but they are not fully tested.
        '''
        # Dynamic Creation of Tabler Profile
        # All Coefficients march downwind 1 m at a time
        # x2 is now the slope from the snow surface to the ground at a
        # horizontal distance of 15 m downwind.
        self.dynamic_tabler = self.dem_profile.copy()
        i = 45
        # March downwind; the 45/46-sample margins leave room for the
        # upwind and downwind slope windows used below.
        while i < len(self.dem_profile) - 46:
            # Elevation differences over the regression windows.
            upwind_0_45 = self.dem_profile[i-45] - self.dem_profile[i]
            snow_to_ground = self.dynamic_tabler[i] - self.dem_profile[i+5]
            downwind_15_30 = self.dem_profile[i+15] - self.dem_profile[i+30]
            downwind_30_45 = self.dem_profile[i+30] - self.dem_profile[i+45]
            # Convert each to a percent slope, sign-flipped for rising ground.
            if upwind_0_45 > 0:
                x1 = (upwind_0_45 / 45) * -100
            else:
                x1 = (upwind_0_45 / 45) * 100
            if snow_to_ground > 0:
                x2 = snow_to_ground * -100
            else:
                x2 = snow_to_ground * 100
            if downwind_15_30 > 0:
                x3 = (downwind_15_30 / 15) * -100
            else:
                x3 = (downwind_15_30 / 15) * 100
            if downwind_30_45 > 0:
                x4 = (downwind_30_45 / 15) * -100
            else:
                x4 = (downwind_30_45 / 15) * 100
            # Weighted slope combination — presumably Tabler's regression
            # coefficients (0.25/0.55/0.15/0.05); confirm against the source.
            y = (0.25 * x1) + (0.55 * x2) + (0.15 * x3) + (0.05 * x4)
            rise = y / 100
            self.dynamic_tabler[i+1] = self.dem_profile[i] - rise
            i += 1

    def Compute_Flux(self):
        ''' Use Simpson's rule integration to calculate flux over the drift
        fetch. Values will be m^3 per lineal m, so essentially we have m^2 which
        makes sense because we are getting the area under the snow depth curve.
        '''
        tabler_snow_depth = self.dynamic_tabler - self.dem_profile
        # NOTE(review): integrate.simps is deprecated in newer SciPy in
        # favor of integrate.simpson — confirm the pinned SciPy version.
        self.tabler_flux = integrate.simps(tabler_snow_depth[46:-46])
        self.mean_flux = 0
        for k in self.snowdict:
            snow_depth = self.snowdict[k]['depth profile']
            self.snowdict[k]['flux'] = integrate.simps(snow_depth[46:-46])
            self.mean_flux = self.mean_flux + self.snowdict[k]['flux']
        # NOTE(review): hard-coded divisor assumes exactly four years;
        # len(self.snowdict) would generalize — confirm before changing.
        self.mean_flux = self.mean_flux / 4.0
        self.tabler_flux_err = self.mean_flux - self.tabler_flux
        self.flux_err_ratio = self.tabler_flux_err / self.tabler_flux

    def PlotMaps(self):
        '''Plot the four yearly snow depth maps with the transect overlaid.'''
        fig = plt.figure()
        ax1 = plt.subplot(2, 2, 1)
        im1 = plt.imshow(self.snowdict['2016']['snow depth surface'],
                         vmin = 0, vmax = 2)
        ax1.plot([self.x1, self.x3], [self.y1, self.y3], c='r',
                 linestyle='-', linewidth=2)
        plt.title('2016')
        ax1.set_ylabel('m', size = 7)
        ax2 = plt.subplot(2, 2, 2, sharex=ax1)
        plt.imshow(self.snowdict['2015']['snow depth surface'],
                   vmin = 0, vmax = 2)
        ax2.plot([self.x1, self.x3], [self.y1, self.y3], c='r',
                 linestyle='-', linewidth=2)
        plt.title('2015')
        ax2.set_yticks([])
        ax2.set_xticks([])
        ax3 = plt.subplot(2, 2, 3, sharey=ax1)
        plt.imshow(self.snowdict['2013']['snow depth surface'],
                   vmin = 0, vmax = 2)
        ax3.plot([self.x1, self.x3], [self.y1, self.y3], c='r',
                 linestyle='-', linewidth=2)
        plt.title('2013')
        ax3.set_xlabel('m', size = 7)
        ax4 = plt.subplot(2, 2, 4, sharex = ax3)
        plt.imshow(self.snowdict['2012']['snow depth surface'],
                   vmin = 0, vmax = 2)
        ax4.plot([self.x1, self.x3], [self.y1, self.y3], c='r',
                 linestyle='-', linewidth=2)
        plt.title('2012')
        ax4.set_yticks([])
        ax4.set_xlabel('m', size = 7)
        # Shared colorbar in its own axes on the right.
        cbar_ax = fig.add_axes([0.90, 0.15, 0.05, 0.7])
        fig.colorbar(im1, cax=cbar_ax)
        plt.suptitle('Snow Depth [m]')
        plt.subplots_adjust(wspace=0.0, hspace=0.2)
        #plt.savefig('/home/cparr/depthmaps.png',dpi=300)

    def PlotDepthProfiles(self):
        '''Plot the snow depth profile for each year along the transect.'''
        fig, ax = plt.subplots()
        plt.plot(self.snowdict['2012']['depth profile'], ':b1', label = '2012')
        plt.plot(self.snowdict['2013']['depth profile'], ':r2', label = '2013')
        plt.plot(self.snowdict['2015']['depth profile'], ':g3', label = '2015')
        plt.plot(self.snowdict['2016']['depth profile'], ':k4', label = '2016')
        plt.ylabel('Snow Depth [m]')
        plt.xlabel('m')
        plt.legend()
        #plt.savefig('/home/cparr/snowdepth_profiles.png',dpi=300)

    def PlotSurfaceProfiles(self):
        '''Plot bare earth, yearly winter surfaces, and the Tabler surface.'''
        fig, ax = plt.subplots()
        plt.plot(self.dem_profile, label = 'Bare Earth', c='saddlebrown')
        plt.plot(self.snowdict['2012']['winter surface profile'], '--b', alpha=0.5, label = '2012')
        plt.plot(self.snowdict['2013']['winter surface profile'], '--r', alpha=0.5, label = '2013')
        plt.plot(self.snowdict['2015']['winter surface profile'], '--g', alpha=0.5, label = '2015')
        plt.plot(self.snowdict['2016']['winter surface profile'], '--m', alpha=0.5, label = '2016')
        plt.plot(self.dynamic_tabler, label = 'Dynamic Tabler Surface', c='k')
        plt.xlabel('m')
        plt.ylabel('m')
        plt.legend()

    def PlotSnowSurfacesOnly(self):
        '''Plot winter surfaces minus bare earth (i.e. depth) per year.'''
        fig, ax = plt.subplots()
        plt.plot(self.snowdict['2012']['winter surface profile'] - self.dem_profile, '--b', alpha=0.5, label = '2012')
        plt.plot(self.snowdict['2013']['winter surface profile'] - self.dem_profile, '--r', alpha=0.5, label = '2013')
        plt.plot(self.snowdict['2015']['winter surface profile'] - self.dem_profile, '--g', alpha=0.5, label = '2015')
        plt.plot(self.snowdict['2016']['winter surface profile'] - self.dem_profile, '--m', alpha=0.5, label = '2016')
        plt.plot(self.dynamic_tabler - self.dem_profile, label = 'Dynamic Tabler Surface', c='k')
        plt.xlabel('m')
        plt.ylabel('m')
        plt.title('Snow Surface - Bare Earth Surface')
        plt.legend()

    def PlotFlux(self):
        '''Bar chart of per-year integrated flux next to the Tabler flux.'''
        fig, ax = plt.subplots()
        plt.suptitle('Integrated Flux [m^3 per lineal m]')
        y = [self.snowdict['2012']['flux'],
             self.snowdict['2013']['flux'],
             self.snowdict['2015']['flux'],
             self.snowdict['2016']['flux'],
             self.tabler_flux]
        ax.bar(np.arange(5), y)
        ax.set_xticklabels(('', '2012', '2013', '2015', '2016', 'Tabler'))
        ax.set_xlabel('Profile')
        ax.set_ylabel('Flux')

    def PlotAll(self):
        '''Generate every plot for this transect.'''
        self.PlotMaps()
        self.PlotDepthProfiles()
        self.PlotSurfaceProfiles()
        self.PlotSnowSurfacesOnly()
        self.PlotFlux()
def make_transect_obj(bare, years, snows, angle, x1, y1, length):
    '''Build a Transect, fit its Tabler profile, and compute its fluxes.'''
    transect = Transect(bare, years, snows, angle, x1, y1, length)
    transect.TablerProfile()
    transect.Compute_Flux()
    return transect
### globally bring in surfaces and test transects

# Years with available snow depth surfaces.
years = ['2012','2013','2015','2016']

# Bare-earth DEM and per-year snow depth rasters (absolute local paths).
bare_surf = Surface('/home/cparr/surfaces/level_1_surfaces/hv/bare_earth/hv_2012_158_bare_earth_dem.tif')
snow_depth_surf16 = Surface('/home/cparr/surfaces/depth_ddems/hv/hv_2016_096_depth.tif')
snow_depth_surf15 = Surface('/home/cparr/surfaces/depth_ddems/hv/hv_2015_096_depth.tif')
snow_depth_surf13 = Surface('/home/cparr/surfaces/depth_ddems/hv/hv_2013_103_depth.tif')
snow_depth_surf12 = Surface('/home/cparr/surfaces/depth_ddems/hv/hv_2012_107_depth.tif')

# Drop the first two rows so the grids line up.
# NOTE(review): snow_depth_surf12 is NOT trimmed here — confirm whether
# its raster is already aligned or this is an oversight.
snow_depth_surf16.arr = snow_depth_surf16.arr[2:]
snow_depth_surf15.arr = snow_depth_surf15.arr[2:]
snow_depth_surf13.arr = snow_depth_surf13.arr[2:]
bare_surf.arr = bare_surf.arr[2:]

# Split each surface into 12 horizontal bands keyed by name.
bare_surf.subset_surf(12, ['b1','b2','b3','b4',
                           'b5','b6','b7','b8',
                           'b9','b10','b11','b12'])
snow_depth_surf16.subset_surf(12, ['2016_1','2016_2','2016_3','2016_4',
                                   '2016_5','2016_6','2016_7','2016_8',
                                   '2016_9','2016_10','2016_11','2016_12'])
snow_depth_surf15.subset_surf(12, ['2015_1','2015_2','2015_3','2015_4',
                                   '2015_5','2015_6','2015_7','2015_8',
                                   '2015_9','2015_10','2015_11','2015_12'])
snow_depth_surf13.subset_surf(12, ['2013_1','2013_2','2013_3','2013_4',
                                   '2013_5','2013_6','2013_7','2013_8',
                                   '2013_9','2013_10','2013_11','2013_12'])
snow_depth_surf12.subset_surf(12, ['2012_1','2012_2','2012_3','2012_4',
                                   '2012_5','2012_6','2012_7','2012_8',
                                   '2012_9','2012_10','2012_11','2012_12'])

#bare_surf.subset_surf(4, ['b1','b2','b3','b4'])
#
#snow_depth_surf16.subset_surf(4, ['2016_1','2016_2','2016_3','2016_4'])
#
#snow_depth_surf15.subset_surf(4, ['2015_1','2015_2','2015_3','2015_4'])
#
#snow_depth_surf13.subset_surf(4, ['2013_1','2013_2','2013_3','2013_4'])
#
#snow_depth_surf12.subset_surf(4, ['2012_1','2012_2','2012_3','2012_4'])

# Band 7 contains the Kidney Lake study area; gather its bare-earth
# subset and the matching snow subsets (oldest to newest).
kidney_lake_bare = bare_surf.subdict['b7']
kidney_lake_snows = [snow_depth_surf12.subdict['2012_7'],
                     snow_depth_surf13.subdict['2013_7'],
                     snow_depth_surf15.subdict['2015_7'],
                     snow_depth_surf16.subdict['2016_7']]
#test = make_transect_obj(kidney_lake_bare, years, kidney_lake_snows, 270,70,750,200)
#test.PlotAll()
###
class Flux_point(object):
    '''A class for flux points where we determine the seasonal flux direction
    and flux amount.

    For each wind direction in `windrange` (start, stop, step), a transect
    is built from (x, y); the three directions whose Tabler flux best
    matches the observed mean flux are stored as tabler_vector1..3.

    NOTE(review): depends on the module-level globals `kidney_lake_bare`,
    `years` and `kidney_lake_snows`.
    '''

    def __init__(self, x, y, name, windrange):
        self.results = dict()
        self.df = pd.DataFrame()
        self.x = x
        self.y = y
        self.name = name
        self.transect_keys = []
        # Candidate wind directions (degrees TN).
        self.windrange = np.arange(windrange[0], windrange[1], windrange[2])
        for w in self.windrange:
            self.transect_keys.append(self.name + '_' + str(w))
        # One Transect per candidate wind direction.
        for k in zip(self.transect_keys, self.windrange):
            self.results[k[0]] = make_transect_obj(kidney_lake_bare,
                                                   years,
                                                   kidney_lake_snows, k[1], x, y, 200)  # 200 m long.
        # Collect per-direction flux statistics into parallel lists.
        idx = []
        tflux = []
        fluxerr = []
        fluxerrratio = []
        angle = []
        for k in self.results.items():
            idx.append(str(k[0]))
            angle.append(k[1].angle_deg)
            fluxerr.append(k[1].tabler_flux_err)
            tflux.append(k[1].tabler_flux)
            fluxerrratio.append(k[1].flux_err_ratio)
        self.df['idx'] = idx
        self.df['wind direction'] = angle
        self.df['tabler flux error'] = fluxerr
        self.df['abs flux error'] = abs(self.df['tabler flux error'])
        self.df['tabler flux'] = tflux
        self.df['y'] = y
        self.df['err_ratio'] = fluxerrratio
        self.df['x'] = x
        # Rank directions by how closely the Tabler flux matches the
        # observed mean flux; keep the top three.
        self.df.sort_values(['abs flux error'], inplace=True)
        tabler_match1 = self.df.iloc[0]
        tabler_match2 = self.df.iloc[1]
        tabler_match3 = self.df.iloc[2]
        # tabler_match1 = self.df.ix[np.argmin(abs(self.df['err_ratio']))]
        self.tabler_vector1 = tabler_match1['wind direction']
        self.tabler_vector2 = tabler_match2['wind direction']
        self.tabler_vector3 = tabler_match3['wind direction']
def plot_flux(points):
    '''
    Plot the profile lines on top of one snow depth map.
    Lines should fan out from each point at every angle specified.
    '''
    fig = plt.figure(figsize=(8, 5))
    ax = plt.subplot(1, 1, 1)
    depth_im = plt.imshow(kidney_lake_snows[2], cmap='viridis', vmin = 0, vmax = 2)
    ax.set_ylabel('m', size = 7)
    ax.set_xlabel('m', size = 7)
    plt.title('2015 Snow Depth Map [m]')
    colorbar_ax = fig.add_axes([0.90, 0.15, 0.05, 0.7])
    fig.colorbar(depth_im, cax=colorbar_ax)
    # One red line per transect at each candidate wind direction.
    for point in points:
        for transect in point.results.values():
            ax.plot([transect.x1, transect.x3],
                    [transect.y1, transect.y3], c='r',
                    alpha=0.5, linestyle='-', linewidth=1)
    #plt.savefig('/home/cparr/Snow_Patterns/figures/tabler_test.png',dpi=450)
def flux_map(points):
    '''
    Plot the profile lines on top of one snow depth map.
    The three best-matching Tabler wind vectors at each point are drawn as
    arrows: widest red for the best match, yellow for the second, white
    for the third.
    '''
    fig = plt.figure(figsize=(8, 5))
    ax = plt.subplot(1, 1, 1)
    depth_im = plt.imshow(kidney_lake_snows[2], cmap='viridis', vmin = 0, vmax = 2)
    ax.set_ylabel('m', size = 7)
    ax.set_xlabel('m', size = 7)
    plt.title('2015 Snow Depth Map [m]')
    colorbar_ax = fig.add_axes([0.90, 0.15, 0.05, 0.7])
    fig.colorbar(depth_im, cax=colorbar_ax)
    for point in points:
        for transect in point.results.values():
            # Style by rank; skip directions outside the top three.
            if transect.angle_deg == point.tabler_vector1:
                width, fc = 7.0, 'r'
            elif transect.angle_deg == point.tabler_vector2:
                width, fc = 5.0, 'y'
            elif transect.angle_deg == point.tabler_vector3:
                width, fc = 3.0, 'white'
            else:
                continue
            ax.arrow(
                transect.x1,                  # x
                transect.y1,                  # y
                transect.x3 - transect.x1,    # dx
                transect.y3 - transect.y1,    # dy
                length_includes_head = True,
                width = width,
                fc = fc
            )
    #plt.savefig('/home/cparr/Snow_Patterns/figures/tabler_test.png',dpi=450)
# Flux points across the study area.  Each tests wind directions
# 180..355 degrees in 15-degree steps at a fixed (x, y) start.
lake_n = Flux_point(125,650,'lake_n',[180,370,15])
lake_w = Flux_point(80,750,'lake_w',[180,370,15])
lake_s = Flux_point(90,850,'lake_s',[180,370,15])
lake_n1 = Flux_point(135,670,'lake_n1',[180,370,15])
lake_w1 = Flux_point(90,770,'lake_w1',[180,370,15])
lake_s1 = Flux_point(100,870,'lake_s1',[180,370,15])
gully1_n = Flux_point(80,200,'gully1_n',[180,370,15])
gully1_w = Flux_point(80,250,'gully1_w',[180,370,15])
gully1_s = Flux_point(80,300,'gully1_s',[180,370,15])
gully1_n1 = Flux_point(95,200,'gully1_n1',[180,370,15])
gully1_w1 = Flux_point(95,250,'gully1_w1',[180,370,15])
# NOTE(review): gully1_s1 and gully1_s2 share the same (110, 300)
# coordinates — confirm one of them isn't a typo.
gully1_s1 = Flux_point(110,300,'gully1_s1',[180,370,15])
gully1_n2 = Flux_point(110,200,'gully1_n2',[180,370,15])
gully1_w2 = Flux_point(110,250,'gully1_w2',[180,370,15])
gully1_s2 = Flux_point(110,300,'gully1_s2',[180,370,15])
gully2_n = Flux_point(310,50,'gully2_n',[180,370,15])
gully2_w = Flux_point(310,100,'gully2_w',[180,370,15])
gully2_s = Flux_point(310,150,'gully2_s',[180,370,15])
gully2_n1 = Flux_point(345,50,'gully2_n1',[180,370,15])
gully2_w1 = Flux_point(345,100,'gully2_w1',[180,370,15])
gully2_s1 = Flux_point(345,150,'gully2_s1',[180,370,15])
gully3_n = Flux_point(1100,250,'gully3_n',[180,370,15])
gully3_w = Flux_point(1100,300,'gully3_w',[180,370,15])
gully3_s = Flux_point(1050,350,'gully3_s',[180,370,15])
gully3_n1 = Flux_point(1100,280,'gully3_n1',[180,370,15])
gully3_w1 = Flux_point(1100,330,'gully3_w1',[180,370,15])
gully3_s1 = Flux_point(1050,380,'gully3_s1',[180,370,15])
gully3_n2 = Flux_point(1000,400,'gully3_n2',[180,370,15])
gully3_w2 = Flux_point(1000,415,'gully3_w2',[180,370,15])
gully3_s2 = Flux_point(1000,430,'gully3_s2',[180,370,15])
gully3_n3 = Flux_point(1075,500,'gully3_n3',[180,370,15])
gully3_w3 = Flux_point(1100,500,'gully3_w3',[180,370,15])
gully3_s3 = Flux_point(1125,500,'gully3_s3',[180,370,15])
gully3_n4 = Flux_point(1025,450,'gully3_n4',[180,370,15])
gully3_w4 = Flux_point(1050,450,'gully3_w4',[180,370,15])
gully3_s4 = Flux_point(1075,450,'gully3_s4',[180,370,15])

# All points gathered for plotting.
flux_pts = [lake_n,lake_w,lake_s,
            lake_n1,lake_w1,lake_s1,
            gully1_n,gully1_w,gully1_s,
            gully1_n1,gully1_w1,gully1_s1,
            gully1_n2,gully1_w2,gully1_s2,
            gully2_n,gully2_w,gully2_s,
            gully2_n1,gully2_w1,gully2_s1,
            gully3_n,gully3_w,gully3_s,
            gully3_n1,gully3_w1,gully3_s1,
            gully3_n2,gully3_w2,gully3_s2,
            gully3_n3,gully3_w3,gully3_s3,
            gully3_n4,gully3_w4,gully3_s4]

#plot_flux(flux_pts)

# Draw the best-matching wind vectors over the 2015 depth map.
flux_map(flux_pts)
#plt.figure()
#sns.barplot(x=gully3_w4.df['wind direction'],y=gully3_w4.df['flux err. / tabler flux'])
####### everything below here is trying to follow the curve of the lake
#nd_limits = []
#for k in kidney_lake_results:
# if kidney_lake_results[k].angle_deg == 290:
# nd_limits.append(kidney_lake_results[k].no_data_limit)
#
##limit of lake
#
#curve_list = []
#ylocs = []
#xlocs = []
#
#i = 0
#
#for l in kidney_lake_bare:
#
#
# nan_idx = np.argwhere(np.isnan(l[50:]))
#
# if len(nan_idx) > 0:
# c = min(nan_idx)
# if c < 150:
# ylocs.append(i)
# curve_list.append(int(c))
# i += 1
#
#for c in curve_list:
# xlocs.append(c-50)
#
#
#
#plt.plot(curve_list)
#plt.plot(xlocs)
#plt.plot(ylocs)
## test using lake buffer as start points
#
#kidney_lake_results = dict()
#df = pd.DataFrame()
#
#def test_wind_dirs(dir1,dir2,step):
#
# profile_keys = []
# wind_dirs = []
#
# for n in np.arange(dir1,dir2,step):
# for i in range(1,12):
# profile_keys.append('Lake_'+str(n)+'_'+str(i))
# wind_dirs.append(n)
#
# start_xs = [n for n in xlocs]*len(profile_keys)
# start_ys = [n for n in np.arange(min(ylocs),max(ylocs),1)]*len(profile_keys)
#
#
#
# for k in zip(profile_keys, start_xs, start_ys, wind_dirs):
# kidney_lake_results[k[0]] = make_transect_obj(kidney_lake_bare,
# years,
# kidney_lake_snows,
# k[3],k[1],k[2],200,50,106)
#
# idx = []
# tflux = []
# fluxerr = []
# angle = []
# ystart = []
#
# for k in kidney_lake_results.items():
# idx.append(str(k[0]))
# angle.append(k[1].angle_deg)
# fluxerr.append(k[1].tabler_flux_err)
# tflux.append(k[1].tabler_flux)
# ystart.append(k[1].y1)
#
# df['idx']=idx
# df['wind direction'] = angle
# df['tabler flux error'] = fluxerr
# df['tabler flux'] = tflux
# df['y1'] = ystart
#
#test_wind_dirs(260,305,5)
#
#plot_all_one_map()
#wt0 = Transect(watertrack_lake_bare,years,watertrack_lake_snows, 300,200,300,250)
#wt0.TablerProfile(23,48, wt0.snowdict['2012']['winter surface profile'], 8)
#big_lake0 = Transect(big_lake_bare,years,big_lake_snows, 300,40,340,120)
#big_lake0.TablerProfile(30,70, big_lake0.snowdict['2012']['winter surface profile'], 8)
#==============================================================================
#
# def savitzky_golay(self, y, window_size, order, deriv=0, rate=1):
# """Smooth (and optionally differentiate) with a Savitzky-Golay filter.
# The filter removes high frequency noise from data.
# It has the advantage of preserving the original shape and
# features of the signal better than other types of filtering
# approaches, such as moving averages techniques.
# Parameters
# ----------
# y : array_like, shape (N,)
# the values of the time history of the signal.
# window_size : int
# the length of the window. Must be an odd integer number.
# order : int
# the order of the polynomial used in the filtering.
# Must be less then `window_size` - 1.
# deriv: int
# order of the derivative to compute (default = 0 is only smoothing)
# Returns
# -------
# ys : ndarray, shape (N)
# the smoothed signal (or it's n-th derivative).
# Notes
# -----
# The Savitzky-Golay is a type of low-pass filter, particularly
# suited for smoothing noisy data. The main idea behind this
# approach is to make for each point a least-square fit with a
# polynomial of high order over a odd-sized window centered at
# the point.
# Examples
# --------
# t = np.linspace(-4, 4, 500)
# y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
# ysg = savitzky_golay(y, window_size=31, order=4)
# import matplotlib.pyplot as plt
# plt.plot(t, y, label='Noisy signal')
# plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
# plt.plot(t, ysg, 'r', label='Filtered signal')
# plt.legend()
# plt.show()
# References
# ----------
# .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
# Data by Simplified Least Squares Procedures. Analytical
# Chemistry, 1964, 36 (8), pp 1627-1639.
# .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
# W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
# Cambridge University Press ISBN-13: 9780521880688
# """
#
# try:
# window_size = np.abs(np.int(window_size))
# order = np.abs(np.int(order))
# except:
# raise ValueError("window_size and order have to be of type int")
# if window_size % 2 != 1 or window_size < 1:
# raise TypeError("window_size size must be a positive odd number")
# if window_size < order + 2:
# raise TypeError("window_size is too small for the polynomials order")
# order_range = range(order+1)
# half_window = (window_size -1) // 2
# # precompute coefficients
# b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
# m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# # pad the signal at the extremes with
# # values taken from the signal itself
# firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
# lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
# y = np.concatenate((firstvals, y, lastvals))
# return np.convolve( m[::-1], y, mode='valid')
#
# def smooth_surfaces(self):
# ''' Use Savitzky-Golay to generate slope and curvature'''
# self.smooth_bare_earth = self.savitzky_golay(self.bare_earth, 9, 1)
# self.slope = self.savitzky_golay(self.smooth_bare_earth, 9, 1, 1)
# self.curvature = self.savitzky_golay(self.slope, 9, 1, 1)
#
# def thresh_data(self, arr, sig, thresh):
# ''' Use Gaussian Filter to create boolean array where True values are
# consecutively greater than some threshold. This is useful where there
# are many drifts arranged in series, e.g. watertracks.
# '''
# sigma = sig
# threshold = thresh
# self.above_threshold = gaussian_filter(arr, sigma=sigma) > threshold
#
# def contiguous_regions(self, condition):
# """Finds contiguous True regions of the boolean array "condition". Returns
# a 2D array where the first column is the start index of the region and the
# second column is the end index."""
#
#    # Find the indices of changes in "condition"
# d = np.diff(condition)
# idx, = d.nonzero()
#
# # We need to start things after the change in "condition". Therefore,
# # we'll shift the index by 1 to the right.
# idx += 1
#
# if condition[0]:
# # If the start of condition is True prepend a 0
# idx = np.r_[0, idx]
#
# if condition[-1]:
# # If the end of condition is True, append the length of the array
# idx = np.r_[idx, condition.size] # Edit
#
# # Reshape the result into two columns
# idx.shape = (-1,2)
# self.drift_index = idx
#
# def drift_brackets(self):
# ''' Define the start and end of a drift based on some kind of
# topographic indicator. These might need to vary depending on the type
# of terrain'''
# self.min_curve_start = np.nanargmin(self.curvature)
# self.max_curve_end = np.nanargmax(self.curvature)
#
# def measure_flux(self, start, end):
# ''' Use trapezoidal rule integration to calculate flux over the drift
#        fetch. Values will be m^3 per lineal m, so essentially we have m^2 which
# makes sense because we are getting the area under the snow depth curve.
# We divide the initial result by 2 because our data cells are 2m x 2m.
# Parameters are start and end of drift zone for integration.
# '''
# self.flux = integrate.simps(self.snow_depth, range(start, end)) / 2.0
# #self.flux = np.trapz(self.snow_depth[start:end]) / 2.0
# self.drift_length = end - start
# self.avg_slope_under_drift = np.nanmean(self.slope[start:end])
#
#
# ############
# def make_transect(bsubsurf, ssubsurf, tline):
#
# tname = Transect(bare_surf.subdict[bsubsurf][tline],
# snow_depth_surf.subdict[ssubsurf][tline])
# tname.thresh_data(tname.snow_depth,3,np.nanmean(tname.snow_depth))
# tname.smooth_surfaces()
# tname.contiguous_regions(tname.above_threshold == True)
#
# return tname
#
# # uncomment below if entire transect is a drift
# #t1.drift_brackets()
# #t1.measure_flux(t1.min_curve_start,t1.max_curve_end)
#
# def make_subdrifts(tname):
#
# dct = dict()
# # I expect to see meaningless 'mean of empty slice
# # RuntimeWarnings in this block
# with warnings.catch_warnings():
# warnings.simplefilter("ignore", category=RuntimeWarning)
# for d in tname.drift_index:
# dr_id = str(d)[1:-1]
# dct[dr_id] = Subdrift(tname, d)
# dct[dr_id].smooth_surfaces()
# dct[dr_id].measure_flux(dct[dr_id].start,
# dct[dr_id].end)
# return dct
#
# # this is cool!
# # big flux...low flux...big flux..low flux..then decreasing trend:
# def scatter_flux(drift_dct):
#
# fluxes = []
# starts = []
# dlengths = []
# for k,v in drift_dct.items():
# fluxes.append(v.flux)
# starts.append(v.start)
# dlengths.append(v.drift_length)
# #plt.figure()
# plt.scatter(starts,fluxes,s=dlengths)
# plt.ylabel('Drift Flux')
# plt.xlabel('drift start [m]')
# return fluxes
#
# def plot_overview(im, dct, tline):
#
# dims = np.isnan(im[tline])
# x = np.diff(np.where(dims))
# cutoff = np.where(x>1)
# include_start = cutoff[1][0]
# include_stop = x.max()
#
# plt.figure()
# plt.imshow(im[:,include_start:include_stop])
#
# for k,v in dct.items():
# x1 = v.start / im[:,include_start:include_stop].shape[1]
# x2 = v.end / im[:,include_start:include_stop].shape[1]
# plt.axhline(y=tline, xmin = x1, xmax = x2, c = 'r', alpha=0.5)
#
# ##############################################################################
# tlines = np.arange(220,455,5)
# tnames = []
# dnames=[]
# for y in tlines:
# t = 't5_' + str(y)
# d = 't5_' + str(y) +'_sub'
# tnames.append(t)
# dnames.append(d)
# del y, d, t
#
# big_t_dct = dict()
# for f in zip(tnames ,tlines):
# big_t_dct[f[0]] = make_transect('b5', 's5', f[1])
#
# big_s_dct = dict()
# for f in zip(dnames ,tnames):
# big_s_dct[f[0]] = make_subdrifts(big_t_dct[f[1]])
#
# all_flux = []
# flux_sums = []
# for k in big_s_dct.keys():
# f = scatter_flux(big_s_dct[k])
# all_flux.append(f)
#
# for f in all_flux:
# flux_sums.append(sum(f) / 2)
#
# plt.scatter(tlines, flux_sums)
#
#
# for k in zip(big_s_dct.keys(), tlines):
# plot_overview(snow_depth_surf.subdict['s5'], big_s_dct[k[0]], k[1])
#
# ################################################################
| |
# ============================================================================
# File: neotags.py
# Author: Christian Persson <c0r73x@gmail.com>
# Repository: https://github.com/c0r73x/neotags.nvim
# Released under the MIT license
# ============================================================================
# NOTE: Psutil import has been moved to Neotags._kill() to allow the script to
# run without the module (and therefore essentially without the kill function;
# beggars can't be choosers). Psutil is not and will never be available on
# Cygwin, which makes this plugin unusable there without this change.
import hashlib
import os
import re
import subprocess
import sys
import time
import json
from copy import deepcopy
from pynvim.api import NvimError
# sys.path.append(os.path.dirname(__file__))
from neotags.utils import (do_set_base, do_remove_base, get_project_path,
do_add_extra_dir, do_remove_extra_dir, find_tags,
strip_c, tokenize, bindex)
from neotags.diagnostics import Diagnostics
CLIB = None
dia = None
if sys.platform == 'win32':
SEPCHAR = ';'
else:
SEPCHAR = ':'
class Neotags(object):
def __init__(self, vim):
self.__prefix = r'\C\<'
self.__suffix = r'\>'
self.__initialized = False
self.__is_running = False
self.__run_ctags = False
self.__cmd_cache = {}
self.__exists_buffer = {}
self.__groups = {}
self.__md5_cache = {}
self.__regex_buffer = {}
self.__tmp_cache = {}
self.__cur = {'file': None, 'buf': None}
self.__seen = []
self.__backup_groups = {}
self.__autocmd = None
self.__gzfile = None
self.__init_tagfiles = None
self.__neotags_bin = None
self.__patternlength = None
self.__slurp = None
self.__tagfile = None
self.__ctov = ''
self.__fsuffix = ''
self.__keyword_pattern = ''
self.__match_pattern = ''
self.__match_pattern_not = ''
self.__notin_pattern = ''
self.__to_escape = ''
self.__vtoc = ''
self.__hlbuf = 1
self.vim = vim
def init(self):
if self.__initialized:
return
self.__ctov = self.vv('ft_conv')
self.__vtoe = self.vv('ft_ext')
self.__init_tagfiles = self.vim.api.eval('&tags').split(",")
self.__to_escape = re.compile(r'[.*^$/\\~\[\]]')
self.__vtoc = {}
for x, y in self.__ctov.items():
if isinstance(y, list):
for z in y:
self.__vtoc[z] = x
else:
self.__vtoc[y] = x
self.__notin_pattern = r'syntax match %s /%s\%%(%s\)%s/ containedin=ALLBUT,%s display'
self.__match_pattern_not = r'syntax match %s /%s\%%(%s\)%s/ containedin=ALLBUT,%s display'
self.__match_pattern = r'syntax match %s /%s\%%(%s\)%s/ display'
self.__keyword_pattern = r'syntax keyword %s %s'
self.__tagfiles_by_type = self.vv('tagfiles_by_type')
if self.vv('use_binary') == 1:
self.__neotags_bin = self._get_binary()
global dia
dia = Diagnostics(bool(self.vv('verbose')), self.vim, self.vv)
if self.vv('use_compression'):
global CLIB
ctype = self.vv('compression_type')
if ctype in ('gz', 'gzip'):
import gzip as CLIB
self.vv('compression_type', SET='gzip')
self.__fsuffix = '.gz'
elif ctype in ('xz', 'lzma'):
import lzma as CLIB
self.vv('compression_type', SET='lzma')
self.__fsuffix = '.xz'
else:
dia.error("Neotags: Unrecognized compression type.")
self.vv('compression_type', SET=None)
self.vv('use_compression', SET=0)
else:
self.vv('compression_type', SET=None)
self.vv('use_compression', SET=0)
dia.debug_echo("Using compression type %s with ext %s" %
(self.vv('compression_type'), self.__fsuffix))
self.__autocmd = "if execute('autocmd User') =~# 'NeotagsPost' | " \
"doautocmd User NeotagsPost | endif"
self.__initialized = True
if self.vv('enabled'):
evupd = ','.join(self.vv('events_update'))
evhl = ','.join(self.vv('events_highlight'))
evre = ','.join(self.vv('events_rehighlight'))
self.__patternlength = self.vv('patternlength')
self.vim.command('autocmd %s * call NeotagsUpdate()' %
evupd, async_=True)
self.vim.command(
'autocmd %s * call NeotagsHighlight()' % evhl, async_=True)
self.vim.command(
'autocmd %s * call NeotagsRehighlight()' % evre, async_=True)
if self.vv('loaded'):
self.update(False)
def update(self, force):
"""Update tags file, tags cache, and highlighting."""
ft = self.vim.api.eval('&ft')
init_time = time.time()
if not self.vv('enabled'):
self._clear(ft)
self.vim.command(self.__autocmd, async_=True)
return
if ft == '' or ft in self.vv('ignore') or self.vim.api.eval('&previewwindow'):
self.vim.command(self.__autocmd, async_=True)
return
if self.__is_running:
# XXX This should be more robust
return
dia.debug_start()
self.__is_running = True
self.__exists_buffer = {}
hl = HighlightGroup()
hl.ft = ft
hl.file = self.vim.api.eval("expand('%:p:p')")
self.__cur['file'] = os.path.realpath(hl.file)
self.__cur['buf'] = self.vim.current.buffer
hl.highlights, hl.number = self._getbufferhl()
if hl.number in self.__md5_cache:
hl.highlights = self.__md5_cache[hl.number]
else:
self.__md5_cache[hl.number] = hl.highlights = {}
if force:
if self.vv('run_ctags'):
self._run_ctags(True)
self.__groups[ft] = self._parseTags(ft)
elif hl.number not in self.__seen:
self.__seen.append(self.__cur['buf'].number)
self.__groups[ft] = self._parseTags(ft)
elif hl.number not in self.__cmd_cache:
self.__groups[ft] = self._parseTags(ft)
self.highlight(force, hl)
self.vim.command(self.__autocmd, async_=True)
dia.clear_stack()
dia.debug_echo('Finished all => (%.4fs)' % (time.time() - init_time))
self.__is_running = False
def highlight(self, force, hl):
"""Analyze the tags data and format it for nvim's regex engine."""
restored_groups = self.vv('restored_groups')
if hl.ft not in self.__backup_groups:
self.__backup_groups[hl.ft] = {}
if hl.ft in restored_groups and restored_groups[hl.ft]:
for group in restored_groups[hl.ft]:
if group not in self.__backup_groups[hl.ft]:
self._get_backup(hl.ft, group)
groups = self.__groups[hl.ft]
order = self._tags_order(hl.ft)
if groups is None:
self.__is_running = False
dia.debug_end('Skipping file')
return
if not order:
order = groups.keys()
for hl.key in order:
dia.debug_start()
hl.group = self._exists(hl.key, '.group', None)
fgroup = self._exists(hl.key, '.filter.group', None)
if hl.group is None:
continue
if hl.key in groups:
hl.allow_keyword = self._exists(hl.key, '.allow_keyword', 1)
hl.prefix = self._exists(hl.key, '.prefix', self.__prefix)
hl.suffix = self._exists(hl.key, '.suffix', self.__suffix)
hl.notin = self._exists(hl.key, '.notin', [])
if not self._highlight(hl, groups[hl.key], force):
break
else:
dia.error("Unexpected error")
fkey = hl.key + '_filter'
if fkey in groups:
fhl = deepcopy(hl)
fhl.key = fkey
fhl.allow_keyword = self._exists(hl.key, '.allow_keyword', 1)
fhl.prefix = self._exists(
hl.key, '.filter.prefix', self.__prefix)
fhl.suffix = self._exists(
hl.key, '.filter.suffix', self.__suffix)
fhl.notin = self._exists(hl.key, '.filter.notin', [])
if not self._highlight(fhl, groups[fhl.key], force):
break
for group in self.__backup_groups[hl.ft]:
self._restore_group(hl.ft, group)
self.__hlbuf = self.__cur['buf'].number
dia.debug_end('applied syntax for %s' % hl.ft)
##############################################################################
# Projects
def setBase(self, args):
do_set_base(dia, self.vv('settings_file'), args)
def removeBase(self, args):
do_remove_base(dia, self.vv('settings_file'), args)
def addExtraDir(self, args):
do_add_extra_dir(dia, self.vv('settings_file'), args)
def removeExtraDir(self, args):
do_remove_extra_dir(dia, self.vv('settings_file'), args)
##############################################################################
# Private
def _highlight(self, hl, group, force):
highlights, number = hl.highlights, hl.number
dia.debug_echo("Highlighting for buffer %s" % number)
if number in self.__md5_cache:
highlights = self.__md5_cache[number]
else:
self.__md5_cache[number] = highlights = {}
current = []
cmds = []
hl.key = '_Neotags_%s_%s' % (hl.key.replace('#', '_'), hl.group)
md5 = hashlib.md5()
strgrp = b''.join(group)
for i in range(0, len(strgrp), 128):
md5.update(strgrp[i:i + 128])
md5hash = md5.hexdigest()
if not force \
and (hl.key in highlights and md5hash == highlights[hl.key]) \
or (number != self.__hlbuf and number in self.__cmd_cache
and hl.key in self.__cmd_cache[number]):
try:
cmds = self.__cmd_cache[number][hl.key]
except KeyError:
dia.error('Key error in _highlight()!')
dia.debug_end('')
return True
dia.debug_echo("Updating from cache" % cmds)
else:
cmds.append('silent! syntax clear %s' % hl.key)
for i in range(0, len(group), self.__patternlength):
current = group[i:i + self.__patternlength]
current = [x.decode('ascii') for x in current]
if hl.notin:
hl.notin = [*hl.notin, *self.vv('global_notin')]
dia.debug_echo(hl.notin)
cmds.append(self.__notin_pattern %
(hl.key, hl.prefix, r'\|'.join(current),
hl.suffix, ','.join(hl.notin)))
elif (hl.prefix == self.__prefix and
hl.suffix == self.__suffix and
hl.allow_keyword == 1 and
'.' not in r''.join(current)):
cmds.append(self.__keyword_pattern %
(hl.key, r' '.join(current)))
else:
cmds.append(self.__match_pattern_not %
(hl.key, hl.prefix, r'\|'.join(current), hl.suffix, ','.join(self.vv('global_notin'))))
if hl.ft != self.vim.api.eval('&ft'):
dia.debug_end('filetype changed aborting highlight')
return False
self.__md5_cache[number][hl.key] = md5hash
cmds.append('hi def link %s %s' % (hl.key, hl.group))
full_cmd = ' | '.join(cmds)
dia.debug_echo(full_cmd)
if self.__cur['buf'] == self.vim.current.buffer:
self.vim.command(full_cmd, async_=True)
success = True
else:
dia.debug_echo('Buffer changed, aborting.')
success = False
try:
self.__cmd_cache[number][hl.key] = cmds
except KeyError:
self.__cmd_cache[number] = {}
self.__cmd_cache[number][hl.key] = cmds
finally:
if success:
dia.debug_end('Updated highlight for %s' % hl.key)
else:
dia.pop()
return True
def _parseTags(self, ft):
self._get_files()
files = []
dia.debug_start()
dia.debug_echo("Using tags file %s" % self.__gzfile)
dia.debug_echo("run_ctags -> %d" % self.vv('run_ctags'))
if not os.path.exists(self.__gzfile):
if self.vv('run_ctags'):
self._run_ctags(True)
files.append(self.__gzfile)
else:
self.__gzfile = None
else:
dia.debug_echo('updating vim-tagfile')
with self._open(self.__gzfile, 'rb', self.vv('compression_type')) as fp:
self._update_vim_tagfile(self.__gzfile, fp)
files.append(self.__gzfile)
for File in self.__init_tagfiles:
if os.path.exists(File):
files.append(File)
dia.debug_end("Finished updating file list")
if not files:
dia.error('No tag files found!')
return None
# Slurp the whole content of the current buffer
self.__slurp = '\n'.join(self.__cur['buf'])
if self.__neotags_bin is not None:
try:
return self._bin_get_tags(files, ft)
except CBinError as err:
self.vim.command("echoerr 'C binary failed with status %d: \"%s\"' "
"| echoerr 'Will try python code.'" % err.args, async_=True)
return self._get_tags(files, ft)
else:
return self._get_tags(files, ft)
def _get_backup(self, ft, group):
tmp = self.vim.api.eval("execute('syn list %s')" % group)
tmp = re.sub(r'.*xxx\s*(.*)\s*links to (.*)',
r'\1 \2', tmp, flags=re.S)
tmp = re.sub(r'(?:\s+|\n)', ' ', tmp).split()
try:
self.__backup_groups[ft][group] = (tmp[-1], tmp[:-1])
except IndexError:
dia.error("Unexpected index error in _get_backup()")
self.__backup_groups[ft][group] = []
def _restore_group(self, ft, group):
cmds = []
lnk = self.__backup_groups[ft][group][0]
symbols = self.__backup_groups[ft][group][1]
cmds.append('silent! syntax clear %s' % group)
cmds.append('syntax keyword %s %s' % (group, ' '.join(symbols)))
cmds.append('hi! link %s %s' % (group, lnk))
full_cmd = ' | '.join(cmds)
self.vim.command(full_cmd, async_=True)
# =============================================================================
# Yes C binary
def _bin_get_tags(self, files, ft):
filetypes = ft.lower().split('.')
languages = ft.lower().split('.')
vimlang = languages[0]
lang = self._vim_to_ctags(languages)[0]
try:
order = self.vim.api.eval('neotags#%s#order' % ft)
except NvimError:
return None
try:
equivalent = self.vim.api.eval('neotags#%s#equivalent' % ft)
except NvimError:
equivalent = None
groups = {
"%s#%s" % (ft, kind): []
for kind in [chr(i) for i in order.encode('ascii')]
}
if filetypes is None:
return groups
if self.__gzfile is not None:
comp_type = self.vv('compression_type')
comp_type = 'none' if comp_type is None else comp_type
file_list = '%s%s%s' % (comp_type, SEPCHAR, files[0])
for File in files[1:]:
file_list += '%snone%s%s' % (SEPCHAR, SEPCHAR, File)
else:
file_list = ''
for File in files:
file_list += '%snone%s%s' % (SEPCHAR, SEPCHAR, File)
stime = time.time()
dia.debug_echo("=============== Executing C code ===============")
ignored_tags = self.vv('ignored_tags')
if ft in ignored_tags and ignored_tags[ft]:
ignored_tags = SEPCHAR.join(ignored_tags[ft])
else:
ignored_tags = ''
if equivalent is None:
equiv_str = ''
else:
equiv_str = SEPCHAR.join([A + B for A, B in equivalent.items()])
indata = self.__slurp.encode('ascii', errors='replace')
File = self.__cur['file']
dia.debug_echo("Cmd is: %s" % [
self.__neotags_bin, file_list, lang, vimlang, order,
str(self.vv('strip_comments')), len(indata),
ignored_tags, equiv_str, File])
# Required arguments (in this order):
# 1) List of tags files the compression type of each file, with
# all fields separated by colons (comptype:file:comptype:file)
# 2) The language of the current buffer in ctags' format
# 3) The same in vim's format
# 4) The `order' string
# 5) Whether to strip out comments (0 or 1)
# 6) The length in bytes of the current vim buffer
# 7) The `ignored' tags list (colon separated)
# 8) The list of groups considered equivalent (colon separated)
# All numbers must be converted to strings for the subprocess interface.
proc = subprocess.Popen(
(
self.__neotags_bin,
file_list,
lang,
vimlang,
order,
str(self.vv('strip_comments')),
str(len(indata)),
ignored_tags,
equiv_str,
File
),
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
out, err = proc.communicate(input=indata)
if sys.platform == 'win32':
out = out.rstrip().split(b'\n').rstrip(b'\r')
else:
out = out.rstrip().split(b'\n')
err = err.rstrip().decode(errors='replace').split('\n')
dia.debug_echo("Returned %d items" % (len(out)))
for line in err:
if line:
dia.debug_echo("ERR: %s" % line)
if proc.returncode:
raise CBinError(proc.returncode, err[-1])
for line in out:
try:
key, name = line.split(b'\t')
except ValueError:
continue
key = key.decode()
try:
groups[key].append(name)
except KeyError:
groups[key] = [name]
dia.debug_echo("Elapsed time for reading file: %fs" %
(float(time.time()) - stime), err=True)
return groups
# =============================================================================
# No C binary
def _get_tags(self, files, ft):
filetypes = ft.lower().split('.')
languages = ft.lower().split('.')
dia.debug_echo("=============== Executing Python code ===============")
match_list = []
try:
ignored_tags = self.vv('ignored_tags')[ft]
except KeyError:
ignored_tags = []
try:
order = self.vim.api.eval('neotags#%s#order' % ft)
except NvimError:
dia.debug_echo("No order string found.")
return
try:
equivalent = self.vim.api.eval('neotags#%s#equivalent' % ft)
except NvimError:
equivalent = None
stime = time.time()
groups = {
"%s#%s" % (ft, kind): set()
for kind in [chr(i) for i in order.encode('ascii')]
}
if filetypes is None:
dia.debug_echo("No filetypes identified, returning.")
return groups
if self.__gzfile is None:
comp_type = None
else:
comp_type = self.vv('compression_type')
for File in files:
try:
with self._open(File, 'rb', comp_type) as fp:
data = fp.read()
tags = find_tags(dia, data, self._vim_to_ctags(languages)[0],
order, ignored_tags, equivalent)
match_list.append(tags)
except FileNotFoundError:
if File == self.__gzfile:
dia.error("No tags file found. Make sure Universal Ctags is "
"installed and in your $PATH.")
continue
comp_type = None
match_list = [i for s in match_list for i in s]
self._parse(ft, match_list, groups, languages,
ignored_tags, equivalent, order)
for grp in groups.keys():
groups[grp] = list(set(groups[grp]))
dia.debug_echo("Elapsed time for reading file: %fs" %
(float(time.time()) - stime), err=True)
dia.debug_echo("Finished finding tags, found %d items."
% sum(map(len, groups.values())))
return groups
def _parse(self, ft, match_list, groups, languages, ignored_tags, equivalent, order):
dia.debug_start()
key_lang = languages[0]
if key_lang in ('c', 'cpp', 'java', 'go', 'rust', 'cs'):
buf = strip_c(self.__slurp, dia)
else:
buf = bytes(self.__slurp, 'ascii', errors='replace')
toks = sorted(tokenize(buf, dia))
for match in match_list:
if (bindex(toks, match['name']) != (-1)
or b'$' in match['name']
or b'.' in match['name']):
key = "%s#%s" % (ft, match['kind'].decode('ascii'))
groups[key].add(match['name'])
dia.debug_end("Finished _parse, found %d items."
% sum(map(len, groups.values())))
# =============================================================================
def _run_ctags(self, force):
dia.debug_start()
ctags_command = self._get_ctags_command(force)
if ctags_command is None:
dia.debug_end("Not running ctags.")
return
try:
proc = subprocess.Popen(
ctags_command, shell=True, stderr=subprocess.PIPE)
proc.wait(self.vv('ctags_timeout'))
err = proc.communicate()[1]
if err:
dia.error('Ctags completed with errors')
for e in err.decode('ascii').split('\n'):
dia.error(e)
else:
dia.debug_echo('Ctags completed successfully')
cmpt = self.vv('compression_type')
try:
dia.debug_start()
if cmpt in ('gzip', 'lzma'):
with open(self.__tagfile, 'rb') as src:
with self._open(self.__gzfile, 'wb', cmpt, level=9) as dst:
dst.write(src.read())
src.seek(0)
self._update_vim_tagfile(self.__gzfile, src)
os.unlink(self.__tagfile)
except IOError as err:
dia.error("Unexpected IO Error -> '%s'" % err)
finally:
dia.debug_end('Finished compressing file.')
except FileNotFoundError as error:
dia.error('failed to run Ctags %s' % error)
except subprocess.TimeoutExpired:
try:
self._kill(proc.pid)
except ImportError:
proc.kill()
else:
if self.vv('silent_timeout') == 0:
self.vim.command("echom 'Ctags process timed out!'",
async_=True)
finally:
dia.debug_end("Finished running ctags")
def _get_ctags_command(self, force):
"""Create the commandline to be invoked when running ctags."""
ctags_args = self.vv('ctags_args')
# NOTE: _get_files() sets self.__tagfile and self.__gzfile!
recurse, paths, run = self._get_files()
if not run or (not force and os.path.exists(self.__gzfile)
and os.stat(self.__gzfile).st_size > 0):
return None
ctags_args.append('-f "%s"' % self.__tagfile)
ctags_binary = None
path_args = ' '.join(['"%s"' % p for p in paths])
if recurse:
if self.vv('find_tool'):
find_tool = "%s %s" % (self.vv('find_tool'), path_args)
if (self.__tagfiles_by_type == 1):
ft = self.vim.api.eval('&ft')
languages = self._vim_to_ext(ft.lower().split('.'))
find_tool = '%s | %s "\\.(%s)$"' % (
find_tool, self.vv('regex_tool'), '|'.join(languages))
ctags_args.append('-L -')
ctags_binary = "%s | %s" % (
find_tool,
self.vv('ctags_bin'))
dia.debug_echo(
"Using %s to find files recursively in dir(s) %s" %
(self.vv('find_tool'), path_args))
else:
ctags_args.append('-R %s' % path_args)
ctags_binary = self.vv('ctags_bin')
dia.debug_echo("Running ctags on dir(s) %s" % path_args)
else:
dia.debug_echo(
"Not running ctags recursively for dir(s) %s" % path_args)
File = self.__cur['file']
ctags_args.append('"%s"' % File)
ctags_binary = self.vv('ctags_bin')
dia.debug_echo("Running ctags on file '%s'" % File)
full_command = '%s %s' % (ctags_binary, ' '.join(ctags_args))
dia.debug_echo(full_command)
return full_command
# ==============================================================================
# Debug and util
def _tags_order(self, ft):
orderlist = []
filetypes = ft.lower().split('.')
for filetype in filetypes:
order = self._exists(filetype, '#order', None)
if order:
orderlist += [(filetype + '#') + s for s in list(order)]
return orderlist
def _exists(self, kind, var, default):
buf = kind + var
if buf not in self.__exists_buffer:
try:
self.__exists_buffer[buf] = self.vim.api.eval('neotags#' + buf)
except NvimError:
self.__exists_buffer[buf] = default
return self.__exists_buffer[buf]
def _getbufferhl(self):
number = self.__cur['buf'].number
if number in self.__md5_cache.keys():
highlights = self.__md5_cache[number]
else:
self.__md5_cache[number] = highlights = {}
return highlights, number
def _clear(self, ft):
if ft is None:
dia.debug_echo('Clear called with null ft')
return
cmds = []
order = self._tags_order(ft)
for key in order:
hlgroup = self._exists(key, '.group', None)
hlkey = '_Neotags_%s_%s' % (key.replace('#', '_'), hlgroup)
cmds.append('silent! syntax clear %s' % hlkey)
dia.debug_echo(str(cmds))
self.vim.command(' | '.join(cmds), async_=True)
def _kill(self, proc_pid):
import psutil
process = psutil.Process(proc_pid)
for proc in process.children():
proc.kill()
process.kill()
def _ctags_to_vim(self, lang, languages):
lang = lang.strip('\\')
if lang in self.__ctov and self.__ctov[lang] in languages:
return self.__ctov[lang]
return lang.lower()
def _vim_to_ext(self, languages):
for i, lang in enumerate(languages):
if lang in self.__vtoe:
languages[i] = '|'.join(self.__vtoe[lang])
return languages
def _vim_to_ctags(self, languages):
for i, lang in enumerate(languages):
if lang in self.__vtoc:
languages[i] = self.__vtoc[lang]
languages[i] = languages[i].strip('\\')
languages[i] = re.escape(languages[i])
return languages
def _get_files(self):
File = self.__cur['file']
path = os.path.dirname(File)
run = 1
extra_dirs = []
recurse = (self.vv('recursive')
and path not in self.vv('norecurse_dirs'))
if recurse:
try:
with open(self.vv('settings_file'), 'r') as fp:
projects = json.load(fp)
except (json.JSONDecodeError, FileNotFoundError):
# Just reset the projects file
projects = {}
with open(self.vv('settings_file'), 'w') as fp:
fp.write('')
proj_path = get_project_path(projects, path)
if proj_path is not None:
path = proj_path
run = projects[path].get('run', 1)
extra_dirs = projects[path].get('extra_dirs', [])
path = os.path.realpath(path)
self._path_replace(path)
else:
self._path_replace(File)
self.vim.command('let g:neotags_file = "%s"' %
self.__tagfile, async_=True)
return recurse, [path] + extra_dirs, run
def _path_replace(self, path):
if (sys.platform == 'win32'):
# For some reason replace wouldn't work here. I have no idea why.
path = re.sub(':', '__', path)
sep_char = '\\'
else:
sep_char = '/'
if (self.__tagfiles_by_type == 1):
ft = self.vim.api.eval('&ft')
self.__tagfile = "%s/%s_%s.tags" % (self.vv('directory'),
path.replace(sep_char, '__'),
ft)
else:
self.__tagfile = "%s/%s.tags" % (self.vv('directory'),
path.replace(sep_char, '__'))
self.__gzfile = self.__tagfile + self.__fsuffix
def _get_binary(self, loud=False):
binary = self.vv('bin')
if sys.platform == 'win32' and binary.find('.exe') < 0:
binary += '.exe'
if os.path.exists(binary):
self.vv('use_binary', SET=1)
else:
self.vv('use_binary', SET=0)
binary = None
if loud:
dia.inform_echo("Binary '%s' doesn't exist. Cannot enable." %
self.__neotags_bin)
else:
dia.debug_echo(
"Binary '%s' doesn't exist." % self.__neotags_bin)
return binary
def _update_vim_tagfile(self, tagfile, open_file):
try:
if tagfile not in self.__tmp_cache:
tmpfile = open(self.vim.call('tempname'), 'wb')
self.__tmp_cache[tagfile] = {
'fp': tmpfile,
'name': tmpfile.name
}
tmpfile.write(open_file.read())
self.vim.command('set tags+=%s' % tmpfile.name, async_=True)
else:
if sys.platform == 'win32':
tmpfile = open(self.__tmp_cache[tagfile]['name'], 'wb')
else:
tmpfile = self.__tmp_cache[tagfile]['fp']
tmpfile.seek(0)
tmpfile.truncate(0)
tmpfile.flush()
tmpfile.write(open_file.read())
tmpfile.flush()
# On windows we must close the file or else it will be impossible
# to delete it when nvim itself closes.
if sys.platform == 'win32':
tmpfile.close()
except IOError as err:
dia.error("Unexpected io error: %s" % err)
def _open(self, filename, mode, comp_type, level=None, **kwargs):
if comp_type not in ('gzip', 'lzma'):
string = 'open(filename, mode, **kwargs)'
elif comp_type == 'gzip':
string = 'CLIB.open(filename, mode, %s **kwargs)' % (
'' if level is None else 'compresslevel=level,')
elif comp_type == 'lzma':
string = 'CLIB.open(filename, mode, %s **kwargs)' % (
'' if level is None else 'preset=level,')
return eval(string)
def vv(self, varname, SET=None):
    """Get or set the nvim variable 'neotags_<varname>'.

    With SET left as None the current value is returned; otherwise the
    variable is assigned SET and SET is returned.
    """
    key = "neotags_" + varname
    try:
        if SET is not None:
            self.vim.vars[key] = SET
            return SET
        return self.vim.vars[key]
    except (NvimError, KeyError) as err:
        dia.debug_echo("ERROR: varname %s doesn't exist." % varname)
        raise err
###############################################################################
# Toggling. These are ugly and repeditive.
def toggle(self):
    """Toggle state of the plugin."""
    if self.vv('enabled'):
        # Currently enabled: disable and drop all cached state.
        dia.inform_echo("Disabling neotags.")
        self.vv('enabled', SET=0)
        self.__seen = []
        self.__md5_cache = {}
        self.__cmd_cache = {}
    else:
        dia.inform_echo("Re-enabling neotags.")
        self.vv('enabled', SET=1)
    self.update(force=False)
def toggle_C_bin(self):
    """Toggle between the compiled C binary and the python fallback."""
    if self.__neotags_bin is None:
        # Currently on the python path: try to locate the binary and
        # enable it only if it was actually found.
        self.__neotags_bin = self._get_binary(loud=True)
        if self.__neotags_bin is not None:
            dia.inform_echo("Switching to use C binary.")
            self.vv('use_binary', SET=1)
    else:
        # Currently using the binary: forget it and fall back to python.
        self.__neotags_bin = None
        self.vv('use_binary', SET=0)
        dia.inform_echo("Switching to use python code.")
def toggle_verbosity(self):
    """Toggle diagnostic verbosity (delegates to the module-level dia)."""
    dia.toggle()
###############################################################################
class HighlightGroup:
    """Exists to keep the number of arguments being passed around down.

    A plain bag of highlight-related fields, all initialized to None.
    """

    def __init__(self):
        # Initialize every field to None; callers fill them in later.
        for attr in ('file', 'ft', 'group', 'key', 'notin',
                     'prefix', 'suffix', 'highlights', 'number'):
            setattr(self, attr, None)
class CBinError(Exception):
    """Dummy wrapper."""
    # NOTE(review): carries no extra state; presumably signals failures on
    # the C-binary code path — confirm at the raise sites elsewhere.
| |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exception definitions."""
from keystoneauth1 import exceptions as _exc
from keystoneclient.i18n import _
ClientException = _exc.ClientException
"""The base exception class for all exceptions this library raises.
An alias of :py:exc:`keystoneauth1.exceptions.base.ClientException`
"""
ConnectionError = _exc.ConnectionError
"""Cannot connect to API service.
An alias of :py:exc:`keystoneauth1.exceptions.connection.ConnectionError`
"""
ConnectionRefused = _exc.ConnectFailure
"""Connection refused while trying to connect to API service.
An alias of :py:exc:`keystoneauth1.exceptions.connection.ConnectFailure`
"""
SSLError = _exc.SSLError
"""An SSL error occurred.
An alias of :py:exc:`keystoneauth1.exceptions.connection.SSLError`
"""
AuthorizationFailure = _exc.AuthorizationFailure
"""Cannot authorize API client.
An alias of :py:exc:`keystoneauth1.exceptions.auth.AuthorizationFailure`
"""
class ValidationError(ClientException):
    """Error in validation on API client side."""
class UnsupportedVersion(ClientException):
    """User is trying to use an unsupported version of the API."""
class CommandError(ClientException):
    """Error in CLI tool."""
class AuthPluginOptionsMissing(AuthorizationFailure):
    """Auth plugin misses some options."""

    def __init__(self, opt_names):
        missing = ", ".join(opt_names)
        super(AuthPluginOptionsMissing, self).__init__(
            _("Authentication failed. Missing options: %s") % missing)
        # Keep the option names for programmatic inspection by callers.
        self.opt_names = opt_names
class AuthSystemNotFound(AuthorizationFailure):
    """User has specified an AuthSystem that is not installed."""

    def __init__(self, auth_system):
        message = _("AuthSystemNotFound: %r") % auth_system
        super(AuthSystemNotFound, self).__init__(message)
        self.auth_system = auth_system
class NoUniqueMatch(ClientException):
    """Multiple entities found instead of one."""
EndpointException = _exc.CatalogException
"""Something is rotten in Service Catalog.
An alias of :py:exc:`keystoneauth1.exceptions.catalog.CatalogException`
"""
EndpointNotFound = _exc.EndpointNotFound
"""Could not find requested endpoint in Service Catalog.
An alias of :py:exc:`keystoneauth1.exceptions.catalog.EndpointNotFound`
"""
class AmbiguousEndpoints(EndpointException):
    """Found more than one matching endpoint in Service Catalog."""

    def __init__(self, endpoints=None):
        message = _("AmbiguousEndpoints: %r") % endpoints
        super(AmbiguousEndpoints, self).__init__(message)
        self.endpoints = endpoints
HttpError = _exc.HttpError
"""The base exception class for all HTTP exceptions.
An alias of :py:exc:`keystoneauth1.exceptions.http.HttpError`
"""
HTTPClientError = _exc.HTTPClientError
"""Client-side HTTP error.
Exception for cases in which the client seems to have erred.
An alias of :py:exc:`keystoneauth1.exceptions.http.HTTPClientError`
"""
HttpServerError = _exc.HttpServerError
"""Server-side HTTP error.
Exception for cases in which the server is aware that it has
erred or is incapable of performing the request.
An alias of :py:exc:`keystoneauth1.exceptions.http.HttpServerError`
"""
class HTTPRedirection(HttpError):
    """HTTP Redirection."""

    # Default human-readable message for this exception family.
    message = _("HTTP Redirection")
class MultipleChoices(HTTPRedirection):
    """HTTP 300 - Multiple Choices.

    Indicates multiple options for the resource that the client may follow.
    """

    # HTTP status code this exception represents.
    http_status = 300
    message = _("Multiple Choices")
BadRequest = _exc.BadRequest
"""HTTP 400 - Bad Request.
The request cannot be fulfilled due to bad syntax.
An alias of :py:exc:`keystoneauth1.exceptions.http.BadRequest`
"""
Unauthorized = _exc.Unauthorized
"""HTTP 401 - Unauthorized.
Similar to 403 Forbidden, but specifically for use when authentication
is required and has failed or has not yet been provided.
An alias of :py:exc:`keystoneauth1.exceptions.http.Unauthorized`
"""
PaymentRequired = _exc.PaymentRequired
"""HTTP 402 - Payment Required.
Reserved for future use.
An alias of :py:exc:`keystoneauth1.exceptions.http.PaymentRequired`
"""
Forbidden = _exc.Forbidden
"""HTTP 403 - Forbidden.
The request was a valid request, but the server is refusing to respond
to it.
An alias of :py:exc:`keystoneauth1.exceptions.http.Forbidden`
"""
NotFound = _exc.NotFound
"""HTTP 404 - Not Found.
The requested resource could not be found but may be available again
in the future.
An alias of :py:exc:`keystoneauth1.exceptions.http.NotFound`
"""
MethodNotAllowed = _exc.MethodNotAllowed
"""HTTP 405 - Method Not Allowed.
A request was made of a resource using a request method not supported
by that resource.
An alias of :py:exc:`keystoneauth1.exceptions.http.MethodNotAllowed`
"""
NotAcceptable = _exc.NotAcceptable
"""HTTP 406 - Not Acceptable.
The requested resource is only capable of generating content not
acceptable according to the Accept headers sent in the request.
An alias of :py:exc:`keystoneauth1.exceptions.http.NotAcceptable`
"""
ProxyAuthenticationRequired = _exc.ProxyAuthenticationRequired
"""HTTP 407 - Proxy Authentication Required.
The client must first authenticate itself with the proxy.
An alias of :py:exc:`keystoneauth1.exceptions.http.ProxyAuthenticationRequired`
"""
RequestTimeout = _exc.RequestTimeout
"""HTTP 408 - Request Timeout.
The server timed out waiting for the request.
An alias of :py:exc:`keystoneauth1.exceptions.http.RequestTimeout`
"""
Conflict = _exc.Conflict
"""HTTP 409 - Conflict.
Indicates that the request could not be processed because of conflict
in the request, such as an edit conflict.
An alias of :py:exc:`keystoneauth1.exceptions.http.Conflict`
"""
Gone = _exc.Gone
"""HTTP 410 - Gone.
Indicates that the resource requested is no longer available and will
not be available again.
An alias of :py:exc:`keystoneauth1.exceptions.http.Gone`
"""
LengthRequired = _exc.LengthRequired
"""HTTP 411 - Length Required.
The request did not specify the length of its content, which is
required by the requested resource.
An alias of :py:exc:`keystoneauth1.exceptions.http.LengthRequired`
"""
PreconditionFailed = _exc.PreconditionFailed
"""HTTP 412 - Precondition Failed.
The server does not meet one of the preconditions that the requester
put on the request.
An alias of :py:exc:`keystoneauth1.exceptions.http.PreconditionFailed`
"""
RequestEntityTooLarge = _exc.RequestEntityTooLarge
"""HTTP 413 - Request Entity Too Large.
The request is larger than the server is willing or able to process.
An alias of :py:exc:`keystoneauth1.exceptions.http.RequestEntityTooLarge`
"""
RequestUriTooLong = _exc.RequestUriTooLong
"""HTTP 414 - Request-URI Too Long.
The URI provided was too long for the server to process.
An alias of :py:exc:`keystoneauth1.exceptions.http.RequestUriTooLong`
"""
UnsupportedMediaType = _exc.UnsupportedMediaType
"""HTTP 415 - Unsupported Media Type.
The request entity has a media type which the server or resource does
not support.
An alias of :py:exc:`keystoneauth1.exceptions.http.UnsupportedMediaType`
"""
RequestedRangeNotSatisfiable = _exc.RequestedRangeNotSatisfiable
"""HTTP 416 - Requested Range Not Satisfiable.
The client has asked for a portion of the file, but the server cannot
supply that portion.
An alias of
:py:exc:`keystoneauth1.exceptions.http.RequestedRangeNotSatisfiable`
"""
ExpectationFailed = _exc.ExpectationFailed
"""HTTP 417 - Expectation Failed.
The server cannot meet the requirements of the Expect request-header field.
An alias of :py:exc:`keystoneauth1.exceptions.http.ExpectationFailed`
"""
UnprocessableEntity = _exc.UnprocessableEntity
"""HTTP 422 - Unprocessable Entity.
The request was well-formed but was unable to be followed due to semantic
errors.
An alias of :py:exc:`keystoneauth1.exceptions.http.UnprocessableEntity`
"""
InternalServerError = _exc.InternalServerError
"""HTTP 500 - Internal Server Error.
A generic error message, given when no more specific message is suitable.
An alias of :py:exc:`keystoneauth1.exceptions.http.InternalServerError`
"""
HttpNotImplemented = _exc.HttpNotImplemented
"""HTTP 501 - Not Implemented.
The server either does not recognize the request method, or it lacks
the ability to fulfill the request.
An alias of :py:exc:`keystoneauth1.exceptions.http.HttpNotImplemented`
"""
BadGateway = _exc.BadGateway
"""HTTP 502 - Bad Gateway.
The server was acting as a gateway or proxy and received an invalid
response from the upstream server.
An alias of :py:exc:`keystoneauth1.exceptions.http.BadGateway`
"""
ServiceUnavailable = _exc.ServiceUnavailable
"""HTTP 503 - Service Unavailable.
The server is currently unavailable.
An alias of :py:exc:`keystoneauth1.exceptions.http.ServiceUnavailable`
"""
GatewayTimeout = _exc.GatewayTimeout
"""HTTP 504 - Gateway Timeout.
The server was acting as a gateway or proxy and did not receive a timely
response from the upstream server.
An alias of :py:exc:`keystoneauth1.exceptions.http.GatewayTimeout`
"""
HttpVersionNotSupported = _exc.HttpVersionNotSupported
"""HTTP 505 - HttpVersion Not Supported.
The server does not support the HTTP protocol version used in the request.
An alias of :py:exc:`keystoneauth1.exceptions.http.HttpVersionNotSupported`
"""
from_response = _exc.from_response
"""Returns an instance of :class:`HttpError` or subclass based on response.
An alias of :py:func:`keystoneauth1.exceptions.http.from_response`
"""
# NOTE(akurilin): This alias should be left here to support backwards
# compatibility until we are sure that usage of these exceptions in
# projects is correct.
HTTPNotImplemented = HttpNotImplemented
Timeout = RequestTimeout
HTTPError = HttpError
class CertificateConfigError(Exception):
    """Error reading the certificate."""

    def __init__(self, output):
        # Preserve the raw tool output for diagnosis by callers.
        self.output = output
        super(CertificateConfigError, self).__init__(
            _('Unable to load certificate.'))
class CMSError(Exception):
    """Error reading the certificate."""

    def __init__(self, output):
        # Preserve the raw tool output for diagnosis by callers.
        self.output = output
        super(CMSError, self).__init__(
            _('Unable to sign or verify data.'))
EmptyCatalog = _exc.EmptyCatalog
"""The service catalog is empty.
An alias of :py:exc:`keystoneauth1.exceptions.catalog.EmptyCatalog`
"""
DiscoveryFailure = _exc.DiscoveryFailure
"""Discovery of client versions failed.
An alias of :py:exc:`keystoneauth1.exceptions.discovery.DiscoveryFailure`
"""
VersionNotAvailable = _exc.VersionNotAvailable
"""Discovery failed as the version you requested is not available.
An alias of :py:exc:`keystoneauth1.exceptions.discovery.VersionNotAvailable`
"""
class MethodNotImplemented(ClientException):
    """Method not implemented by the keystoneclient API."""
    # Subclass of ClientException so callers can catch it generically.
MissingAuthPlugin = _exc.MissingAuthPlugin
"""An authenticated request is required but no plugin available.
An alias of :py:exc:`keystoneauth1.exceptions.auth_plugins.MissingAuthPlugin`
"""
NoMatchingPlugin = _exc.NoMatchingPlugin
"""There were no auth plugins that could be created from the parameters
provided.
An alias of :py:exc:`keystoneauth1.exceptions.auth_plugins.NoMatchingPlugin`
"""
class UnsupportedParameters(ClientException):
    """A parameter that was provided or returned is not supported.

    :param list(str) names: Names of the unsupported parameters.

    .. py:attribute:: names

        Names of the unsupported parameters.
    """

    def __init__(self, names):
        self.names = names
        template = _('The following parameters were given that are unsupported: %s')
        super(UnsupportedParameters, self).__init__(template % ', '.join(self.names))
class InvalidResponse(ClientException):
    """The response from the server is not valid for this request."""

    def __init__(self, response):
        super(InvalidResponse, self).__init__()
        # Keep the raw response object so callers can inspect it.
        self.response = response
| |
import unittest
import numpy as np
from mock import Mock, patch, PropertyMock
import repstruct.features.extract as extract
import repstruct.dataset as dataset
class TestExtract(unittest.TestCase):
    """Unit tests for repstruct.features.extract."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testGetRgbFromLocations(self):
        """One (row, col) location extracts a 1x1x3 RGB block."""
        im = np.array([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]])
        row_locations = np.array([0])
        column_locations = np.array([0])

        rgb = extract.get_rgb_from_locs(row_locations, column_locations, im)

        self.assertEqual(3, len(rgb.shape))
        self.assertEqual(1, rgb.shape[0])
        self.assertEqual(1, rgb.shape[1])
        self.assertEqual(3, rgb.shape[2])

        self.assertEqual(im[0, 0, 0], rgb[0, 0, 0])
        self.assertEqual(im[0, 0, 1], rgb[0, 0, 1])
        self.assertEqual(im[0, 0, 2], rgb[0, 0, 2])

    def testGetRgbFromLocationsMultiple(self):
        """Multiple locations stack into an (n, 1, 3) RGB array."""
        im = np.array([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]])
        row_locations = np.array([0, 1])
        column_locations = np.array([0, 1])

        rgb = extract.get_rgb_from_locs(row_locations, column_locations, im)

        self.assertEqual(3, len(rgb.shape))
        self.assertEqual(2, rgb.shape[0])
        self.assertEqual(1, rgb.shape[1])
        self.assertEqual(3, rgb.shape[2])

        self.assertEqual(im[0, 0, 0], rgb[0, 0, 0])
        self.assertEqual(im[0, 0, 1], rgb[0, 0, 1])
        self.assertEqual(im[0, 0, 2], rgb[0, 0, 2])
        self.assertEqual(im[1, 1, 0], rgb[1, 0, 0])
        self.assertEqual(im[1, 1, 1], rgb[1, 0, 1])
        self.assertEqual(im[1, 1, 2], rgb[1, 0, 2])

    def testGetRgbFromLocationsFloat(self):
        """Float locations are floored to integer pixel indices."""
        im = np.array([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]])
        row_locations = np.array([0.4])
        column_locations = np.array([1.9])

        rgb = extract.get_rgb_from_locs(row_locations, column_locations, im)

        self.assertEqual(3, len(rgb.shape))
        self.assertEqual(1, rgb.shape[0])
        self.assertEqual(1, rgb.shape[1])
        self.assertEqual(3, rgb.shape[2])

        self.assertEqual(im[0, 1, 0], rgb[0, 0, 0])
        self.assertEqual(im[0, 1, 1], rgb[0, 0, 1])
        self.assertEqual(im[0, 1, 2], rgb[0, 0, 2])

    def testCreateNanArray(self):
        """create_nan_array returns a 1-D array filled with NaN."""
        columns = 10
        result = extract.create_nan_array(columns)

        self.assertEqual(columns, result.shape[0])
        for value in result:
            self.assertTrue(np.isnan(value))

    def testRgbToHsCoordsBlackWhite(self):
        """Black and white both map to the origin (zero saturation)."""
        rgb = np.array([[[0, 0, 0]], [[255, 255, 255]]])
        result = extract.rgb_to_hs_coords(rgb)

        self.assertEqual(rgb.shape[0], result.shape[0])
        self.assertEqual(2, result.shape[1])
        self.assertLess(np.linalg.norm(result[0]), 0.0000001)
        self.assertLess(np.linalg.norm(result[1]), 0.0000001)

    def testRgbToHsCoorsSingleColors(self):
        """Pure primaries map to unit-norm hue/saturation coordinates."""
        rgb = np.array([[[255, 0, 0]], [[0, 0, 255]], [[0, 255, 0]]])
        result = extract.rgb_to_hs_coords(rgb)

        self.assertEqual(rgb.shape[0], result.shape[0])
        self.assertEqual(2, result.shape[1])
        self.assertLess(np.abs(np.linalg.norm(result[0]) - 1.), 0.0000001)
        self.assertLess(np.abs(np.linalg.norm(result[1]) - 1.), 0.0000001)
        self.assertLess(np.abs(np.linalg.norm(result[2]) - 1.), 0.0000001)

    @patch('repstruct.features.extract.get_color_hist')
    @patch('repstruct.features.descriptor.normalize_by_division')
    @patch('repstruct.features.descriptor.classify_cosine')
    @patch('repstruct.features.descriptor.normalize')
    @patch('cv2.imread')
    def testDescriptorExtractor(self, imread_mock, norm_mock, classify_mock, div_mock, hist_mock):
        """The extractor saves descriptors plus both color histograms."""
        imread_mock.return_value = np.array([[[0, 0, 0]]])

        desc_res = 'desc'
        color_res = ['color', 'random']
        div_mock.return_value = desc_res
        # side_effect: first call yields the color hist, second the random one.
        hist_mock.side_effect = color_res

        zero = np.zeros((1, 2))
        classify_mock.return_value = zero
        norm_mock.return_value = zero

        feature_data = dataset.FeatureDataSet(None, None)
        feature_data.load = Mock(return_value=(zero, zero))
        descriptor_data = dataset.DescriptorDataSet(None)
        descriptor_data.save = Mock()
        collection_data = dataset.CollectionDataSet(None, None)
        collection_data.path = 'path'

        desc_cc = np.array([0., 0.])
        desc_cc_norm = np.array([1., 1.])
        color_cc = np.array([0., 0.])
        color_cc_norm = np.array([1., 1.])
        x = np.array([0., 1.])
        y = np.array([0., 1.])

        extractor = extract.DescriptorExtractor(feature_data, descriptor_data, collection_data,
                                                desc_cc, desc_cc_norm, color_cc, color_cc_norm,
                                                x, y)
        im = 'im'
        extractor(im)

        descriptor_data.save.assert_called_with(im, desc_res, color_res[0], color_res[1])

    def testGetColorHist(self):
        """Histogram over two clusters is normalized and NaN-free."""
        im = np.array([[[0, 0, 0]], [[255, 0, 0]]], np.uint8)
        rows = np.array([0, 1])
        cols = np.array([0, 0])
        cluster_centers = np.array([[0., 0.], [1., 0.]])
        # np.float was removed from NumPy (1.24); the builtin float is the
        # documented equivalent.
        cluster_center_norm = np.ones(2, float)

        result = extract.get_color_hist(im, rows, cols, cluster_centers, cluster_center_norm)

        self.assertEqual(1, len(result.shape))
        self.assertEqual(2, result.shape[0])
        self.assertFalse(np.any(np.isnan(result)))

        norm = np.linalg.norm(result)
        self.assertLess(np.abs(norm - 1.), 0.0000001)

        # Same number of items in the two clusters.
        self.assertLess(np.abs(result[0] - result[1]), 0.0000001)

    def testGetColorHistGrayscale(self):
        """Grayscale input cannot produce a color histogram: all NaN."""
        im = np.array([[0], [255]], np.uint8)
        rows = np.array([0, 1])
        cols = np.array([0, 0])
        cluster_centers = np.array([[0., 0.], [1., 0.]])
        # See testGetColorHist: builtin float replaces removed np.float.
        cluster_center_norm = np.ones(2, float)

        result = extract.get_color_hist(im, rows, cols, cluster_centers, cluster_center_norm)

        self.assertEqual(1, len(result.shape))
        self.assertEqual(2, result.shape[0])
        self.assertTrue(np.all(np.isnan(result)))

    @patch('numpy.mod')
    @patch('numpy.histogram')
    @patch('scipy.io.loadmat')
    @patch('repstruct.features.extract.DescriptorExtractor')
    @patch('multiprocessing.Pool')
    def testExtract(self, pool_mock, extract_mock, loadmat_mock, hist_mock, mod_mock):
        """With processes == 1 images are processed in-line, not pooled."""
        pool_instance = pool_mock.return_value
        pool_instance.map = Mock(return_value=0)
        extractor_instance = extract_mock.return_value

        zero_array = np.zeros((2, 1))
        loadmat_mock.return_value = {'cbest': zero_array, 'idxbest': zero_array,
                                     'ccbest': zero_array, 'idxcbest': zero_array,
                                     'rands': {'x': zero_array, 'y': zero_array}}
        hist_mock.return_value = (0, 0)

        data = dataset.DataSet('tag')
        data.collection = PropertyMock()
        data.collection.images = Mock(return_value=np.array(['im1', 'im2', 'im3']))
        data.collection.config = PropertyMock()
        data.collection.config.processes = 1

        extract.extract(data)

        self.assertEqual(3, extractor_instance.call_count)
        self.assertEqual(0, pool_instance.map.call_count)

    @patch('numpy.mod')
    @patch('numpy.histogram')
    @patch('scipy.io.loadmat')
    @patch('repstruct.features.extract.DescriptorExtractor')
    @patch('multiprocessing.Pool')
    def testExtractMultiProcess(self, pool_mock, extract_mock, loadmat_mock, hist_mock, mod_mock):
        """With processes > 1 work is dispatched to a multiprocessing pool."""
        pool_instance = pool_mock.return_value
        pool_instance.map = Mock(return_value=0)
        extractor_instance = extract_mock.return_value

        zero_array = np.zeros((2, 1))
        loadmat_mock.return_value = {'cbest': zero_array, 'idxbest': zero_array,
                                     'ccbest': zero_array, 'idxcbest': zero_array,
                                     'rands': {'x': zero_array, 'y': zero_array}}
        hist_mock.return_value = (0, 0)

        data = dataset.DataSet('tag')
        data.collection = PropertyMock()
        data.collection.images = Mock(return_value=np.array(['im1', 'im2', 'im3']))
        data.collection.config = PropertyMock()
        data.collection.config.processes = 2

        extract.extract(data)

        self.assertEqual(0, extractor_instance.call_count)
        self.assertEqual(1, pool_instance.map.call_count)
if __name__ == '__main__':
unittest.main()
| |
"""Test zha fan."""
from unittest.mock import AsyncMock, call, patch
import pytest
import zigpy.profiles.zha as zha
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.hvac as hvac
import zigpy.zcl.foundation as zcl_f
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_SPEED,
DOMAIN,
SERVICE_SET_SPEED,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
)
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.zha.core.discovery import GROUP_PROBE
from homeassistant.components.zha.core.group import GroupMember
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from .common import (
async_enable_traffic,
async_find_group_entity_id,
async_test_rejoin,
find_entity_id,
get_zha_gateway,
send_attributes_report,
)
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e8"
@pytest.fixture
def zigpy_device(zigpy_device_mock):
    """Mock zigpy device exposing a single Fan input cluster on endpoint 1."""
    endpoints = {
        1: {
            "in_clusters": [hvac.Fan.cluster_id],
            "out_clusters": [],
            "device_type": zha.DeviceType.ON_OFF_SWITCH,
        }
    }
    return zigpy_device_mock(
        endpoints, node_descriptor=b"\x02@\x8c\x02\x10RR\x00\x00\x00R\x00\x00"
    )
@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
    """Joined ZHA coordinator device (nwk 0x0000) used for group tests."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [general.Groups.cluster_id],
                "out_clusters": [],
                "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee="00:15:8d:00:02:32:4f:32",
        nwk=0x0000,
        node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
    )
    zha_device = await zha_device_joined(zigpy_device)
    # Mark available so entities derived from it start in a usable state.
    zha_device.available = True
    return zha_device
@pytest.fixture
async def device_fan_1(hass, zigpy_device_mock, zha_device_joined):
    """Joined groupable ZHA fan device (Groups + OnOff + Fan clusters)."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.Groups.cluster_id,
                    general.OnOff.cluster_id,
                    hvac.Fan.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.ON_OFF_LIGHT,
            },
        },
        ieee=IEEE_GROUPABLE_DEVICE,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    await hass.async_block_till_done()
    return zha_device
@pytest.fixture
async def device_fan_2(hass, zigpy_device_mock, zha_device_joined):
    """Joined groupable ZHA fan device with an extra LevelControl cluster."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                "in_clusters": [
                    general.Groups.cluster_id,
                    general.OnOff.cluster_id,
                    hvac.Fan.cluster_id,
                    general.LevelControl.cluster_id,
                ],
                "out_clusters": [],
                "device_type": zha.DeviceType.ON_OFF_LIGHT,
            },
        },
        ieee=IEEE_GROUPABLE_DEVICE2,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    await hass.async_block_till_done()
    return zha_device
async def test_fan(hass, zha_device_joined_restored, zigpy_device):
    """Test a single ZHA fan entity end to end.

    Covers availability transitions, state driven by attribute reports,
    and HA service calls mapping to fan_mode cluster writes.
    """
    zha_device = await zha_device_joined_restored(zigpy_device)
    cluster = zigpy_device.endpoints.get(1).fan
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    assert entity_id is not None

    assert hass.states.get(entity_id).state == STATE_OFF
    await async_enable_traffic(hass, [zha_device], enabled=False)
    # test that the fan was created and that it is unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE

    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [zha_device])

    # test that the state has changed from unavailable to off
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on at fan
    await send_attributes_report(hass, cluster, {1: 2, 0: 1, 2: 3})
    assert hass.states.get(entity_id).state == STATE_ON

    # turn off at fan
    await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 2})
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on from HA; fan_mode 2 is medium (see test_fan_init parametrize)
    cluster.write_attributes.reset_mock()
    await async_turn_on(hass, entity_id)
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call({"fan_mode": 2})

    # turn off from HA
    cluster.write_attributes.reset_mock()
    await async_turn_off(hass, entity_id)
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call({"fan_mode": 0})

    # change speed from HA
    cluster.write_attributes.reset_mock()
    await async_set_speed(hass, entity_id, speed=fan.SPEED_HIGH)
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call({"fan_mode": 3})

    # test adding new fan to the network and HA
    await async_test_rejoin(hass, zigpy_device, [cluster], (1,))
async def async_turn_on(hass, entity_id, speed=None):
    """Turn fan on."""
    data = {}
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    if speed is not None:
        data[ATTR_SPEED] = speed
    await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
async def async_turn_off(hass, entity_id):
    """Turn fan off."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
async def async_set_speed(hass, entity_id, speed=None):
    """Set speed for specified fan."""
    payload = {}
    if entity_id is not None:
        payload[ATTR_ENTITY_ID] = entity_id
    if speed is not None:
        payload[ATTR_SPEED] = speed
    await hass.services.async_call(DOMAIN, SERVICE_SET_SPEED, payload, blocking=True)
@patch(
    "zigpy.zcl.clusters.hvac.Fan.write_attributes",
    new=AsyncMock(return_value=zcl_f.WriteAttributesResponse.deserialize(b"\x00")[0]),
)
async def test_zha_group_fan_entity(hass, device_fan_1, device_fan_2, coordinator):
    """Test the fan entity for a ZHA group.

    Service calls must write fan_mode on the group cluster, and the group
    entity state must follow its members (any member on -> on, all off -> off).
    """
    zha_gateway = get_zha_gateway(hass)
    assert zha_gateway is not None
    zha_gateway.coordinator_zha_device = coordinator
    coordinator._zha_gateway = zha_gateway
    device_fan_1._zha_gateway = zha_gateway
    device_fan_2._zha_gateway = zha_gateway
    member_ieee_addresses = [device_fan_1.ieee, device_fan_2.ieee]
    members = [GroupMember(device_fan_1.ieee, 1), GroupMember(device_fan_2.ieee, 1)]

    # test creating a group with 2 members
    zha_group = await zha_gateway.async_create_zigpy_group("Test Group", members)
    await hass.async_block_till_done()

    assert zha_group is not None
    assert len(zha_group.members) == 2
    for member in zha_group.members:
        assert member.device.ieee in member_ieee_addresses
        assert member.group == zha_group
        assert member.endpoint is not None

    # both light and fan domains should be probed for this group
    entity_domains = GROUP_PROBE.determine_entity_domains(hass, zha_group)
    assert len(entity_domains) == 2
    assert LIGHT_DOMAIN in entity_domains
    assert DOMAIN in entity_domains

    entity_id = async_find_group_entity_id(hass, DOMAIN, zha_group)
    assert hass.states.get(entity_id) is not None

    group_fan_cluster = zha_group.endpoint[hvac.Fan.cluster_id]
    dev1_fan_cluster = device_fan_1.device.endpoints[1].fan
    dev2_fan_cluster = device_fan_2.device.endpoints[1].fan

    await async_enable_traffic(hass, [device_fan_1, device_fan_2], enabled=False)
    await hass.async_block_till_done()
    # test that the fans were created and that they are unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE

    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [device_fan_1, device_fan_2])

    # test that the fan group entity was created and is off
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on from HA
    group_fan_cluster.write_attributes.reset_mock()
    await async_turn_on(hass, entity_id)
    await hass.async_block_till_done()
    assert len(group_fan_cluster.write_attributes.mock_calls) == 1
    assert group_fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 2}

    # turn off from HA
    group_fan_cluster.write_attributes.reset_mock()
    await async_turn_off(hass, entity_id)
    assert len(group_fan_cluster.write_attributes.mock_calls) == 1
    assert group_fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 0}

    # change speed from HA
    group_fan_cluster.write_attributes.reset_mock()
    await async_set_speed(hass, entity_id, speed=fan.SPEED_HIGH)
    assert len(group_fan_cluster.write_attributes.mock_calls) == 1
    assert group_fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 3}

    # test some of the group logic to make sure we key off states correctly
    await send_attributes_report(hass, dev1_fan_cluster, {0: 0})
    await send_attributes_report(hass, dev2_fan_cluster, {0: 0})

    # test that group fan is off
    assert hass.states.get(entity_id).state == STATE_OFF

    await send_attributes_report(hass, dev2_fan_cluster, {0: 2})
    await hass.async_block_till_done()

    # test that group fan is speed medium
    assert hass.states.get(entity_id).state == STATE_ON

    await send_attributes_report(hass, dev2_fan_cluster, {0: 0})
    await hass.async_block_till_done()

    # test that group fan is now off
    assert hass.states.get(entity_id).state == STATE_OFF
@pytest.mark.parametrize(
    "plug_read, expected_state, expected_speed",
    (
        (None, STATE_OFF, None),
        ({"fan_mode": 0}, STATE_OFF, SPEED_OFF),
        ({"fan_mode": 1}, STATE_ON, SPEED_LOW),
        ({"fan_mode": 2}, STATE_ON, SPEED_MEDIUM),
        ({"fan_mode": 3}, STATE_ON, SPEED_HIGH),
    ),
)
async def test_fan_init(
    hass,
    zha_device_joined_restored,
    zigpy_device,
    plug_read,
    expected_state,
    expected_speed,
):
    """Test zha fan platform initial state.

    The entity must derive its initial state/speed from the fan_mode
    attribute value plugged into the cluster before the device joins.
    """
    cluster = zigpy_device.endpoints.get(1).fan
    cluster.PLUGGED_ATTR_READS = plug_read
    zha_device = await zha_device_joined_restored(zigpy_device)
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    assert entity_id is not None
    assert hass.states.get(entity_id).state == expected_state
    assert hass.states.get(entity_id).attributes[ATTR_SPEED] == expected_speed
async def test_fan_update_entity(
    hass,
    zha_device_joined_restored,
    zigpy_device,
):
    """Test zha fan platform entity refresh.

    homeassistant.update_entity must trigger a fresh fan_mode attribute
    read and reflect the new value in the entity state.
    """
    cluster = zigpy_device.endpoints.get(1).fan
    cluster.PLUGGED_ATTR_READS = {"fan_mode": 0}
    zha_device = await zha_device_joined_restored(zigpy_device)
    entity_id = await find_entity_id(DOMAIN, zha_device, hass)
    assert entity_id is not None

    assert hass.states.get(entity_id).state == STATE_OFF
    assert hass.states.get(entity_id).attributes[ATTR_SPEED] == SPEED_OFF
    # exactly one attribute read performed at join/restore time
    assert cluster.read_attributes.await_count == 1

    await async_setup_component(hass, "homeassistant", {})
    await hass.async_block_till_done()

    await hass.services.async_call(
        "homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True
    )
    assert hass.states.get(entity_id).state == STATE_OFF
    assert hass.states.get(entity_id).attributes[ATTR_SPEED] == SPEED_OFF
    assert cluster.read_attributes.await_count == 2

    # change the plugged value; the next update_entity must pick it up
    cluster.PLUGGED_ATTR_READS = {"fan_mode": 1}
    await hass.services.async_call(
        "homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True
    )
    assert hass.states.get(entity_id).state == STATE_ON
    assert hass.states.get(entity_id).attributes[ATTR_SPEED] == SPEED_LOW
    assert cluster.read_attributes.await_count == 3
| |
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state, deprecated
from .fixes import np_version
from .fixes import logsumexp as scipy_logsumexp
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
@deprecated("sklearn.utils.extmath.norm was deprecated in version 0.19 "
            "and will be removed in 0.21. Use scipy.linalg.norm instead.")
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm for a vector and the Frobenius norm for a
    2-d array. More precise than sqrt(squared_norm(x)).
    """
    # Deprecated thin wrapper; delegate directly to scipy.
    result = linalg.norm(x)
    return result
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Returns the squared Euclidean norm for a vector and the squared
    Frobenius norm for a 2-d array. Faster than norm(x) ** 2.
    """
    flat = np.ravel(x, order='K')
    if np.issubdtype(flat.dtype, np.integer):
        # Integer dot products can silently wrap around.
        warnings.warn('Array type is integer, np.dot may overflow. '
                      'Data should be float type to avoid this issue',
                      UserWarning)
    # The dot product of the flattened array with itself is the sum of
    # squares of all entries.
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices and does not create an X.shape-sized temporary.

    Performs no input validation.
    """
    if issparse(X):
        # The fast Cython helper requires CSR layout.
        if not isinstance(X, csr_matrix):
            X = csr_matrix(X)
        sq_norms = csr_row_norms(X)
    else:
        # einsum computes each row's dot product with itself without
        # materializing an X-shaped temporary.
        sq_norms = np.einsum('ij,ij->i', X, X)
    if squared:
        return sq_norms
    # In-place sqrt; returns the same array.
    return np.sqrt(sq_norms, sq_norms)
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust: returns -Inf
    whenever det(A) is non-positive or not defined.
    """
    sign, logdet = np.linalg.slogdet(A)
    # `not sign > 0` (rather than `sign <= 0`) also routes a NaN sign to
    # the -Inf branch.
    if not sign > 0:
        return -np.inf
    return logdet
def _impose_f_order(X):
    """Return a Fortran-ordered view of X plus a transposed-or-not flag."""
    # Inspect the flags directly instead of calling np.isfortran: this
    # also handles corner cases correctly.
    if X.flags.c_contiguous:
        return check_array(X.T, copy=False, order='F'), True
    return check_array(X, copy=False, order='F'), False
@deprecated("sklearn.utils.extmath.fast_dot was deprecated in version 0.19 "
            "and will be removed in 0.21. Use the equivalent np.dot instead.")
def fast_dot(a, b, out=None):
    """Deprecated alias of np.dot, kept for backward compatibility."""
    return np.dot(a, b, out=out)
def density(w, **kwargs):
    """Compute the density (fraction of non-zero entries) of a vector.

    Returns a value between 0 and 1; 0 for None input.
    """
    if hasattr(w, "toarray"):
        # Sparse input: stored non-zeros over the full matrix size.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    # Dense (or None) input.
    return 0 if w is None else float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.

    Parameters
    ----------
    a : array or sparse matrix
    b : array or sparse matrix
    dense_output : boolean, default False
        When False, either ``a`` or ``b`` being sparse will yield sparse
        output. When True, output will always be an array.

    Returns
    -------
    dot_product : array or sparse matrix
        sparse if ``a`` or ``b`` is sparse and ``dense_output=False``.
    """
    # Pure-dense case: plain numpy dot.
    if not (issparse(a) or issparse(b)):
        return np.dot(a, b)
    # scipy sparse overloads ``*`` as matrix multiplication.
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter,
                            power_iteration_normalizer='auto',
                            random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.
    Parameters
    ----------
    A : 2D array
        The input data matrix
    size : integer
        Size of the return array
    n_iter : integer
        Number of power iterations used to stabilize the result
    power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter`<=2 and switches to LU otherwise.
        .. versionadded:: 0.18
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.
    Returns
    -------
    Q : 2D array
        A (size x size) projection matrix, the range of which
        approximates well the range of the input matrix A.
    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    An implementation of a randomized algorithm for principal component
    analysis
    A. Szlam et al. 2014
    """
    random_state = check_random_state(random_state)
    # Generating normal random vectors with shape: (A.shape[1], size)
    Q = random_state.normal(size=(A.shape[1], size))
    if A.dtype.kind == 'f':
        # Ensure f32 is preserved as f32 (normal() always returns f64)
        Q = Q.astype(A.dtype, copy=False)
    # Deal with "auto" mode: cheap 'none' for few iterations, stable 'LU'
    # otherwise.
    if power_iteration_normalizer == 'auto':
        if n_iter <= 2:
            power_iteration_normalizer = 'none'
        else:
            power_iteration_normalizer = 'LU'
    # Perform power iterations with Q to further 'imprint' the top
    # singular vectors of A in Q. Each pass applies A then A.T; the
    # optional LU/QR step re-orthogonalizes Q so repeated multiplication
    # does not collapse all columns onto the dominant singular vector.
    for i in range(n_iter):
        if power_iteration_normalizer == 'none':
            Q = safe_sparse_dot(A, Q)
            Q = safe_sparse_dot(A.T, Q)
        elif power_iteration_normalizer == 'LU':
            Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
            Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
        elif power_iteration_normalizer == 'QR':
            Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
            Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
    # Sample the range of A using a linear projection of Q, then
    # extract an orthonormal basis via a final economic QR.
    Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
                   power_iteration_normalizer='auto', transpose='auto',
                   flip_sign=True, random_state=0):
    """Computes a truncated randomized SVD
    Parameters
    ----------
    M : ndarray or sparse matrix
        Matrix to decompose
    n_components : int
        Number of singular values and vectors to extract.
    n_oversamples : int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples. Smaller
        number can improve speed but can negatively impact the quality of
        approximation of singular vectors and singular values.
    n_iter : int or 'auto' (default is 'auto')
        Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)) `n_iter` in which case is set to 7.
        This improves precision with few components.
        .. versionchanged:: 0.18
    power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter`<=2 and switches to LU otherwise.
        .. versionadded:: 0.18
    transpose : True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case.
        .. versionchanged:: 0.18
    flip_sign : boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.
    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components. In order to
    obtain further speed up, `n_iter` can be set <=2 (at the cost of
    loss of precision).
    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    * An implementation of a randomized algorithm for principal component
      analysis
      A. Szlam et al. 2014
    """
    random_state = check_random_state(random_state)
    # Work with n_components + n_oversamples random probes for conditioning.
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if n_iter == 'auto':
        # Checks if the number of iterations is explicitly specified
        # Adjust n_iter. 7 was found a good compromise for PCA. See #5299
        n_iter = 7 if n_components < .1 * min(M.shape) else 4
    if transpose == 'auto':
        transpose = n_samples < n_features
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    # Approximate an orthonormal basis Q for the range of M.
    Q = randomized_range_finder(M, n_random, n_iter,
                                power_iteration_normalizer, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    # Lift the left singular vectors back to the original space.
    U = np.dot(Q, Uhat)
    if flip_sign:
        if not transpose:
            U, V = svd_flip(U, V)
        else:
            # In case of transpose u_based_decision=false
            # to actually flip based on u and not v.
            U, V = svd_flip(U, V, u_based_decision=False)
    if transpose:
        # transpose back the results according to the input convention
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
@deprecated("sklearn.utils.extmath.logsumexp was deprecated in version 0.19 "
            "and will be removed in 0.21. Use scipy.misc.logsumexp instead.")
def logsumexp(arr, axis=0):
    """Compute the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107
    """
    # Deprecated wrapper; scipy's implementation does the stable reduction.
    return scipy_logsumexp(arr, axis=axis)
def weighted_mode(a, w, axis=0):
    """Return the weighted modal (most common) value along an axis.

    If there is more than one such value, only the first is returned.
    The weighted bin-count for the modal bins is also returned. This is
    an extension of the algorithm in scipy.stats.mode.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([ 4.]), array([ 3.]))
    >>> weights = [1, 3, 0.5, 1.5, 1, 2]  # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([ 2.]), array([ 3.5]))

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
    # Broadcast the weights up to the shape of ``a`` if they differ.
    if a.shape != w.shape:
        w = np.zeros(a.shape, dtype=w.dtype) + w
    candidates = np.unique(np.ravel(a))  # every distinct value in a
    result_shape = list(a.shape)
    result_shape[axis] = 1
    best_vals = np.zeros(result_shape)
    best_counts = np.zeros(result_shape)
    # Accumulate the weight of each candidate value along the axis and
    # keep a running argmax over candidates.
    for candidate in candidates:
        contributions = np.zeros(a.shape)
        mask = (a == candidate)
        contributions[mask] = w[mask]
        counts = np.expand_dims(np.sum(contributions, axis), axis)
        best_vals = np.where(counts > best_counts, candidate, best_vals)
        best_counts = np.maximum(counts, best_counts)
    return best_vals, best_counts
@deprecated("sklearn.utils.extmath.pinvh was deprecated in version 0.19 "
            "and will be removed in 0.21. Use scipy.linalg.pinvh instead.")
def pinvh(a, cond=None, rcond=None, lower=True):
    """Deprecated thin wrapper around scipy.linalg.pinvh."""
    return linalg.pinvh(a, cond=cond, rcond=rcond, lower=lower)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2], [4, 5]))
    array([[1, 4],
           [1, 5],
           [2, 4],
           [2, 5]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    # Build an index grid over the lengths of the inputs; each row of the
    # flattened grid picks one element from every array.
    grid = np.indices(tuple(len(a) for a in arrays))
    grid = grid.reshape(len(arrays), -1).T
    if out is None:
        out = np.empty_like(grid, dtype=dtype)
    for col, arr in enumerate(arrays):
        out[:, col] = arr[grid[:, col]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.

    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner
        dimensions so one can compute `np.dot(u * s, v)`.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base
        the decision on is generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    # Fix: use the builtin ``range`` for the integer index arrays instead of
    # ``six.moves.xrange`` — numpy advanced indexing accepts any integer
    # sequence, so the six/Python-2 compatibility shim is unnecessary.
    if u_based_decision:
        # Sign of the absolute-max entry of each column of u.
        max_abs_cols = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[max_abs_cols, range(u.shape[1])])
    else:
        # Sign of the absolute-max entry of each row of v.
        max_abs_rows = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[range(v.shape[0]), max_abs_rows])
    # Apply the same sign to matching columns of u and rows of v so the
    # product u @ diag(s) @ v is unchanged.
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.

    This implementation is numerically stable because it splits positive
    and negative values::

        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0

    For the ordinary logistic function, use ``scipy.special.expit``.

    Parameters
    ----------
    X : array-like, shape (M, N) or (M, )
        Argument to the logistic function
    out : array-like, shape: (M, N) or (M, ), optional:
        Preallocated output array.

    Returns
    -------
    out : array, shape (M, N) or (M, )
        Log of the logistic function evaluated at every point in x

    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    squeeze_back = (X.ndim == 1)
    X = check_array(np.atleast_2d(X), dtype=np.float64)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # The Cython helper fills ``out`` with the stable piecewise formula.
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    return np.squeeze(out) if squeeze_back else out
def softmax(X, copy=True):
    """Calculate the softmax function row-wise.

    Computes np.exp(X) / np.sum(np.exp(X), axis=1), subtracting each
    row's maximum before exponentiation to avoid overflow for large
    values.

    Parameters
    ----------
    X : array-like, shape (M, N)
        Argument to the logistic function
    copy : bool, optional
        Copy X or not.

    Returns
    -------
    out : array, shape (M, N)
        Softmax function evaluated at every point in x
    """
    if copy:
        X = np.copy(X)
    # Shift each row so its maximum is 0, exponentiate in place, then
    # normalize each row to sum to 1.
    X -= np.max(X, axis=1).reshape((-1, 1))
    np.exp(X, X)
    X /= np.sum(X, axis=1).reshape((-1, 1))
    return X
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapated from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value` by shifting X when needed.

    Raises ValueError for sparse input since a uniform shift would
    densify it.
    """
    lowest = safe_min(X)
    if lowest >= min_value:
        return X
    if issparse(X):
        raise ValueError("Cannot make the data matrix"
                         " nonnegative because it is sparse."
                         " Adding a value to every entry would"
                         " make it no longer sparse.")
    return X + (min_value - lowest)
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
necessary for the calculation of the variance. last_n_samples_seen is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
    """Use high precision for cumsum and check that final value matches sum.

    Parameters
    ----------
    arr : array-like
        To be cumulatively summed as flat
    axis : int, optional
        Axis along which the cumulative sum is computed.
        The default (None) is to compute the cumsum over the flattened array.
    rtol : float
        Relative tolerance, see ``np.allclose``
    atol : float
        Absolute tolerance, see ``np.allclose``
    """
    if np_version < (1, 9):
        # On numpy < 1.9 np.sum is as unstable as np.cumsum, so the
        # consistency check below would be meaningless.
        return np.cumsum(arr, axis=axis, dtype=np.float64)
    out = np.cumsum(arr, axis=axis, dtype=np.float64)
    expected = np.sum(arr, axis=axis, dtype=np.float64)
    last = out.take(-1, axis=axis)
    # Warn (do not fail) when the running sum drifted away from the
    # directly-computed total.
    if not np.all(np.isclose(last, expected, rtol=rtol, atol=atol,
                             equal_nan=True)):
        warnings.warn('cumsum was found to be unstable: '
                      'its last element does not correspond to sum',
                      RuntimeWarning)
    return out
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobStatisticsVertexStage(Model):
    """The Data Lake Analytics job statistics vertex stage information.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar data_read: the amount of data read, in bytes.
    :vartype data_read: long
    :ivar data_read_cross_pod: the amount of data read across multiple pods,
     in bytes.
    :vartype data_read_cross_pod: long
    :ivar data_read_intra_pod: the amount of data read in one pod, in bytes.
    :vartype data_read_intra_pod: long
    :ivar data_to_read: the amount of data remaining to be read, in bytes.
    :vartype data_to_read: long
    :ivar data_written: the amount of data written, in bytes.
    :vartype data_written: long
    :ivar duplicate_discard_count: the number of duplicates that were
     discarded.
    :vartype duplicate_discard_count: int
    :ivar failed_count: the number of failures that occurred in this stage.
    :vartype failed_count: int
    :ivar max_vertex_data_read: the maximum amount of data read in a single
     vertex, in bytes.
    :vartype max_vertex_data_read: long
    :ivar min_vertex_data_read: the minimum amount of data read in a single
     vertex, in bytes.
    :vartype min_vertex_data_read: long
    :ivar read_failure_count: the number of read failures in this stage.
    :vartype read_failure_count: int
    :ivar revocation_count: the number of vertices that were revoked during
     this stage.
    :vartype revocation_count: int
    :ivar running_count: the number of currently running vertices in this
     stage.
    :vartype running_count: int
    :ivar scheduled_count: the number of currently scheduled vertices in this
     stage
    :vartype scheduled_count: int
    :ivar stage_name: the name of this stage in job execution.
    :vartype stage_name: str
    :ivar succeeded_count: the number of vertices that succeeded in this
     stage.
    :vartype succeeded_count: int
    :ivar temp_data_written: the amount of temporary data written, in bytes.
    :vartype temp_data_written: long
    :ivar total_count: the total vertex count for this stage.
    :vartype total_count: int
    :ivar total_failed_time: the amount of time that failed vertices took up
     in this stage.
    :vartype total_failed_time: timedelta
    :ivar total_progress: the current progress of this stage, as a percentage.
    :vartype total_progress: int
    :ivar total_succeeded_time: the amount of time all successful vertices
     took in this stage.
    :vartype total_succeeded_time: timedelta
    :ivar total_peak_mem_usage: the sum of the peak memory usage of all the
     vertices in the stage, in bytes.
    :vartype total_peak_mem_usage: long
    :ivar total_execution_time: the sum of the total execution time of all the
     vertices in the stage.
    :vartype total_execution_time: timedelta
    :param max_data_read_vertex: the vertex with the maximum amount of data
     read.
    :type max_data_read_vertex:
     ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
    :param max_execution_time_vertex: the vertex with the maximum execution
     time.
    :type max_execution_time_vertex:
     ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
    :param max_peak_mem_usage_vertex: the vertex with the maximum peak memory
     usage.
    :type max_peak_mem_usage_vertex:
     ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
    :ivar estimated_vertex_cpu_core_count: the estimated vertex CPU core
     count.
    :vartype estimated_vertex_cpu_core_count: int
    :ivar estimated_vertex_peak_cpu_core_count: the estimated vertex peak CPU
     core count.
    :vartype estimated_vertex_peak_cpu_core_count: int
    :ivar estimated_vertex_mem_size: the estimated vertex memory size, in
     bytes.
    :vartype estimated_vertex_mem_size: long
    :param allocated_container_cpu_core_count: the statistics information for
     the allocated container CPU core count.
    :type allocated_container_cpu_core_count:
     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
    :param allocated_container_mem_size: the statistics information for the
     allocated container memory size.
    :type allocated_container_mem_size:
     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
    :param used_vertex_cpu_core_count: the statistics information for the used
     vertex CPU core count.
    :type used_vertex_cpu_core_count:
     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
    :param used_vertex_peak_mem_size: the statistics information for the used
     vertex peak memory size.
    :type used_vertex_peak_mem_size:
     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
    """
    # msrest validation map: every server-populated field is read-only and
    # will be dropped from outgoing (serialized) requests.
    _validation = {
        'data_read': {'readonly': True},
        'data_read_cross_pod': {'readonly': True},
        'data_read_intra_pod': {'readonly': True},
        'data_to_read': {'readonly': True},
        'data_written': {'readonly': True},
        'duplicate_discard_count': {'readonly': True},
        'failed_count': {'readonly': True},
        'max_vertex_data_read': {'readonly': True},
        'min_vertex_data_read': {'readonly': True},
        'read_failure_count': {'readonly': True},
        'revocation_count': {'readonly': True},
        'running_count': {'readonly': True},
        'scheduled_count': {'readonly': True},
        'stage_name': {'readonly': True},
        'succeeded_count': {'readonly': True},
        'temp_data_written': {'readonly': True},
        'total_count': {'readonly': True},
        'total_failed_time': {'readonly': True},
        'total_progress': {'readonly': True},
        'total_succeeded_time': {'readonly': True},
        'total_peak_mem_usage': {'readonly': True},
        'total_execution_time': {'readonly': True},
        'estimated_vertex_cpu_core_count': {'readonly': True},
        'estimated_vertex_peak_cpu_core_count': {'readonly': True},
        'estimated_vertex_mem_size': {'readonly': True},
    }
    # Mapping from Python attribute name to wire (JSON) key and msrest
    # serialization type.
    _attribute_map = {
        'data_read': {'key': 'dataRead', 'type': 'long'},
        'data_read_cross_pod': {'key': 'dataReadCrossPod', 'type': 'long'},
        'data_read_intra_pod': {'key': 'dataReadIntraPod', 'type': 'long'},
        'data_to_read': {'key': 'dataToRead', 'type': 'long'},
        'data_written': {'key': 'dataWritten', 'type': 'long'},
        'duplicate_discard_count': {'key': 'duplicateDiscardCount', 'type': 'int'},
        'failed_count': {'key': 'failedCount', 'type': 'int'},
        'max_vertex_data_read': {'key': 'maxVertexDataRead', 'type': 'long'},
        'min_vertex_data_read': {'key': 'minVertexDataRead', 'type': 'long'},
        'read_failure_count': {'key': 'readFailureCount', 'type': 'int'},
        'revocation_count': {'key': 'revocationCount', 'type': 'int'},
        'running_count': {'key': 'runningCount', 'type': 'int'},
        'scheduled_count': {'key': 'scheduledCount', 'type': 'int'},
        'stage_name': {'key': 'stageName', 'type': 'str'},
        'succeeded_count': {'key': 'succeededCount', 'type': 'int'},
        'temp_data_written': {'key': 'tempDataWritten', 'type': 'long'},
        'total_count': {'key': 'totalCount', 'type': 'int'},
        'total_failed_time': {'key': 'totalFailedTime', 'type': 'duration'},
        'total_progress': {'key': 'totalProgress', 'type': 'int'},
        'total_succeeded_time': {'key': 'totalSucceededTime', 'type': 'duration'},
        'total_peak_mem_usage': {'key': 'totalPeakMemUsage', 'type': 'long'},
        'total_execution_time': {'key': 'totalExecutionTime', 'type': 'duration'},
        'max_data_read_vertex': {'key': 'maxDataReadVertex', 'type': 'JobStatisticsVertex'},
        'max_execution_time_vertex': {'key': 'maxExecutionTimeVertex', 'type': 'JobStatisticsVertex'},
        'max_peak_mem_usage_vertex': {'key': 'maxPeakMemUsageVertex', 'type': 'JobStatisticsVertex'},
        'estimated_vertex_cpu_core_count': {'key': 'estimatedVertexCpuCoreCount', 'type': 'int'},
        'estimated_vertex_peak_cpu_core_count': {'key': 'estimatedVertexPeakCpuCoreCount', 'type': 'int'},
        'estimated_vertex_mem_size': {'key': 'estimatedVertexMemSize', 'type': 'long'},
        'allocated_container_cpu_core_count': {'key': 'allocatedContainerCpuCoreCount', 'type': 'ResourceUsageStatistics'},
        'allocated_container_mem_size': {'key': 'allocatedContainerMemSize', 'type': 'ResourceUsageStatistics'},
        'used_vertex_cpu_core_count': {'key': 'usedVertexCpuCoreCount', 'type': 'ResourceUsageStatistics'},
        'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'},
    }
    def __init__(self, max_data_read_vertex=None, max_execution_time_vertex=None, max_peak_mem_usage_vertex=None, allocated_container_cpu_core_count=None, allocated_container_mem_size=None, used_vertex_cpu_core_count=None, used_vertex_peak_mem_size=None):
        super(JobStatisticsVertexStage, self).__init__()
        # Read-only fields start as None; msrest populates them when
        # deserializing a server response.
        self.data_read = None
        self.data_read_cross_pod = None
        self.data_read_intra_pod = None
        self.data_to_read = None
        self.data_written = None
        self.duplicate_discard_count = None
        self.failed_count = None
        self.max_vertex_data_read = None
        self.min_vertex_data_read = None
        self.read_failure_count = None
        self.revocation_count = None
        self.running_count = None
        self.scheduled_count = None
        self.stage_name = None
        self.succeeded_count = None
        self.temp_data_written = None
        self.total_count = None
        self.total_failed_time = None
        self.total_progress = None
        self.total_succeeded_time = None
        self.total_peak_mem_usage = None
        self.total_execution_time = None
        # Client-settable fields come from the constructor arguments.
        self.max_data_read_vertex = max_data_read_vertex
        self.max_execution_time_vertex = max_execution_time_vertex
        self.max_peak_mem_usage_vertex = max_peak_mem_usage_vertex
        self.estimated_vertex_cpu_core_count = None
        self.estimated_vertex_peak_cpu_core_count = None
        self.estimated_vertex_mem_size = None
        self.allocated_container_cpu_core_count = allocated_container_cpu_core_count
        self.allocated_container_mem_size = allocated_container_mem_size
        self.used_vertex_cpu_core_count = used_vertex_cpu_core_count
        self.used_vertex_peak_mem_size = used_vertex_peak_mem_size
| |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import os.path
import pytest
from unittest import TestCase
import shutil
import bigdl.orca.data
import bigdl.orca.data.pandas
from bigdl.orca import OrcaContext
from bigdl.dllib.nncontext import *
from bigdl.orca.data.image import write_tfrecord, read_tfrecord
class TestSparkBackend(TestCase):
    """Tests for reading csv/json/parquet files into XShards via the Spark
    backend of bigdl.orca.data.pandas, plus TFRecord image round-tripping."""
    def setup_method(self, method):
        # Test resource files live next to the test package tree.
        self.resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
    def test_header_and_names(self):
        """read_csv honors header inference, header=None and explicit names."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        # Default header="infer"
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        data = data_shard.collect()
        assert len(data) == 2, "number of shard should be 2"
        df = data[0]
        assert "location" in df.columns
        file_path = os.path.join(self.resource_path, "orca/data/no_header.csv")
        # No header, default to be '0','1','2'
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=None)
        df2 = data_shard.collect()[0]
        assert '0' in df2.columns and '2' in df2.columns
        # Specify names as header
        data_shard = bigdl.orca.data.pandas.read_csv(
            file_path, header=None, names=["ID", "sale_price", "location"])
        df3 = data_shard.collect()[0]
        assert "sale_price" in df3.columns
    def test_usecols(self):
        """usecols accepts column indices, column names or a callable filter."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, usecols=[0, 1])
        data = data_shard.collect()
        df = data[0]
        assert "sale_price" in df.columns
        assert "location" not in df.columns
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, usecols=["ID"])
        data = data_shard.collect()
        df2 = data[0]
        assert "ID" in df2.columns and "location" not in df2.columns
        def filter_col(name):
            return name == "sale_price"
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, usecols=filter_col)
        data = data_shard.collect()
        df3 = data[0]
        assert "sale_price" in df3.columns and "location" not in df3.columns
    def test_dtype(self):
        """dtype may be one type for all columns or a per-column mapping."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, dtype="float")
        data = data_shard.collect()
        df = data[0]
        assert df.location.dtype == "float64"
        assert df.ID.dtype == "float64"
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, dtype={"sale_price": np.float32})
        data = data_shard.collect()
        df2 = data[0]
        assert df2.sale_price.dtype == "float32" and df2.ID.dtype == "int64"
    def test_squeeze(self):
        """squeeze=True collapses a single-column frame into a Series."""
        import pandas as pd
        file_path = os.path.join(self.resource_path, "orca/data/single_column.csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, squeeze=True)
        data = data_shard.collect()
        df = data[0]
        assert isinstance(df, pd.Series)
    def test_index_col(self):
        """index_col promotes the named column to the frame index."""
        file_path = os.path.join(self.resource_path, "orca/data/csv/morgage1.csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, index_col="ID")
        data = data_shard.collect()
        df = data[0]
        assert 100529 in df.index
    def test_mix(self):
        """Combinations of header/names/usecols/dtype, including invalid mixes."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=0, names=['user', 'item'],
                                                     usecols=[0, 1])
        data = data_shard.collect()
        df = data[0]
        assert "user" in df.columns
        assert "item" in df.columns
        # names that do not cover the selected usecols should raise
        with self.assertRaises(Exception) as context:
            data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=0,
                                                         names=['ID', 'location'], usecols=["ID"])
            data = data_shard.collect()
        self.assertTrue('Passed names did not match usecols'
                        in str(context.exception))
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=0,
                                                     names=['user', 'item'], usecols=[0, 1],
                                                     dtype={0: np.float32, 1: np.int32})
        data = data_shard.collect()
        df2 = data[0]
        assert df2.user.dtype == "float32" and df2.item.dtype == "int32"
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=0,
                                                     names=['user', 'item', 'location'],
                                                     usecols=[1, 2])
        data = data_shard.collect()
        df2 = data[0]
        assert "user" not in df2.columns
        assert "item" in df2.columns
        assert "location" in df2.columns
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=0,
                                                     names=['user', 'item', 'rating'],
                                                     usecols=['user', 'item'],
                                                     dtype={0: np.float32, 1: np.int32})
        data = data_shard.collect()
        df2 = data[0]
        assert df2.user.dtype == "float32" and df2.item.dtype == "int32"
        # dtype keyed by a column index that was not selected should raise
        with self.assertRaises(Exception) as context:
            data_shard = bigdl.orca.data.pandas.read_csv(file_path, header=0,
                                                         names=['user', 'item'], usecols=[0, 1],
                                                         dtype={1: np.float32, 2: np.int32})
            data = data_shard.collect()
        self.assertTrue('column index to be set type is not in current dataframe'
                        in str(context.exception))
    def test_read_invalid_path(self):
        """Reading a non-existent path surfaces the Spark analysis error."""
        file_path = os.path.join(self.resource_path, "abc")
        with self.assertRaises(Exception) as context:
            xshards = bigdl.orca.data.pandas.read_csv(file_path)
        # This error is raised by pyspark.sql.utils.AnalysisException
        self.assertTrue('Path does not exist' in str(context.exception))
    def test_read_json(self):
        """read_json supports default columns, names, usecols and dtype."""
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path)
        data = data_shard.collect()
        df = data[0]
        assert "timestamp" in df.columns and "value" in df.columns
        data_shard = bigdl.orca.data.pandas.read_json(file_path, names=["time", "value"])
        data = data_shard.collect()
        df2 = data[0]
        assert "time" in df2.columns and "value" in df2.columns
        data_shard = bigdl.orca.data.pandas.read_json(file_path, usecols=[0])
        data = data_shard.collect()
        df3 = data[0]
        assert "timestamp" in df3.columns and "value" not in df3.columns
        data_shard = bigdl.orca.data.pandas.read_json(file_path, dtype={"value": "float"})
        data = data_shard.collect()
        df4 = data[0]
        assert df4.value.dtype == "float64"
    def test_read_parquet(self):
        """read_parquet supports column selection and an explicit schema."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        sc = init_nncontext()
        from pyspark.sql.functions import col
        spark = OrcaContext.get_spark_session()
        # Build a small parquet dataset from the csv resources first.
        df = spark.read.csv(file_path, header=True)
        df = df.withColumn('sale_price', col('sale_price').cast('int'))
        temp = tempfile.mkdtemp()
        df.write.parquet(os.path.join(temp, "test_parquet"))
        data_shard2 = bigdl.orca.data.pandas.read_parquet(os.path.join(temp, "test_parquet"))
        assert data_shard2.num_partitions() == 2, "number of shard should be 2"
        data = data_shard2.collect()
        df = data[0]
        assert "location" in df.columns
        data_shard2 = bigdl.orca.data.pandas.read_parquet(os.path.join(temp, "test_parquet"),
                                                          columns=['ID', 'sale_price'])
        data = data_shard2.collect()
        df = data[0]
        assert len(df.columns) == 2
        from pyspark.sql.types import StructType, StructField, IntegerType, StringType
        schema = StructType([StructField("ID", StringType(), True),
                             StructField("sale_price", IntegerType(), True),
                             StructField("location", StringType(), True)])
        data_shard3 = bigdl.orca.data.pandas.read_parquet(os.path.join(temp, "test_parquet"),
                                                          columns=['ID', 'sale_price'],
                                                          schema=schema)
        data = data_shard3.collect()
        df = data[0]
        assert str(df['sale_price'].dtype) == 'int64'
        shutil.rmtree(temp)
    def test_write_read_imagenet(self):
        """Round-trip an imagenet-layout directory through TFRecord files."""
        raw_data = os.path.join(self.resource_path, "imagenet_to_tfrecord")
        temp_dir = tempfile.mkdtemp()
        try:
            write_tfrecord(format="imagenet", imagenet_path=raw_data, output_path=temp_dir)
            data_dir = os.path.join(temp_dir, "train")
            train_dataset = read_tfrecord(format="imagenet", path=data_dir, is_training=True)
            train_dataset.take(1)
        finally:
            # Always clean up the generated TFRecord files.
            shutil.rmtree(temp_dir)
# Allow running this test module directly (python <file>) as well as via pytest.
if __name__ == "__main__":
    pytest.main([__file__])
| |
from __future__ import absolute_import, division, print_function
__authors__ = ["Russell Hewett, Stuart Mumford"]
__email__ = "stuart@mumford.me.uk"
import os
import glob
from collections import OrderedDict
import numpy as np
import astropy.io.fits
import sunpy
from sunpy.map.mapbase import GenericMap, MAP_CLASSES
from sunpy.map.header import MapMeta
from sunpy.map.compositemap import CompositeMap
from sunpy.map.mapcube import MapCube
from sunpy.io.file_tools import read_file
from sunpy.io.header import FileHeader
from sunpy.util.net import download_file
from sunpy.util import expand_list
from sunpy.util.datatype_factory_base import BasicRegistrationFactory
from sunpy.util.datatype_factory_base import NoMatchError
from sunpy.util.datatype_factory_base import MultipleMatchError
from sunpy.util.datatype_factory_base import ValidationFunctionError
from sunpy.extern import six
from sunpy.extern.six.moves.urllib.request import urlopen
# Make a mock DatabaseEntry class if sqlalchemy is not installed
try:
from sunpy.database.tables import DatabaseEntry
except ImportError:
class DatabaseEntry(object):
pass
__all__ = ['Map', 'MapFactory']
class MapFactory(BasicRegistrationFactory):
    """
    Map(*args, **kwargs)

    Map factory class. Used to create a variety of Map objects. Valid map types
    are specified by registering them with the factory.

    Examples
    --------
    >>> import sunpy.map
    >>> sunpy.data.download_sample_data(overwrite=False)   # doctest: +SKIP
    >>> import sunpy.data.sample
    >>> mymap = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)

    The SunPy Map factory accepts a wide variety of inputs for creating maps

    * Preloaded tuples of (data, header) pairs

    >>> mymap = sunpy.map.Map((data, header))   # doctest: +SKIP

    headers are some base of `dict` or `collections.OrderedDict`, including
    `sunpy.io.header.FileHeader` or `sunpy.map.header.MapMeta` classes.

    * data, header pairs, not in tuples

    >>> mymap = sunpy.map.Map(data, header)   # doctest: +SKIP

    * File names

    >>> mymap = sunpy.map.Map('file1.fits')   # doctest: +SKIP

    * All fits files in a directory by giving a directory

    >>> mymap = sunpy.map.Map('local_dir/sub_dir')   # doctest: +SKIP

    * Some regex globs

    >>> mymap = sunpy.map.Map('eit_*.fits')   # doctest: +SKIP

    * URLs

    >>> mymap = sunpy.map.Map(url_str)   # doctest: +SKIP

    * DatabaseEntry

    >>> mymap = sunpy.map.Map(db_result)   # doctest: +SKIP

    * Lists of any of the above

    >>> mymap = sunpy.map.Map(['file1.fits', 'file2.fits', 'file3.fits', 'directory1/'])  # doctest: +SKIP

    * Any mixture of the above not in a list

    >>> mymap = sunpy.map.Map((data, header), data2, header2, 'file1.fits', url_str, 'eit_*.fits')  # doctest: +SKIP
    """

    def _read_file(self, fname, **kwargs):
        """Read ``fname`` and return the list of (data, meta) pairs in it.

        Extra keyword arguments are forwarded to `sunpy.io.read_file` so this
        works for FITS, JPEG2000, etc.  HDUs whose data is not at least 2D
        (e.g. tables or empty primary HDUs) are skipped: they cannot be maps.
        """
        pairs = read_file(fname, **kwargs)
        new_pairs = []
        for filedata, filemeta in pairs:
            assert isinstance(filemeta, FileHeader)
            # Only data with 2 or more dimensions can form an image map.
            if len(np.shape(filedata)) > 1:
                new_pairs.append((filedata, MapMeta(filemeta)))
        return new_pairs

    def _validate_meta(self, meta):
        """Return True if ``meta`` is usable as a map header
        (a FITS Header or any dict subclass)."""
        return isinstance(meta, (astropy.io.fits.header.Header, dict))

    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs. args can contain any
        mixture of the following entries:
        * tuples of data, header
        * data, header not in a tuple
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Returns a ``(data_header_pairs, already_maps)`` tuple.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')
        """
        data_header_pairs = list()
        already_maps = list()
        # Account for nested lists of items
        args = expand_list(args)
        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):
            arg = args[i]
            # Data-header pair in a tuple or list
            if (isinstance(arg, (tuple, list)) and
                    len(arg) == 2 and
                    isinstance(arg[0], np.ndarray) and
                    self._validate_meta(arg[1])):
                # Bug fix: build a fresh pair instead of assigning into
                # ``arg`` -- tuples do not support item assignment, so the
                # previous ``arg[1] = OrderedDict(arg[1])`` raised TypeError
                # for the documented tuple input.
                data_header_pairs.append((arg[0], OrderedDict(arg[1])))
            # Data-header pair not in a tuple.  Guard the look-ahead so a
            # bare trailing ndarray raises ValueError below, not IndexError.
            elif (isinstance(arg, np.ndarray) and
                    i + 1 < len(args) and
                    self._validate_meta(args[i + 1])):
                data_header_pairs.append((arg, OrderedDict(args[i + 1])))
                i += 1  # an extra increment to account for the data-header pairing
            # File name
            elif (isinstance(arg, six.string_types) and
                    os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                data_header_pairs += self._read_file(path, **kwargs)
            # Directory
            elif (isinstance(arg, six.string_types) and
                    os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                for elem in os.listdir(path):
                    data_header_pairs += self._read_file(os.path.join(path, elem), **kwargs)
            # Glob
            elif isinstance(arg, six.string_types) and '*' in arg:
                for afile in glob.glob(os.path.expanduser(arg)):
                    data_header_pairs += self._read_file(afile, **kwargs)
            # Already a Map
            elif isinstance(arg, GenericMap):
                already_maps.append(arg)
            # A URL
            elif isinstance(arg, six.string_types) and _is_url(arg):
                default_dir = sunpy.config.get("downloads", "download_dir")
                path = download_file(arg, default_dir)
                data_header_pairs += self._read_file(path, **kwargs)
            # A database Entry
            elif isinstance(arg, DatabaseEntry):
                data_header_pairs += self._read_file(arg.path, **kwargs)
            else:
                raise ValueError("File not found or invalid input")
            i += 1
        # TODO: already-instantiated maps should be returned in the same
        # order as the input arguments; currently they are not.
        return data_header_pairs, already_maps

    def __call__(self, *args, **kwargs):
        """ Method for running the factory. Takes arbitrary arguments and
        keyword arguments and passes them to a sequence of pre-registered types
        to determine which is the correct Map-type to build.

        Arguments args and kwargs are passed through to the validation
        function and to the constructor for the final type. For Map types,
        validation function must take a data-header pair as an argument.

        Parameters
        ----------
        composite : boolean, optional
            Indicates if collection of maps should be returned as a CompositeMap
        cube : boolean, optional
            Indicates if collection of maps should be returned as a MapCube
        silence_errors : boolean, optional
            If set, ignore data-header pairs which cause an exception.

        Notes
        -----
        Extra keyword arguments are passed through to `sunpy.io.read_file` such
        as `memmap` for FITS files.
        """
        # Hack to get around Python 2.x not backporting PEP 3102.
        composite = kwargs.pop('composite', False)
        cube = kwargs.pop('cube', False)
        silence_errors = kwargs.pop('silence_errors', False)
        data_header_pairs, already_maps = self._parse_args(*args, **kwargs)
        new_maps = list()
        # Loop over each registered type and check to see if WidgetType
        # matches the arguments. If it does, use that type.
        for data, header in data_header_pairs:
            meta = MapMeta(header)
            try:
                new_map = self._check_registered_widgets(data, meta, **kwargs)
            except (NoMatchError, MultipleMatchError, ValidationFunctionError):
                if not silence_errors:
                    raise
                # Bug fix: the original fell through and appended an unbound
                # ``new_map`` (NameError) when errors were silenced.
                continue
            new_maps.append(new_map)
        new_maps += already_maps
        # If the list is meant to be a cube, instantiate a map cube
        if cube:
            return MapCube(new_maps, **kwargs)
        # If the list is meant to be a composite map, instantiate one
        if composite:
            return CompositeMap(new_maps, **kwargs)
        # A single map is returned unwrapped.
        if len(new_maps) == 1:
            return new_maps[0]
        return new_maps

    def _check_registered_widgets(self, data, meta, **kwargs):
        """Return an instance of the unique registered Map subclass whose
        validation function accepts this (data, meta) pair.

        Raises NoMatchError when nothing matches and no default is set, and
        MultipleMatchError when more than one type claims the pair.
        """
        candidate_widget_types = list()
        for key in self.registry:
            # Call the registered validation function for each registered class
            if self.registry[key](data, meta, **kwargs):
                candidate_widget_types.append(key)
        n_matches = len(candidate_widget_types)
        if n_matches == 0:
            if self.default_widget_type is None:
                raise NoMatchError("No types match specified arguments and no default is set.")
            candidate_widget_types = [self.default_widget_type]
        elif n_matches > 1:
            raise MultipleMatchError("Too many candidate types identified ({0}). Specify enough keywords to guarantee unique type identification.".format(n_matches))
        # Only one is found
        WidgetType = candidate_widget_types[0]
        return WidgetType(data, meta, **kwargs)
def _is_url(arg):
try:
urlopen(arg)
except:
return False
return True
class InvalidMapInput(ValueError):
    """Raised when an input variable is neither a Map instance nor
    something that resolves to a valid Map input file."""
class InvalidMapType(ValueError):
    """Raised when an invalid type of map is requested with Map."""
class NoMapsFound(ValueError):
    """Raised when the input does not point to any valid maps or files."""
# The factory singleton exposed as ``sunpy.map.Map``.  GenericMap is the
# fallback when no registered source matches; candidate classes are matched
# via their ``is_datasource_for`` validation function.
Map = MapFactory(default_widget_type=GenericMap,
                 additional_validation_functions=['is_datasource_for'])
# Registered map sources come from the module-level MAP_CLASSES registry.
Map.registry = MAP_CLASSES
| |
import requests
from requests_oauthlib import OAuth1
class BricklinkException(Exception):
    """Raised when the Bricklink API answers with a non-200 ``meta`` code.

    :param code: HTTP-style status code from the response ``meta`` block
    :param message: short error identifier from the API
    :param description: human-readable error description from the API
    """
    def __init__(self, code, message, description):
        self.code = code
        self.message = message
        self.description = description
        # Bug fix: the original called ``Exception.__init__(self)`` with the
        # exception instance itself, leaving a self-referential ``args``
        # tuple; pass the real arguments so repr()/pickling behave sanely.
        super(BricklinkException, self).__init__(code, message, description)

    def __str__(self):
        return "%d - %s: %s" % (self.code, self.message, self.description)
class BricklinkRequester(object):
    """Helper class which performs the actual REST calls to Bricklink.

    Responses are expected to carry Bricklink's ``meta``/``data`` envelope;
    a non-200 ``meta.code`` is converted into a BricklinkException.
    """

    BRICKLINK_URL = "https://api.bricklink.com/api/store/v1"

    def __init__(self, oauth_consumer_key, oauth_consumer_secret,
                 oauth_access_token, oauth_access_token_secret):
        """
        Creates object which allows authenticated REST calls to Bricklink
        :param oauth_consumer_key: The Consumer key provided by Bricklink
        :param oauth_consumer_secret: The Consumer secret provided by Bricklink
        :param oauth_access_token: The Access Token provided by Bricklink
        :param oauth_access_token_secret: The Access Token Secret provided by Bricklink
        """
        self._oauth = OAuth1(
            oauth_consumer_key,
            oauth_consumer_secret,
            oauth_access_token,
            oauth_access_token_secret
        )

    def _parse_optional_params(self, **optional):
        """Build the query-string suffix from the optional params that are set.

        Bug fix: the original tested ``if value:``, silently dropping
        legitimate falsy values such as ``0`` and ``False`` (e.g.
        ``break_minifigs=False`` could never reach the API).  Only ``None``
        (meaning "not supplied") is skipped now.  Also returns "" instead of
        a dangling "?" when no parameter is set.
        """
        optional_params = []
        for key, value in optional.items():
            if value is not None:
                optional_params.append("%s=%s" % (key, str(value)))
        if not optional_params:
            return ""
        return "?%s" % ("&".join(optional_params))

    def _parse_response(self, response):
        """Unwrap the meta/data envelope; raise on API-level errors."""
        if response['meta']['code'] != 200:
            raise BricklinkException(**response['meta'])
        return response['data']

    def get(self, path, **optional):
        """
        Performs a GET REST call to the Bricklink API
        :param path: The Bricklink API path
        :param optional: All optional parameters that need to be passed to Bricklink
        :return: BricklinkException when request failed, otherwise the parsed JSON from the API
        """
        response = requests.get(
            self.BRICKLINK_URL + path + self._parse_optional_params(**optional),
            auth=self._oauth
        ).json()
        return self._parse_response(response)
class BricklinkApi(object):
    """Thin wrapper around the Bricklink (https://bricklink.com) catalog API.

    Every method builds the endpoint path, delegates to the injected
    requester and returns the parsed JSON payload from the ``data`` envelope.
    Item ``type`` arguments accept: MINIFIG, PART, SET, BOOK, GEAR, CATALOG,
    INSTRUCTION, UNSORTED_LOT or ORIGINAL_BOX.
    """

    def __init__(self, requester):
        """Store the helper object that performs the actual REST calls."""
        self._requester = requester

    def getCatalogItem(self, type, no):
        """Return catalog information (name, category, dimensions, weight,
        year released, ...) for the item identified by ``type`` and ``no``."""
        path = "/items/%s/%s" % (type, no)
        return self._requester.get(path)

    def getCatalogItemImage(self, type, no, color_id):
        """Return the image/thumbnail URL entry of the item in the given
        Bricklink ``color_id``."""
        path = "/items/%s/%s/images/%d" % (type, no, color_id)
        return self._requester.get(path)

    def getCatalogSupersets(self, type, no, color_id=None):
        """Return the list of items (grouped per color) that include the
        specified item; ``color_id`` optionally restricts the color."""
        path = "/items/%s/%s/supersets" % (type, no)
        return self._requester.get(path, color_id=color_id)

    def getCatalogSubsets(self, type, no, color_id=None, box=None, instruction=None,
                          break_minifigs=None, break_subsets=None):
        """Return the list of items contained in the specified item.

        Optional flags: ``box``/``instruction`` say whether the original box
        and instructions are included; ``break_minifigs``/``break_subsets``
        control whether minifigs and sub-sets are broken down into parts.
        """
        path = "/items/%s/%s/subsets" % (type, no)
        return self._requester.get(
            path,
            box=box,
            break_minifigs=break_minifigs,
            break_subsets=break_subsets,
            color_id=color_id,
            instruction=instruction
        )

    def getCatalogPriceGuide(self, type, no, color_id=None, guide_type=None, new_or_used=None,
                             country_code=None, region=None, currency_code=None, vat=None):
        """Return price statistics for the item.

        ``guide_type``: "sold" (last 6 months of sales) or "stock" (current
        items for sale, the default).  ``new_or_used``: "N" new (default) or
        "U" used.  ``vat``: "Y" to include VAT from VAT-enabled stores
        ("N" by default).  The remaining optional filters restrict results by
        color, seller country/region, and convert prices to ``currency_code``.
        """
        path = "/items/%s/%s/price" % (type, no)
        return self._requester.get(
            path,
            color_id=color_id,
            guide_type=guide_type,
            new_or_used=new_or_used,
            country_code=country_code,
            region=region,
            currency_code=currency_code,
            vat=vat
        )

    def getCatalogKnownColors(self, type, no):
        """Return the currently known colors of the item, with quantities."""
        path = "/items/%s/%s/colors" % (type, no)
        return self._requester.get(path)

    def getColorList(self):
        """Return the list of colors defined in the Bricklink catalog."""
        return self._requester.get("/colors")

    def getColor(self, color_id):
        """Return name, code and type of the Bricklink color ``color_id``."""
        return self._requester.get("/colors/%s" % color_id)

    def getCategoryList(self):
        """Return all categories defined in the Bricklink catalog."""
        return self._requester.get("/categories")

    def getCategory(self, category_id):
        """Return name and parent of the catalog category ``category_id``
        (a parent of 0 denotes a root category)."""
        return self._requester.get("/categories/%d" % category_id)

    def getElementId(self, type, no):
        """Return the part-color-code (a.k.a. element id) mappings of the
        specified item; ``type`` must be PART."""
        path = "/item_mapping/%s/%s" % (type, no)
        return self._requester.get(path)

    def getItemNumber(self, element_id):
        """Return the catalog item mappings for a part-color-code
        (a.k.a. element id)."""
        return self._requester.get("/item_mapping/%s" % element_id)
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
from openstack_dashboard.dashboards.project.volumes \
import tables as volume_tables
class LaunchSnapshot(volume_tables.LaunchVolume):
    """Row action that launches an instance from a volume snapshot
    (legacy launch-instance workflow)."""
    name = "launch_snapshot"

    def get_link_url(self, datum):
        # Preselect the snapshot as the instance boot source.
        query = urlencode({
            "source_type": "volume_snapshot_id",
            "source_id": "%s:snap" % self.table.get_object_id(datum),
        })
        return "?".join([reverse(self.url), query])

    def allowed(self, request, snapshot=None):
        # Only available snapshots of bootable volumes can be launched.
        if snapshot:
            volume = snapshot._volume
            if volume and getattr(volume, 'bootable', '') == 'true':
                return snapshot.status == "available"
        return False
class LaunchSnapshotNG(LaunchSnapshot):
    """Angular launch-instance variant of LaunchSnapshot.

    Instead of linking to a server-rendered form, it wires ``ng-click`` to
    open the launch-instance wizard with the snapshot preselected.
    """
    name = "launch_snapshot_ng"
    verbose_name = _("Launch as Instance")
    url = "horizon:project:snapshots:index"
    classes = ("btn-launch", )
    ajax = False

    def __init__(self, attrs=None, **kwargs):
        # preempt makes the action render before the row data is loaded.
        kwargs['preempt'] = True
        # Bug fix: the original called super(LaunchSnapshot, self), naming the
        # parent class and thereby skipping this class's own slot in the MRO;
        # super() must be given the class it appears in.
        super(LaunchSnapshotNG, self).__init__(attrs, **kwargs)

    def get_link_url(self, datum):
        url = reverse(self.url)
        vol_id = self.table.get_object_id(datum)
        # The wizard is opened client-side; the href itself is a no-op.
        ngclick = "modal.openLaunchInstanceWizard(" \
                  "{successUrl: '%s', snapshotId: '%s'})" % (url, vol_id)
        self.attrs.update({
            "ng-controller": "LaunchInstanceModalController as modal",
            "ng-click": ngclick
        })
        return "javascript:void(0);"
class DeleteVolumeSnapshot(policy.PolicyTargetMixin, tables.DeleteAction):
    # Table action that schedules deletion of the selected snapshots.
    help_text = _("Deleted volume snapshots are not recoverable.")

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Delete Volume Snapshot",
            u"Delete Volume Snapshots",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense message shown after the deletion has been submitted.
        return ungettext_lazy(
            u"Scheduled deletion of Volume Snapshot",
            u"Scheduled deletion of Volume Snapshots",
            count
        )

    policy_rules = (("volume", "volume:delete_snapshot"),)
    # The policy check targets the project that owns the snapshot.
    policy_target_attrs = (("project_id",
                            'os-extended-snapshot-attributes:project_id'),)

    def delete(self, request, obj_id):
        # Delegate to cinder; errors are handled by the table framework.
        api.cinder.volume_snapshot_delete(request, obj_id)
class EditVolumeSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action that opens the snapshot edit modal."""
    name = "edit"
    verbose_name = _("Edit Snapshot")
    url = "horizon:project:snapshots:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:update_snapshot"),)
    policy_target_attrs = (("project_id",
                            'os-extended-snapshot-attributes:project_id'),)

    def allowed(self, request, snapshot=None):
        # Robustness fix: ``snapshot`` defaults to None (the framework can
        # probe an action without a datum) and the original unconditionally
        # dereferenced it, raising AttributeError.  Only available snapshots
        # may be edited.
        return snapshot is not None and snapshot.status == "available"

    def get_link_url(self, datum):
        # Return to the current (possibly paginated) table on success.
        params = urlencode({"success_url": self.table.get_full_url()})
        snapshot_id = self.table.get_object_id(datum)
        return "?".join([reverse(self.url, args=(snapshot_id,)), params])
class CreateVolumeFromSnapshot(tables.LinkAction):
    """Row action linking to the create-volume form, seeded with the
    selected snapshot as the volume source."""
    name = "create_from_snapshot"
    verbose_name = _("Create Volume")
    url = "horizon:project:volumes:create"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("volume", "volume:create"),)

    def get_link_url(self, datum):
        query = urlencode({"snapshot_id": self.table.get_object_id(datum)})
        return "?".join([reverse(self.url), query])

    def allowed(self, request, volume=None):
        # Volumes can only be created from available snapshots, and only
        # when the volume service itself is enabled.
        if not volume:
            return False
        if not cinder.is_volume_service_enabled(request):
            return False
        return volume.status == "available"
class UpdateMetadata(tables.LinkAction):
    # Opens the Angular metadata-editor modal for a snapshot.
    name = "update_metadata"
    verbose_name = _("Update Metadata")
    ajax = False
    attrs = {"ng-controller": "MetadataModalHelperController as modal"}

    def __init__(self, **kwargs):
        # preempt makes the action render before the row data is loaded.
        kwargs['preempt'] = True
        super(UpdateMetadata, self).__init__(**kwargs)

    def get_link_url(self, datum):
        obj_id = self.table.get_object_id(datum)
        # The modal is opened client-side via ng-click; the href is a no-op.
        self.attrs['ng-click'] = (
            "modal.openMetadataModal('volume_snapshot', '%s', true)" % obj_id)
        return "javascript:void(0);"
class UpdateRow(tables.Row):
    # Enables per-row AJAX refresh while a snapshot is in a transient state.
    ajax = True

    def get_data(self, request, snapshot_id):
        snapshot = cinder.volume_snapshot_get(request, snapshot_id)
        # Attach the source volume so columns and actions can inspect it
        # (e.g. the bootable check in LaunchSnapshot.allowed).
        snapshot._volume = cinder.volume_get(request, snapshot.volume_id)
        return snapshot
class SnapshotVolumeNameColumn(tables.WrappingColumn):
    """Column displaying the snapshot's source volume, linked to that
    volume's detail page when the volume is known."""

    def get_raw_data(self, snapshot):
        source = snapshot._volume
        if not source:
            return _("Unknown")
        return source.name

    def get_link_url(self, snapshot):
        source = snapshot._volume
        # No link is produced when the source volume could not be resolved.
        if source:
            return reverse(self.link, args=(source.id,))
class VolumeSnapshotsFilterAction(tables.FilterAction):
    def filter(self, table, snapshots, filter_string):
        """Naive case-insensitive search over snapshot names."""
        needle = filter_string.lower()
        matches = []
        for snap in snapshots:
            if needle in snap.name.lower():
                matches.append(snap)
        return matches
class VolumeDetailsSnapshotsTable(volume_tables.VolumesTableBase):
    # Snapshot table as shown on the volume details page.
    name = tables.WrappingColumn(
        "name",
        verbose_name=_("Name"),
        link="horizon:project:snapshots:detail")

    class Meta(object):
        name = "volume_snapshots"
        verbose_name = _("Volume Snapshots")
        pagination_param = 'snapshot_marker'
        prev_pagination_param = 'prev_snapshot_marker'
        table_actions = (VolumeSnapshotsFilterAction, DeleteVolumeSnapshot,)

        # Assemble launch actions according to which launch-instance
        # workflows (legacy and/or Angular) are enabled in settings.
        launch_actions = ()
        if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False):
            launch_actions = (LaunchSnapshot,) + launch_actions
        if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True):
            launch_actions = (LaunchSnapshotNG,) + launch_actions

        row_actions = ((CreateVolumeFromSnapshot,) + launch_actions +
                       (EditVolumeSnapshot, DeleteVolumeSnapshot,
                        UpdateMetadata))
        row_class = UpdateRow
        # Rows auto-refresh while "status" reports a transient state.
        status_columns = ("status",)
        permissions = [
            ('openstack.services.volume', 'openstack.services.volumev2'),
        ]
class VolumeSnapshotsTable(VolumeDetailsSnapshotsTable):
    # Snapshot listing for the main snapshots panel; extends the detail
    # table with a column linking back to the source volume.
    volume_name = SnapshotVolumeNameColumn(
        "name",
        verbose_name=_("Volume Name"),
        link="horizon:project:volumes:detail")

    class Meta(VolumeDetailsSnapshotsTable.Meta):
        pass
| |
#!/usr/bin/env python
"""
Author: David Quail, September, 2017.
Description:
LearningForeground contains a collection of GVF's. It accepts new state representations, learns, and then takes action.
"""
import threading
import yaml
from TileCoder import *
from GVF import *
from GridWorld import *
from BehaviorPolicy import *
import time
import numpy
"""
sets up the subscribers and starts to broadcast the results in a thread every 0.1 seconds
"""
# Base learning rate; each GVF divides it by the number of active features
# so the effective step size is per active feature.
alpha = 0.1
numberOfActiveFeatures = 10  # When using GVF predictive state representations
#numberOfActiveFeatures = 1
def makeSeeColorGammaFunction(color):
    """Build a gamma (continuation) function that terminates (gamma = 0)
    as soon as the agent is looking at ``color`` and continues
    (gamma = 1) otherwise.
    """
    def gammaFunction(colorObservation):
        return 0 if colorObservation.colorLookingAt == color else 1
    return gammaFunction
def zeroGamma(state):
    """Gamma function for myopic (one-step) predictions: always terminate."""
    return 0
def timestepCumulant(state):
    """Cumulant that accumulates 1 per timestep (i.e. step counting)."""
    return 1
def turnLeftPolicy(state):
    """Fixed target policy: always turn left."""
    return "L"
def turnRightPolicy(state):
    """Fixed target policy: always turn right."""
    return "R"
def goForwardPolicy(state):
    """Fixed target policy: always move forward."""
    return "F"
class LearningForeground:
    """Container for the grid world, the behavior policy, and a set of
    GVF 'demons'.

    Each step of `start` asks the behavior policy for an action, applies
    it to the world, builds a feature representation of the resulting
    observation, and lets every demon learn from the transition.
    """

    def __init__(self):
        self.gridWorld = initWorldFromJson('CompassWorld.json')
        # Fixed start cell/orientation in Compass World.
        self.gridWorld.currentColumn = 1
        self.gridWorld.currentRow = 1
        self.gridWorld.currentOrientation = 270
        self.demons = []
        self.behaviorPolicy = BehaviorPolicy()
        # lastAction / currentAction let GVF-based state representations
        # peek at what the agent just did and is about to do.
        self.lastAction = 0
        self.currentAction = 0
        # self.featureRepresentationLength = 6*6*4 + 6  # fully observable variant
        # 6 color bits + 2 bump bits
        # + GVF bits (10 GVFs * 6 prediction buckets * 3 actions) = 206.
        self.featureRepresentationLength = 6 + 2 + 10 * 6 * 3
        # Swap the factory used inside createGVFs to run a different experiment.
        self.demons = self.createGVFs()
        self.previousState = False

    def createStepsToRedGVFs(self):
        """Create two demons: steps-to-red going forward, plus the
        composed 'turn left then go forward' prediction built on it.
        """
        gvfs = []
        color = 'red'

        gvfName = "StepsToWallGVF. Color: " + color + ", action: " + "F"
        gvfStraight = GVF(self.featureRepresentationLength,
                          alpha / numberOfActiveFeatures,
                          isOffPolicy=True, name=gvfName)
        gvfStraight.gamma = makeSeeColorGammaFunction(color)
        # TODO - future cumulants need to be outputs from other GVFs.
        gvfStraight.cumulant = timestepCumulant
        gvfStraight.policy = goForwardPolicy
        gvfs.append(gvfStraight)

        gvfName = "StepsToWallGVF. Color: " + color + ", action: " + "LF"
        gvfTurn = GVF(self.featureRepresentationLength,
                      alpha / numberOfActiveFeatures,
                      isOffPolicy=True, name=gvfName)
        gvfTurn.gamma = zeroGamma

        def turnCumulant(state):
            # One step for the turn itself plus the straight-ahead
            # prediction from the post-turn state.
            return 1 + gvfStraight.prediction(state)

        gvfTurn.cumulant = turnCumulant
        gvfTurn.policy = turnLeftPolicy
        gvfs.append(gvfTurn)

        return gvfs

    def createAllColorGVFs(self):
        """Create a pair of demons per wall color: steps-to-color going
        forward, and the composed 'turn left then forward' prediction.
        """
        gvfs = []
        for color in ['red', 'orange', 'yellow', 'green', 'blue']:
            # Demon predicting steps until `color` is seen going straight.
            gvfName = "StepsToWallGVF. Color: " + color + ", action: " + "F"
            gvfStraight = GVF(self.featureRepresentationLength,
                              alpha / numberOfActiveFeatures,
                              isOffPolicy=True, name=gvfName)
            gvfStraight.gamma = makeSeeColorGammaFunction(color)
            # TODO - future cumulants need to be outputs from other GVFs.
            gvfStraight.cumulant = timestepCumulant
            gvfStraight.policy = goForwardPolicy
            gvfs.append(gvfStraight)

            # Demon predicting steps to `color` when turning left first.
            gvfName = "StepsToWallGVF. Color: " + color + ", action: " + "LF"
            gvfTurn = GVF(self.featureRepresentationLength,
                          alpha / numberOfActiveFeatures,
                          isOffPolicy=True, name=gvfName)
            gvfTurn.gamma = zeroGamma

            def turnCumulant(state, straightDemon=gvfStraight):
                # BUGFIX: bind the current gvfStraight via a default
                # argument.  Python closures capture variables late, so
                # without this every turnCumulant created in this loop
                # would use the *last* color's straight demon.
                return 1 + straightDemon.prediction(state)

            gvfTurn.cumulant = turnCumulant
            gvfTurn.policy = turnLeftPolicy
            gvfs.append(gvfTurn)

        return gvfs

    def createStepsToWallForwardAndLeftAllColorsGVFs(self):
        """Create one steps-to-color-going-forward demon per wall color."""
        gvfs = []
        for color in ['red', 'orange', 'yellow', 'green', 'blue']:
            gvfName = "StepsToWallGVF. Color: " + color + ", action: " + "F"
            gvfOff = GVF(self.featureRepresentationLength,
                         alpha / numberOfActiveFeatures,
                         isOffPolicy=True, name=gvfName)
            gvfOff.gamma = makeSeeColorGammaFunction(color)
            # TODO - future cumulants need to be outputs from other GVFs.
            gvfOff.cumulant = timestepCumulant
            gvfOff.policy = goForwardPolicy
            gvfs.append(gvfOff)
        return gvfs

    def createGVFs(self):
        """Build the demon collection for the current experiment.

        Comment/uncomment the alternative factories to switch experiments.
        """
        return self.createAllColorGVFs()
        # return self.createStepsToRedGVFs()

    def createFeatureRepresentation(self, grid, reward, observation):
        """Create a feature vector from the immediate observation (and,
        in the partially observable case, the demons' predictions).
        """
        # return self.createFullyObservableRepresentation(grid, reward, observation)
        return self.createPartiallyObservableRepresentation(grid, reward, observation)

    def createPartiallyObservableRepresentation(self, grid, reward, observation):
        """Binary feature vector built from:
          - one bit for the color observed (bits 0-5, 5 = unknown color),
          - one bit for the bump sensor (bit 6 = bumped, bit 7 = not),
          - one bit per demon encoding its bucketed prediction for the
            previous state, offset by the current action.
        """
        rep = numpy.zeros(self.featureRepresentationLength)

        # Color bit: 0-4 for the known colors, 5 for anything else.
        colorBits = {"red": 0, "orange": 1, "yellow": 2, "green": 3, "blue": 4}
        rep[colorBits.get(observation['color'], 5)] = 1

        # Bump bit.
        rep[6 if observation['didBump'] else 7] = 1

        # One bit per demon: bucket its prediction for the previous state
        # into 0..5 (5 = "6 or more steps away"), offset by which action
        # is currently being taken.
        firstDemonBit = 8
        for demonIterator, demon in enumerate(self.demons):
            priorDemonOutput = 0
            # lastAction is falsy (0) only before the first step, when
            # there is no previous state to predict from.
            if self.lastAction:
                priorDemonOutput = demon.prediction(self.previousState)
            demonBit = 5
            if priorDemonOutput < 5:
                demonBit = int(round(priorDemonOutput))
            if self.currentAction == "L":
                actionMultiplier = 0
            elif self.currentAction == "F":
                actionMultiplier = 1
            else:
                actionMultiplier = 2
            actualDemonBit = (firstDemonBit + demonIterator * 6 * 3
                              + demonBit + actionMultiplier * 3)
            rep[actualDemonBit] = 1

        # TODO - when culling, replace a GVF in place in the demons array
        # rather than deleting it and appending a new one, so that bit
        # positions stay stable.
        return rep

    def createFullyObservableRepresentation(self, grid, reward, observation):
        """Binary feature vector encoding the exact grid cell and
        orientation (6 x 6 x 4 bits).  The facing-color bit is computed
        but currently not set (see TODO below).
        """
        rep = numpy.zeros(self.featureRepresentationLength)
        # BUGFIX: the cell index must combine row and column; the
        # original used currentRow for both terms.
        gridIndex = ((self.gridWorld.currentRow - 1) * 6
                     + (self.gridWorld.currentColumn - 1))
        if self.gridWorld.currentOrientation == 90:
            orientationMultiplier = 1
        elif self.gridWorld.currentOrientation == 180:
            orientationMultiplier = 2
        elif self.gridWorld.currentOrientation == 270:
            orientationMultiplier = 3
        else:
            orientationMultiplier = 0
        gridIndex = gridIndex + 6 * 6 * orientationMultiplier
        rep[gridIndex] = 1

        colorLookingAt = self.gridWorld.currentColorSeen()
        if colorLookingAt == "red":
            colorIdx = 0
        elif colorLookingAt == 'orange':
            colorIdx = 1
        elif colorLookingAt == 'yellow':
            colorIdx = 2
        elif colorLookingAt == 'green':
            colorIdx = 3
        elif colorLookingAt == 'blue':
            colorIdx = 4
        elif colorLookingAt == 'white':
            colorIdx = 5
        else:
            colorIdx = 6
        colorIdx = colorIdx + 6 * 6 * 4
        # rep[colorIdx] = 1  # TODO - change to include the color bit
        return rep

    def start(self):
        """Run the learning loop forever: act, observe, build features,
        let every demon learn, then roll the state forward.
        """
        print("Initial world:")
        self.gridWorld.printWorld()

        i = 0
        while (True):
            if i % 50000 == 0:
                print("========== Timestep: " + str(i))
            i = i + 1

            action = self.behaviorPolicy.policy(self.previousState)
            self.currentAction = action
            print("Grid world before action: ")
            self.gridWorld.printWorld()
            print("Action being taken: " + str(action))

            (grid, reward, observation) = self.gridWorld.takeAction(action)
            featureRep = self.createFeatureRepresentation(grid, reward, observation)

            stateRepresentation = StateRepresentation()
            stateRepresentation.X = featureRep
            stateRepresentation.colorLookingAt = observation["color"]
            stateRepresentation.didBump = observation["didBump"]

            self.updateDemons(self.previousState, action, stateRepresentation)

            if not self.previousState:
                self.previousState = StateRepresentation()

            self.lastAction = action
            self.previousState = stateRepresentation

    def updateDemons(self, oldState, action, newState):
        """Let every demon learn from the (oldState, action, newState)
        transition.  Skipped on the very first step, before a previous
        state exists.
        """
        if self.previousState:
            for demon in self.demons:
                predBefore = demon.prediction(self.previousState)
                demon.learn(oldState, action, newState)
                print("Demon " + demon.name + " prediction before: " + str(predBefore))
                print("Demon" + demon.name + " prediction after: " + str(demon.prediction(self.previousState)))
def start():
    """Entry point: build the foreground and run its (infinite) learning loop."""
    foreground = LearningForeground()
    foreground.start()


if __name__ == "__main__":
    # Guard the entry point so that importing this module does not start
    # the never-terminating learning loop as a side effect.
    start()
| |
#
# (C) Copyright 2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Base interfaces of PyXMPP2.
:Variables:
- `NO_CHANGE`: constant used as the default for some function arguments
"""
# pylint: disable-msg=R0201
__docformat__ = "restructuredtext en"
import logging
from abc import ABCMeta, abstractmethod
try:
# pylint: disable=E0611
from abc import abstractclassmethod
except ImportError:
# pylint: disable=C0103
abstractclassmethod = classmethod
from copy import deepcopy
# pylint: disable=W0611
from .mainloop.interfaces import Event, QUIT, EventHandler, event_handler
# pylint: disable=W0611
from .mainloop.interfaces import TimeoutHandler, timeout_handler
class Resolver(metaclass=ABCMeta):
    """Abstract base class for asynchronous DNS resolvers to be used
    with PyXMPP.
    """
    @abstractmethod
    def resolve_srv(self, domain, service, protocol, callback):
        """Start an SRV lookup for `service` (over `protocol`) at `domain`.

        On success `callback` receives a properly sorted list of
        (hostname, port) pairs.  The list is empty on error and contains
        only (".", 0) when the service is explicitly disabled.

        :Parameters:
            - `domain`: domain name to look up
            - `service`: service name, e.g. 'xmpp-client'
            - `protocol`: protocol name, e.g. 'tcp'
            - `callback`: a function to be called with the list of
              received addresses
        :Types:
            - `domain`: `str`
            - `service`: `str`
            - `protocol`: `str`
            - `callback`: function accepting a single argument
        """
        raise NotImplementedError

    @abstractmethod
    def resolve_address(self, hostname, callback, allow_cname = True):
        """Start an A or AAAA lookup for `hostname`.

        On success `callback` receives a list of IPv4/IPv6 address
        literals; the list is empty on error.

        :Parameters:
            - `hostname`: the host name to look up
            - `callback`: a function to be called with the list of
              received addresses
            - `allow_cname`: `True` if CNAMEs should be followed
        :Types:
            - `hostname`: `str`
            - `callback`: function accepting a single argument
            - `allow_cname`: `bool`
        """
        raise NotImplementedError
class XMPPTransport(metaclass=ABCMeta):
    """Abstract base class for XMPP transport implementations."""

    @abstractmethod
    def set_target(self, stream):
        """Make `stream` the target for this transport instance.

        The target's 'stream_start', 'stream_end' and 'stream_element'
        methods will be called when matching content is received.

        :Parameters:
            - `stream`: the stream handler to receive stream content
              from the transport
        :Types:
            - `stream`: `StreamBase`
        """
        pass

    @abstractmethod
    def send_stream_head(self, stanza_namespace, stream_from, stream_to,
                        stream_id = None, version = '1.0', language = None):
        """Send the stream head via the transport.

        :Parameters:
            - `stanza_namespace`: namespace of stream stanzas (e.g.
              'jabber:client')
            - `stream_from`: the 'from' attribute of the stream; may be
              `None`
            - `stream_to`: the 'to' attribute of the stream; may be `None`
            - `stream_id`: the 'id' attribute of the stream; may be `None`
            - `version`: the 'version' of the stream
            - `language`: the 'xml:lang' of the stream
        :Types:
            - `stanza_namespace`: `str`
            - `stream_from`: `str`
            - `stream_to`: `str`
            - `version`: `str`
            - `language`: `str`
        """
        # pylint: disable-msg=R0913
        pass

    @abstractmethod
    def restart(self):
        """Restart the stream after a SASL or StartTLS handshake.

        The initiator must also call `send_stream_head` again afterwards.
        """
        pass

    @abstractmethod
    def send_stream_tail(self):
        """Send the stream tail via the transport."""
        pass

    @abstractmethod
    def send_element(self, element):
        """Send an element via the transport."""
        pass

    @abstractmethod
    def is_connected(self):
        """Check whether the transport is connected.

        :Return: `True` if connected.
        """
        return False

    @abstractmethod
    def disconnect(self):
        """Gracefully disconnect the connection."""
        pass
class StanzaRoute:
    """Base class for objects that can send and receive stanzas.

    NOTE: the class deliberately has no ABCMeta metaclass, so the
    @abstractmethod markers below are documentation-only and do not
    prevent instantiation.
    """
    # pylint: disable=W0232

    @abstractmethod
    def send(self, stanza):
        """Send `stanza` through this route."""
        pass

    @abstractmethod
    def uplink_receive(self, stanza):
        """Handle `stanza` received from the 'uplink'."""
        pass
class XMPPFeatureHandler(metaclass=ABCMeta):
    """Base class for objects handling incoming stanzas.

    Subclasses should provide methods decorated with one of:

    - `iq_get_stanza_handler`: for methods handling ``<iq type="get"/>``
      stanzas
    - `iq_set_stanza_handler`: for methods handling ``<iq type="set"/>``
      stanzas
    - `message_stanza_handler`: for methods handling ``<message />``
      stanzas
    - `presence_stanza_handler`: for methods handling ``<presence />``
      stanzas

    :Ivariables:
        - `stanza_processor`: the stanza processor this object was most
          recently registered with (injected by `StanzaProcessor`)
    :Types:
        - `stanza_processor`: `StanzaProcessor`
    """
    # Injected by StanzaProcessor upon registration.
    stanza_processor = None
def _iq_handler(iq_type, payload_class, payload_key, usage_restriction):
"""Method decorator generator for decorating <iq type='get'/> stanza
handler methods in `XMPPFeatureHandler` subclasses.
:Parameters:
- `payload_class`: payload class expected
- `payload_key`: payload class specific filtering key
- `usage_restriction`: optional usage restriction: "pre-auth" or
"post-auth"
:Types:
- `payload_class`: subclass of `StanzaPayload`
- `usage_restriction`: `str`
"""
def decorator(func):
"""The decorator"""
func._pyxmpp_stanza_handled = ("iq", iq_type)
func._pyxmpp_payload_class_handled = payload_class
func._pyxmpp_payload_key = payload_key
func._pyxmpp_usage_restriction = usage_restriction
return func
return decorator
def iq_get_stanza_handler(payload_class, payload_key = None,
                                usage_restriction = "post-auth"):
    """Method decorator generator marking a `XMPPFeatureHandler` method
    as a handler of ``<iq type="get"/>`` stanzas.

    :Parameters:
        - `payload_class`: payload class expected
        - `payload_key`: payload class specific filtering key
        - `usage_restriction`: optional usage restriction: "pre-auth" or
          "post-auth"
    :Types:
        - `payload_class`: subclass of `StanzaPayload`
        - `usage_restriction`: `str`
    """
    return _iq_handler("get", payload_class, payload_key, usage_restriction)
def iq_set_stanza_handler(payload_class, payload_key = None,
                                usage_restriction = "post-auth"):
    """Method decorator generator marking a `XMPPFeatureHandler` method
    as a handler of ``<iq type="set"/>`` stanzas.

    :Parameters:
        - `payload_class`: payload class expected
        - `payload_key`: payload class specific filtering key
        - `usage_restriction`: optional usage restriction: "pre-auth" or
          "post-auth"
    :Types:
        - `payload_class`: subclass of `StanzaPayload`
        - `usage_restriction`: `str`
    """
    return _iq_handler("set", payload_class, payload_key, usage_restriction)
def _stanza_handler(element_name, stanza_type, payload_class, payload_key,
usage_restriction):
"""Method decorator generator for decorating <message/> or <presence/>
stanza handler methods in `XMPPFeatureHandler` subclasses.
:Parameters:
- `element_name`: "message" or "presence"
- `stanza_type`: expected value of the 'type' attribute of the stanza
- `payload_class`: payload class expected
- `payload_key`: payload class specific filtering key
- `usage_restriction`: optional usage restriction: "pre-auth" or
"post-auth"
:Types:
- `element_name`: `str`
- `stanza_type`: `str`
- `payload_class`: subclass of `StanzaPayload`
- `usage_restriction`: `str`
"""
def decorator(func):
"""The decorator"""
func._pyxmpp_stanza_handled = (element_name, stanza_type)
func._pyxmpp_payload_class_handled = payload_class
func._pyxmpp_payload_key = payload_key
func._pyxmpp_usage_restriction = usage_restriction
return func
return decorator
def message_stanza_handler(stanza_type = None, payload_class = None,
                payload_key = None, usage_restriction = "post-auth"):
    """Method decorator generator marking a `XMPPFeatureHandler` method
    as a handler of ``<message/>`` stanzas.

    :Parameters:
        - `stanza_type`: expected value of the stanza 'type' attribute.
          `None` means all types except 'error'
        - `payload_class`: payload class expected
        - `payload_key`: payload class specific filtering key
        - `usage_restriction`: optional usage restriction: "pre-auth" or
          "post-auth"
    :Types:
        - `stanza_type`: `str`
        - `payload_class`: subclass of `StanzaPayload`
        - `usage_restriction`: `str`
    """
    # A plain <message/> without a 'type' attribute is a "normal" message.
    effective_type = "normal" if stanza_type is None else stanza_type
    return _stanza_handler("message", effective_type, payload_class,
                           payload_key, usage_restriction)
def presence_stanza_handler(stanza_type = None, payload_class = None,
                payload_key = None, usage_restriction = "post-auth"):
    """Method decorator generator marking a `XMPPFeatureHandler` method
    as a handler of ``<presence/>`` stanzas.

    :Parameters:
        - `stanza_type`: expected value of the stanza 'type' attribute
        - `payload_class`: payload class expected
        - `payload_key`: payload class specific filtering key
        - `usage_restriction`: optional usage restriction: "pre-auth" or
          "post-auth"
    :Types:
        - `stanza_type`: `str`
        - `payload_class`: subclass of `StanzaPayload`
        - `usage_restriction`: `str`
    """
    return _stanza_handler("presence", stanza_type, payload_class,
                           payload_key, usage_restriction)
class StanzaPayload(metaclass=ABCMeta):
    """Abstract base class for stanza payload objects.

    Subclasses encapsulate stanza payload data and serve as payload-type
    references in stanza handlers or when requesting a particular
    payload from a stanza.
    """
    @abstractclassmethod
    def from_xml(cls, element):
        """Create a `cls` instance from an XML element.

        :Parameters:
            - `element`: the XML element
        :Types:
            - `element`: :etree:`ElementTree.Element`
        """
        # pylint: disable=E0213
        raise NotImplementedError

    @abstractmethod
    def as_xml(self):
        """Return the XML representation of the payload.

        :returntype: :etree:`ElementTree.Element`
        """
        raise NotImplementedError

    def copy(self):
        """Return a deep copy of self."""
        return deepcopy(self)

    @property
    def handler_key(self):
        """A key which may be used when registering handlers for stanzas
        carrying this payload.  `None` by default."""
        # pylint: disable-msg=R0201
        return None
def payload_element_name(element_name):
    """Class decorator generator for decorating `StanzaPayload`
    subclasses with the XML element qname they handle.

    :Parameters:
        - `element_name`: XML element qname handled by the class
    :Types:
        - `element_name`: `str`
    """
    def decorator(klass):
        """The payload_element_name decorator."""
        # pylint: disable-msg=W0212,W0404
        from .stanzapayload import STANZA_PAYLOAD_CLASSES
        from .stanzapayload import STANZA_PAYLOAD_ELEMENTS
        # A class may be decorated several times, once per element name.
        handled = getattr(klass, "_pyxmpp_payload_element_name", None)
        if handled is None:
            klass._pyxmpp_payload_element_name = [element_name]
        else:
            handled.append(element_name)
        if element_name in STANZA_PAYLOAD_CLASSES:
            logger = logging.getLogger('pyxmpp.payload_element_name')
            logger.warning("Overriding payload class for {0!r}".format(
                element_name))
        STANZA_PAYLOAD_CLASSES[element_name] = klass
        STANZA_PAYLOAD_ELEMENTS[klass].append(element_name)
        return klass
    return decorator
class StreamFeatureHandled(object):
    """Returned by a stream feature handler for a feature that was
    recognized and handled.
    """
    # pylint: disable-msg=R0903
    def __init__(self, feature_name, mandatory = False):
        self.feature_name = feature_name
        self.mandatory = mandatory

    def __repr__(self):
        if not self.mandatory:
            return "StreamFeatureHandled({0!r})".format(self.feature_name)
        return "StreamFeatureHandled({0!r}, mandatory = True)".format(
                                                        self.feature_name)

    def __str__(self):
        return self.feature_name
class StreamFeatureNotHandled(object):
    """Returned by a stream feature handler for a feature that was
    recognized, but not handled.
    """
    # pylint: disable-msg=R0903
    def __init__(self, feature_name, mandatory = False):
        self.feature_name = feature_name
        self.mandatory = mandatory

    def __repr__(self):
        if not self.mandatory:
            return "StreamFeatureNotHandled({0!r})".format(self.feature_name)
        return "StreamFeatureNotHandled({0!r}, mandatory = True)".format(
                                                        self.feature_name)

    def __str__(self):
        return self.feature_name
class StreamFeatureHandler(metaclass=ABCMeta):
    """Base class for stream feature handlers.

    `handle_stream_features` and `make_stream_features` should process
    and populate the ``<stream:features/>`` element as needed.  Other
    methods, decorated with `stream_element_handler`, will be called for
    matching stream elements.
    """
    def handle_stream_features(self, stream, features):
        """Handle features announced by the stream peer.

        [initiator only]

        :Parameters:
            - `stream`: the stream
            - `features`: the features element just received
        :Types:
            - `stream`: `StreamBase`
            - `features`: :etree:`ElementTree.Element`
        :Return:
            - `StreamFeatureHandled` instance if a feature was
              recognized and handled
            - `StreamFeatureNotHandled` instance if a feature was
              recognized but not handled
            - `None` if no feature was recognized
        """
        # pylint: disable-msg=W0613,R0201
        return False

    def make_stream_features(self, stream, features):
        """Update the features element announced by the stream.

        [receiver only]

        :Parameters:
            - `stream`: the stream
            - `features`: the features element about to be sent
        :Types:
            - `stream`: `StreamBase`
            - `features`: :etree:`ElementTree.Element`
        """
        # pylint: disable-msg=W0613,R0201
        return False
def stream_element_handler(element_name, usage_restriction = None):
    """Method decorator generator for decorating stream element handler
    methods in `StreamFeatureHandler` subclasses.

    :Parameters:
        - `element_name`: stream element QName
        - `usage_restriction`: optional usage restriction: "initiator"
          or "receiver"
    :Types:
        - `element_name`: `str`
        - `usage_restriction`: `str`
    """
    def decorator(func):
        """Attach the handler metadata to the decorated method."""
        func._pyxmpp_stream_element_handled = element_name
        func._pyxmpp_usage_restriction = usage_restriction
        return func
    return decorator
class _NO_CHANGE(object):
"""Class for the `NO_CHANGE` constant.
"""
# pylint: disable=C0103,R0903
def __str__(self):
return "NO_CHANGE"
def __repr__(self):
return "NO_CHANGE"
# Module-level singleton used as a "keep the current value" default for
# some function arguments; the class is deleted so no further instances
# can be created.
NO_CHANGE = _NO_CHANGE()
del _NO_CHANGE
# vi: sts=4 et sw=4
| |
from __future__ import unicode_literals
import json
from django.test import TestCase, override_settings
from django.utils.http import urlquote
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, Group
from django.core.files.uploadedfile import SimpleUploadedFile
from django.template.defaultfilters import filesizeformat
# Get the chars that Django considers safe to leave unescaped in a URL.
# This list changed in Django 1.8:
# https://github.com/django/django/commit/e167e96cfea670422ca75d0b35fe7c4195f25b63
try:
    from django.utils.http import RFC3986_SUBDELIMS
    urlquote_safechars = RFC3986_SUBDELIMS + str('/~:@')
except ImportError:  # < Django 1.8
    urlquote_safechars = '/'
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Collection, GroupCollectionPermission
from wagtail.wagtailimages.utils import generate_signature
from .utils import Image, get_test_image_file
class TestImageIndexView(TestCase, WagtailTestUtils):
    """Tests for the image listing (index) view."""

    def setUp(self):
        self.login()

    def get(self, params={}):
        return self.client.get(reverse('wagtailimages:index'), params)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
        self.assertContains(response, "Add an image")

    def test_search(self):
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        # Out-of-range and non-numeric page numbers must not crash the view.
        for page_number in ('0', '1', '-1', '9999', 'Not a page'):
            response = self.get({'p': page_number})
            self.assertEqual(response.status_code, 200)

    def test_ordering(self):
        for ordering in ('title', '-created_at'):
            response = self.get({'ordering': ordering})
            self.assertEqual(response.status_code, 200)
class TestImageAddView(TestCase, WagtailTestUtils):
    """Tests for the image upload (add) view."""

    def setUp(self):
        self.login()

    def get(self, params={}):
        return self.client.get(reverse('wagtailimages:add'), params)

    def post(self, post_data={}):
        return self.client.post(reverse('wagtailimages:add'), post_data)

    def test_get(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
        # as standard, only the root collection exists and so no 'Collection' option
        # is displayed on the form
        self.assertNotContains(response, '<label for="id_collection">')

    def test_get_with_collections(self):
        # With a second collection the form must show a collection chooser.
        root_collection = Collection.get_first_root_node()
        root_collection.add_child(name="Evil plans")
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
        self.assertContains(response, '<label for="id_collection">')
        self.assertContains(response, "Evil plans")

    def test_add(self):
        response = self.post({
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image was created
        images = Image.objects.filter(title="Test image")
        self.assertEqual(images.count(), 1)
        # Test that size was populated correctly
        image = images.first()
        self.assertEqual(image.width, 640)
        self.assertEqual(image.height, 480)
        # Test that the file_size field was set
        self.assertTrue(image.file_size)
        # Test that it was placed in the root collection
        root_collection = Collection.get_first_root_node()
        self.assertEqual(image.collection, root_collection)

    @override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
    def test_add_with_external_file_storage(self):
        response = self.post({
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image was created
        self.assertTrue(Image.objects.filter(title="Test image").exists())

    def test_add_no_file_selected(self):
        response = self.post({
            'title': "Test image",
        })
        # Shouldn't redirect anywhere
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
        # The form should have an error
        self.assertFormError(response, 'form', 'file', "This field is required.")

    @override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1)
    def test_add_too_large_file(self):
        # With a 1-byte upload limit every real image exceeds the maximum.
        file_content = get_test_image_file().file.getvalue()
        response = self.post({
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', file_content),
        })
        # Shouldn't redirect anywhere
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
        # The form should have an error
        self.assertFormError(
            response, 'form', 'file',
            "This file is too big ({file_size}). Maximum filesize {max_file_size}.".format(
                file_size=filesizeformat(len(file_content)),
                max_file_size=filesizeformat(1),
            )
        )

    def test_add_with_collections(self):
        root_collection = Collection.get_first_root_node()
        evil_plans_collection = root_collection.add_child(name="Evil plans")
        response = self.post({
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
            'collection': evil_plans_collection.id,
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image was created
        images = Image.objects.filter(title="Test image")
        self.assertEqual(images.count(), 1)
        # Test that it was placed in the Evil Plans collection
        image = images.first()
        self.assertEqual(image.collection, evil_plans_collection)
class TestImageAddViewWithLimitedCollectionPermissions(TestCase, WagtailTestUtils):
    """Tests for the add view as a user whose image-add permission is
    restricted to a single (non-root) collection."""

    def setUp(self):
        add_image_permission = Permission.objects.get(
            content_type__app_label='wagtailimages', codename='add_image'
        )
        admin_permission = Permission.objects.get(
            content_type__app_label='wagtailadmin', codename='access_admin'
        )
        root_collection = Collection.get_first_root_node()
        self.evil_plans_collection = root_collection.add_child(name="Evil plans")
        # Group that may enter the admin and add images, but only within
        # the 'Evil plans' collection.
        conspirators_group = Group.objects.create(name="Evil conspirators")
        conspirators_group.permissions.add(admin_permission)
        GroupCollectionPermission.objects.create(
            group=conspirators_group,
            collection=self.evil_plans_collection,
            permission=add_image_permission
        )
        user = get_user_model().objects.create_user(
            username='moriarty',
            email='moriarty@example.com',
            password='password'
        )
        user.groups.add(conspirators_group)
        self.client.login(username='moriarty', password='password')

    def get(self, params={}):
        return self.client.get(reverse('wagtailimages:add'), params)

    def post(self, post_data={}):
        return self.client.post(reverse('wagtailimages:add'), post_data)

    def test_get(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
        # user only has access to one collection, so no 'Collection' option
        # is displayed on the form
        self.assertNotContains(response, '<label for="id_collection">')

    def test_add(self):
        response = self.post({
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        })
        # User should be redirected back to the index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Image should be created in the 'evil plans' collection,
        # despite there being no collection field in the form, because that's the
        # only one the user has access to
        self.assertTrue(Image.objects.filter(title="Test image").exists())
        self.assertEqual(
            Image.objects.get(title="Test image").collection,
            self.evil_plans_collection
        )
class TestImageEditView(TestCase, WagtailTestUtils):
    """Tests for the image edit view (wagtailimages:edit)."""

    def setUp(self):
        self.login()
        # Create an image to edit
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def get(self, params=None):
        # ``params=None`` instead of a mutable ``{}`` default avoids sharing
        # one dict object across calls.
        return self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)), params or {})

    def post(self, post_data=None):
        return self.client.post(reverse('wagtailimages:edit', args=(self.image.id,)), post_data or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')

    @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True)
    def test_with_usage_count(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
        self.assertContains(response, "Used 0 times")
        expected_url = '/admin/images/usage/%d/' % self.image.id
        self.assertContains(response, expected_url)

    @override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
    def test_simple_with_external_storage(self):
        # The view calls get_file_size on the image that closes the file if
        # file_size wasn't previously populated.
        # The view then attempts to reopen the file when rendering the template
        # which caused crashes when certain storage backends were in use.
        # See #1397
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')

    def test_edit(self):
        response = self.post({
            'title': "Edited",
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image was edited
        image = Image.objects.get(id=self.image.id)
        self.assertEqual(image.title, "Edited")

    def test_edit_with_new_image_file(self):
        file_content = get_test_image_file().file.getvalue()
        # Change the file size of the image
        self.image.file_size = 100000
        self.image.save()
        response = self.post({
            'title': "Edited",
            'file': SimpleUploadedFile('new.png', file_content),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image file size changed (assume it changed to the correct value)
        image = Image.objects.get(id=self.image.id)
        self.assertNotEqual(image.file_size, 100000)

    @override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
    def test_edit_with_new_image_file_and_external_storage(self):
        file_content = get_test_image_file().file.getvalue()
        # Change the file size of the image
        self.image.file_size = 100000
        self.image.save()
        response = self.post({
            'title': "Edited",
            'file': SimpleUploadedFile('new.png', file_content),
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image file size changed (assume it changed to the correct value)
        image = Image.objects.get(id=self.image.id)
        self.assertNotEqual(image.file_size, 100000)

    def test_with_missing_image_file(self):
        self.image.file.delete(False)
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
class TestImageDeleteView(TestCase, WagtailTestUtils):
    """Tests for the image delete confirmation view (wagtailimages:delete)."""

    def setUp(self):
        self.login()
        # Create an image to delete
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def get(self, params=None):
        # ``params=None`` instead of a mutable ``{}`` default avoids sharing
        # one dict object across calls.
        return self.client.get(reverse('wagtailimages:delete', args=(self.image.id,)), params or {})

    def post(self, post_data=None):
        return self.client.post(reverse('wagtailimages:delete', args=(self.image.id,)), post_data or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')

    def test_delete(self):
        # Any POST body confirms the deletion
        response = self.post({
            'hello': 'world'
        })
        # Should redirect back to index
        self.assertRedirects(response, reverse('wagtailimages:index'))
        # Check that the image was deleted
        images = Image.objects.filter(title="Test image")
        self.assertEqual(images.count(), 0)
class TestImageChooserView(TestCase, WagtailTestUtils):
    """Tests for the image chooser modal view (wagtailimages:chooser)."""

    def setUp(self):
        self.login()

    def get(self, params=None):
        # ``params=None`` instead of a mutable ``{}`` default avoids sharing
        # one dict object across calls.
        return self.client.get(reverse('wagtailimages:chooser'), params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
        self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')

    def test_search(self):
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        # Out-of-range and non-numeric page values must not crash the view
        pages = ['0', '1', '-1', '9999', 'Not a page']
        for page in pages:
            response = self.get({'p': page})
            self.assertEqual(response.status_code, 200)

    def test_filter_by_tag(self):
        for i in range(0, 10):
            image = Image.objects.create(
                title="Test image %d is even better than the last one" % i,
                file=get_test_image_file(),
            )
            if i % 2 == 0:
                image.tags.add('even')
        response = self.get({'tag': "even"})
        self.assertEqual(response.status_code, 200)
        # Results should include images tagged 'even'
        self.assertContains(response, "Test image 2 is even better")
        # Results should not include images that just have 'even' in the title
        self.assertNotContains(response, "Test image 3 is even better")
class TestImageChooserChosenView(TestCase, WagtailTestUtils):
    """Tests for the chooser's "image chosen" response (wagtailimages:image_chosen)."""

    def setUp(self):
        self.login()
        # Create an image to choose
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def get(self, params=None):
        # ``params=None`` instead of a mutable ``{}`` default avoids sharing
        # one dict object across calls.
        return self.client.get(reverse('wagtailimages:image_chosen', args=(self.image.id,)), params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/chooser/image_chosen.js')

    # TODO: Test posting
class TestImageChooserUploadView(TestCase, WagtailTestUtils):
    """Tests for the chooser's inline upload form (wagtailimages:chooser_upload)."""

    def setUp(self):
        self.login()

    def get(self, params=None):
        # ``params=None`` instead of a mutable ``{}`` default avoids sharing
        # one dict object across calls.
        return self.client.get(reverse('wagtailimages:chooser_upload'), params or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
        self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')

    def test_upload(self):
        response = self.client.post(reverse('wagtailimages:chooser_upload'), {
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        })
        # Check response
        self.assertEqual(response.status_code, 200)
        # Check that the image was created
        images = Image.objects.filter(title="Test image")
        self.assertEqual(images.count(), 1)
        # Test that size was populated correctly
        image = images.first()
        self.assertEqual(image.width, 640)
        self.assertEqual(image.height, 480)

    def test_upload_no_file_selected(self):
        response = self.client.post(reverse('wagtailimages:chooser_upload'), {
            'title': "Test image",
        })
        # Shouldn't redirect anywhere
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
        # The form should have an error
        self.assertFormError(response, 'uploadform', 'file', "This field is required.")

    @override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage')
    def test_upload_with_external_storage(self):
        response = self.client.post(reverse('wagtailimages:chooser_upload'), {
            'title': "Test image",
            'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        })
        # Check response
        self.assertEqual(response.status_code, 200)
        # Check that the image was created
        self.assertTrue(Image.objects.filter(title="Test image").exists())
class TestMultipleImageUploader(TestCase, WagtailTestUtils):
    """
    This tests the multiple image upload views located in wagtailimages/views/multiple.py
    """
    # NOTE: the add/edit/delete POST endpoints below are AJAX-only — requests
    # without the X-Requested-With header are rejected with a 400.
    def setUp(self):
        self.login()
        # Create an image for running tests on
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
    def test_add(self):
        """
        This tests that the add view responds correctly on a GET request
        """
        # Send request
        response = self.client.get(reverse('wagtailimages:add_multiple'))
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
    @override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1000)
    def test_add_max_file_size_context_variables(self):
        # The configured upload limit must reach the template context, both as
        # a number and as a pre-rendered error message.
        response = self.client.get(reverse('wagtailimages:add_multiple'))
        self.assertEqual(response.context['max_filesize'], 1000)
        self.assertEqual(
            response.context['error_max_file_size'], "This file is too big. Maximum filesize 1000\xa0bytes."
        )
    def test_add_post(self):
        """
        This tests that a POST request to the add view saves the image and returns an edit form
        """
        response = self.client.post(reverse('wagtailimages:add_multiple'), {
            'files[]': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
        # Check image: title defaults to the uploaded filename
        self.assertIn('image', response.context)
        self.assertEqual(response.context['image'].title, 'test.png')
        self.assertTrue(response.context['image'].file_size)
        # Check form
        self.assertIn('form', response.context)
        self.assertEqual(response.context['form'].initial['title'], 'test.png')
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('image_id', response_json)
        self.assertIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['image_id'], response.context['image'].id)
        self.assertTrue(response_json['success'])
    def test_add_post_noajax(self):
        """
        This tests that only AJAX requests are allowed to POST to the add view
        """
        response = self.client.post(reverse('wagtailimages:add_multiple'), {})
        # Check response
        self.assertEqual(response.status_code, 400)
    def test_add_post_nofile(self):
        """
        This tests that the add view checks for a file when a user POSTs to it
        """
        response = self.client.post(reverse('wagtailimages:add_multiple'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 400)
    def test_add_post_badfile(self):
        """
        This tests that the add view checks for a file when a user POSTs to it
        """
        # A non-image payload is accepted (HTTP 200) but answered with a JSON
        # error rather than a saved image.
        response = self.client.post(reverse('wagtailimages:add_multiple'), {
            'files[]': SimpleUploadedFile('test.png', b"This is not an image!"),
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertNotIn('image_id', response_json)
        self.assertNotIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertIn('error_message', response_json)
        self.assertFalse(response_json['success'])
        self.assertEqual(
            response_json['error_message'], "Not a supported image format. Supported formats: GIF, JPEG, PNG."
        )
    def test_edit_get(self):
        """
        This tests that a GET request to the edit view returns a 405 "METHOD NOT ALLOWED" response
        """
        # Send request
        response = self.client.get(reverse('wagtailimages:edit_multiple', args=(self.image.id, )))
        # Check response
        self.assertEqual(response.status_code, 405)
    def test_edit_post(self):
        """
        This tests that a POST request to the edit view edits the image
        """
        # Send request; field names are prefixed "image-<id>-"
        response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
            ('image-%d-title' % self.image.id): "New title!",
            ('image-%d-tags' % self.image.id): "",
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('image_id', response_json)
        self.assertNotIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['image_id'], self.image.id)
        self.assertTrue(response_json['success'])
    def test_edit_post_noajax(self):
        """
        This tests that a POST request to the edit view without AJAX returns a 400 response
        """
        # Send request
        response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
            ('image-%d-title' % self.image.id): "New title!",
            ('image-%d-tags' % self.image.id): "",
        })
        # Check response
        self.assertEqual(response.status_code, 400)
    def test_edit_post_validation_error(self):
        """
        This tests that a POST request to the edit page returns a json document with "success=False"
        and a form with the validation error indicated
        """
        # Send request
        response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
            ('image-%d-title' % self.image.id): "",  # Required
            ('image-%d-tags' % self.image.id): "",
        }, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
        # Check that a form error was raised
        self.assertFormError(response, 'form', 'title', "This field is required.")
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('image_id', response_json)
        self.assertIn('form', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['image_id'], self.image.id)
        self.assertFalse(response_json['success'])
    def test_delete_get(self):
        """
        This tests that a GET request to the delete view returns a 405 "METHOD NOT ALLOWED" response
        """
        # Send request
        response = self.client.get(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
        # Check response
        self.assertEqual(response.status_code, 405)
    def test_delete_post(self):
        """
        This tests that a POST request to the delete view deletes the image
        """
        # Send request
        response = self.client.post(reverse(
            'wagtailimages:delete_multiple', args=(self.image.id, )
        ), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        # Make sure the image is deleted
        self.assertFalse(Image.objects.filter(id=self.image.id).exists())
        # Check JSON
        response_json = json.loads(response.content.decode())
        self.assertIn('image_id', response_json)
        self.assertIn('success', response_json)
        self.assertEqual(response_json['image_id'], self.image.id)
        self.assertTrue(response_json['success'])
    def test_delete_post_noajax(self):
        """
        This tests that a POST request to the delete view without AJAX returns a 400 response
        """
        # Send request
        response = self.client.post(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
        # Check response
        self.assertEqual(response.status_code, 400)
class TestURLGeneratorView(TestCase, WagtailTestUtils):
    """Tests the URL generator page for an image."""

    def setUp(self):
        # One image fixture shared by every test
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
        self.user = self.login()

    def test_get(self):
        """A user with edit permission on the image can open the page."""
        url = reverse('wagtailimages:url_generator', args=(self.image.id, ))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/url_generator.html')

    def test_get_bad_permissions(self):
        """A user without edit permission is redirected to the admin home."""
        # Strip superuser status; keep only admin access
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label='wagtailadmin', codename='access_admin'
            )
        )
        self.user.save()
        url = reverse('wagtailimages:url_generator', args=(self.image.id, ))
        response = self.client.get(url)
        self.assertRedirects(response, reverse('wagtailadmin_home'))
class TestGenerateURLView(TestCase, WagtailTestUtils):
    """Tests the JSON endpoint that produces signed image URLs."""

    def setUp(self):
        # One image fixture shared by every test
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
        self.user = self.login()

    def test_get(self):
        """A permitted user receives the signed URL plus a preview URL."""
        response = self.client.get(
            reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600'))
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        payload = json.loads(response.content.decode())
        self.assertEqual(set(payload.keys()), {'url', 'preview_url'})
        signature = urlquote(
            generate_signature(self.image.id, 'fill-800x600').decode(),
            safe=urlquote_safechars
        )
        expected_url = 'http://localhost/images/%(signature)s/%(image_id)d/fill-800x600/' % {
            'signature': signature,
            'image_id': self.image.id,
        }
        self.assertEqual(payload['url'], expected_url)
        self.assertEqual(
            payload['preview_url'],
            reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600'))
        )

    def test_get_bad_permissions(self):
        """A user without edit permission gets a 403 JSON error."""
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()
        response = self.client.get(
            reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600'))
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertJSONEqual(response.content.decode(), json.dumps({
            'error': 'You do not have permission to generate a URL for this image.',
        }))

    def test_get_bad_image(self):
        """A nonexistent image id gets a 404 JSON error."""
        response = self.client.get(
            reverse('wagtailimages:generate_url', args=(self.image.id + 1, 'fill-800x600'))
        )
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertJSONEqual(response.content.decode(), json.dumps({
            'error': 'Cannot find image.',
        }))

    def test_get_bad_filter_spec(self):
        """An invalid filter spec gets a 400 JSON error."""
        response = self.client.get(
            reverse('wagtailimages:generate_url', args=(self.image.id, 'bad-filter-spec'))
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertJSONEqual(response.content.decode(), json.dumps({
            'error': 'Invalid filter spec.',
        }))
class TestPreviewView(TestCase, WagtailTestUtils):
    """Tests the image preview endpoint used by the URL generator."""

    def setUp(self):
        # One image fixture shared by every test
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
        self.user = self.login()

    def test_get(self):
        """A valid filter spec renders the image as a PNG."""
        url = reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600'))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'image/png')

    def test_get_invalid_filter_spec(self):
        """An invalid filter spec yields a 400 response.

        Very unlikely in practice: a signature would have to be created for
        the invalid spec, which Wagtail's built-in URL generator won't do.
        Worth covering anyway.
        """
        url = reverse('wagtailimages:preview', args=(self.image.id, 'bad-filter-spec'))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
class TestEditOnlyPermissions(TestCase, WagtailTestUtils):
    """Behaviour of the image views for a user who may edit but not add."""

    def setUp(self):
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
        # A group carrying change_image (but NOT add_image) on the root
        # collection, plus admin access; log in as its only member.
        editor = get_user_model().objects.create_user(
            username='changeonly', email='changeonly@example.com', password='password'
        )
        group = Group.objects.create(name='Image changers')
        group.permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        GroupCollectionPermission.objects.create(
            group=group,
            collection=Collection.get_first_root_node(),
            permission=Permission.objects.get(
                content_type__app_label='wagtailimages', codename='change_image'
            )
        )
        editor.groups.add(group)
        self.assertTrue(self.client.login(username='changeonly', password='password'))

    def test_get_index(self):
        response = self.client.get(reverse('wagtailimages:index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
        # No "Add an image" button without add permission...
        self.assertNotContains(response, "Add an image")
        # ...but images owned by other users are still listed
        self.assertContains(response, "Test image")

    def test_search(self):
        response = self.client.get(reverse('wagtailimages:index'), {'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    def test_get_add(self):
        # Adding is denied: redirect to the admin homepage
        response = self.client.get(reverse('wagtailimages:add'))
        self.assertRedirects(response, reverse('wagtailadmin_home'))

    def test_get_edit(self):
        response = self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')

    def test_get_delete(self):
        response = self.client.get(reverse('wagtailimages:delete', args=(self.image.id,)))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')

    def test_get_add_multiple(self):
        # Multi-upload is likewise denied
        response = self.client.get(reverse('wagtailimages:add_multiple'))
        self.assertRedirects(response, reverse('wagtailadmin_home'))
class TestImageAddMultipleView(TestCase, WagtailTestUtils):
    """Access control for the multiple-image upload page."""

    def test_as_superuser(self):
        self.login()
        response = self.client.get(reverse('wagtailimages:add_multiple'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')

    def test_as_ordinary_editor(self):
        # An editor whose group has add_image on the root collection
        # plus admin access can also reach the page.
        editor = get_user_model().objects.create_user(
            username='editor', email='editor@email.com', password='password'
        )
        group = Group.objects.create(name='Image adders')
        group.permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        GroupCollectionPermission.objects.create(
            group=group,
            collection=Collection.get_first_root_node(),
            permission=Permission.objects.get(
                content_type__app_label='wagtailimages', codename='add_image'
            )
        )
        editor.groups.add(group)
        self.client.login(username='editor', password='password')
        response = self.client.get(reverse('wagtailimages:add_multiple'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
| |
def for_grandpa():
    """Interactively build, edit and save a book index.

    The index maps names ("Last, First ..."), dates and phrases to lists of
    page numbers, and is persisted to a plain text file chosen by the user.
    Fixes over the previous revision: file handles are closed via ``with``,
    the regex is a raw string, and an unrecognised "edit" answer no longer
    raises NameError on an unbound ``key``.

    Future Additions:
    1. Mulitple page #s per entry ---------------- DONE -----
    2. Last name then first name ----------------- DONE -----
    3. Corrections to the index (remove data) ---- DONE -----
    4. Saving an index as a file (.txt?) --------- DONE -----
    5. Naming each index ------------------------- DONE -----
    6. Correcting naming scheme ------------------ DONE -----
    """
    import re

    def _normalise_name(raw_name):
        # "First Middle Last" -> "Last, First Middle". Names that already
        # contain a comma or are a single word pass through unchanged.
        if "," in raw_name:
            return raw_name
        parts = raw_name.split()
        if len(parts) <= 1:
            return raw_name
        return parts[-1] + "," + "".join(" " + part for part in parts[:-1])

    def _record_pages(index, key, pages_text):
        # Parse "34, 32, 56" (optionally with trailing comma/newline) into
        # ints and append them to index[key].
        for token in (pages_text.strip() + ",").split():
            index.setdefault(key, []).append(int(token[:-1]))

    dict1 = {}
    print()
    index_name = input("What is the name of the index you want to create or open? ")
    print()
    if not index_name.endswith(".txt"):
        index_name += ".txt"
    try:
        # Load any entries already stored in the file (skip the 5 header lines).
        with open(index_name) as file:
            lines = file.readlines()
        print("Continuing the text file " + index_name)
        for line in lines[5:]:
            match = re.match(r"(.+)\s\.{4,55}\s([\d\s,]+)", line)
            if match:
                _record_pages(dict1, match.group(1), match.group(2))
    except FileNotFoundError:
        # First use: just create an empty file.
        with open(index_name, "a+"):
            pass
        print("Creating a new text file called " + index_name)
    print()
    flag = True
    while flag:
        answer1 = input('Do you want to input, edit, or save? (Type either "input", "edit", or "save") ')
        print()
        # Accept answers with or without the surrounding quotes shown in the prompt.
        if answer1.startswith(("S", "s", '"S', '"s')):
            flag = False
        elif answer1.startswith(("E", "e", '"E', '"e')):
            to_be_edited = input("Do you want to edit a name, date, or phrase? ")
            print()
            key = None  # stays None when the answer isn't name/date/phrase
            if to_be_edited.startswith(("N", "n")):
                key = _normalise_name(input("What is their name? "))
            elif to_be_edited.startswith(("D", "d")):
                key = input("What date do you want to edit? ")
                print()
            elif to_be_edited.startswith(("P", "p")):
                key = input("What phrase do you want to edit? (Must be exact) ")
                print()
            if key in dict1:
                pages = ", ".join(str(i) for i in sorted(dict1[key]))
                print("The page numbers associated with that entry are: " +
                      pages)
                remove = input("Please type here all the page numbers you'd like to remove (Ex: 28, 57, 90): ")
                print()
                to_remove = [int(item[:-1]) for item in (remove + ",").split()]
                dict1[key] = list(set(dict1[key]) - set(to_remove))
            else:
                print("It looks like the term you entered isn't stored in the file. Perhaps you mistyped it?")
                print()
        else:
            answer2 = input("Do you want to input a name, date, or phrase? ")
            print()
            if answer2.startswith(("N", "n")):
                name = _normalise_name(input("What is their name? "))
                pages = input(
                    "What pages was the name on? (Ex: 34, 32, 56, 338) ")
                print()
                _record_pages(dict1, name, pages)
            elif answer2.startswith(("D", "d")):
                date = input("What date do you want to input? ")
                print()
                pages = input(
                    "What pages was the date on? (Ex: 34, 32, 56, 338) ")
                print()
                _record_pages(dict1, date, pages)
            elif answer2.startswith(("P", "p")):
                phrase = input("What phrase do you want to input? ")
                print()
                pages = input(
                    "What pages was the phrase on? (Ex: 34, 32, 56, 338) ")
                print()
                _record_pages(dict1, phrase, pages)
    # Rewrite the whole file from the in-memory index.
    # NOTICE: deletes the file and then rewrites it.
    with open(index_name, "w") as file:
        file.write(index_name)
        file.write("\n\n\n")
        file.write("Name/Date/Phrase " + 39 * "." + " Page Numbers")
        file.write("\n\n")
        for key, values in sorted(dict1.items()):
            pages = ", ".join(str(value) for value in sorted(values))
            file.write(key + " " + ((55 - len(key)) * ".") + " " + pages)
            file.write("\n\n")
    # Echo the saved index to the screen.
    print("\n" * 5)
    print(index_name)
    print()
    print()
    print("Name/Date/Phrase " + 39 * "." + " Page Numbers")
    print()
    for key, values in sorted(dict1.items()):
        pages = ", ".join(str(value) for value in sorted(values))
        print(key + " " + ((55 - len(key)) * ".") + " " + pages)
        print()


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on input().
    for_grandpa()
| |
import pygame, random
from gameUtils import loadImage, loadSoundFile, collide_edges
from config import *
# ----------------------------------------------------
class Mario(pygame.sprite.Sprite):
    """Player sprite: keyboard-driven movement, jump physics, run/stand
    animation, fireball shooting and collision response against BLOCKS
    and ENEMIES (both module-level groups from config)."""

    def __init__(self, screen):
        # screen: the display surface; only its rect is kept, for clamping.
        pygame.sprite.Sprite.__init__(self)
        image, rect = loadImage('mario.png', -1)
        imagerun, rect = loadImage('mario_run.png', -1)
        # 2x-scaled stand/run frames plus horizontally flipped copies for
        # facing left (flip args: x-flip=1, y-flip=0).
        self.stand = pygame.transform.scale2x(image).convert()
        self.run = pygame.transform.scale2x(imagerun).convert()
        self.standL = pygame.transform.flip(self.stand, 1, 0).convert()
        self.runL = pygame.transform.flip(self.run, 1, 0).convert()
        self.image = self.stand
        self.rect = self.image.get_rect()
        self.area = screen.get_rect()
        self.rect.topleft = (250, 200)
        self.x_vel, self.y_vel = 0, 0
        self.jumping = False  # NOTE(review): set here but never read in this class
        self.midair = True
        self.walkingRight = True
        self.running = False
        self.fireball_lock = False  # debounce: one fireball per key press
        self.jump_count = 0
        self.score = 0
        self.isDead = False
        self.animTick = 0  # frame counter driving the walk animation
        self.jumpFX = loadSoundFile("jump.wav")
        self.hitFX = loadSoundFile("hit.wav")

    def update(self):
        """Advance Mario by one frame: input, animation, physics, collisions."""
        # get all keys currently being pressed
        keys = pygame.key.get_pressed()
        self.running = False
        # increase/decrease x_vel if left/right keys are being pressed
        # then reduce x_vel according to friction
        if keys[pygame.K_RIGHT]:
            self.x_vel += MARIO_X_ACC
            self.walkingRight = True
            self.running = True
        if keys[pygame.K_LEFT]:
            self.x_vel -= MARIO_X_ACC
            self.running = True
            self.walkingRight = False
        self.x_vel *= MARIO_FRICTION
        # Choose the sprite frame; the rect is rebuilt from the new image and
        # re-anchored at the old midbottom so the feet stay put.
        if self.midair:
            self.animTick = 0
            oldrect = self.rect
            self.image = self.run
            self.rect = self.image.get_rect()
            self.rect.midbottom = oldrect.midbottom
            if not self.walkingRight:
                self.image = self.runL
        elif not self.running:
            self.animTick = 0
            oldrect = self.rect
            self.image = self.stand
            self.rect = self.image.get_rect()
            self.rect.midbottom = oldrect.midbottom
            if not self.walkingRight:
                self.image = self.standL
        else:
            # Walking on the ground: toggle stand/run every 10 ticks.
            self.animTick += 1
            if self.animTick == 10:
                self.animTick = 0
                oldrect = self.rect
                if self.walkingRight:
                    if self.image == self.stand or self.image == self.standL:
                        self.image = self.run
                    else:
                        self.image = self.stand
                else:
                    if self.image == self.run or self.image == self.runL:
                        self.image = self.standL
                    else:
                        self.image = self.runL
                self.rect = self.image.get_rect()
                self.rect.midbottom = oldrect.midbottom
        # if jumping, set y_vel to jump velocity
        if keys[pygame.K_SPACE] and not self.midair:
            self.jumpFX.play()
            self.jump_count += 1
            self.midair = True
            self.y_vel = MARIO_JUMP_VEL
        # effect of gravity pulling Mario to earth
        self.y_vel += MARIO_GRAVITY
        # is Mario shooting a fireball?
        if keys[pygame.K_e]:
            if not self.fireball_lock and Fireball.count <= MARIO_MAX_FIREBALLS :
                self.fireball_lock = True
                # Spawn at the side Mario is facing.
                pos = self.rect.midright
                if not self.walkingRight:
                    pos = self.rect.midleft
                FIREBALLS.add( Fireball( pos, self.walkingRight ) )
        else:
            # Key released: re-arm the fireball trigger.
            if self.fireball_lock:
                self.fireball_lock = False
        # move Mario
        self.rect.move_ip((self.x_vel, self.y_vel))
        # check collision with blocks. If Mario's underneath, stop him. If above, Mario
        # stands on blocks. If colliding from the side, he stops moving horizontally
        for block in BLOCKS:
            if block.rect.colliderect(self.rect):
                top, right, bottom, left = collide_edges(self.rect, block.rect)
                if top:
                    self.y_vel = 0
                    self.rect.top = block.rect.bottom
                elif bottom:
                    self.y_vel = 0
                    self.rect.bottom = block.rect.top
                    self.midair = False
                elif right:
                    self.x_vel = 0
                    self.rect.right = block.rect.left
                elif left:
                    self.x_vel = 0
                    self.rect.left = block.rect.right
        # if he jumps on top of a shell, shell dies and mario gets another point.
        # mario bounces of shell, shell falls off screen. If he hits shell otherwise,
        # game ends
        already_rebounded = False  # only the first stomp this frame reverses y_vel
        for shell in ENEMIES:
            if shell.rect.colliderect(self.rect):
                top, right, bottom, left = collide_edges(self.rect, shell.rect)
                if bottom:
                    if not already_rebounded:
                        self.y_vel *= -1
                        already_rebounded = True
                    shell.fall()
                    self.hitFX.play()
                    self.score += 1
                elif top:
                    self.isDead = True
                elif right:
                    self.isDead = True
                elif left:
                    self.isDead = True
        # Clamp to the screen; the floor sits 45px above the bottom edge.
        if self.rect.left < 0:
            self.rect.left = 0
        elif self.rect.right > self.area.width:
            self.rect.right = self.area.width
        if self.rect.bottom > self.area.height - 45:
            self.rect.bottom = self.area.height - 45
            self.y_vel = 0
            self.midair = False
        elif self.rect.top < 0:
            self.rect.top = 0
# ----------------------------------------------------
class Fireball(pygame.sprite.Sprite):
    """A fireball projectile shot by Mario.

    Class attributes cache the shared images/sound effect and track the
    number of live fireballs (``count``) and ever-created ones
    (``total_count``).
    """
    image = None
    blueimage = None
    sound = None
    count = 0
    total_count = 0

    def __init__(self, start_pos, goingRight):
        pygame.sprite.Sprite.__init__(self)
        Fireball.total_count += 1
        # Lazily load the shared assets on the first instantiation.
        if Fireball.image is None:
            Fireball.image, _ = loadImage("fireball.png", -1)
            Fireball.blueimage, _ = loadImage("bluefireball.png", -1)
            Fireball.FX = loadSoundFile("fireball.wav")
        # Roughly one fireball in ten comes out blue.
        self.blue = random.choice( range(1,11) ) == 10
        if self.blue:
            self.image = Fireball.blueimage
            self.vel_y = BLUE_Y_VEL
            self.vel_x = random.choice( BLUE_X_VELS )
        else:
            self.image = Fireball.image
            self.vel_y = RED_Y_VEL
            self.vel_x = random.choice( RED_X_VELS )
        # Mirror the horizontal velocity when Mario faces left.
        if not goingRight:
            self.vel_x = -self.vel_x
        self.rect = self.image.get_rect()
        self.rect.center = start_pos
        Fireball.FX.play()

    def update(self):
        # Keep the class-wide live count in sync with the sprite group.
        Fireball.count = len( FIREBALLS )
        self._move()

    def _move(self):
        # Apply gravity, then advance the fireball.
        self.vel_y += FIREBALL_GRAV
        self.rect.move_ip((self.vel_x, self.vel_y))
        # Bounce off blocks: vertical hits stop/flip vel_y, side hits flip vel_x.
        for block in BLOCKS:
            if not block.rect.colliderect(self.rect):
                continue
            top, right, bottom, left = collide_edges(self.rect, block.rect)
            if top:
                self.vel_y = 0
                self.rect.top = block.rect.bottom
            elif bottom:
                self.vel_y = -self.vel_y
                self.rect.bottom = block.rect.top
            elif right:
                self.vel_x = -self.vel_x
                self.rect.right = block.rect.left
            elif left:
                self.vel_x = -self.vel_x
                self.rect.left = block.rect.right
        # Remove fireballs that leave the screen horizontally.
        if self.rect.right < 0 or self.rect.left > WIDTH:
            self.kill()
        # Bounce off the ground at the colour-appropriate launch speed.
        if self.rect.bottom >= HEIGHT - 45:
            self.rect.bottom = HEIGHT - 45
            self.vel_y = BLUE_Y_VEL if self.blue else RED_Y_VEL
# ----------------------------------------------------
class Shell(pygame.sprite.Sprite):
    """An enemy shell that slides along the floor until stomped."""
    image = None

    def __init__(self, screen, start_pos):
        pygame.sprite.Sprite.__init__(self)
        # Load and cache the (2x scaled) shell image once for all shells.
        if Shell.image is None:
            raw, _ = loadImage("shell.png", -1)
            Shell.image = pygame.transform.scale2x(raw).convert()
        self.image = Shell.image
        self.rect = self.image.get_rect()
        self.rect.center = start_pos
        self.area = screen.get_rect()
        self.onGround = False
        self.falling = False
        self.vel_y = SHELL_Y_VEL
        self.vel_x = SHELL_X_VEL
        # Shells spawned on the right half of the screen travel left.
        if start_pos[0] > self.area.width / 2:
            self.vel_x = -self.vel_x

    def fall(self):
        """Mark the shell as stomped; it will drop off the screen."""
        self.falling = True

    def update(self):
        self._move()

    def _move(self):
        if self.falling:
            # Stomped: drift sideways while sinking at a fixed rate.
            self.rect.move_ip((self.vel_x, 4))
            if self.rect.right < 0 or self.rect.left > self.area.width:
                self.kill()
            if self.rect.top > self.area.height:
                self.kill()
        else:
            # Alive: fall under gravity until the floor, then slide along it.
            if not self.onGround:
                self.vel_y += SHELL_GRAVITY
            self.rect.move_ip((self.vel_x, self.vel_y))
            if self.rect.right < 0 or self.rect.left > self.area.width:
                self.kill()
            if self.rect.bottom >= self.area.height - 45:
                self.rect.bottom = self.area.height - 45
                self.vel_y = 0
                self.onGround = True
# ----------------------------------------------------
class PowBlock(pygame.sprite.Sprite):
    """A stationary POW block sprite."""
    image = None

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self)
        # Load and cache the (2x scaled) POW image on first use.
        if PowBlock.image is None:
            raw, _ = loadImage("pow.png", -1)
            PowBlock.image = pygame.transform.scale2x(raw).convert()
        self.image = PowBlock.image
        self.rect = self.image.get_rect()
        self.rect.center = pos
        self.onGround = False

    def update(self):
        # POW blocks never move; nothing to do each frame.
        pass
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different algorithms of reduction and broadcasting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.contrib.distribute.python import cross_tower_utils
from tensorflow.contrib.distribute.python import values as value_lib
from tensorflow.python.client import device_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_util
def check_destinations(destinations):
  """Return True iff `destinations` is non-empty.

  Args:
    destinations: a DistributedValues, Variable, string or a list of strings.

  Returns:
    Boolean which is True if `destinations` is not empty.
  """
  # Calling bool() on a ResourceVariable is not allowed; its device string
  # stands in for "non-empty" instead.
  target = (destinations.device
            if isinstance(destinations, resource_variable_ops.ResourceVariable)
            else destinations)
  return bool(target)
def validate_destinations(destinations):
  """Raises ValueError unless `destinations` has a supported, non-empty type."""
  accepted_types = (value_lib.DistributedValues,
                    resource_variable_ops.ResourceVariable,
                    value_lib.AggregatingVariable, six.string_types, list)
  if not isinstance(destinations, accepted_types):
    raise ValueError("destinations must be one of a `DistributedValues` object,"
                     " a tf.Variable object, a device string, a list of device "
                     "strings")
  if not check_destinations(destinations):
    raise ValueError("destinations can not be empty")
def _make_tensor_into_per_device(input_tensor):
"""Converts a single tensor into a PerDevice object."""
if isinstance(input_tensor, (tuple, list)):
raise ValueError("Cannot convert `input_tensor` to a `PerDevice` object, "
"got %r but expected a object that is not a tuple or list."
% (input_tensor,))
if isinstance(input_tensor, value_lib.PerDevice):
return input_tensor
try:
device = input_tensor.device
except AttributeError:
raise ValueError("Cannot convert `input_tensor` to a `PerDevice` object "
"because it doesn't have device set.")
return value_lib.PerDevice({device: input_tensor})
def _normalize_value_destination_pairs(value_destination_pairs):
"""Converts each tensor into a PerDevice object in the input list."""
result = []
if not isinstance(value_destination_pairs, (list, tuple)):
raise ValueError("`value_destination_pairs` should be a list or tuple")
for pair in value_destination_pairs:
if not isinstance(pair, tuple):
raise ValueError(
"Each element of `value_destination_pairs` should be a tuple.")
if len(pair) != 2:
raise ValueError("Each element of `value_destination_pairs` should be a "
"tuple of size 2.")
per_device = _make_tensor_into_per_device(pair[0])
result.append((per_device, pair[1]))
return result
def _validate_value_destination_pairs(value_destination_pairs):
# TODO(yuefengz): raise exceptions instead of returning False.
# pylint: disable=g-missing-docstring
if not value_destination_pairs: return False
if not isinstance(value_destination_pairs, (list, tuple)): return False
if not all([isinstance(pair, tuple) for pair in value_destination_pairs]):
return False
if not all([isinstance(v[0], value_lib.PerDevice)
for v in value_destination_pairs]):
return False
return True
# TODO(yuefengz): consider calling this function in the caller of
# CrossDeviceOps.
def get_devices_from(destinations):
  """Returns the list of device strings that `destinations` stands for."""
  if isinstance(destinations, value_lib.DistributedValues):
    return list(destinations.devices)
  if isinstance(destinations, (resource_variable_ops.ResourceVariable,
                               value_lib.AggregatingVariable)):
    return [destinations.device]
  if isinstance(destinations, six.string_types):
    return [device_util.resolve(destinations)]
  if isinstance(destinations, (list, tuple)):
    return [device_util.resolve(d) for d in destinations]
  # Fallback: anything else is assumed to carry a `.device` attribute.
  return [destinations.device]
def _devices_match(left, right):
  """True iff `left` and `right` resolve to the same set of devices."""
  left_devices = set(get_devices_from(left))
  right_devices = set(get_devices_from(right))
  return left_devices == right_devices
def _all_devices_match(value_destination_pairs):
if not all([_devices_match(v, d) for v, d in value_destination_pairs]):
return False
if not all([_devices_match(v, value_destination_pairs[0][0])
for v, _ in value_destination_pairs[1:]]):
return False
return True
def _simple_broadcast(value, destinations):
  """Copies `value` onto every destination device; returns a Mirrored."""
  index = {
      d: cross_tower_utils.copy_tensor_or_indexed_slices_to_device(value, d)
      for d in get_devices_from(destinations)
  }
  return value_lib.Mirrored(index)
def _simple_reduce(per_device_value, reduce_to_device, accumulation_fn,
                   aggregation):
  """Reduces a PerDevice object to a single value on `reduce_to_device`.

  Args:
    per_device_value: a PerDevice object; its per-device entries are tensors,
      IndexedSlices, or MapOutput lists of such values.
    reduce_to_device: device string where the reduced value is placed.
    accumulation_fn: function used to combine the gathered values.
    aggregation: a `tf.VariableAggregation` value, SUM or MEAN.

  Returns:
    the reduced value (divided by the total value count when MEAN).

  Raises:
    ValueError: if `per_device_value` holds no values, or `aggregation` is
      neither SUM nor MEAN.
  """
  all_values = []
  count = 0
  for v in per_device_value._index.values():  # pylint: disable=protected-access
    if isinstance(v, value_lib.MapOutput):
      v_list = v.get()
      if not v_list:
        continue
      count += len(v_list)
      # Sum within each device before aggregating across devices.
      # TODO(yuefengz): Check whether it helps to use accumulation_fn here.
      v = cross_tower_utils.aggregate_tensors_or_indexed_slices(
          v_list, math_ops.add_n)
    else:
      count += 1
    all_values.append(v)
  if not all_values:
    raise ValueError("`per_device_value` must be non-empty")
  with ops.device(reduce_to_device):
    with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
      reduced = cross_tower_utils.aggregate_tensors_or_indexed_slices(
          all_values, accumulation_fn)
      if aggregation == vs.VariableAggregation.MEAN:
        # MEAN divides the accumulated sum by the total number of values.
        reduced = cross_tower_utils.divide_by_n_tensors_or_indexed_slices(
            reduced, count)
      elif aggregation != vs.VariableAggregation.SUM:
        raise ValueError("`aggregation` must be VariableAggregation.SUM "
                         "or VariableAggregation.MEAN.")
  return reduced
class CrossDeviceOps(object):
  """Base class for cross-device reduction and broadcasting algorithms."""

  def __init__(self):
    pass

  def reduce(self, aggregation, per_device_value, destinations):
    """Reduce `per_device_value` to `destinations`.

    Runs the reduction operation defined by `aggregation` and places the
    result on `destinations`.

    Args:
      aggregation: Indicates how a variable will be aggregated. Accepted values
        are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
      per_device_value: a PerDevice object or a tensor with device set.
      destinations: the reduction destinations.

    Returns:
      a Mirrored object.

    Raises:
      ValueError: if per_device_value is not a PerDevice object.
    """
    # Promote a bare tensor (with a device) to a single-entry PerDevice.
    if not isinstance(per_device_value, value_lib.PerDevice):
      per_device_value = _make_tensor_into_per_device(per_device_value)
    validate_destinations(destinations)
    return self._reduce(aggregation, per_device_value, destinations)

  def batch_reduce(self, aggregation, value_destination_pairs):
    """Reduce PerDevice objects in a batch.

    Reduces each first element in `value_destination_pairs` to the
    destinations given by the corresponding second element.

    Args:
      aggregation: Indicates how a variable will be aggregated. Accepted values
        are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
      value_destination_pairs: a list or a tuple of tuples of PerDevice objects
        (or tensors with device set if there is one device) and destinations.

    Returns:
      a list of Mirrored objects.

    Raises:
      ValueError: if `value_destination_pairs` is not a list or a tuple of
        tuples of PerDevice objects and destinations
    """
    if not _validate_value_destination_pairs(value_destination_pairs):
      # If the first element of each pair is a tensor, we try to turn it into
      # a PerDevice object.
      value_destination_pairs = _normalize_value_destination_pairs(
          value_destination_pairs)
    for _, destination in value_destination_pairs:
      validate_destinations(destination)
    return self._batch_reduce(aggregation, value_destination_pairs)

  def broadcast(self, tensor, destinations):
    """Broadcast `tensor` to `destinations`.

    Args:
      tensor: the tensor to broadcast.
      destinations: the broadcast destinations.

    Returns:
      a Mirrored object.
    """
    validate_destinations(destinations)
    return self._broadcast(tensor, destinations)

  def _reduce(self, aggregation, per_device_value, destinations):
    raise NotImplementedError(
        "_reduce method must be implemented in descendants.")

  def _batch_reduce(self, aggregation, value_destination_pairs):
    raise NotImplementedError(
        "_batch_reduce method must be implemented in descendants.")

  def _broadcast(self, tensor, destinations):
    # Default broadcast: plain per-device copies.
    return _simple_broadcast(tensor, destinations)
class ReductionToOneDeviceCrossDeviceOps(CrossDeviceOps):
  """Always do reduction to one device first and then do broadcasting.

  Batch reduction is done by reduction on each element one by one.
  """

  def __init__(self, reduce_to_device=None, accumulation_fn=math_ops.add_n):
    """Constructor.

    Args:
      reduce_to_device: the intermediate device to reduce to. If None, reduce
        to the first device in `destinations` of the reduce() method.
      accumulation_fn: a function that does accumulation.
    """
    super(ReductionToOneDeviceCrossDeviceOps, self).__init__()
    self.reduce_to_device = reduce_to_device
    self.accumulation_fn = accumulation_fn

  def _reduce(self, aggregation, per_device_value, destinations):
    # Prefer caller-supplied destinations; otherwise fall back to the devices
    # the value already lives on.
    if check_destinations(destinations):
      devices = get_devices_from(destinations)
    else:
      devices = get_devices_from(per_device_value)
    target_device = self.reduce_to_device or devices[0]
    reduced = _simple_reduce(per_device_value, target_device,
                             self.accumulation_fn, aggregation)
    return self.broadcast(reduced, devices)

  def _batch_reduce(self, aggregation, value_destination_pairs):
    # A batch is simply a sequence of independent single reductions.
    return [self._reduce(aggregation, value, destinations=dest)
            for value, dest in value_destination_pairs]
def _group_value_by_device(per_device_values):
  """Group values into sublists by their devices.

  This grouping is needed to call the all-reduce library because it expects a
  list of the following form:
    [[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...],
     [(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...],
     [(grad0_gpu2, v0_gpu2), (grad1_gpu0, v1_gpu2), (grad2_gpu0, v2_gpu2) ...],
     ...
    ]

  Args:
    per_device_values: a list of PerDevice objects; every one of them must
      span the same device list as the first.

  Returns:
    a list of lists, each sublist has components for its corresponding device of
      PerDevice objects, paired with a None.
  """
  destinations = per_device_values[0].devices
  grouped = [[] for _ in range(len(destinations))]
  for per_device_value in per_device_values:
    # pylint: disable=protected-access
    for i, v in enumerate(per_device_value._index.values()):
      # All PerDevice inputs must live on the same device list.
      assert per_device_value.devices == destinations
      grouped[i].append((v, None))
  return grouped
def _ungroup_and_make_mirrored(grouped_reduced,
                               destinations,
                               aggregation,
                               num_between_graph_workers=1):
  """Ungroup results from all-reduce and make Mirrored objects.

  Each all-reduce result will be divided by the number of destinations before
  Mirrored objects are created if aggregation is "mean".

  Args:
    grouped_reduced: a list of lists, each sublist has components for each
      device, paired with a None. It is the result from
      cross_tower_utils.aggregate_gradients_using*.
    destinations: a list of device strings for returned Mirrored objects.
    aggregation: Indicates how a variable will be aggregated. Accepted values
      are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
    num_between_graph_workers: number of workers in the between-graph
      replication.

  Returns:
    a list of Mirrored objects.
  """
  # index[i] maps device string -> reduced value of the i-th variable.
  index = [{} for _ in range(len(grouped_reduced[0]))]
  for d, per_device_reduced in enumerate(grouped_reduced):
    for i, (v, _) in enumerate(per_device_reduced):
      if aggregation == vs.VariableAggregation.MEAN:
        # The all-reduce summed over devices * between-graph workers;
        # divide to turn the sum into a mean.
        index[i][destinations[d]] = v / (
            len(destinations) * num_between_graph_workers)
      else:
        index[i][destinations[d]] = v
  return [value_lib.Mirrored(v) for v in index]
class ConcatAndSplitPacker(object):
  """Concatenate and split tensors for reduction."""

  def __init__(self, num_packs=1):
    """Initialize the ConcatAndSplitPacker object.

    Args:
      num_packs: specifies the number of split packs that will be
        formed.

    Raises:
      ValueError: if num_packs is not greater than 0.
    """
    if num_packs <= 0:
      raise ValueError("num_packs must be greater than zero.")
    self.num_packs = num_packs

  def pack(self, grouped_grads_and_vars):
    """Pack tensors: flatten, concat and split each device's gradients.

    Args:
      grouped_grads_and_vars: a list (one entry per device) of lists of
        (gradient, variable) pairs.

    Returns:
      a list (one entry per device) of lists of (pack, None) pairs; also
      records the shapes/sizes needed by `unpack`.
    """
    self.grouped_grads_and_vars = grouped_grads_and_vars
    self.all_device_shapes = []
    self.all_device_sizes = []
    device_grad_packs = []
    for device_grads_and_vars in grouped_grads_and_vars:
      with ops.colocate_with(device_grads_and_vars[0][0]):
        # Flatten all the grads.
        flat_grads = [
            array_ops.reshape(g, [-1]) for g, _ in device_grads_and_vars
        ]
        # Remember the original shape of all the grads.
        device_shapes = [array_ops.shape(g) for g, _ in device_grads_and_vars]
        # Remember the original sizes of all the grads.
        device_sizes = [array_ops.size(g) for g, _ in device_grads_and_vars]
        # Concat all the flat grads into a big flat tensor.
        concat_grads = array_ops.concat(flat_grads, 0)
        # Split the big tensor into num_splits packs. In cases where the
        # total size is not divisible num_splits, the last pack gets
        # more elements.
        # TODO(zhengxq): it is also possible to optimize away all the concat
        # as well.
        num_splits = self.num_packs
        # The array_ops.size function will sometimes remove static shapes. So if
        # all gradient shapes are defined, we use another method to get the
        # total size.
        # TODO(yuefengz): move this logic to array_ops.size.
        if all(g.shape.is_fully_defined() for g, _ in device_grads_and_vars):
          total_grad_size = sum(
              g.shape.num_elements() for g, _ in device_grads_and_vars)
        else:
          total_grad_size = array_ops.size(concat_grads)
        split_size = total_grad_size // num_splits
        split_size_last = total_grad_size - split_size * (num_splits - 1)
        split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
        grad_packs = array_ops.split(concat_grads, split_sizes)
        # Ready to aggregate the repacked gradients, with fake variables.
        # TODO(zhengxq): It is hacky to have to use fake variables.
        # We should remove the need for variables in
        # aggregate_gradients_using*.
        # Wrap zip in list() so the result is indexable and re-iterable under
        # Python 3, where zip returns a one-shot iterator.
        device_grad_packs.append(list(zip(grad_packs, [None] * num_splits)))
        self.all_device_shapes.append(device_shapes)
        self.all_device_sizes.append(device_sizes)
    return device_grad_packs

  def unpack(self, summed_device_grad_packs):
    """Reverse the pack: concat the summed packs and re-split/reshape them."""
    aggregated_device_grads = []
    # Renamed the loop variable so it no longer shadows the parameter.
    for (summed_packs, device_grads_and_vars,
         device_shapes, device_sizes) in zip(
             summed_device_grad_packs, self.grouped_grads_and_vars,
             self.all_device_shapes, self.all_device_sizes):
      # Reverse the packing operations in the previous steps. Form the
      # summed gradients back into their original shapes.
      with ops.colocate_with(summed_packs[0][0]):
        # Form a list of the summed grad packs.
        device_grad_packs = [g for g, _ in summed_packs]
        # Concat them back into a big flat tensor.
        device_grads_concat = array_ops.concat(device_grad_packs, 0)
        # Split the tensors back into their original sizes.
        grads_with_sizes = array_ops.split(device_grads_concat, device_sizes)
        # Reshape the tensors back into their original shapes.
        grads_with_shapes = [
            array_ops.reshape(grad, shape)
            for shape, grad in zip(device_shapes, grads_with_sizes)
        ]
        # Pair the summed gradients back with their original variables.
        summed_device_grads = [
            (g, v) for g, (_, v) in zip(grads_with_shapes,
                                        device_grads_and_vars)
        ]
        aggregated_device_grads.append(summed_device_grads)
    return aggregated_device_grads
class AggregateSmallTensorPacker(object):
  """Concatenate small gradient tensors together for reduction."""

  def __init__(self,
               agg_small_grads_max_bytes=1048576,
               agg_small_grads_max_group=16):
    """Initialize the AggregateSmallTensorPacker object.

    Args:
      agg_small_grads_max_bytes: largest tensor eligible for aggregation,
        in number of bytes.
      agg_small_grads_max_group: largest permitted aggregation of small
        tensors.

    Raises:
      ValueError: if `agg_small_grads_max_bytes` or `agg_small_grads_max_group`
        is not greater than 0.
    """
    if agg_small_grads_max_bytes <= 0 or agg_small_grads_max_group <= 0:
      raise ValueError("agg_small_grads_max_bytes and agg_small_grads_max_group"
                       " should both be greater than zero.")
    self.agg_small_grads_max_bytes = agg_small_grads_max_bytes
    self.agg_small_grads_max_group = agg_small_grads_max_group

  def pack(self, grouped_grads_and_vars):
    """Aggregate small tensors.

    Args:
      grouped_grads_and_vars: a list (one entry per device) of lists of
        (gradient, variable) pairs.

    Returns:
      the repacked per-device gradients; `self.packing` records how to undo
      the aggregation in `unpack`.
    """
    # The constructor rejects non-positive limits, so packing always applies.
    # (The previous guard here could leave the return value unbound on an
    # unreachable branch.)
    device_grads, self.packing = cross_tower_utils.pack_small_tensors(
        grouped_grads_and_vars,
        max_bytes=self.agg_small_grads_max_bytes,
        max_group=self.agg_small_grads_max_group)
    return device_grads

  def unpack(self, summed_device_grad_packs):
    """Reverse the aggregation process."""
    return cross_tower_utils.unpack_small_tensors(summed_device_grad_packs,
                                                  self.packing)
def _pack_tensors(device_grads,
num_packs=0,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=0):
"""Pack tensors if specified."""
if num_packs > 0:
tensor_packer = ConcatAndSplitPacker(num_packs)
device_grad_packs = tensor_packer.pack(device_grads)
elif agg_small_grads_max_bytes > 0 and agg_small_grads_max_group > 0:
tensor_packer = AggregateSmallTensorPacker(agg_small_grads_max_bytes,
agg_small_grads_max_group)
device_grad_packs = tensor_packer.pack(device_grads)
else:
tensor_packer = None
device_grad_packs = device_grads
return device_grad_packs, tensor_packer
def _unpack_tensors(reduced, tensor_packer=None):
"""Unpack tensors if they are packed before all-reduce."""
if tensor_packer:
return tensor_packer.unpack(reduced)
return reduced
class AllReduceCrossDeviceOps(CrossDeviceOps):
  """Reduction using all reduce."""
  def __init__(self,
               all_reduce_alg="nccl",
               num_packs=1,
               agg_small_grads_max_bytes=0,
               agg_small_grads_max_group=10):
    """All-reduce implementation of CrossDeviceOps.
    Before performing all-reduce, tensors will be repacked or aggregated for
    more efficient cross-device transportation:
      1) If `num_packs` is non-zero, pack values into
        `num_packs` splits.
      2) Otherwise, if `agg_small_grads_max_bytes` > 0 and
        `agg_small_grads_max_group` > 0, aggregate values smaller than
        `agg_small_grads_max_bytes` into groups with at most
        `agg_small_grads_max_group` values.
      3) Otherwise, no repacking or grouping will happen.
    Args:
      all_reduce_alg: the all-reduce algorithm to use, currently only "nccl" or
        "hierarchical_copy" are supported.
      num_packs: see above.
      agg_small_grads_max_bytes: see above.
      agg_small_grads_max_group: see above.
    """
    self._all_reduce_alg = all_reduce_alg
    self._num_packs = num_packs
    self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
    self._agg_small_grads_max_group = agg_small_grads_max_group
    super(AllReduceCrossDeviceOps, self).__init__()
  def _reduce(self, aggregation, per_device_value, destinations):
    # Fast path: devices already match and the efficient batched all-reduce
    # applies (graph mode, dense tensors only).
    contains_indexed_slices = cross_tower_utils.contains_indexed_slices(
        per_device_value)
    if (_devices_match(per_device_value, destinations)
        and not context.executing_eagerly()
        and not contains_indexed_slices):
      return self._batch_all_reduce(aggregation, [per_device_value])[0]
    else:
      if contains_indexed_slices:
        logging.log_first_n(
            logging.WARN,
            "Efficient allreduce is not supported for IndexedSlices.", 10)
      # Slow path: reduce to one device, then broadcast to the destinations.
      if check_destinations(destinations):
        devices = get_devices_from(destinations)
      else:
        devices = get_devices_from(per_device_value)
      reduce_to_device = devices[0]
      reduced = _simple_reduce(per_device_value, reduce_to_device,
                               math_ops.add_n, aggregation)
      return self.broadcast(reduced, devices)
  def _batch_reduce(self, aggregation, value_destination_pairs):
    all_devices_match = _all_devices_match(value_destination_pairs)
    contains_indexed_slices = cross_tower_utils.contains_indexed_slices(
        value_destination_pairs)
    if (all_devices_match and not context.executing_eagerly()
        and not contains_indexed_slices):
      return self._batch_all_reduce(aggregation,
                                    [v[0] for v in value_destination_pairs])
    else:
      if not all_devices_match:
        logging.log_first_n(logging.WARN,
                            "Efficient batch_reduce is not supported if "
                            "destinations are different.",
                            10)
      # Fall back to reducing each pair independently.
      return [
          self._reduce(aggregation, t, destinations=v)
          for t, v in value_destination_pairs
      ]
  def _batch_all_reduce(self, aggregation, per_device_values):
    """All reduce algorithm in a batch."""
    logging.log_first_n(
        logging.INFO, "batch_all_reduce invoked for batches size = %d with "
        "algorithm = %s, num_packs = %d, agg_small_grads_max_bytes = %d and "
        "agg_small_grads_max_group = %d" %
        (len(per_device_values), self._all_reduce_alg, self._num_packs,
         self._agg_small_grads_max_bytes, self._agg_small_grads_max_group), 10)
    destinations = per_device_values[0].devices
    grouped = _group_value_by_device(per_device_values)
    device_grad_packs, tensor_packer = _pack_tensors(
        grouped, self._num_packs, self._agg_small_grads_max_bytes,
        self._agg_small_grads_max_group)
    # The actual aggregation of the repacked gradients. Note that they are
    # sharded among different aggregation trees. So it is important to strike
    # the balance on num_splits.
    if self._all_reduce_alg == "nccl":
      # TODO(yuefengz): merge this into the all-reduce library.
      reduced = cross_tower_utils.aggregate_gradients_using_nccl(
          device_grad_packs)
    else:
      # TODO(yuefengz): check that gpu ids in `destinations` are in ascending
      # order.
      reduced = (
          cross_tower_utils.aggregate_gradients_using_hierarchical_copy(
              destinations, device_grad_packs))
    reduced = _unpack_tensors(reduced, tensor_packer)
    return _ungroup_and_make_mirrored(reduced, per_device_values[0].devices,
                                      aggregation)
# For compatibility with code using the old name of `AllReduceCrossDeviceOps`.
AllReduceCrossTowerOps = AllReduceCrossDeviceOps
# (algorithm name, shard count, max tensor size in bytes) for one entry of a
# multi-worker all-reduce spec; limit == -1 means "no size limit" (see
# MultiWorkerAllReduce.__init__).
AllReduceSpecTuple = collections.namedtuple("AllReduceSpecTuple",
                                            "alg shards limit")
class MultiWorkerAllReduce(AllReduceCrossDeviceOps):
  """All-reduce algorithms for distributed TensorFlow."""

  def __init__(self,
               worker_devices,
               num_gpus_per_worker,
               all_reduce_spec=("pscpu/pscpu", 2, -1),
               num_packs=0,
               agg_small_grads_max_bytes=0,
               agg_small_grads_max_group=10):
    """Initialize the all-reduce algorithm.

    Args:
      worker_devices: a list of device strings for workers participating in
        all-reduce.
      num_gpus_per_worker: number of GPU devices per worker.
      all_reduce_spec: a tuple or a named tuple or a list of tuples specifying
        the all-reduce algorithm.
        1. The first element of a tuple is the name of the all-reduce algorithm.
        Valid algorithm names are: "nccl", "nccl/xring", "nccl/rechd",
        "nccl/pscpu", "xring", "pscpu", "psgpu", "pscpu/pscpu". Algorithms with
        a "/" are hierarchical, so two all-reduces are executed, the first one
        aggregates tensors within a worker and the second aggregates across
        workers.
        2. The second element of a tuple is the number of shards when doing
        all-reduce. Let's say its values is M, each tensor after packing will be
        split into M shards and then M parallel all-reduces would be performed
        before finally they are concatenated backed into a complete tensor.
        3. The third element is the maximum size of tensors that will be
        applicable for the algorithm specified by the first element. For
        example, if all_reduce_spec=[("nccl", 2, 1024), ("pscpu/pscpu", 2, -1)],
        tensors with size not larger than 1024 bytes will be applied a 2-shard
        "nccl" all-reduce and other tensors will be applied a 2-shard
        "pscpu/pscpu" algorithm. The third elements should be in increasing
        order across tuples and end with -1 which indicates infinity.
      num_packs: see AllReduceCrossDeviceOps.
      agg_small_grads_max_bytes: see AllReduceCrossDeviceOps.
      agg_small_grads_max_group: see AllReduceCrossDeviceOps.

    Raises:
      ValueError: if `all_reduce_spec` is not a string, a tuple, or a list of
        tuples, or if any spec tuple is malformed.
    """
    self._worker_devices = worker_devices
    self._num_gpus_per_worker = num_gpus_per_worker
    super(MultiWorkerAllReduce, self).__init__(
        num_packs=num_packs,
        agg_small_grads_max_bytes=agg_small_grads_max_bytes,
        agg_small_grads_max_group=agg_small_grads_max_group)

    def validate_and_complete_spec(spec):
      """Validate and complete the all-reduce spec."""
      # TODO(yuefengz): support namedtuple.
      if not isinstance(spec, tuple):
        raise ValueError(
            "A tuple is expected for all-reduce spec: %r" % all_reduce_spec)
      if not spec or len(spec) > 3:
        raise ValueError(
            "Too many elements in the all-reduce spec tuple: %r" % spec)
      # A missing shard count defaults to 1; a missing size limit to -1
      # (no limit).
      if len(spec) == 1:
        return AllReduceSpecTuple(spec[0], 1, -1)
      elif len(spec) == 2:
        return AllReduceSpecTuple(spec[0], spec[1], -1)
      else:
        return AllReduceSpecTuple(*spec)

    self._all_reduce_spec = []
    if isinstance(all_reduce_spec, six.string_types):
      self._all_reduce_spec.append(AllReduceSpecTuple(all_reduce_spec, 1, -1))
    elif isinstance(all_reduce_spec, tuple):
      self._all_reduce_spec.append(validate_and_complete_spec(all_reduce_spec))
    elif isinstance(all_reduce_spec, list):
      self._all_reduce_spec = [
          validate_and_complete_spec(spec) for spec in all_reduce_spec
      ]
    else:
      # Previously an unsupported spec type silently produced an empty spec
      # list; fail fast instead.
      raise ValueError(
          "all_reduce_spec must be a string, a tuple or a list of tuples, "
          "got %r" % (all_reduce_spec,))

  def _batch_all_reduce(self, aggregation, per_device_values):
    """All reduce algorithm in a batch."""
    logging.log_first_n(
        logging.INFO,
        "distributed batch_all_reduce invoked for batches size = %d with "
        "allreduce_spec = %r, num_packs = %d, agg_small_grads_max_bytes = %d "
        "and agg_small_grads_max_group = %d" %
        (len(per_device_values), self._all_reduce_spec, self._num_packs,
         self._agg_small_grads_max_bytes, self._agg_small_grads_max_group), 10)

    destinations = sorted(per_device_values[0].devices)
    device_grads = _group_value_by_device(per_device_values)

    # The all reduce library requires fully defined shapes.
    # TODO(yuefengz): when tensor sharding is not needed, static shapes are not
    # required as well.
    for device_grad in device_grads:
      for grad, _ in device_grad:
        if not grad.shape.is_fully_defined():
          raise ValueError("Shape is unknown for node %r" % grad)

    remaining_grads = device_grads
    aggregated_grads = []
    for spec_tuple in self._all_reduce_spec:
      # limit < 0 means "everything that is still unassigned".
      if spec_tuple.limit < 0:
        this_grads = remaining_grads
        remaining_grads = []
      else:
        (this_grads, remaining_grads) = cross_tower_utils.split_grads_by_size(
            spec_tuple.limit, remaining_grads)
      if this_grads:
        device_grad_packs, tensor_packer = _pack_tensors(
            this_grads, self._num_packs, self._agg_small_grads_max_bytes,
            self._agg_small_grads_max_group)
        range_agg_grads = cross_tower_utils.sum_gradients_all_reduce(
            self._worker_devices, device_grad_packs, len(self._worker_devices),
            spec_tuple.alg, spec_tuple.shards, range(self._num_gpus_per_worker))
        range_agg_grads = _unpack_tensors(range_agg_grads, tensor_packer)

        if not aggregated_grads:
          aggregated_grads = range_agg_grads
        else:
          # Merge this size-range's results into the running aggregate.
          assert len(aggregated_grads) == len(range_agg_grads)
          for i in range(len(aggregated_grads)):
            aggregated_grads[i] += range_agg_grads[i]
    assert not remaining_grads

    return _ungroup_and_make_mirrored(aggregated_grads, destinations,
                                      aggregation)
# TODO(yuefengz): support in-graph collective all-reduce.
class CollectiveAllReduce(CrossDeviceOps):
"""All-reduce cross device ops using collective ops.
In the between-graph replicated training, it will still do all-reduces across
all workers and then put results on the right destinations.
"""
  def __init__(self,
               num_workers=1,
               num_gpus_per_worker=0,
               all_reduce_merge_scope=32,
               collective_keys=None):
    """Initializes the object.

    Args:
      num_workers: number of workers in the between-graph replicated training.
      num_gpus_per_worker: number of GPUs per worker.
      all_reduce_merge_scope: size of groups into which to partition consecutive
        gradients grouped under a common 'allreduce' name scope. This is useful
        for some optimization of collective ops.
      collective_keys: an optional CollectiveKey object.
    """
    self._num_workers = num_workers
    self._num_gpus_per_worker = num_gpus_per_worker
    self._all_reduce_merge_scope = all_reduce_merge_scope
    # Fall back to a fresh set of collective keys when none is supplied.
    self._collective_keys = collective_keys or cross_tower_utils.CollectiveKeys(
    )
    super(CollectiveAllReduce, self).__init__()
# TODO(yuefengz, tucker): is indexed slices supported by collective ops?
  def _reduce(self, aggregation, per_device_value, destinations):
    """All-reduces `per_device_value` and places results on `destinations`.

    Raises:
      ValueError: on IndexedSlices input or under eager execution, neither of
        which is supported by collective all-reduce.
    """
    if cross_tower_utils.contains_indexed_slices(per_device_value):
      raise ValueError(
          "`IndexSlices` is not supported for Collective All-Reduce.")
    if context.executing_eagerly():
      raise ValueError(
          "Eager execution is not supported for Collective All-Reduce")
    all_reduced = self._batch_all_reduce(aggregation, [per_device_value])[0]
    if _devices_match(per_device_value, destinations):
      return all_reduced
    else:
      # Results landed on the wrong devices: reuse entries already in place
      # and copy (identity) one of the reduced values onto each missing
      # destination.
      index = {}
      for d in get_devices_from(destinations):
        # pylint: disable=protected-access
        if d in all_reduced._index:
          index[d] = all_reduced._index[d]
        else:
          with ops.control_dependencies(list(
              all_reduced._index.values())), ops.device(d):
            index[d] = array_ops.identity(list(all_reduced._index.values())[0])
      return value_lib.Mirrored(index)
def _batch_reduce(self, aggregation, value_destination_pairs):
if cross_tower_utils.contains_indexed_slices(value_destination_pairs):
raise ValueError(
"`IndexSlices` is not supported for Collective All-Reduce.")
if context.executing_eagerly():
raise ValueError(
"Eager execution is not supported for Collective All-Reduce")
all_devices_match = _all_devices_match(value_destination_pairs)
if all_devices_match:
return self._batch_all_reduce(aggregation,
[v[0] for v in value_destination_pairs])
else:
if not all_devices_match:
logging.log_first_n(
logging.WARN, "Efficient batch_reduce is not supported if "
"destinations are different.", 10)
return [
self._reduce(aggregation, t, destinations=v)
for t, v in value_destination_pairs
]
def _batch_all_reduce(self, aggregation, per_device_values):
"""All-reduce across all workers in a batch."""
if context.executing_eagerly():
raise ValueError(
"Eager execution with collective ops is not supported yet.")
logging.log_first_n(
logging.INFO, "Collective All-reduce invoked with batches size = %d, "
"num_workers = %d" % (len(per_device_values), self._num_workers), 10)
grouped_by_device = _group_value_by_device(per_device_values)
grouped_by_var = list(zip(*grouped_by_device))
# grouped_by_var is grouped by variables and takes the following format:
# [((grad0_gpu0, v0_gpu0), (grad0_gpu1, v0_gpu1), (grad0_gpu2, v0_gpu2) ..),
# ((grad1_gpu0, v1_gpu0), (grad1_gpu1, v1_gpu1), (grad1_gpu0, v1_gpu2) ..),
# ((grad2_gpu0, v2_gpu0), (grad2_gpu1, v2_gpu1), (grad2_gpu0, v2_gpu2) ..),
# ...
# ]
chunked_gv = [
grouped_by_var[x:x + self._all_reduce_merge_scope]
for x in range(0, len(grouped_by_var), self._all_reduce_merge_scope)
]
reduced_gv_list = []
for chunk in chunked_gv:
with ops.name_scope("allreduce"):
for grad_and_vars in chunk:
scaled_grads = [g for g, _ in grad_and_vars]
collective_reduced = cross_tower_utils.build_collective_reduce(
scaled_grads, self._num_workers, self._collective_keys, "Add",
"Id")
result = []
for (_, v), g in zip(grad_and_vars, collective_reduced):
result.append([g, v])
reduced_gv_list.append(result)
new_device_grads = [list(x) for x in zip(*reduced_gv_list)]
return _ungroup_and_make_mirrored(
new_device_grads,
per_device_values[0].devices,
aggregation,
num_between_graph_workers=self._num_workers)
_dgx1_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
[0, 5, 6, 7], [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6]]
def _has_dgx1_like_links(gpu_links):
if not gpu_links:
return False
# TODO(yuefengz): figure out the right topology for hierarchial copy if
# number of gpus are less than 8.
if len(gpu_links) < 8:
return False
for i, (gpu_link, dgx1_link) in enumerate(zip(gpu_links, _dgx1_links)):
if (set(gpu_link) != set(dgx1_link) and
set(gpu_link) != set(dgx1_link + [i])):
return False
return True
def _choose_all_reduce_algorithm(device_links):
  """Picks an all-reduce spec from the device interconnect topology.

  DGX-1-like NVLink topologies get hierarchical_copy; everything else
  falls back to nccl.
  """
  if not _has_dgx1_like_links(device_links):
    logging.info("Configured nccl all-reduce.")
    return AllReduceCrossDeviceOps("nccl", num_packs=1)
  logging.info("Configured hierarchical_copy with num_packs=%d",
               len(device_links))
  return AllReduceCrossDeviceOps(
      "hierarchical_copy", num_packs=len(device_links))
def choose_the_best(devices, session_config=None):
  """Find the best subclass of CrossDeviceOps given a tensorflow session.

  Args:
    devices: a list of devices passed for distribute strategy.
    session_config: a tensorflow session config or None. If None, it will make
      decision based on all local devices.

  Returns:
    a subclass of CrossDeviceOps.
  """
  requested_devices = {device_util.canonicalize(d) for d in devices}
  machine_devices = device_lib.list_local_devices(session_config=session_config)
  using_devices = []
  for d in machine_devices:
    if device_util.canonicalize(d.name) in requested_devices:
      using_devices.append(d)
    else:
      logging.info(
          "Device is available but not used by distribute strategy: %s", d.name)

  # Fall back to copy-to-one-device reduction when we cannot see every
  # requested device, or when any of them is not a GPU.
  if len(using_devices) != len(requested_devices):
    logging.warning("Not all devices in distribute strategy are visible by "
                    "TensorFlow sessions.")
    return ReductionToOneDeviceCrossDeviceOps()

  if any(d.device_type.lower() != "gpu" for d in using_devices):
    logging.warning("Not all devices in DistributionStrategy are visible to "
                    "TensorFlow session.")
    return ReductionToOneDeviceCrossDeviceOps()

  device_links = [[] for _ in range(len(using_devices))]
  for i, device in enumerate(using_devices):
    for link in device.locality.links.link:
      device_links[i].append(link.device_id)

  return _choose_all_reduce_algorithm(device_links)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import with_statement
import hashlib
import logging
import os
import os.path
import pipes
import random
import shutil
import string
from stat import S_IRUSR
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import urllib2
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
SPARK_EC2_VERSION = "1.2.0"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))

# Spark releases this script knows how to deploy as pre-built packages.
# Any --spark-version not in this set (no ".") is treated as a git commit
# hash by get_validate_spark_version().
VALID_SPARK_VERSIONS = set([
    "0.7.3",
    "0.8.0",
    "0.8.1",
    "0.9.0",
    "0.9.1",
    "0.9.2",
    "1.0.0",
    "1.0.1",
    "1.0.2",
    "1.1.0",
    "1.1.1",
    "1.2.0",
])

DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"

# Default location to get the spark-ec2 scripts (and ami-list) from
DEFAULT_SPARK_EC2_GITHUB_REPO = "https://github.com/mesos/spark-ec2"
DEFAULT_SPARK_EC2_BRANCH = "branch-1.3"
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
    """Raised to signal incorrect command-line usage of this tool."""
# Configure and parse our command-line arguments
def parse_args():
    """Configure and parse the command-line arguments.

    Returns a tuple (opts, action, cluster_name). Exits after printing help
    when the two positional arguments are missing, and exits with an error
    when no AWS credentials can be found via ~/.boto, /etc/boto.cfg or the
    AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables.
    """
    parser = OptionParser(
        prog="spark-ec2",
        version="%prog {v}".format(v=SPARK_EC2_VERSION),
        usage="%prog [options] <action> <cluster_name>\n\n"
        + "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")
    parser.add_option(
        "-s", "--slaves", type="int", default=1,
        help="Number of slaves to launch (default: %default)")
    parser.add_option(
        "-w", "--wait", type="int",
        help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
    parser.add_option(
        "-k", "--key-pair",
        help="Key pair to use on instances")
    parser.add_option(
        "-i", "--identity-file",
        help="SSH private key file to use for logging into instances")
    parser.add_option(
        "-t", "--instance-type", default="m1.large",
        help="Type of instance to launch (default: %default). " +
             "WARNING: must be 64-bit; small instances won't work")
    parser.add_option(
        "-m", "--master-instance-type", default="",
        help="Master instance type (leave empty for same as instance-type)")
    parser.add_option(
        "-r", "--region", default="us-east-1",
        help="EC2 region zone to launch instances in")
    parser.add_option(
        "-z", "--zone", default="",
        help="Availability zone to launch instances in, or 'all' to spread " +
             "slaves across multiple (an additional $0.01/Gb for bandwidth" +
             "between zones applies) (default: a single zone chosen at random)")
    parser.add_option(
        "-a", "--ami",
        help="Amazon Machine Image ID to use")
    parser.add_option("--master-ami",
                      help="Amazon Machine Image ID to use for the Master")
    parser.add_option(
        "-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
        help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
    parser.add_option(
        "--spark-git-repo",
        default=DEFAULT_SPARK_GITHUB_REPO,
        help="Github repo from which to checkout supplied commit hash (default: %default)")
    parser.add_option(
        "--spark-ec2-git-repo",
        default=DEFAULT_SPARK_EC2_GITHUB_REPO,
        help="Github repo from which to checkout spark-ec2 (default: %default)")
    parser.add_option(
        "--spark-ec2-git-branch",
        default=DEFAULT_SPARK_EC2_BRANCH,
        help="Github repo branch of spark-ec2 to use (default: %default)")
    parser.add_option(
        "--hadoop-major-version", default="1",
        help="Major version of Hadoop (default: %default)")
    parser.add_option(
        "-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
        help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
             "the given local address (for use with login)")
    parser.add_option(
        "--resume", action="store_true", default=False,
        help="Resume installation on a previously launched cluster " +
             "(for debugging)")
    parser.add_option(
        "--ebs-vol-size", metavar="SIZE", type="int", default=0,
        help="Size (in GB) of each EBS volume.")
    parser.add_option(
        "--ebs-vol-type", default="standard",
        help="EBS volume type (e.g. 'gp2', 'standard').")
    parser.add_option(
        "--ebs-vol-num", type="int", default=1,
        help="Number of EBS volumes to attach to each node as /vol[x]. " +
             "The volumes will be deleted when the instances terminate. " +
             "Only possible on EBS-backed AMIs. " +
             "EBS volumes are only attached if --ebs-vol-size > 0." +
             "Only support up to 8 EBS volumes.")
    parser.add_option(
        "--placement-group", type="string", default=None,
        help="Which placement group to try and launch " +
             "instances into. Assumes placement group is already " +
             "created.")
    parser.add_option(
        "--swap", metavar="SWAP", type="int", default=1024,
        help="Swap space to set up per node, in MB (default: %default)")
    parser.add_option(
        "--spot-price", metavar="PRICE", type="float",
        help="If specified, launch slaves as spot instances with the given " +
             "maximum price (in dollars)")
    parser.add_option(
        "--ganglia", action="store_true", default=True,
        help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
             "the Ganglia page will be publicly accessible")
    parser.add_option(
        "--no-ganglia", action="store_false", dest="ganglia",
        help="Disable Ganglia monitoring for the cluster")
    parser.add_option(
        "-u", "--user", default="root",
        help="The SSH user you want to connect as (default: %default)")
    parser.add_option(
        "--delete-groups", action="store_true", default=False,
        help="When destroying a cluster, delete the security groups that were created")
    parser.add_option(
        "--use-existing-master", action="store_true", default=False,
        help="Launch fresh slaves, but use an existing stopped master if possible")
    parser.add_option(
        "--worker-instances", type="int", default=1,
        help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: %default)")
    parser.add_option(
        "--master-opts", type="string", default="",
        help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
             "(e.g -Dspark.worker.timeout=180)")
    parser.add_option(
        "--user-data", type="string", default="",
        help="Path to a user-data file (most AMI's interpret this as an initialization script)")
    parser.add_option(
        "--security-group-prefix", type="string", default=None,
        help="Use this prefix for the security group rather than the cluster name.")
    parser.add_option(
        "--authorized-address", type="string", default="0.0.0.0/0",
        help="Address to authorize on created security groups (default: %default)")
    parser.add_option(
        "--additional-security-group", type="string", default="",
        help="Additional security group to place the machines in")
    parser.add_option(
        "--copy-aws-credentials", action="store_true", default=False,
        help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
    parser.add_option(
        "--subnet-id", default=None,
        help="VPC subnet to launch instances in")
    parser.add_option(
        "--vpc-id", default=None,
        help="VPC to launch instances in")

    (opts, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    (action, cluster_name) = args

    # Boto config check
    # http://boto.cloudhackers.com/en/latest/boto_config_tut.html
    home_dir = os.getenv('HOME')
    if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
        if not os.path.isfile('/etc/boto.cfg'):
            if os.getenv('AWS_ACCESS_KEY_ID') is None:
                print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
                                  "must be set")
                sys.exit(1)
            if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
                print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
                                  "must be set")
                sys.exit(1)
    return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "Spark EC2 group", vpc_id)
def get_validate_spark_version(version, repo):
    """Return a validated Spark version string.

    A version containing "." is treated as a release number (after stripping
    a leading "v") and must appear in VALID_SPARK_VERSIONS. Anything else is
    treated as a git commit hash and validated by probing
    {repo}/commit/{hash} with an HTTP HEAD request. Exits the process when
    validation fails.
    """
    if "." in version:
        version = version.replace("v", "")
        if version not in VALID_SPARK_VERSIONS:
            print >> stderr, "Don't know about Spark version: {v}".format(v=version)
            sys.exit(1)
        return version
    else:
        github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
        request = urllib2.Request(github_commit_url)
        # HEAD is enough to check the commit exists without downloading it.
        request.get_method = lambda: 'HEAD'
        try:
            response = urllib2.urlopen(request)
        except urllib2.HTTPError, e:
            print >> stderr, "Couldn't validate Spark commit: {url}".format(url=github_commit_url)
            print >> stderr, "Received HTTP response code of {code}.".format(code=e.code)
            sys.exit(1)
        return version
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
    """Return True if the EC2 instance is in a state we consider active.

    "Active" means not terminating or terminated; stopping and stopped
    count as active since stopped clusters can be restarted.
    """
    return instance.state in ('pending', 'running', 'stopping', 'stopped')
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2014-06-20
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
# Maps EC2 instance type -> virtualization type ("pvm" or "hvm"); used by
# get_spark_ami() to pick the matching AMI from the ami-list repository.
EC2_INSTANCE_TYPES = {
    "c1.medium": "pvm",
    "c1.xlarge": "pvm",
    "c3.2xlarge": "pvm",
    "c3.4xlarge": "pvm",
    "c3.8xlarge": "pvm",
    "c3.large": "pvm",
    "c3.xlarge": "pvm",
    "cc1.4xlarge": "hvm",
    "cc2.8xlarge": "hvm",
    "cg1.4xlarge": "hvm",
    "cr1.8xlarge": "hvm",
    "hi1.4xlarge": "pvm",
    "hs1.8xlarge": "pvm",
    "i2.2xlarge": "hvm",
    "i2.4xlarge": "hvm",
    "i2.8xlarge": "hvm",
    "i2.xlarge": "hvm",
    "m1.large": "pvm",
    "m1.medium": "pvm",
    "m1.small": "pvm",
    "m1.xlarge": "pvm",
    "m2.2xlarge": "pvm",
    "m2.4xlarge": "pvm",
    "m2.xlarge": "pvm",
    "m3.2xlarge": "hvm",
    "m3.large": "hvm",
    "m3.medium": "hvm",
    "m3.xlarge": "hvm",
    "r3.2xlarge": "hvm",
    "r3.4xlarge": "hvm",
    "r3.8xlarge": "hvm",
    "r3.large": "hvm",
    "r3.xlarge": "hvm",
    "t1.micro": "pvm",
    "t2.medium": "hvm",
    "t2.micro": "hvm",
    "t2.small": "hvm",
}
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
def get_spark_ami(opts):
if opts.instance_type in EC2_INSTANCE_TYPES:
instance_type = EC2_INSTANCE_TYPES[opts.instance_type]
else:
instance_type = "pvm"
print >> stderr,\
"Don't recognize %s, assuming type is pvm" % opts.instance_type
# URL prefix from which to fetch AMI information
ami_prefix = "{r}/{b}/ami-list".format(
r=opts.spark_ec2_git_repo.replace("https://github.com", "https://raw.github.com", 1),
b=opts.spark_ec2_git_branch)
ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
try:
ami = urllib2.urlopen(ami_path).read().strip()
print "Spark AMI: " + ami
except:
print >> stderr, "Could not resolve AMI at: " + ami_path
sys.exit(1)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
sys.exit(1)
if opts.key_pair is None:
print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print "Setting up security groups..."
if opts.security_group_prefix is None:
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
else:
master_group = get_or_make_group(conn, opts.security_group_prefix + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, opts.security_group_prefix + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
if master_group.rules == []: # Group was just now created
if opts.vpc_id is None:
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
else:
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize('tcp', 22, 22, authorized_address)
master_group.authorize('tcp', 8080, 8081, authorized_address)
master_group.authorize('tcp', 18080, 18080, authorized_address)
master_group.authorize('tcp', 19999, 19999, authorized_address)
master_group.authorize('tcp', 50030, 50030, authorized_address)
master_group.authorize('tcp', 50070, 50070, authorized_address)
master_group.authorize('tcp', 60070, 60070, authorized_address)
master_group.authorize('tcp', 4040, 4045, authorized_address)
if opts.ganglia:
master_group.authorize('tcp', 5080, 5080, authorized_address)
if slave_group.rules == []: # Group was just now created
if opts.vpc_id is None:
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
else:
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize('tcp', 22, 22, authorized_address)
slave_group.authorize('tcp', 8080, 8081, authorized_address)
slave_group.authorize('tcp', 50060, 50060, authorized_address)
slave_group.authorize('tcp', 50075, 50075, authorized_address)
slave_group.authorize('tcp', 60060, 60060, authorized_address)
slave_group.authorize('tcp', 60075, 60075, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print >> stderr, ("ERROR: There are already instances running in " +
"group %s or %s" % (master_group.name, slave_group.name))
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
if opts.master_ami is None:
opts.master_ami = opts.ami
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
print "Launching instances..."
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.ami
sys.exit(1)
try:
master_image = conn.get_all_images(image_ids=[opts.master_ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.master_ami
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price is not None:
# Launch spot instances with the requested price
print ("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print "Waiting for spot instances to be granted..."
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print "All %d slaves granted" % opts.slaves
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print "%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves)
except:
print "Canceling spot instance requests"
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print >> stderr, ("WARNING: %d instances are still running" % running)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
slave_nodes += slave_res.instances
print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
zone, slave_res.id)
i += 1
# Launch or resume masters
if existing_masters:
print "Starting master..."
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = master_image.run(key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
master_nodes = master_res.instances
print "Launched master in %s, regid = %s" % (zone, master_res.id)
# This wait time corresponds to SPARK-4983
print "Waiting for AWS to propagate instance metadata..."
time.sleep(5)
# Give the instances descriptive names
for master in master_nodes:
master.add_tag(
key='Name',
value='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
for slave in slave_nodes:
slave.add_tag(
key='Name',
value='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
# Return all the instances
return (master_nodes, slave_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters and slaves
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
    """Return (master_nodes, slave_nodes) for an existing cluster.

    Instances are matched by membership in the "<cluster_name>-master" /
    "<cluster_name>-slaves" security groups; only active (non-terminated)
    instances are returned. Exits the process when no master is found,
    unless die_on_error is False.
    """
    print "Searching for existing cluster " + cluster_name + "..."
    reservations = conn.get_all_reservations()
    master_nodes = []
    slave_nodes = []
    for res in reservations:
        active = [i for i in res.instances if is_active(i)]
        for inst in active:
            group_names = [g.name for g in inst.groups]
            if (cluster_name + "-master") in group_names:
                master_nodes.append(inst)
            elif (cluster_name + "-slaves") in group_names:
                slave_nodes.append(inst)
    if any((master_nodes, slave_nodes)):
        print "Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes))
    if master_nodes != [] or not die_on_error:
        return (master_nodes, slave_nodes)
    else:
        if master_nodes == [] and slave_nodes != []:
            print >> sys.stderr, "ERROR: Could not find master in group " + cluster_name + "-master"
        else:
            print >> sys.stderr, "ERROR: Could not find any existing cluster"
        sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
    """Deploy configuration files and run setup scripts on a launched cluster.

    When deploy_ssh_key is true, generates a cluster-internal SSH key on the
    master and copies the master's ~/.ssh to every slave. Then clones the
    spark-ec2 scripts on the master, deploys templated config files there,
    and runs the top-level setup script.
    """
    master = master_nodes[0].public_dns_name
    if deploy_ssh_key:
        print "Generating cluster's SSH key on master..."
        key_setup = """
          [ -f ~/.ssh/id_rsa ] ||
            (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
             cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
        """
        ssh(master, opts, key_setup)
        dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
        print "Transferring cluster's SSH key to slaves..."
        for slave in slave_nodes:
            print slave.public_dns_name
            ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar)

    modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
               'mapreduce', 'spark-standalone', 'tachyon']

    # The mapreduce module is only needed for Hadoop 2+.
    if opts.hadoop_major_version == "1":
        modules = filter(lambda x: x != "mapreduce", modules)

    if opts.ganglia:
        modules.append('ganglia')

    # NOTE: We should clone the repository before running deploy_files to
    # prevent ec2-variables.sh from being overwritten
    print "Cloning spark-ec2 scripts from {r}/tree/{b} on master...".format(
        r=opts.spark_ec2_git_repo, b=opts.spark_ec2_git_branch)
    ssh(
        host=master,
        opts=opts,
        command="rm -rf spark-ec2"
        + " && "
        + "git clone {r} -b {b} spark-ec2".format(r=opts.spark_ec2_git_repo,
                                                  b=opts.spark_ec2_git_branch)
    )

    print "Deploying files to master..."
    deploy_files(
        conn=conn,
        root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
        opts=opts,
        master_nodes=master_nodes,
        slave_nodes=slave_nodes,
        modules=modules
    )

    print "Running setup on master..."
    setup_spark_cluster(master, opts)
    print "Done!"
def setup_spark_cluster(master, opts):
    """Run the spark-ec2 setup script on the master and print the UI URLs."""
    ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
    ssh(master, opts, "spark-ec2/setup.sh")
    print "Spark standalone cluster started at http://%s:8080" % master

    if opts.ganglia:
        print "Ganglia started at http://%s:5080/ganglia" % master
def is_ssh_available(host, opts, print_ssh_output=True):
    """
    Check if SSH is available on a host.

    Runs a trivial remote command ('true') with a short connect timeout and
    returns True when it exits successfully; on failure, optionally prints
    the captured SSH output.
    """
    s = subprocess.Popen(
        ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
                             '%s@%s' % (opts.user, host), stringify_command('true')],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT  # we pipe stderr through stdout to preserve output order
    )
    cmd_output = s.communicate()[0]  # [1] is stderr, which we redirected to stdout

    if s.returncode != 0 and print_ssh_output:
        # extra leading newline is for spacing in wait_for_cluster_state()
        print textwrap.dedent("""\n
        Warning: SSH connection error. (This could be temporary.)
        Host: {h}
        SSH return code: {r}
        SSH output: {o}
        """).format(
            h=host,
            r=s.returncode,
            o=cmd_output.strip()
        )

    return s.returncode == 0
def is_cluster_ssh_available(cluster_instances, opts):
    """
    Check if SSH is available on all the instances in a cluster.

    Returns False as soon as any instance is unreachable; True otherwise
    (including for an empty instance list).
    """
    for instance in cluster_instances:
        if not is_ssh_available(host=instance.ip_address, opts=opts):
            return False
    return True
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
    """
    Wait for all the instances in the cluster to reach a designated state.

    cluster_instances: a list of boto.ec2.instance.Instance
    cluster_state: a string representing the desired state of all the instances in the cluster
        value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
        'running', 'terminated', etc.
        (would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
    """
    sys.stdout.write(
        "Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
    )
    sys.stdout.flush()

    start_time = datetime.now()
    num_attempts = 0

    while True:
        time.sleep(5 * num_attempts)  # seconds; linear back-off between polls

        for i in cluster_instances:
            i.update()

        statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances])

        if cluster_state == 'ssh-ready':
            # 'ssh-ready' additionally requires both EC2 status checks to
            # pass and an actual SSH connection to every instance.
            if all(i.state == 'running' for i in cluster_instances) and \
               all(s.system_status.status == 'ok' for s in statuses) and \
               all(s.instance_status.status == 'ok' for s in statuses) and \
               is_cluster_ssh_available(cluster_instances, opts):
                break
        else:
            if all(i.state == cluster_state for i in cluster_instances):
                break

        num_attempts += 1

        sys.stdout.write(".")
        sys.stdout.flush()

    sys.stdout.write("\n")

    end_time = datetime.now()
    print "Cluster is now in '{s}' state. Waited {t} seconds.".format(
        s=cluster_state,
        t=(end_time - start_time).seconds
    )
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
    """Get number of local disks available for a given EC2 instance type.

    Unknown instance types fall back to 1 with a warning on stderr.
    """
    # Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
    # Last Updated: 2014-06-20
    # For easy maintainability, please keep this manually-inputted dictionary sorted by key.
    disks_by_instance = {
        "c1.medium": 1,
        "c1.xlarge": 4,
        "c3.2xlarge": 2,
        "c3.4xlarge": 2,
        "c3.8xlarge": 2,
        "c3.large": 2,
        "c3.xlarge": 2,
        "cc1.4xlarge": 2,
        "cc2.8xlarge": 4,
        "cg1.4xlarge": 2,
        "cr1.8xlarge": 2,
        "g2.2xlarge": 1,
        "hi1.4xlarge": 2,
        "hs1.8xlarge": 24,
        "i2.2xlarge": 2,
        "i2.4xlarge": 4,
        "i2.8xlarge": 8,
        "i2.xlarge": 1,
        "m1.large": 2,
        "m1.medium": 1,
        "m1.small": 1,
        "m1.xlarge": 4,
        "m2.2xlarge": 1,
        "m2.4xlarge": 2,
        "m2.xlarge": 1,
        "m3.2xlarge": 2,
        "m3.large": 1,
        "m3.medium": 1,
        "m3.xlarge": 2,
        "r3.2xlarge": 1,
        "r3.4xlarge": 1,
        "r3.8xlarge": 2,
        "r3.large": 1,
        "r3.xlarge": 1,
        "t1.micro": 0,
    }
    try:
        return disks_by_instance[instance_type]
    except KeyError:
        print >> stderr, ("WARNING: Don't know number of disks on instance type %s; assuming 1"
                          % instance_type)
        return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
    """Render the templates under root_dir and rsync them to the first master.

    Template placeholders of the form {{key}} are replaced with cluster
    information (node lists, data dirs, Spark version, credentials, ...).
    Rendering happens in a local temp directory which is rsynced to "/" on
    the master and then removed.
    """
    active_master = master_nodes[0].public_dns_name

    # One data/scratch directory per ephemeral disk, comma-separated for the
    # Hadoop/Spark config files.
    num_disks = get_num_disks(opts.instance_type)
    hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
    mapred_local_dirs = "/mnt/hadoop/mrlocal"
    spark_local_dirs = "/mnt/spark"
    if num_disks > 1:
        for i in range(2, num_disks + 1):
            hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
            mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
            spark_local_dirs += ",/mnt%d/spark" % i

    cluster_url = "%s:7077" % active_master

    if "." in opts.spark_version:
        # Pre-built Spark deploy
        spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
    else:
        # Spark-only custom deploy: "repo|commit" is parsed by the setup scripts.
        spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)

    template_vars = {
        "master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
        "active_master": active_master,
        "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
        "cluster_url": cluster_url,
        "hdfs_data_dirs": hdfs_data_dirs,
        "mapred_local_dirs": mapred_local_dirs,
        "spark_local_dirs": spark_local_dirs,
        "swap": str(opts.swap),
        "modules": '\n'.join(modules),
        "spark_version": spark_v,
        "hadoop_major_version": opts.hadoop_major_version,
        "spark_worker_instances": "%d" % opts.worker_instances,
        "spark_master_opts": opts.master_opts
    }

    if opts.copy_aws_credentials:
        template_vars["aws_access_key_id"] = conn.aws_access_key_id
        template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
    else:
        template_vars["aws_access_key_id"] = ""
        template_vars["aws_secret_access_key"] = ""

    # Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
    tmp_dir = tempfile.mkdtemp()
    for path, dirs, files in os.walk(root_dir):
        if path.find(".svn") == -1:
            # Mirror root_dir's layout under tmp_dir, rooted at "/".
            dest_dir = os.path.join('/', path[len(root_dir):])
            local_dir = tmp_dir + dest_dir
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)
            for filename in files:
                # Skip editor temp/backup files (emacs '#'/'~', dotfiles).
                if filename[0] not in '#.~' and filename[-1] != '~':
                    dest_file = os.path.join(dest_dir, filename)
                    local_file = tmp_dir + dest_file
                    with open(os.path.join(path, filename)) as src:
                        with open(local_file, "w") as dest:
                            text = src.read()
                            for key in template_vars:
                                text = text.replace("{{" + key + "}}", template_vars[key])
                            dest.write(text)
                            dest.close()  # NOTE: redundant -- the with-block closes it anyway

    # rsync the whole directory over to the master machine
    command = [
        'rsync', '-rv',
        '-e', stringify_command(ssh_command(opts)),
        "%s/" % tmp_dir,
        "%s@%s:/" % (opts.user, active_master)
    ]
    subprocess.check_call(command)
    # Remove the temp directory we created above
    shutil.rmtree(tmp_dir)
def stringify_command(parts):
    """Render a command (plain string or argv list) as one shell-quoted string."""
    if isinstance(parts, str):
        return parts
    return ' '.join(pipes.quote(part) for part in parts)
def ssh_args(opts):
    """Common ssh options: skip host-key checks, plus -i when an identity file is set."""
    args = ['-o', 'StrictHostKeyChecking=no',
            '-o', 'UserKnownHostsFile=/dev/null']
    if opts.identity_file is not None:
        args += ['-i', opts.identity_file]
    return args
def ssh_command(opts):
    """Full ssh argv: the 'ssh' binary followed by the common options."""
    cmd = ['ssh']
    cmd.extend(ssh_args(opts))
    return cmd
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
    """Run *command* on *host* via ssh.

    Retries with a 30-second pause between attempts; after the retries are
    exhausted, raises UsageError for exit code 255 (ssh connection failure)
    or re-raises the CalledProcessError otherwise.
    """
    tries = 0
    while True:
        try:
            return subprocess.check_call(
                ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
                                     stringify_command(command)])
        except subprocess.CalledProcessError as e:
            if tries > 5:
                # If this was an ssh failure, provide the user with hints.
                if e.returncode == 255:
                    # BUG FIX: .format(host) previously bound only to the LAST
                    # string literal in the "+"-concatenation, so "{0}" was
                    # printed verbatim instead of the hostname. Parenthesize
                    # the whole message before formatting.
                    raise UsageError(
                        ("Failed to SSH to remote host {0}.\n"
                         "Please check that you have provided the correct --identity-file and "
                         "--key-pair parameters and try again.").format(host))
                else:
                    raise e
            print >> stderr, \
                "Error executing remote command, retrying after 30 seconds: {0}".format(e)
            time.sleep(30)
            tries = tries + 1
# Backported from Python 2.7 for compatiblity with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
    """Run *command* on *host* over ssh and return its captured stdout."""
    remote_argv = ssh_command(opts) + ['%s@%s' % (opts.user, host),
                                       stringify_command(command)]
    return _check_output(remote_argv)
def ssh_write(host, opts, command, arguments):
    """Run *command* on *host* over ssh, feeding *arguments* to its stdin.

    Retries with 30-second pauses; raises RuntimeError once more than five
    attempts have failed.
    """
    attempt = 0
    while True:
        proc = subprocess.Popen(
            ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
            stdin=subprocess.PIPE)
        proc.stdin.write(arguments)
        proc.stdin.close()
        status = proc.wait()
        if status == 0:
            break
        if attempt > 5:
            raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
        print >> stderr, \
            "Error {0} while executing remote command, retrying after 30 seconds".format(status)
        time.sleep(30)
        attempt += 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
    """Return the availability zones to use: every zone in the region when
    opts.zone is 'all', otherwise just the requested zone."""
    if opts.zone == 'all':
        return [z.name for z in conn.get_all_zones()]
    return [opts.zone]
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
    """Return how many of *total* items this partition should get.

    Every partition gets total // num_partitions items; the first
    (total % num_partitions) partitions get one extra to absorb the
    remainder.
    """
    # Use floor division explicitly: "/" on ints is integer division under
    # Python 2 but true (float) division under Python 3, which would make
    # this silently return floats there. "//" behaves identically on both.
    num_slaves_this_zone = total // num_partitions
    if (total % num_partitions) - current_partitions > 0:
        num_slaves_this_zone += 1
    return num_slaves_this_zone
def real_main():
    """Main driver: parse CLI args, validate them, then dispatch on the action
    (launch / destroy / login / reboot-slaves / get-master / stop / start).
    """
    (opts, action, cluster_name) = parse_args()

    # Input parameter validation
    get_validate_spark_version(opts.spark_version, opts.spark_git_repo)

    if opts.wait is not None:
        # NOTE: DeprecationWarnings are silent in 2.7+ by default.
        # To show them, run Python with the -Wdefault switch.
        # See: https://docs.python.org/3.5/whatsnew/2.7.html
        warnings.warn(
            "This option is deprecated and has no effect. "
            "spark-ec2 automatically waits as long as necessary for clusters to start up.",
            DeprecationWarning
        )

    # The identity file must exist and be private to the current user
    # (mode x00, e.g. 400/600), as required by ssh itself.
    if opts.identity_file is not None:
        if not os.path.exists(opts.identity_file):
            print >> stderr,\
                "ERROR: The identity file '{f}' doesn't exist.".format(f=opts.identity_file)
            sys.exit(1)
        file_mode = os.stat(opts.identity_file).st_mode
        if not (file_mode & S_IRUSR) or not oct(file_mode)[-2:] == '00':
            print >> stderr, "ERROR: The identity file must be accessible only by you."
            print >> stderr, 'You can fix this with: chmod 400 "{f}"'.format(f=opts.identity_file)
            sys.exit(1)

    if opts.instance_type not in EC2_INSTANCE_TYPES:
        print >> stderr, "Warning: Unrecognized EC2 instance type for instance-type: {t}".format(
            t=opts.instance_type)

    if opts.master_instance_type != "":
        if opts.master_instance_type not in EC2_INSTANCE_TYPES:
            print >> stderr, \
                "Warning: Unrecognized EC2 instance type for master-instance-type: {t}".format(
                    t=opts.master_instance_type)
        # Since we try instance types even if we can't resolve them, we check if they resolve first
        # and, if they do, see if they resolve to the same virtualization type.
        if opts.instance_type in EC2_INSTANCE_TYPES and \
           opts.master_instance_type in EC2_INSTANCE_TYPES:
            if EC2_INSTANCE_TYPES[opts.instance_type] != \
               EC2_INSTANCE_TYPES[opts.master_instance_type]:
                print >> stderr, \
                    "Error: spark-ec2 currently does not support having a master and slaves " + \
                    "with different AMI virtualization types."
                print >> stderr, "master instance virtualization type: {t}".format(
                    t=EC2_INSTANCE_TYPES[opts.master_instance_type])
                print >> stderr, "slave instance virtualization type: {t}".format(
                    t=EC2_INSTANCE_TYPES[opts.instance_type])
                sys.exit(1)

    if opts.ebs_vol_num > 8:
        print >> stderr, "ebs-vol-num cannot be greater than 8"
        sys.exit(1)

    # Prevent breaking ami_prefix (/, .git and startswith checks)
    # Prevent forks with non spark-ec2 names for now.
    if opts.spark_ec2_git_repo.endswith("/") or \
            opts.spark_ec2_git_repo.endswith(".git") or \
            not opts.spark_ec2_git_repo.startswith("https://github.com") or \
            not opts.spark_ec2_git_repo.endswith("spark-ec2"):
        print >> stderr, "spark-ec2-git-repo must be a github repo and it must not have a " \
            "trailing / or .git. " \
            "Furthermore, we currently only support forks named spark-ec2."
        sys.exit(1)

    try:
        conn = ec2.connect_to_region(opts.region)
    except Exception as e:
        print >> stderr, (e)
        sys.exit(1)

    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name

    if action == "launch":
        if opts.slaves <= 0:
            print >> sys.stderr, "ERROR: You have to start at least 1 slave"
            sys.exit(1)
        if opts.resume:
            (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        else:
            (master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
        wait_for_cluster_state(
            conn=conn,
            opts=opts,
            cluster_instances=(master_nodes + slave_nodes),
            cluster_state='ssh-ready'
        )
        setup_cluster(conn, master_nodes, slave_nodes, opts, True)

    elif action == "destroy":
        print "Are you sure you want to destroy the cluster %s?" % cluster_name
        print "The following instances will be terminated:"
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name, die_on_error=False)
        for inst in master_nodes + slave_nodes:
            print "> %s" % inst.public_dns_name
        msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
        response = raw_input(msg)
        if response == "y":
            print "Terminating master..."
            for inst in master_nodes:
                inst.terminate()
            print "Terminating slaves..."
            for inst in slave_nodes:
                inst.terminate()
            # Delete security groups as well
            if opts.delete_groups:
                print "Deleting security groups (this will take some time)..."
                group_names = [cluster_name + "-master", cluster_name + "-slaves"]
                # Groups can only be removed once the instances are gone.
                wait_for_cluster_state(
                    conn=conn,
                    opts=opts,
                    cluster_instances=(master_nodes + slave_nodes),
                    cluster_state='terminated'
                )
                attempt = 1
                while attempt <= 3:
                    print "Attempt %d" % attempt
                    groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
                    success = True
                    # Delete individual rules in all groups before deleting groups to
                    # remove dependencies between them
                    for group in groups:
                        print "Deleting rules in security group " + group.name
                        for rule in group.rules:
                            for grant in rule.grants:
                                success &= group.revoke(ip_protocol=rule.ip_protocol,
                                                        from_port=rule.from_port,
                                                        to_port=rule.to_port,
                                                        src_group=grant)
                    # Sleep for AWS eventual-consistency to catch up, and for instances
                    # to terminate
                    time.sleep(30)  # Yes, it does have to be this long :-(
                    for group in groups:
                        try:
                            conn.delete_security_group(group.name)
                            print "Deleted security group " + group.name
                        except boto.exception.EC2ResponseError:
                            success = False
                            print "Failed to delete security group " + group.name
                    # Unfortunately, group.revoke() returns True even if a rule was not
                    # deleted, so this needs to be rerun if something fails
                    if success:
                        break
                    attempt += 1
                if not success:
                    print "Failed to delete all security groups after 3 tries."
                    print "Try re-running in a few minutes."

    elif action == "login":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        master = master_nodes[0].public_dns_name
        print "Logging into master " + master + "..."
        proxy_opt = []
        if opts.proxy_port is not None:
            # -D sets up dynamic (SOCKS) port forwarding through the master.
            proxy_opt = ['-D', opts.proxy_port]
        subprocess.check_call(
            ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])

    elif action == "reboot-slaves":
        response = raw_input(
            "Are you sure you want to reboot the cluster " +
            cluster_name + " slaves?\n" +
            "Reboot cluster slaves " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Rebooting slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    print "Rebooting " + inst.id
                    inst.reboot()

    elif action == "get-master":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print master_nodes[0].public_dns_name

    elif action == "stop":
        response = raw_input(
            "Are you sure you want to stop the cluster " +
            cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
            "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
            "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
            "All data on spot-instance slaves will be lost.\n" +
            "Stop cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            print "Stopping master..."
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print "Stopping slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    # Spot instances cannot be stopped, only terminated.
                    if inst.spot_instance_request_id:
                        inst.terminate()
                    else:
                        inst.stop()

    elif action == "start":
        (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print "Starting slaves..."
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print "Starting master..."
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        wait_for_cluster_state(
            conn=conn,
            opts=opts,
            cluster_instances=(master_nodes + slave_nodes),
            cluster_state='ssh-ready'
        )
        setup_cluster(conn, master_nodes, slave_nodes, opts, False)

    else:
        print >> stderr, "Invalid action: %s" % action
        sys.exit(1)
def main():
    """Entry point: run real_main() and turn a UsageError into a clean exit(1)."""
    try:
        real_main()
    # Python 2 "except X, e" syntax; UsageError is defined elsewhere in this file.
    except UsageError, e:
        print >> stderr, "\nError:\n", e
        sys.exit(1)
if __name__ == "__main__":
    # Configure root logging with defaults before any code emits log records.
    logging.basicConfig()
    main()
| |
import os
import sys
import time
from fabric.api import env, local, sudo
from fabric.colors import blue, cyan, green, magenta, red, white, yellow # noqa
from fabric.context_managers import settings as fabric_settings
from fabric.contrib.files import exists
from fabric.decorators import task
from fabric.utils import abort
from jinja2 import Template
from bootmachine import known_hosts
import settings
@task
def configure(match="'*'"):
    """
    Run salt state.highstate on hosts that match.
    Usage:
        fab master configurator.configure
    """
    # Alias for highstate(): configuring a host is applying its highstate.
    highstate(match)
@task
def highstate(match="'*'"):
    """
    Run salt state.highstate on hosts that match.
    Usage:
        fab master configurator.highstate
    """
    if env.host != env.master_server.public_ip:
        abort("tried to highstate on a non-master server")
    salt_cmd = "salt {regex} state.highstate".format(regex=match)
    sudo(salt_cmd)
@task
def upload_saltstates():
    """
    Upload the salt states and pillars to the master.
    It's a little more complicated than it needs to be,
    because we have to rsync as root and root login is disabled.
    Usage:
        fab master configurator.upload_saltstates
    """
    if env.host != env.master_server.public_ip:
        abort("tried to upload salttates on a non-master server")
    if not exists(settings.REMOTE_STATES_DIR, use_sudo=True):
        sudo("mkdir --parents {0}".format(settings.REMOTE_STATES_DIR))
    # catch rsync issue with emacs autosave files ("#*#" and ".#*" patterns)
    temp_files = []
    for path in (settings.LOCAL_STATES_DIR, settings.LOCAL_PILLARS_DIR):
        for match in ("#*#", ".#*"):
            temp_file = local('find {0} -name "{1}"'.format(path, match), capture=True)
            if temp_file:
                temp_files.append(temp_file)
    if temp_files:
        # Refuse to upload rather than sync editor droppings to the master.
        print(red("Temp files must not exist in the saltstates or pillars dirs."))
        for temp_file in temp_files:
            print(yellow("found: {0}".format(temp_file)))
        abort("Found temp files in the saltstates or pillars dirs.")
    # rsync pillar and salt files to the fabric users local directory;
    # --rsync-path="sudo rsync" performs the remote write as root.
    local('rsync -a -e "ssh -p {0}" --rsync-path="sudo rsync" {1} {2}@{3}:{4}'.format(
        env.port, settings.LOCAL_STATES_DIR, env.user, env.host, settings.REMOTE_STATES_DIR))
    local('rsync -a -e "ssh -p {0}" --rsync-path="sudo rsync" {1} {2}@{3}:{4}'.format(
        env.port, settings.LOCAL_PILLARS_DIR, env.user, env.host, settings.REMOTE_PILLARS_DIR))
@task
def pillar_update():
    """
    Regenerate the bootmachine pillar from its Jinja2 template with the
    current server info, push it to the salt master, and refresh pillars.
    Usage:
        fab master configuration salt.pillar_update
    """
    if env.host != env.master_server.public_ip:
        abort("tried to pillar_update on a non-master server")
    local_pillars_dir = settings.LOCAL_PILLARS_DIR
    remote_pillars_dir = settings.REMOTE_PILLARS_DIR
    if not exists(remote_pillars_dir, use_sudo=True):
        sudo("mkdir --parents {0}".format(remote_pillars_dir))
    # Render bootmachine.sls.j2 -> bootmachine.sls locally.
    # NOTE(review): open(..., "r"/"w", 0) requests unbuffered I/O, which
    # Python 3 rejects in text mode -- confirm this is Python-2-only code.
    bootmachine_sls_j2 = Template(
        open(os.path.join(local_pillars_dir, "bootmachine.sls.j2"), "r", 0).read())
    bootmachine_sls = open(os.path.join(local_pillars_dir, "bootmachine.sls"), "w", 0)
    bootmachine_sls.write(bootmachine_sls_j2.render(
        bootmachine_servers=env.bootmachine_servers,
        salt_remote_states_dir=settings.REMOTE_STATES_DIR,
        salt_remote_pillars_dir=remote_pillars_dir,
        saltmaster_hostname=settings.MASTER,
        saltmaster_public_ip=env.master_server.public_ip,
        saltmaster_private_ip=env.master_server.private_ip,
        ssh_port=settings.SSH_PORT,
        ssh_users=settings.SSH_USERS,
    ))
    # TODO: only upload and refresh when file has changes
    # Stage the file somewhere the fabric user can scp to (home dir or /tmp/).
    home_dir = local("eval echo ~${0}".format(env.user), capture=True)
    if exists(home_dir, use_sudo=True):
        scp_dir = home_dir
    else:
        scp_dir = "/tmp/"
    try:
        local("scp -P {0} {1} {2}@{3}:{4}".format(
            env.port,
            os.path.join(local_pillars_dir, "bootmachine.sls"),
            env.user,
            env.host,
            os.path.join(scp_dir, "bootmachine.sls")))
    except:
        # NOTE(review): bare except assumes the failure was an unknown host
        # key; after refreshing known_hosts the scp is retried once.
        known_hosts.update(env.host)
        # NOTE(review): this retry substitutes scp_dir into "~${4}", i.e. a
        # *user* expansion, unlike the path join above -- looks wrong; verify.
        local("scp -P {0} {1} {2}@{3}:$(eval echo ~${4})bootmachine.sls".format(
            env.port,
            os.path.join(local_pillars_dir, "bootmachine.sls"),
            env.user,
            env.host,
            scp_dir))
    # Move the staged file into the remote pillar dir as root.
    sudo("mv {0} {1}".format(
        os.path.join(scp_dir, "bootmachine.sls"),
        os.path.join(remote_pillars_dir, "bootmachine.sls")))
    sudo("salt '*' saltutil.refresh_pillar &")  # background because it hangs on debian 6
@task
def update_master_iptables():
    """
    Update iptables rules for salt, on the salt master,
    to accept newly booted minions.
    Usage:
        fab master configurator.update_master_iptables
    """
    if env.host != env.master_server.public_ip:
        abort("tried to update_master_iptables on a non-master server")
    configurator_ports = ["4505", "4506"]  # get from settings.py?
    # Get the line in the iptables chain for inserting the new minion's rules.
    with fabric_settings(warn_only=True):
        insert_line = sudo("iptables -L --line-numbers | grep {0}".format(configurator_ports[0]))
    if not insert_line:
        print(yellow("NOTE: iptables are wide open during first boot of a master"))
        return
    # BUG FIX: the rule number is the first whitespace-delimited field of the
    # grep output, not its first character. "insert_line[0]" only worked for
    # single-digit rule numbers; for rule 10+ it inserted at the wrong position.
    insert_position = insert_line.split()[0]
    for port in configurator_ports:
        match = sudo("iptables -nvL | grep {0}".format(port))
        for server in env.bootmachine_servers:
            if server.private_ip not in match:
                sudo("iptables -I INPUT {0} -s {1} -m state --state new -m tcp -p tcp \
                     --dport {2} -j ACCEPT".format(insert_position, server.private_ip, port))
@task
def launch():
    """
    After the salt packages are installed, accept the new minions,
    upload states.
    Usage:
        fab master configurator.launch
    """
    if env.host != env.master_server.public_ip:
        abort("tried to launch on a non-master server")
    upload_saltstates()
    # add ipaddresses from the newly booted servers to the pillar and update
    # (run via a fresh `fab` process -- presumably so env mutations made by
    # that task don't leak into this session; verify)
    local("fab master configurator.update_master_iptables")
    pillar_update()
    time.sleep(10)  # sleep a little to give minions a chance to become visible
    accept_minions()
    local("fab master configurator.restartall")
@task
def accept_minions():
    """
    Accept salt-key's for all minions, retrying (with a salt restart) until
    every server in settings.SERVERS is accepted or 5 minutes have passed.
    Usage:
        fab master configurator.accept_minions
    """
    if env.host != env.master_server.public_ip:
        abort("tried to accept minions on a non-master server")

    def __get_accepted_minions():
        """TODO: remove when all distros support salt 0.10.5"""
        # NOTE: eval() of command output is trusted here because the master is
        # under our control; the broad except is a deliberate fallback to the
        # pre-0.10.5 salt-key flag spelling.
        try:
            accepted = eval(sudo("salt-key --yes --out raw --list acc"))
        except:
            accepted = eval(sudo("salt-key --raw-out --list acc"))
        # isinstance() instead of type() == dict: same behavior, idiomatic.
        if isinstance(accepted, dict):
            return accepted["minions"]
        return accepted  # support salt version < 0.10.5

    minions = __get_accepted_minions()
    slept = 0
    while len(minions) != len(settings.SERVERS):
        unaccepted = [s["servername"] for s in settings.SERVERS if s["servername"] not in minions]
        with fabric_settings(warn_only=True):
            for server in unaccepted:
                sudo("salt-key --quiet --accept={0} --yes".format(server))
        minions = __get_accepted_minions()
        if len(minions) != len(settings.SERVERS):
            local("fab master configurator.restartall")
            time.sleep(5)
            slept += 5
            # (typo "unaccpeted" fixed in both user-facing messages below)
            print(yellow("there are still unaccepted keys, trying again."))
            if slept > 300:
                abort("After 5 minutes of attempts, there still exist unaccepted keys.")
    print(green("all keys have been accepted."))
@task
def list_minions():
    """
    List all minions (accepted, unaccepted and rejected keys).
    Usage:
        fab master configurator.list_minions
    """
    on_master = env.host == env.master_server.public_ip
    if not on_master:
        abort("tried to list minions from a non-master server")
    sudo("salt-key --list all")
@task
def change_master_ip(ip_address):
    """
    TODO: allow changing of the master ip, e.g. on a master-only rebuild.
    """
    raise NotImplementedError()
@task
def restartall():
    """
    Restart first the salt master, then all minion daemons, and finally
    point the fabric env back at the master.
    Usage:
        fab master configurator.restartall
    """
    if env.host != env.master_server.public_ip:
        abort("tried to restartall from a non-master server")
    for server in env.bootmachine_servers:
        stop(__getdistro_setenv(server.name))
    time.sleep(5)
    start(__getdistro_setenv(env.master_server.name))
    for server in env.bootmachine_servers:
        if server.name != env.master_server.name:
            start(__getdistro_setenv(server.name))
    # Restore the fabric env to the master; __getdistro_setenv() left it
    # pointing at whichever server was (re)started last.
    env.servername = env.master_server.name
    # BUG FIX: env.host previously leaked the loop variable (the last minion's
    # public_ip) while every sibling field was set from env.master_server,
    # leaving env.host inconsistent with env.host_string/env.hosts.
    env.host = env.master_server.public_ip
    env.host_string = "{0}:{1}".format(env.master_server.public_ip,
                                       env.master_server.port)
    env.hosts = [env.master_server.public_ip]
    env.port = env.master_server.port
    env.user = env.master_server.user
def revoke(servername):
    """Revoke (delete) a single minion's key, identified by *servername*."""
    delete_cmd = "salt-key --quiet --yes --delete={0}".format(servername)
    sudo(delete_cmd)
def install(distro):
    """
    Install salt.
    Delegates to the distro module's install method, passing the installer
    configured for that distro in settings.
    """
    installer_name = "SALT_INSTALLER_{0}".format(distro.DISTRO)
    distro.install_salt(getattr(settings, installer_name))
def setup(distro):
"""
Setup salt's configuration files and ensure it is enabled at reboot.
Simply wrap salt's setup method for the chosen distro.
"""
distro.setup_salt()
def start(distro):
    """
    Start the salt master and minion daemons, by delegating to the chosen
    distro module's start method.
    """
    distro.start_salt()
def stop(distro):
    """
    Stop the salt master and minion daemons, by delegating to the chosen
    distro module's stop method.
    """
    distro.stop_salt()
def __getdistro_setenv(servername):
    """
    Point the fabric env at *servername* and return its distro module.

    Looks the server up in env.bootmachine_servers, rewrites the fabric
    connection globals, then imports and returns the distro module that
    settings.SERVERS maps to that server.
    """
    matches = [s for s in env.bootmachine_servers if s.name == servername]
    server = matches[0]
    env.servername = server.name
    env.host = server.public_ip
    env.host_string = "{0}:{1}".format(server.public_ip, server.port)
    env.hosts = [server.public_ip]
    env.port = server.port
    env.user = server.user
    entries = [s for s in settings.SERVERS if s["servername"] == server.name]
    distro_module = entries[0]["distro_module"]
    try:
        __import__(distro_module)
        return sys.modules[distro_module]
    except ImportError:
        abort("Unable to import the module: {0}".format(distro_module))
| |
# from JumpScale import j
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
import gzip
import os
import tarfile
import sys
import shutil
import tempfile
import platform
import subprocess
import time
#how to import without extensions (jumpscale is not required)
#from core.installtools import InstallTools
class InstallTools():
    """
    Self-contained helpers used by the JumpScale bootstrap to download,
    unpack and install the minimal core without importing JumpScale itself.

    # Attributes set in __init__:
    #   TYPE  -- "WIN" or "LINUX"; platform switch used throughout
    #   BASE  -- installation root directory
    #   TMP   -- scratch directory for downloads/expansion
    #   debug -- when True, file operations print what they do
    """
    def __init__(self):
        # Select platform-specific base and temp paths.
        if platform.system().lower()=="windows":
            self.TYPE="WIN"
            self.BASE="%s/"%os.environ["JBASE"].replace("\\","/")
            # Normalize to exactly one trailing slash.
            while self.BASE[-1]=="/":
                self.BASE=self.BASE[:-1]
            self.BASE+="/"
            self.TMP=tempfile.gettempdir().replace("\\","/")
        else:
            self.TYPE="LINUX"
            # NOTE(review): "/opt/qself.BASE6" looks like a bad search/replace
            # of an original literal (presumably "/opt/qbase6") -- verify.
            self.BASE="/opt/qself.BASE6"
            self.TMP="/tmp"
        self.debug=False

    def enableQshell(self):
        # No-op placeholder.
        pass
        #@todo

    def download(self,url,to):
        """Download *url* to the path *to* in 1 KiB chunks (cwd moves to TMP)."""
        os.chdir(self.TMP)
        print('Downloading %s ' % (url))
        handle = urlopen(url)
        with open(to, 'wb') as out:
            while True:
                data = handle.read(1024)
                if len(data) == 0: break
                out.write(data)
        handle.close()
        # NOTE(review): redundant -- the with-block already closed the file.
        out.close()

    def chdir(seld,ddir):
        """Change the current working directory to *ddir*.

        NOTE(review): "seld" is a typo for "self" (harmless, but confusing).
        """
        os.chdir(ddir)

    # def execute(self,command, timeout=60,tostdout=True):
    #     try:
    #         proc = subprocess.Popen(command, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
    #     except Exception,e:
    #         raise RuntimeError("Cannot execute cmd:%s, could not launch process, error was %s"%(command,e))
    #     poll_seconds = .250
    #     deadline = time.time()+timeout
    #     while time.time() < deadline and proc.poll() == None:
    #         time.sleep(poll_seconds)
    #     if proc.poll() == None:
    #         if float(sys.version[:3]) >= 2.6:
    #             proc.terminate()
    #         raise RuntimeError("Cannot execute cmd:%s, timeout"%(command))
    #     stdout, stderr = proc.communicate()
    #     if stdout.strip()=="":
    #         stdout=stderr
    #     if proc.returncode<>0:
    #         raise RuntimeError("Cannot execute cmd:%s, error was %s"%(command,stderr))
    #     return stdout

    def log(self,msg,level=0):
        # Logging is disabled in the bootstrap; kept as a hook.
        # print(msg)
        pass

    def isUnix(self):
        """Return True when sys.platform contains "linux"."""
        if sys.platform.lower().find("linux")!=-1:
            return True
        return False

    def isWindows(self):
        # NOTE(review): this is broken -- it tests whether "linux" appears at
        # index 1 of sys.platform, so it essentially never returns True on
        # Windows. Probably meant something like find("win")!=-1.
        if sys.platform.lower().find("linux")==1:
            return True
        return False

    def execute(self, command , dieOnNonZeroExitCode=True, outputToStdout=True, useShell = False, ignoreErrorOutput=False):
        """Executes a command, returns the exitcode and the output
        @param command: command to execute
        @param dieOnNonZeroExitCode: boolean to die if got non zero exitcode
        @param outputToStdout: boolean to show/hide output to stdout
        @param ignoreErrorOutput standard stderror is added to stdout in out result, if you want to make sure this does not happen put on True
        @rtype: integer represents the exitcode plus the output of the executed command
        if exitcode is not zero then the executed command returned with errors
        """
        # Since python has no non-blocking readline() call, we implement it ourselves
        # using the following private methods.
        #
        # We choose for line buffering, i.e. whenever we receive a full line of output (terminated by \n)
        # on stdout or stdin of the child process, we log it
        #
        # When the process terminates, we log the final lines (and add a \n to them)
        self.log("exec:%s" % command)

        def _logentry(entry):
            # NOTE(review): takes one argument, but every call site below
            # passes two (_logentry(x,3) / _logentry(x,4)); those paths would
            # raise TypeError if reached.
            if outputToStdout:
                self.log(entry)

        def _splitdata(data):
            """ Split data in pieces separated by \n """
            lines = data.split("\n")
            return lines[:-1], lines[-1]

        def _logoutput(data, OUT_LINE, ERR_LINE):
            # Accumulate partial stdout lines; flush complete lines to the log.
            [lines, partialline] = _splitdata(data)
            if lines:
                lines[0] = OUT_LINE + lines[0]
            else:
                partialline = OUT_LINE + partialline
            OUT_LINE = ""
            if partialline:
                OUT_LINE = partialline
            for x in lines:
                _logentry(x,3)
            return OUT_LINE, ERR_LINE

        def _logerror(data, OUT_LINE, ERR_LINE):
            # Same as _logoutput but for stderr data.
            [lines, partialline] = _splitdata(data)
            if lines:
                lines[0] = ERR_LINE + lines[0]
            else:
                partialline = ERR_LINE + partialline
            ERR_LINE = ""
            if partialline:
                ERR_LINE = partialline
            for x in lines:
                _logentry(x,4)
            return OUT_LINE, ERR_LINE

        def _flushlogs(OUT_LINE, ERR_LINE):
            """ Called when the child process closes. We need to get the last
            non-\n terminated pieces of the stdout and stderr streams
            """
            if OUT_LINE:
                _logentry(OUT_LINE,3)
            if ERR_LINE:
                _logentry(ERR_LINE,4)

        if command is None:
            raise ValueError('Error, cannot execute command not specified')
        try:
            import errno
            if self.isUnix():
                import subprocess
                import signal
                try:
                    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
                except Exception as ex:
                    # NOTE(review): "selflog" is a typo for "self.log" -- this
                    # handler would itself raise NameError if ever triggered.
                    selflog('failed to set child signal, error %s'%ex, 2)
                childprocess = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True, env=os.environ)
                (output,error) = childprocess.communicate()
                exitcode = childprocess.returncode
            elif self.isWindows():
                # NOTE(review): effectively unreachable because isWindows() is
                # broken (see above).
                import subprocess, win32pipe, msvcrt, pywintypes
                # For some awkward reason you need to include the stdin pipe, or you get an error deep inside
                # the subprocess module if you use QRedirectStdOut in the calling script
                # We do not use the stdin.
                childprocess = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=False, shell=useShell, env=os.environ)
                output = ""; OUT_LINE = ""; ERR_LINE = ""
                childRunning = True
                while childRunning:
                    stdoutData = childprocess.stdout.readline()  # The readline method will block until data is received on stdout, or the stdout pipe has been destroyed. (Will return empty string)
                    # Only call processes that release their stdout pipe when exiting, otherwise the method will not return when the process completed.
                    # When the called process starts another process and marks its handle of the stdout pipe as inheritable, the pipe will not be destroyed before both processes end.
                    if stdoutData != '':
                        output = output + stdoutData
                        (OUT_LINE, ERR_LINE) = _logoutput(stdoutData, OUT_LINE, ERR_LINE)
                    else:  # Did not read any data on channel
                        if childprocess.poll() != None:  # Will return a number if the process has ended, or None if it's running.
                            childRunning = False
                exitcode = childprocess.returncode
                error = "Error output redirected to stdout."
            else:
                raise RuntimeError("Non supported OS for self.execute()")
        except Exception as e:
            print "ERROR IN EXECUTION, SHOULD NOT GET HERE."
            raise
        if exitcode!=0 or error!="":
            self.log(" Exitcode:%s\nOutput:%s\nError:%s\n" % (exitcode, output, error), 5)
            if ignoreErrorOutput!=True:
                output="%s\n***ERROR***\n%s\n" % (output,error)
        if exitcode !=0 and dieOnNonZeroExitCode:
            self.log("command: [%s]\nexitcode:%s\noutput:%s\nerror:%s" % (command, exitcode, output, error), 3)
            raise RuntimeError("Error during execution! (system.process.execute())\n\nCommand: [%s]\n\nExitcode: %s\n\nProgram output:\n%s\n\nErrormessage:\n%s\n" % (command, exitcode, output, error))
        return output

    def expand_tar_gz(self,path,destdir,deleteDestFirst=True,deleteSourceAfter=False):
        """Expand a .tar.gz archive at *path* into *destdir*.

        NOTE(review): the non-tar.gz error path references "j", which is only
        available when the commented JumpScale import at the top of the file
        is active -- it would raise NameError here.
        """
        self.lastdir=os.getcwd()
        os.chdir(self.TMP)
        basename=os.path.basename(path)
        if basename.find(".tar.gz")==-1:
            j.errorconditionhandler.raiseBug(message="Can only expand a tar gz file now %s"%path,category="installer.expand")
        tarfilename=".".join(basename.split(".gz")[:-1])
        self.delete(tarfilename)
        if deleteDestFirst:
            self.delete(destdir)
        if self.TYPE=="WIN":
            cmd="gzip -d %s" % path
            os.system(cmd)
        else:
            # Gunzip to an intermediate .tar file, then extract it.
            handle = gzip.open(path)
            with open(tarfilename, 'w') as out:
                for line in handle:
                    out.write(line)
            out.close()
            handle.close()
        t = tarfile.open(tarfilename, 'r')
        t.extractall(destdir)
        t.close()
        self.delete(tarfilename)
        if deleteSourceAfter:
            self.delete(path)
        os.chdir(self.lastdir)
        self.lastdir=""

    # Short alias kept for callers that use the old name.
    expand=expand_tar_gz

    def getLastChangeSetBitbucket(self,account="jumpscale",reponame="jumpscale-core"):
        """Return the tip changeset hash of a Bitbucket repo (scrapes the v1 API)."""
        url="https://api.bitbucket.org/1.0/repositories/%s/%s/src/tip/" % (account,reponame)
        handle = urlopen(url)
        lines=handle.readlines()
        for line in lines:
            if line.find("\"node\"")!=-1:
                return line.split("\"")[3]

    def getTmpPath(self,filename):
        """Return *filename* joined under the temp directory."""
        return "%s/%s"%(self.TMP,filename)

    def downloadJumpScaleCore(self,dest):
        """Download the jumpscale-core tarball and expand it into *dest*."""
        #csid=getLastChangeSetBitbucket()
        self.download ("https://bitbucket.org/jumpscale/jumpscale-core/get/default.tar.gz","%s/pl6core.tgz"%self.TMP)
        self.expand("%s/pl6core.tgz"%self.TMP,dest)

    def getPythonSiteConfigPath(self):
        """Return the shortest sys.path entry containing "python".

        # Heuristic for the site-packages/config dir of the active interpreter.
        """
        minl=1000000
        result=""
        for item in sys.path:
            if len(item)<minl and item.find("python")!=-1:
                result=item
                minl=len(item)
        return result

    def writefile(self,path,content):
        """Write *content* to *path*, overwriting any existing file."""
        fo = open(path, "w")
        fo.write( content )
        fo.close()

    def delete(self,path):
        """Remove a file, symlink or directory tree at *path* (no-op if absent)."""
        if self.debug:
            print("delete: %s" % path)
        if os.path.exists(path) or os.path.islink(path):
            if os.path.isdir(path):
                #print "delete dir %s" % path
                # A symlink to a dir must be unlinked, not rmtree'd.
                if os.path.islink(path):
                    os.remove(path)
                else:
                    shutil.rmtree(path)
            else:
                #print "delete file %s" % path
                os.remove(path)

    def copytreedeletefirst(self,source,dest):
        """Copy directory tree *source* to *dest*, removing *dest* first."""
        self.delete(dest)
        if self.debug:
            print("copy %s %s" % (source,dest))
        shutil.copytree(source,dest)

    def copydeletefirst(self,source,dest):
        """Copy file *source* to *dest*, removing *dest* first."""
        #try:
        #    os.remove(dest)
        #except:
        #    pass
        self.delete(dest)
        if self.debug:
            print("copy %s %s" % (source,dest))
        shutil.copy(source,dest)

    def createdir(self,path):
        """Create *path* (and parents) unless it already exists or is a link."""
        if self.debug:
            print("createdir: %s" % path)
        if not os.path.exists(path) and not os.path.islink(path):
            os.makedirs(path)

    def installBaseMinimal(self):
        """Download jumpscale-core and lay out the minimal BASE tree.

        # NOTE(review): several path components ("qself.BASE6") look like the
        # same bad search/replace noted in __init__ -- verify against a
        # pristine copy of this installer.
        """
        pldir="%s/plcore" % self.TMP
        shutil.rmtree(pldir,True)
        self.downloadJumpScaleCore(pldir)
        # The tarball expands to a single top-level dir; descend into it.
        pldir=os.path.join(pldir,os.listdir(pldir)[0])
        self.copytreedeletefirst(os.path.join(pldir,"qself.BASE6","cfg"),"%s/cfg/" % self.BASE)
        if not self.TYPE=="WIN":
            self.copydeletefirst(os.path.join(pldir,"qself.BASE6","jshell"),"%s/jshell" % self.BASE)
        self.copytreedeletefirst(os.path.join(pldir,"core"),"%s/lib/jumpscale" % self.BASE)
        #writefile("%s/lib/jumpscale/core/__init__.py"%self.BASE,"")
        self.copytreedeletefirst(os.path.join(pldir,"extensions","core"),"%s/lib/jumpscaleextensions/core" % self.BASE)
        self.copytreedeletefirst(os.path.join(pldir,"utils"),"%s/utils" % self.BASE)
        if not self.TYPE=="WIN":
            shutil.copyfile(os.path.join(pldir,"lib","python.zip"),"%s/lib/python.zip" % self.BASE)
        self.writefile("%s/lib/__init__.py"%self.BASE,"")
        try:
            os.makedirs("%s/var/log/jumpscalelogs"%self.BASE)
        except:
            # Best-effort: directory may already exist.
            pass
        try:
            os.makedirs("%s/utils"%self.BASE)
        except:
            pass
        print("minimal qself.BASE installed")

    def removesymlink(self,path):
        """Remove a Windows junction at *path* via the `junction` tool (no-op elsewhere)."""
        if self.TYPE=="WIN":
            try:
                cmd="junction -d %s 2>&1 > null" % (path)
                print(cmd)
                os.system(cmd)
            except Exception as e:
                pass

    def symlink(self,src,dest):
        """
        dest is where the link will be created pointing to src
        """
        if self.debug:
            print("symlink: src:%s dest:%s" % (src,dest))
        #if os.path.exists(dest):
        #try:
        #    os.remove(dest)
        #except:
        #    pass
        # NOTE(review): createdir(dest) creates *dest itself* as a directory,
        # which delete(dest) then removes before linking -- presumably meant
        # to ensure the parent path exists; verify.
        self.createdir(dest)
        if self.TYPE=="WIN":
            self.removesymlink(dest)
            self.delete(dest)
        else:
            self.delete(dest)
        print("symlink %s to %s" %(dest, src))
        if self.TYPE=="WIN":
            if self.debug:
                print("symlink %s %s" % (src,dest))
            cmd="junction %s %s 2>&1 > null" % (dest,src)
            os.system(cmd)
            #raise RuntimeError("not supported on windows yet")
        else:
            os.symlink(src,dest)

    def replacesitecustomize(self):
        """Replace sitecustomize.py under /usr and /etc with a link to ours.

        # Python 2 only: os.path.walk was removed in Python 3.
        """
        if not self.TYPE=="WIN":
            ppath="/usr/lib/python2.7/sitecustomize.py"
            # NOTE(review): ppath.find(ppath) is always 0 (falsy), so this
            # os.remove is never executed -- probably meant
            # os.path.exists(ppath).
            if ppath.find(ppath):
                os.remove(ppath)
            self.symlink("%s/utils/sitecustomize.py"%self.BASE,ppath)
            def do(path,dirname,names):
                # NOTE(review): os.path.walk calls visit(arg, dirname, names),
                # so "path" here actually receives the walk's arg ("") and
                # "dirname" the directory -- the find() below tests the wrong
                # parameter; verify.
                if path.find("sitecustomize")!=-1:
                    self.symlink("%s/utils/sitecustomize.py"%self.BASE,path)
            print("walk over /usr to find sitecustomize and link to new one")
            os.path.walk("/usr", do,"")
            os.path.walk("/etc", do,"")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.