gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
from os import path
import sys
__all__ = ['liblinear', 'feature_node', 'gen_feature_nodearray', 'problem',
'parameter', 'model', 'toPyModel', 'L2R_LR', 'L2R_L2LOSS_SVC_DUAL',
'L2R_L2LOSS_SVC', 'L2R_L1LOSS_SVC_DUAL', 'MCSVM_CS',
'L1R_L2LOSS_SVC', 'L1R_LR', 'L2R_LR_DUAL', 'L2R_L2LOSS_SVR',
'L2R_L2LOSS_SVR_DUAL', 'L2R_L1LOSS_SVR_DUAL', 'print_null']
# Load the shared liblinear library.  Prefer the copy distributed alongside
# this wrapper; fall back to a system-wide installation found via ctypes.
try:
    dirname = path.dirname(path.abspath(__file__))
    if sys.platform == 'win32':
        liblinear = CDLL(path.join(dirname, r'..\windows\liblinear.dll'))
    else:
        liblinear = CDLL(path.join(dirname, '../liblinear.so.3'))
except (OSError, NameError):
    # OSError: CDLL could not load the bundled library.
    # NameError: __file__ is undefined in some embedded interpreters.
    # (The old bare 'except:' hid unrelated errors such as KeyboardInterrupt.)
    # For unix the prefix 'lib' is not considered.
    if find_library('linear'):
        liblinear = CDLL(find_library('linear'))
    elif find_library('liblinear'):
        liblinear = CDLL(find_library('liblinear'))
    else:
        raise Exception('LIBLINEAR library not found.')
# Solver type constants; the values mirror liblinear's C enum.
L2R_LR = 0                 # L2-regularized logistic regression (primal)
L2R_L2LOSS_SVC_DUAL = 1    # L2-regularized L2-loss SVC (dual)
L2R_L2LOSS_SVC = 2         # L2-regularized L2-loss SVC (primal)
L2R_L1LOSS_SVC_DUAL = 3    # L2-regularized L1-loss SVC (dual)
MCSVM_CS = 4               # multi-class SVM by Crammer and Singer
L1R_L2LOSS_SVC = 5         # L1-regularized L2-loss SVC
L1R_LR = 6                 # L1-regularized logistic regression
L2R_LR_DUAL = 7            # L2-regularized logistic regression (dual)
L2R_L2LOSS_SVR = 11        # L2-regularized L2-loss SVR (primal)
L2R_L2LOSS_SVR_DUAL = 12   # L2-regularized L2-loss SVR (dual)
L2R_L1LOSS_SVR_DUAL = 13   # L2-regularized L1-loss SVR (dual)
# ctypes prototype for liblinear's print-string callback.
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
    """Callback that silently discards liblinear's training output (-q)."""
    return
def genFields(names, types):
    """Pair each field name with its ctypes type, for a Structure's _fields_."""
    return [(field_name, field_type)
            for field_name, field_type in zip(names, types)]
def fillprototype(f, restype, argtypes):
    """Attach return/argument type metadata to a ctypes foreign function."""
    f.restype, f.argtypes = restype, argtypes
class feature_node(Structure):
    """One (index, value) entry of a sparse liblinear feature vector."""
    _names = ["index", "value"]
    _types = [c_int, c_double]
    _fields_ = list(zip(_names, _types))

    def __str__(self):
        # Rendered as "index:value", matching liblinear's text format.
        return '{0:d}:{1:g}'.format(self.index, self.value)
def gen_feature_nodearray(xi, feature_max=None, issparse=True):
    """Convert a Python feature vector into a ctypes feature_node array.

    xi          : dict mapping index -> value, or a list/tuple of values
                  (1-based indices are generated for lists/tuples).
    feature_max : if not None, features with index > feature_max are dropped.
    issparse    : if True, zero-valued features are omitted.

    Returns (ret, max_idx): ret is a (feature_node * k) array terminated by
    two index == -1 sentinel slots (one reserved for the bias term), and
    max_idx is the largest feature index used (0 if none).
    """
    if isinstance(xi, dict):
        index_range = xi.keys()
    elif isinstance(xi, (list, tuple)):
        xi = [0] + xi  # idx should start from 1
        index_range = range(1, len(xi))
    else:
        raise TypeError('xi should be a dictionary, list or tuple')

    # 'is not None' so that an explicit feature_max of 0 is honored;
    # the old truthiness test silently ignored 0.
    if feature_max is not None:
        assert(isinstance(feature_max, int))
        index_range = filter(lambda j: j <= feature_max, index_range)
    if issparse:
        index_range = filter(lambda j: xi[j] != 0, index_range)

    index_range = sorted(index_range)
    ret = (feature_node * (len(index_range) + 2))()
    ret[-1].index = -1  # terminator sentinel
    ret[-2].index = -1  # for bias term
    for idx, j in enumerate(index_range):
        ret[idx].index = j
        ret[idx].value = xi[j]
    max_idx = 0
    if index_range:
        max_idx = index_range[-1]
    return ret, max_idx
class problem(Structure):
    """ctypes mirror of liblinear's 'struct problem' (a training data set)."""
    _names = ["l", "n", "y", "x", "bias"]
    _types = [c_int, c_int, POINTER(c_double), POINTER(POINTER(feature_node)), c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self, y, x, bias = -1):
        """Build a problem from labels y and feature vectors x.

        y    : sequence of numeric labels.
        x    : sequence of feature vectors (each a dict, list or tuple).
        bias : bias feature value; a negative value disables the bias term.
        """
        if len(y) != len(x) :
            raise ValueError("len(y) != len(x)")
        self.l = l = len(y)
        self.bias = -1  # start disabled; set_bias(bias) below applies the request
        max_idx = 0
        # Keep references to the ctypes arrays so they are not garbage-collected
        # while the C side still points at them.
        x_space = self.x_space = []
        for i, xi in enumerate(x):
            tmp_xi, tmp_idx = gen_feature_nodearray(xi)
            x_space += [tmp_xi]
            max_idx = max(max_idx, tmp_idx)
        self.n = max_idx
        self.y = (c_double * l)()
        for i, yi in enumerate(y): self.y[i] = y[i]
        self.x = (POINTER(feature_node) * l)()
        for i, xi in enumerate(self.x_space): self.x[i] = xi
        self.set_bias(bias)

    def set_bias(self, bias):
        """Enable (bias >= 0) or disable (bias < 0) the bias feature.

        Updates self.n and rewrites the reserved second-to-last node of every
        feature array.  NOTE(review): changing one non-negative bias directly
        to a different non-negative bias leaves 'node' unbound; callers appear
        to only toggle between enabled and disabled states.
        """
        if self.bias == bias:
            return
        if bias >= 0 and self.bias < 0:
            self.n += 1
            node = feature_node(self.n, bias)
        if bias < 0 and self.bias >= 0:
            self.n -= 1
            node = feature_node(-1, bias)
        for xi in self.x_space:
            xi[-2] = node
        self.bias = bias
class parameter(Structure):
    """ctypes mirror of liblinear's 'struct parameter' (training options)."""
    _names = ["solver_type", "eps", "C", "nr_thread", "nr_weight", "weight_label", "weight", "p", "init_sol"]
    _types = [c_int, c_double, c_double, c_int, c_int, POINTER(c_int), POINTER(c_double), c_double, POINTER(c_double)]
    _fields_ = genFields(_names, _types)

    def __init__(self, options = None):
        """Create a parameter set from an option string or argv-style list
        using the same syntax as the 'train' command line."""
        if options is None:  # identity test, not '== None'
            options = ''
        self.parse_options(options)

    def __str__(self):
        """Render every C field plus the Python-side flags, one per line."""
        s = ''
        attrs = parameter._names + list(self.__dict__.keys())
        values = map(lambda attr: getattr(self, attr), attrs)
        for attr, val in zip(attrs, values):
            s += (' %s: %s\n' % (attr, val))
        s = s.strip()
        return s

    def set_to_default_values(self):
        """Reset every option and bookkeeping flag to its default."""
        self.solver_type = L2R_L2LOSS_SVC_DUAL
        self.eps = float('inf')  # sentinel: replaced by a solver-specific default
        self.C = 1
        self.p = 0.1
        self.nr_thread = 1
        self.nr_weight = 0
        self.weight_label = None
        self.weight = None
        self.init_sol = None
        self.bias = -1
        self.flag_cross_validation = False
        self.flag_C_specified = False
        self.flag_solver_specified = False
        self.flag_find_C = False
        self.flag_omp = False
        self.nr_fold = 0
        self.print_func = cast(None, PRINT_STRING_FUN)

    def parse_options(self, options):
        """Parse 'train'-style options (str or list) into this structure.

        Raises TypeError for a non str/list argument and ValueError for an
        unknown or inconsistent option.
        """
        if isinstance(options, list):
            argv = options
        elif isinstance(options, str):
            argv = options.split()
        else:
            raise TypeError("arg 1 should be a list or a str.")
        self.set_to_default_values()
        self.print_func = cast(None, PRINT_STRING_FUN)
        weight_label = []
        weight = []

        i = 0
        while i < len(argv):
            if argv[i] == "-s":
                i = i + 1
                self.solver_type = int(argv[i])
                self.flag_solver_specified = True
            elif argv[i] == "-c":
                i = i + 1
                self.C = float(argv[i])
                self.flag_C_specified = True
            elif argv[i] == "-p":
                i = i + 1
                self.p = float(argv[i])
            elif argv[i] == "-e":
                i = i + 1
                self.eps = float(argv[i])
            elif argv[i] == "-B":
                i = i + 1
                self.bias = float(argv[i])
            elif argv[i] == "-v":
                i = i + 1
                # True rather than 1, for consistency with the other flags.
                self.flag_cross_validation = True
                self.nr_fold = int(argv[i])
                if self.nr_fold < 2:
                    raise ValueError("n-fold cross validation: n must >= 2")
            elif argv[i] == "-n":
                i = i + 1
                self.flag_omp = True
                self.nr_thread = int(argv[i])
            elif argv[i].startswith("-w"):
                # "-wLABEL VALUE": per-class penalty weight.
                i = i + 1
                self.nr_weight += 1
                weight_label += [int(argv[i-1][2:])]
                weight += [float(argv[i])]
            elif argv[i] == "-q":
                self.print_func = PRINT_STRING_FUN(print_null)
            elif argv[i] == "-C":
                self.flag_find_C = True
            else:
                raise ValueError("Wrong options")
            i += 1

        liblinear.set_print_string_function(self.print_func)
        self.weight_label = (c_int*self.nr_weight)()
        self.weight = (c_double*self.nr_weight)()
        for i in range(self.nr_weight):
            self.weight[i] = weight[i]
            self.weight_label[i] = weight_label[i]

        # default solver for parameter selection is L2R_L2LOSS_SVC
        if self.flag_find_C:
            if not self.flag_cross_validation:
                self.nr_fold = 5
            if not self.flag_solver_specified:
                self.solver_type = L2R_L2LOSS_SVC
                self.flag_solver_specified = True
            elif self.solver_type not in [L2R_LR, L2R_L2LOSS_SVC]:
                raise ValueError("Warm-start parameter search only available for -s 0 and -s 2")

        if self.flag_omp:
            if not self.flag_solver_specified:
                self.solver_type = L2R_L2LOSS_SVC
                self.flag_solver_specified = True
            elif self.solver_type not in [L2R_LR, L2R_L2LOSS_SVC, L2R_L2LOSS_SVR]:
                raise ValueError("Parallel LIBLINEAR is only available for -s 0, 2, 11 now")

        # Replace the eps sentinel with the per-solver default tolerance.
        if self.eps == float('inf'):
            if self.solver_type in [L2R_LR, L2R_L2LOSS_SVC]:
                self.eps = 0.01
            elif self.solver_type in [L2R_L2LOSS_SVR]:
                self.eps = 0.001
            elif self.solver_type in [L2R_L2LOSS_SVC_DUAL, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L2R_LR_DUAL]:
                self.eps = 0.1
            elif self.solver_type in [L1R_L2LOSS_SVC, L1R_LR]:
                self.eps = 0.01
            elif self.solver_type in [L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL]:
                self.eps = 0.1
class model(Structure):
    """ctypes mirror of liblinear's 'struct model' (a trained model)."""
    _names = ["param", "nr_class", "nr_feature", "w", "label", "bias"]
    _types = [parameter, c_int, c_int, POINTER(c_double), POINTER(c_int), c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self):
        # Models constructed in Python are owned by Python's allocator.
        self.__createfrom__ = 'python'

    def __del__(self):
        # free memory created by C to avoid memory leak
        if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
            liblinear.free_and_destroy_model(pointer(self))

    def get_nr_feature(self):
        """Return the number of features."""
        return liblinear.get_nr_feature(self)

    def get_nr_class(self):
        """Return the number of classes."""
        return liblinear.get_nr_class(self)

    def get_labels(self):
        """Return the class labels as a Python list of ints."""
        nr_class = self.get_nr_class()
        labels = (c_int * nr_class)()
        liblinear.get_labels(self, labels)
        return labels[:nr_class]

    def get_decfun_coef(self, feat_idx, label_idx=0):
        """Return one decision-function coefficient (feat_idx is 1-based)."""
        return liblinear.get_decfun_coef(self, feat_idx, label_idx)

    def get_decfun_bias(self, label_idx=0):
        """Return the decision-function bias term for label_idx."""
        return liblinear.get_decfun_bias(self, label_idx)

    def get_decfun(self, label_idx=0):
        """Return (w, b): the full coefficient list and bias for label_idx."""
        w = [liblinear.get_decfun_coef(self, feat_idx, label_idx) for feat_idx in range(1, self.nr_feature+1)]
        b = liblinear.get_decfun_bias(self, label_idx)
        return (w, b)

    def is_probability_model(self):
        """True if the model can produce probability outputs."""
        return (liblinear.check_probability_model(self) == 1)

    def is_regression_model(self):
        """True if the model is a regression (SVR) model."""
        return (liblinear.check_regression_model(self) == 1)
def toPyModel(model_ptr):
    """
    toPyModel(model_ptr) -> model

    Convert a ctypes POINTER(model) to a Python model.

    Raises ValueError for a NULL pointer.  The returned model is marked as
    C-allocated so __del__ frees it through liblinear.
    """
    if not model_ptr:  # a NULL ctypes pointer is falsy
        raise ValueError("Null pointer")
    m = model_ptr.contents
    m.__createfrom__ = 'C'
    return m
# Declare the C signatures of every liblinear entry point used by this
# wrapper so ctypes converts arguments and return values correctly.
fillprototype(liblinear.train, POINTER(model), [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.find_parameter_C, None, [POINTER(problem), POINTER(parameter), c_int, c_double, c_double, POINTER(c_double), POINTER(c_double)])
fillprototype(liblinear.cross_validation, None, [POINTER(problem), POINTER(parameter), c_int, POINTER(c_double)])
fillprototype(liblinear.predict_values, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.predict, c_double, [POINTER(model), POINTER(feature_node)])
fillprototype(liblinear.predict_probability, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.save_model, c_int, [c_char_p, POINTER(model)])
fillprototype(liblinear.load_model, POINTER(model), [c_char_p])
fillprototype(liblinear.get_nr_feature, c_int, [POINTER(model)])
fillprototype(liblinear.get_nr_class, c_int, [POINTER(model)])
fillprototype(liblinear.get_labels, None, [POINTER(model), POINTER(c_int)])
fillprototype(liblinear.get_decfun_coef, c_double, [POINTER(model), c_int, c_int])
fillprototype(liblinear.get_decfun_bias, c_double, [POINTER(model), c_int])
fillprototype(liblinear.free_model_content, None, [POINTER(model)])
fillprototype(liblinear.free_and_destroy_model, None, [POINTER(POINTER(model))])
fillprototype(liblinear.destroy_param, None, [POINTER(parameter)])
fillprototype(liblinear.check_parameter, c_char_p, [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.check_probability_model, c_int, [POINTER(model)])
fillprototype(liblinear.check_regression_model, c_int, [POINTER(model)])
fillprototype(liblinear.set_print_string_function, None, [CFUNCTYPE(None, c_char_p)])
| |
# -*- coding: utf-8 -*-
from django.db.models import Q
from django.views.generic import TemplateView
from django.views.generic import ListView
from django.shortcuts import get_object_or_404
from django.utils.html import strip_tags
from watson import search as watson
from mcat.models import Category, Product
from mcat.conf import PAGINATE_BY, DISABLE_BREADCRUMBS, USE_FILTERS, USE_PRICES, USE_ORDER, USE_BRAND, USE_PRICE_FILTER, PRICES_AS_INTEGER, CURRENCY
from mcat.utils import decode_ftype, get_min_max_prices
class IndexView(TemplateView):
    """Catalog landing page (static template, no extra context)."""
    template_name = "mcat/index.html"
class CategoryHomeView(TemplateView):
    """Listing of the top-level (root) active categories."""
    template_name = 'mcat/categories/index.html'

    def get_context_data(self, **kwargs):
        context = super(CategoryHomeView, self).get_context_data(**kwargs)
        # level <= 0 selects root categories; status 0 means active.
        root_categories = Category.objects.filter(level__lte=0, status=0)
        context['categories'] = root_categories
        context['num_categories'] = len(root_categories)
        if USE_ORDER:
            context['use_order'] = True
        return context
class CategoryView(TemplateView):
    """Browse the direct subcategories of one category."""
    template_name = 'mcat/categories/browse.html'

    def get_context_data(self, **kwargs):
        context = super(CategoryView, self).get_context_data(**kwargs)
        current_category = get_object_or_404(Category, slug=self.kwargs['slug'])
        # Only descend one tree level below the current category.
        last_level = current_category.level + 1
        categories = current_category.get_descendants().filter(level__lte=last_level, status=0)
        if DISABLE_BREADCRUMBS:
            context['disable_breadcrumbs'] = True
        else:
            context['ancestors'] = current_category.get_ancestors()
        context['current_category'] = current_category
        context['categories'] = categories
        context['num_categories'] = len(categories)
        if USE_ORDER:
            context['use_order'] = True
        return context
class ProductsInCategoryView(ListView):
    """Paginated product listing for a category, with optional caracteristic
    and price filtering driven by GET parameters."""
    paginate_by = PAGINATE_BY
    context_object_name = 'products'

    def get_queryset(self):
        """Build the filtered product queryset; stashes category, filters and
        price bounds on self for get_context_data."""
        self.category = get_object_or_404(Category.objects.prefetch_related('generic_caracteristics'), slug=self.kwargs['slug'], status=0)
        products = Product.objects.filter(category=self.category, status=0).prefetch_related('caracteristics')
        if USE_PRICES is True:
            products = products.order_by('price')
        self.caracteristics = self.category.generic_caracteristics.all()
        #~ get the requested filters
        self.filters = None
        if self.request.GET and USE_FILTERS:
            filters = {}
            for param, value in self.request.GET.items():
                if param != 'page':  # pagination is not a filter
                    filters[param] = value
            #~ filter on products
            for name, value in filters.items():
                # Each value looks like "<filter value>;<raw filter type>".
                raw_ftype = value.split(';')[1]
                ftype = decode_ftype(raw_ftype)
                if ftype in ['choices', 'boolean']:
                    val = name + ':' + value.replace(raw_ftype, ftype)
                    products = products.filter(Q(carac1=val) | Q(carac2=val) | Q(carac3=val) | Q(carac4=val) | Q(carac5=val))
                elif ftype == 'int':
                    if '_' in value:
                        # "<start>_<end>" encodes an inclusive numeric range.
                        frange = value.split(';')[0].split('_')
                        start_range = frange[0]
                        end_range = frange[1]
                        products = products.filter(Q(int_carac1_name=name, int_carac1__gte=start_range, int_carac1__lte=end_range) | Q(int_carac2_name=name, int_carac2__gte=start_range, int_carac2__lte=end_range) | Q(int_carac3_name=name, int_carac3__gte=start_range, int_carac3__lte=end_range))
                    else:
                        # "-N" means strictly below N; "+N" strictly above N.
                        if value.startswith('-'):
                            val = value[1:].split(';')[0]
                            products = products.filter(Q(int_carac1_name=name, int_carac1__lt=val))
                        elif value.startswith('+'):
                            val = value[1:].split(';')[0]
                            products = products.filter(Q(int_carac1_name=name, int_carac1__gt=val))
            self.filters = filters
        self.num_products = len(products)
        if USE_PRICES and USE_PRICE_FILTER:
            self.min_price, self.max_price = get_min_max_prices(products)
            if PRICES_AS_INTEGER:
                try:
                    self.min_price = int(round(self.min_price))
                    self.max_price = int(round(self.max_price))
                except (TypeError, ValueError):
                    # min/max can be None (no priced products); keep the raw
                    # values instead of swallowing every exception.
                    pass
        return products

    def get_context_data(self, **kwargs):
        """Expose category tree, filters and price bounds to the template."""
        context = super(ProductsInCategoryView, self).get_context_data(**kwargs)
        category = self.category
        last_level = category.level + 1
        categories = category.get_descendants().filter(level__lte=last_level)
        if DISABLE_BREADCRUMBS is True:
            context['disable_breadcrumbs'] = True
        else:
            context['ancestors'] = category.get_ancestors()
        context['category'] = category
        context['categories'] = categories
        context['caracteristics'] = self.caracteristics
        context['num_categories'] = len(categories)
        context['num_products'] = self.num_products
        if self.filters is not None:
            context['active_filters'] = self.filters
        context['use_filters'] = USE_FILTERS
        if USE_PRICES is False:
            context['no_prices'] = True
            context['currency'] = CURRENCY
        else:
            if USE_PRICE_FILTER is True:
                context['use_price_filter'] = True
                context['min_price'] = self.min_price
                context['max_price'] = self.max_price
        if USE_ORDER:
            context['use_order'] = True
        return context

    def get_template_names(self):
        """Use the category's own template when one is configured."""
        template_name = self.category.template_name
        if template_name == 'default':
            return 'mcat/products/index.html'
        else:
            return 'mcat/categories/alt/' + template_name + '.html'
class ProductView(TemplateView):
    """Detail page for a single product."""

    def get_context_data(self, **kwargs):
        context = super(ProductView, self).get_context_data(**kwargs)
        #~ get the data
        category = get_object_or_404(Category, slug=self.kwargs['category_slug'], status=0)
        if USE_BRAND:
            product = get_object_or_404(Product.objects.prefetch_related('images', 'caracteristics', 'brand'), slug=self.kwargs['slug'], status=0)
        else:
            product = get_object_or_404(Product.objects.prefetch_related('images', 'caracteristics'), slug=self.kwargs['slug'], status=0)
        # Only descend one tree level below the product's category.
        last_level = category.level + 1
        categories = category.get_descendants().filter(level__lte=last_level).order_by('name')
        # Remember the product's template for get_template_names.
        self.template_name = product.template_name
        #~ get product caracteristics
        caracs = {}
        for carac in product.caracteristics.all():
            caracs[carac.type_name] = [carac.type, carac.value_name]
        if DISABLE_BREADCRUMBS:
            context['disable_breadcrumbs'] = True
        else:
            context['ancestors'] = category.get_ancestors()
        #~ fill context
        context['category'] = category
        context['categories'] = categories
        context['product'] = product
        context['num_categories'] = len(categories)
        context['caracteristics'] = caracs
        if USE_PRICES is False:
            context['no_prices'] = True
        if USE_ORDER:
            context['use_order'] = True
        if USE_BRAND:
            context['use_brand'] = True
        if product.extra:
            context['extra'] = product.extra
        return context

    def get_template_names(self):
        """Use the product's own template when one is configured."""
        template_name = self.template_name
        if template_name == 'default':
            return 'mcat/products/detail.html'
        else:
            return 'mcat/products/alt/' + template_name + '.html'
class SearchView(ListView):
    """Full-text product search (django-watson backed)."""
    template_name = 'mcat/search.html'
    paginate_by = PAGINATE_BY
    context_object_name = 'products'

    def get_queryset(self):
        """Return matching active products, or an empty queryset when no
        'q' parameter was submitted.

        The old code implicitly returned None and left self.q unset in that
        case, which broke pagination and get_context_data.
        """
        self.q = ''
        if "q" in self.request.GET:
            products = Product.objects.filter(status=0).prefetch_related('images', 'category')
            self.q = strip_tags(self.request.GET['q'])
            return watson.filter(products, self.q)
        return Product.objects.none()

    def get_context_data(self, **kwargs):
        context = super(SearchView, self).get_context_data(**kwargs)
        if USE_ORDER:
            context['use_order'] = True
        context['search'] = True
        context['user_query'] = self.q
        return context
| |
import doctest
import mock
import unittest
from . import models
from sir.trigger_generation import (unique_split_paths,
walk_path,
OneToManyPathPart,
ManyToOnePathPart,
ColumnPathPart,
TriggerGenerator,
DeleteTriggerGenerator,
InsertTriggerGenerator,
UpdateTriggerGenerator,
GIDDeleteTriggerGenerator,
write_triggers_to_file,
write_direct_triggers)
class UniqueSplitPathsTest(unittest.TestCase):
    """Tests for unique_split_paths, which expands dotted paths into all
    their unique prefixes (in first-seen order)."""

    def run_test(self, paths, expected):
        # Helper: materialize the generator and compare against expected.
        res = list(unique_split_paths(paths))
        self.assertEqual(res, expected)

    def test_not_dotted_paths(self):
        # Paths without dots pass through unchanged.
        paths = ["a", "b"]
        expected = ["a", "b"]
        self.run_test(paths, expected)

    def test_dotted_paths(self):
        # Every prefix of a dotted path is yielded.
        paths = ["a.b.c", "e.f"]
        expected = [
            "a", "a.b", "a.b.c",
            "e", "e.f",
        ]
        self.run_test(paths, expected)

    def test_unique_paths(self):
        # Prefixes shared by several paths are yielded only once.
        paths = ["a.b.c", "a.b.d"]
        expected = [
            "a", "a.b", "a.b.c",
            "a.b.d"
        ]
        self.run_test(paths, expected)
class WalkPathTest(unittest.TestCase):
    """Tests for walk_path: it follows a relationship path on a model and
    returns (path_part, remote_table), or (None, None) for paths that end
    in a plain column, a composite column, or a non-SQLAlchemy attribute."""

    def test_many_to_one(self):
        path = "c"
        model = models.B
        result, table = walk_path(model, path)
        self.assertTrue(isinstance(result, ManyToOnePathPart))
        self.assertTrue(isinstance(result.inner, ColumnPathPart))
        self.assertEqual(result.render(),
            "SELECT table_b.id FROM table_b WHERE table_b.c IN ({new_or_old}.id)")
        self.assertEqual(table, "table_c")

    def test_many_to_one_column_returns_none(self):
        # A path ending in a column yields no trigger selection.
        path = "c.id"
        model = models.B
        result, table = walk_path(model, path)
        self.assertTrue(result is None)
        self.assertTrue(table is None)

    def test_one_to_many(self):
        path = "bs"
        model = models.C
        result, table = walk_path(model, path)
        self.assertTrue(isinstance(result, OneToManyPathPart))
        self.assertTrue(isinstance(result.inner, ColumnPathPart))
        self.assertEqual(result.render(),
            "SELECT table_c.id FROM table_c WHERE table_c.id IN ({new_or_old}.c)")
        self.assertEqual(table, "table_b")

    def test_one_to_many_column_returns_none(self):
        path = "bs.id"
        model = models.C
        result, table = walk_path(model, path)
        self.assertTrue(result is None)
        self.assertTrue(table is None)

    def test_composite_column_returns_none(self):
        path = "composite_column"
        model = models.B
        result, table = walk_path(model, path)
        self.assertTrue(result is None)
        self.assertTrue(table is None)

    def test_non_sqlalchemy_paths(self):
        # Plain Python attributes like __tablename__ are not walkable.
        path = "c.__tablename__"
        model = models.B
        result, table = walk_path(model, path)
        self.assertTrue(result is None)
        self.assertTrue(table is None)
class TriggerGeneratorTest(unittest.TestCase):
    """Checks the SQL text rendered by TriggerGenerator and the class
    attributes of its delete/insert/update subclasses."""

    class TestGenerator(TriggerGenerator):
        # Concrete subclass with recognizable placeholder attribute values.
        id_replacement = "REPLACEMENT"
        op = "OPERATION"
        beforeafter = "SOMEWHEN"

    def setUp(self):
        self.path = "foo.bar"
        self.index = 7
        self.gen = self.TestGenerator("PREFIX", "TABLE", self.path,
            "SELECTION", "INDEXTABLE", self.index)

    def test_function(self):
        # Full text of the generated PL/pgSQL function.
        self.assertEqual(self.gen.function,
"""
CREATE OR REPLACE FUNCTION {name}() RETURNS trigger
AS $$
DECLARE
ids TEXT;
BEGIN
SELECT string_agg(tmp.id::text, ' ') INTO ids FROM (SELECTION) AS tmp;
PERFORM amqp.publish(1, 'search', 'None', 'INDEXTABLE ' || ids);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION {name}() IS 'The path for this function is {path}';
""".format(name=self.gen.triggername, path=self.path))

    def test_triggername(self):
        self.assertEqual(self.gen.triggername,
            "search_PREFIX_OPERATION_{index}".format(index=self.index))

    def test_trigger(self):
        # Full text of the CREATE TRIGGER statement plus its comment.
        self.assertEqual(self.gen.trigger,
            "\n"
            "CREATE TRIGGER {name} SOMEWHEN OPERATION ON TABLE"
            "\n"
            " FOR EACH ROW EXECUTE PROCEDURE {name}();"
            "\n"
            "COMMENT ON TRIGGER {name} IS 'The path for this trigger is {path}';\n".
            format(name=self.gen.triggername, path=self.path))

    def test_delete_attributes(self):
        self.assertEqual(DeleteTriggerGenerator.op, "delete")
        self.assertEqual(DeleteTriggerGenerator.id_replacement, "OLD")
        self.assertEqual(DeleteTriggerGenerator.beforeafter, "BEFORE")
        self.assertEqual(DeleteTriggerGenerator.routing_key, "update")

    def test_insert_attributes(self):
        self.assertEqual(InsertTriggerGenerator.op, "insert")
        self.assertEqual(InsertTriggerGenerator.id_replacement, "NEW")
        self.assertEqual(InsertTriggerGenerator.beforeafter, "AFTER")
        self.assertEqual(InsertTriggerGenerator.routing_key, "index")

    def test_update_attributes(self):
        self.assertEqual(UpdateTriggerGenerator.op, "update")
        self.assertEqual(UpdateTriggerGenerator.id_replacement, "NEW")
        self.assertEqual(UpdateTriggerGenerator.beforeafter, "AFTER")
        self.assertEqual(UpdateTriggerGenerator.routing_key, "update")
class WriteTriggersTest(unittest.TestCase):
    """write_triggers_to_file must write exactly one function and one trigger
    per supplied generator class, with the same text the generator renders."""

    def setUp(self):
        self.functionfile = mock.Mock()
        self.triggerfile = mock.Mock()
        self.index = 5
        write_triggers_to_file(self.triggerfile, self.functionfile,
            (InsertTriggerGenerator,),
            "entity_c", "table_c", "bs.foo", "SELECTION",
            "table_b", self.index)
        # Reference generator built with the same arguments for comparison.
        self.gen = InsertTriggerGenerator("entity_c", "table_c", "bs.foo",
            "SELECTION", "table_b", self.index)

    def test_writes_function(self):
        self.functionfile.write.assert_any_call(self.gen.function)

    def test_writes_trigger(self):
        self.triggerfile.write.assert_any_call(self.gen.trigger)

    def test_write_count(self):
        # One generator class -> exactly one write per file.
        self.assertEqual(self.functionfile.write.call_count, 1)
        self.assertEqual(self.triggerfile.write.call_count, 1)
class DirectTriggerWriterTest(unittest.TestCase):
    """write_direct_triggers must emit delete/insert/update triggers that
    select the changed row from the entity's own table."""

    def setUp(self):
        self.functionfile = mock.Mock()
        self.triggerfile = mock.Mock()
        write_direct_triggers(self.triggerfile, self.functionfile,
            "entity_c", models.C)
        # Build the three generators the call is expected to have used.
        self.generators = []
        for g in (GIDDeleteTriggerGenerator,
                  InsertTriggerGenerator,
                  UpdateTriggerGenerator):
            gen = g("entity_c", "table_c", "direct",
                "SELECT table_c.id FROM table_c WHERE table_c.id = "
                "{new_or_old}.id", "table_c", 0)
            self.generators.append(gen)

    def test_writes_functions(self):
        for gen in self.generators:
            self.functionfile.write.assert_any_call(gen.function)

    def test_writes_triggers(self):
        for gen in self.generators:
            self.triggerfile.write.assert_any_call(gen.trigger)

    def test_write_count(self):
        # Three generator classes -> three writes per file.
        self.assertEqual(self.functionfile.write.call_count, 3)
        self.assertEqual(self.triggerfile.write.call_count, 3)
def load_tests(loader, tests, ignore):
    """unittest load_tests hook: also run the doctests embedded in
    sir.trigger_generation.types."""
    from sir import trigger_generation
    tests.addTests(doctest.DocTestSuite(trigger_generation.types))
    return tests
| |
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic API for reading/writing small numbers of records."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import calendar
import csv
import logging
import re
import StringIO
import xml.dom.minidom
import django.utils.html
from google.appengine import runtime
from google.appengine.ext import db
import config
import external_search
import full_text_search
import importer
import indexing
import model
import pfif
import simplejson
import subscribe
import utils
import xlrd
from model import Person, Note, ApiActionLog
from text_query import TextQuery
from utils import Struct
HARD_MAX_RESULTS = 200 # Clients can ask for more, but won't get more.
class InputFileError(Exception):
    """Raised when an uploaded import file is malformed."""
    pass
def get_requested_formats(path):
    """Returns a list of requested formats.
    The possible values are 'persons' and 'notes'."""
    # Named 'requested' rather than 'format' to avoid shadowing the builtin.
    requested = path.split('/')[-1]
    if requested in ['persons', 'notes']:
        return [requested]
    # Any other trailing path component means both formats.
    return ['persons', 'notes']
def complete_record_ids(record, domain):
    """Ensures that a record's record_id fields are prefixed with a domain.

    Mutates and returns the record.  Ids that already contain a '/' are
    assumed to be fully qualified and are left alone.
    """
    def complete(record, field):
        # 'record_id' rather than 'id' to avoid shadowing the builtin.
        record_id = record.get(field)
        if record_id and '/' not in record_id:
            record[field] = '%s/%s' % (domain, record_id)
    complete(record, 'person_record_id')
    complete(record, 'note_record_id')
    return record
def get_tag_params(handler):
    """Return HTML tag parameters used in import.html.

    Each begin/end pair wraps a phrase in the template with an anchor tag;
    repo-relative URLs are built from handler.env.global_url.
    """
    return {
        'begin_notes_template_link':
            '<a href="%s/notes-template.xlsx">' %
            django.utils.html.escape(handler.env.global_url),
        'end_notes_template_link':
            '</a>',
        'begin_sample_anchor_tag':
            '<a href="%s/sample-import.csv" target="_blank">' %
            django.utils.html.escape(handler.env.global_url),
        'end_sample_anchor_tag':
            '</a>',
        'begin_document_anchor_tag':
            '<a href='
            '"https://github.com/google/personfinder/wiki/ImportCSV" '
            'target="_blank">',
        'end_document_anchor_tag':
            '</a>',
    }
def generate_note_record_ids(records):
    """Yields the records, filling any missing/blank note_record_id with a
    freshly generated unique id."""
    for record in records:
        if not record.get('note_record_id', '').strip():
            record['note_record_id'] = str(model.UniqueId.create_id())
        yield record
def convert_time(text, offset):
    """Converts a textual date and time into an RFC 3339 UTC timestamp.

    offset is the local zone's offset from UTC in hours.  Text that already
    matches utils.DATETIME_RE is passed through unchanged (no offset applied).
    Unparseable text is returned as-is so it shows up in error messages.
    """
    if utils.DATETIME_RE.match(text.strip()):  # don't apply offset
        return text
    match = re.search(r'(\d\d\d\d)[/-](\d+)[/-](\d+) *(\d+):(\d+)', text)
    if match:
        y, l, d, h, m = map(int, match.groups())
        # Treat the parsed time as local, then shift to UTC by the offset.
        timestamp = calendar.timegm((y, l, d, h, m, 0)) - offset*3600
        return utils.format_utc_timestamp(timestamp)
    return text  # keep the original text so it shows up in the error message
def convert_time_fields(rows, default_offset=0):
    """Filters CSV rows, converting time fields to RFC 3339 UTC times.

    The first row that contains "person_record_id" is assumed to be the header
    row containing field names.  Preceding rows are treated as a preamble.
    If the text "time_zone_offset" is found in the preamble section, the cell
    immediately below it is treated as a time zone offset from UTC in hours.
    Otherwise default_offset is used as the time zone offset.

    Rows below the header row are returned as dictionaries (as csv.DictReader
    would), except that any "*_date" fields are parsed as local times,
    converted to UTC according to the specified offset, and reformatted
    as RFC 3339 UTC times.
    """
    field_names = []
    time_fields = []
    setting_names = []
    settings = {}
    offset = default_offset
    for row in rows:
        if field_names:
            # Past the header: emit a dict keyed by normalized field names.
            record = dict(zip(field_names, row))
            for key in time_fields:
                record[key] = convert_time(record[key], offset)
            yield record
        elif 'person_record_id' in row:
            field_names = [name.lower().strip() for name in row]
            # Derive time fields from the *normalized* names: record keys are
            # lowercased/stripped, so matching against the raw header cells
            # would raise KeyError for headers like 'Source_Date'.
            time_fields = [name for name in field_names
                           if name.endswith('_date')]
            if 'time_zone_offset' in settings:
                try:
                    offset = float(settings['time_zone_offset'])
                except ValueError:
                    raise InputFileError('invalid time_zone_offset value')
        else:
            # Preamble: pair each value row with the label row above it.
            settings.update(dict(zip(setting_names, row)))
            setting_names = [name.lower().strip() for name in row]
def convert_xsl_to_csv(contents):
    """Converts data in xsl (or xslx) format to CSV.

    Returns (csv_text, None) on success, or (None, error_message) when the
    workbook cannot be read.  Only the first sheet is converted.
    """
    try:
        book = xlrd.open_workbook(file_contents=contents)
    except xlrd.XLRDError as e:
        return None, str(e)
    except UnicodeDecodeError:
        return None, 'The encoding of the file is unknown.'
    if book.nsheets == 0:
        return None, 'The uploaded file contains no sheets.'
    sheet = book.sheet_by_index(0)
    table = []
    for row in xrange(sheet.nrows):
        table_row = []
        for col in xrange(sheet.ncols):
            value = None
            cell_value = sheet.cell_value(row, col)
            cell_type = sheet.cell_type(row, col)
            if cell_type == xlrd.XL_CELL_TEXT:
                value = cell_value
            elif cell_type == xlrd.XL_CELL_NUMBER:
                # Numeric cells are written as truncated integers.
                value = str(int(cell_value))
            elif cell_type == xlrd.XL_CELL_BOOLEAN:
                value = 'true' if cell_value else 'false'
            elif cell_type == xlrd.XL_CELL_DATE:
                # TODO(ryok): support date type.
                pass
            table_row.append(value)
        table.append(table_row)
    csv_output = StringIO.StringIO()
    csv_writer = csv.writer(csv_output)
    csv_writer.writerows(table)
    return csv_output.getvalue(), None
class Import(utils.BaseHandler):
    """Handler for bulk-importing person/note records from CSV or Excel."""
    https_required = True

    def get(self):
        # Show the upload form.
        self.render('import.html',
            formats=get_requested_formats(self.env.path),
            **get_tag_params(self))

    def post(self):
        """Validate the upload, convert Excel to CSV if needed, and dispatch
        to import_notes or import_persons."""
        if not (self.auth and self.auth.domain_write_permission):
            # TODO(ryok): i18n
            self.error(403, message='Missing or invalid authorization key.')
            return
        content = self.request.get('content')
        if not content:
            self.error(400, message='Please specify at least one CSV file.')
            return

        # Handle Excel sheets.
        filename = self.request.POST['content'].filename
        # NOTE(review): pattern should be a raw string, though '\.' happens
        # to behave identically here.
        if re.search('\.xlsx?$', filename):
            content, error = convert_xsl_to_csv(content)
            if error:
                self.response.set_status(400)
                self.write(error)
                return

        try:
            lines = content.splitlines()  # handles \r, \n, or \r\n
            if self.request.get('format') == 'notes':
                self.import_notes(lines)
            else:
                self.import_persons(lines)
        except InputFileError, e:
            self.error(400, message='Problem in the uploaded file: %s' % e)
        except runtime.DeadlineExceededError, e:
            self.error(400, message=
                'Sorry, the uploaded file is too large. Try splitting it into '
                'smaller files (keeping the header rows in each file) and '
                'uploading each part separately.')

    def import_notes(self, lines):
        """Parse the CSV lines as note records, store them, and render
        the result statistics."""
        source_domain = self.auth.domain_write_permission
        records = importer.utf8_decoder(generate_note_record_ids(
            convert_time_fields(csv.reader(lines))))
        try:
            records = [complete_record_ids(r, source_domain) for r in records]
        except csv.Error, e:
            self.error(400, message=
                'The CSV file is formatted incorrectly. (%s)' % e)
            return
        notes_written, notes_skipped, notes_total = importer.import_records(
            self.repo, source_domain, importer.create_note, records,
            believed_dead_permission=self.auth.believed_dead_permission,
            omit_duplicate_notes=True)
        utils.log_api_action(self, ApiActionLog.WRITE,
            0, notes_written, 0, len(notes_skipped))
        self.render('import.html',
            formats=get_requested_formats(self.env.path),
            stats=[
                Struct(type='Note',
                    written=notes_written,
                    skipped=notes_skipped,
                    total=notes_total)],
            **get_tag_params(self))

    def import_persons(self, lines):
        """Parse the CSV lines as person (and inline note) records, store
        them, and render the result statistics.

        Rows with a non-empty full_name become persons; rows with a
        non-empty note_record_id become notes (a row can be both).
        """
        # TODO(ryok): support non-UTF8 encodings.
        source_domain = self.auth.domain_write_permission
        records = importer.utf8_decoder(convert_time_fields(csv.reader(lines)))
        try:
            records = [complete_record_ids(r, source_domain) for r in records]
        except csv.Error, e:
            self.error(400, message=
                'The CSV file is formatted incorrectly. (%s)' % e)
            return
        is_not_empty = lambda x: (x or '').strip()
        persons = [r for r in records if is_not_empty(r.get('full_name'))]
        notes = [r for r in records if is_not_empty(r.get('note_record_id'))]
        people_written, people_skipped, people_total = importer.import_records(
            self.repo, source_domain, importer.create_person, persons)
        notes_written, notes_skipped, notes_total = importer.import_records(
            self.repo, source_domain, importer.create_note, notes,
            believed_dead_permission=self.auth.believed_dead_permission)
        utils.log_api_action(self, ApiActionLog.WRITE,
            people_written, notes_written,
            len(people_skipped), len(notes_skipped))
        self.render('import.html',
            formats=get_requested_formats(self.env.path),
            stats=[
                Struct(type='Person',
                    written=people_written,
                    skipped=people_skipped,
                    total=people_total),
                Struct(type='Note',
                    written=notes_written,
                    skipped=notes_skipped,
                    total=notes_total)],
            **get_tag_params(self))
class Read(utils.BaseHandler):
    """API handler returning one person record (and its notes) as PFIF XML."""
    https_required = True

    def get(self):
        if self.config.read_auth_key_required and not (
            self.auth and self.auth.read_permission):
            self.info(
                403,
                message='Missing or invalid authorization key',
                style='plain')
            return
        pfif_version = self.params.version

        # Note that self.request.get can handle multiple IDs at once; we
        # can consider adding support for multiple records later.
        record_id = self.request.get('id')
        if not record_id:
            self.info(400, message='Missing id parameter', style='plain')
            return

        person = model.Person.get(
            self.repo, record_id, filter_expired=False)
        if not person:
            self.info(
                400,
                message='No person record with ID %s' % record_id,
                style='plain')
            return
        notes = model.Note.get_by_person_record_id(self.repo, record_id)
        # Hidden notes are never exposed through the API.
        notes = [note for note in notes if not note.hidden]

        self.response.headers['Content-Type'] = 'application/xml'
        records = [pfif_version.person_to_dict(person, person.is_expired)]
        note_records = map(pfif_version.note_to_dict, notes)
        # Strip fields the caller is not authorized to see before writing.
        utils.optionally_filter_sensitive_fields(records, self.auth)
        utils.optionally_filter_sensitive_fields(note_records, self.auth)
        pfif_version.write_file(
            self.response.out, records, lambda p: note_records)
        utils.log_api_action(
            self, ApiActionLog.READ, len(records), len(notes))
class Write(utils.BaseHandler):
    """API handler accepting PFIF XML uploads of person and note records."""
    https_required = True
    def post(self):
        if not (self.auth and self.auth.domain_write_permission):
            self.info(
                403,
                message='Missing or invalid authorization key',
                style='plain')
            return
        source_domain = self.auth.domain_write_permission
        try:
            person_records, note_records = \
                pfif.parse_file(self.request.body_file)
        except Exception, e:
            self.info(400, message='Invalid XML: %s' % e, style='plain')
            return
        mark_notes_reviewed = bool(self.auth.mark_notes_reviewed)
        believed_dead_permission = bool(
            self.auth.believed_dead_permission)
        self.response.headers['Content-Type'] = 'application/xml'
        # The response is streamed: a status element is emitted after each
        # of the two import passes (persons first, then notes).
        self.write('<?xml version="1.0"?>\n')
        self.write('<status:status>\n')
        create_person = importer.create_person
        num_people_written, people_skipped, total = importer.import_records(
            self.repo, source_domain, create_person, person_records)
        self.write_status(
            'person', num_people_written, people_skipped, total,
            'person_record_id')
        create_note = importer.create_note
        num_notes_written, notes_skipped, total = importer.import_records(
            self.repo, source_domain, create_note, note_records,
            mark_notes_reviewed, believed_dead_permission, self)
        self.write_status(
            'note', num_notes_written, notes_skipped, total, 'note_record_id')
        self.write('</status:status>\n')
        utils.log_api_action(self, ApiActionLog.WRITE,
                             num_people_written, num_notes_written,
                             len(people_skipped), len(notes_skipped))
    def write_status(self, type, written, skipped, total, id_field):
        """Emit status information about the results of an attempted write."""
        # `skipped` is a list of (error_message, record_dict) pairs.
        skipped_records = []
        for error, record in skipped:
            skipped_records.append(
                '      <pfif:%s>%s</pfif:%s>\n' %
                (id_field, record.get(id_field, ''), id_field))
            skipped_records.append(
                '      <status:error>%s</status:error>\n' % error)
        self.write('''
  <status:write>
    <status:record_type>pfif:%s</status:record_type>
    <status:parsed>%d</status:parsed>
    <status:written>%d</status:written>
    <status:skipped>
%s
    </status:skipped>
  </status:write>
''' % (type, total, written, ''.join(skipped_records).rstrip()))
class Search(utils.BaseHandler):
    """API handler that searches person records by ID or by query string."""
    https_required = False
    def get(self):
        if self.config.search_auth_key_required and not (
            self.auth and self.auth.search_permission):
            self.info(
                403,
                message='Missing or invalid authorization key',
                style='plain')
            return
        pfif_version = self.params.version
        # Retrieve parameters and do some sanity checks on them.
        record_id = self.request.get('id')
        query_string = self.request.get('q')
        max_results = min(self.params.max_results or 100, HARD_MAX_RESULTS)
        results = []
        if record_id:
            # Search by record ID (always returns just 1 result or nothing).
            person = model.Person.get(self.repo, record_id)
            if person:
                results = [person]
        elif query_string:
            # Search by query words.
            if self.config.external_search_backends:
                query = TextQuery(query_string)
                results = external_search.search(self.repo, query, max_results,
                    self.config.external_search_backends)
            # External search backends are not always complete. Fall back to
            # the original search when they fail or return no results.
            if not results:
                if config.get('enable_fulltext_search'):
                    results = full_text_search.search(
                        self.repo, query_string, max_results)
                else:
                    results = indexing.search(
                        self.repo, TextQuery(query_string), max_results)
        else:
            # NOTE(review): execution continues after this 400 response and
            # still emits an (empty) PFIF document below -- confirm intended.
            self.info(
                400,
                message='Neither id nor q parameter specified',
                style='plain')
        records = [pfif_version.person_to_dict(result) for result in results]
        utils.optionally_filter_sensitive_fields(records, self.auth)
        # Define the function to retrieve notes for a person.
        def get_notes_for_person(person):
            notes = model.Note.get_by_person_record_id(
                self.repo, person['person_record_id'])
            notes = [note for note in notes if not note.hidden]
            records = map(pfif_version.note_to_dict, notes)
            utils.optionally_filter_sensitive_fields(records, self.auth)
            return records
        self.response.headers['Content-Type'] = 'application/xml'
        pfif_version.write_file(
            self.response.out, records, get_notes_for_person)
        utils.log_api_action(self, ApiActionLog.SEARCH, len(records))
class Subscribe(utils.BaseHandler):
    """API handler that subscribes an e-mail address to updates on a person."""
    https_required = True
    def post(self):
        # Guard clauses: authorization, e-mail validity, record existence.
        if not (self.auth and self.auth.subscribe_permission):
            return self.error(403, 'Missing or invalid authorization key')
        email = self.params.subscribe_email
        if not subscribe.is_email_valid(email):
            return self.error(400, 'Invalid email address')
        person = model.Person.get(self.repo, self.params.id)
        if not person:
            return self.error(400, 'Invalid person_record_id')
        # subscribe_to returns a falsy value when already subscribed.
        new_subscription = subscribe.subscribe_to(
            self, self.repo, person, email, self.env.lang)
        utils.log_api_action(self, ApiActionLog.SUBSCRIBE)
        if new_subscription:
            return self.info(200, 'Successfully subscribed')
        return self.info(200, 'Already subscribed')
class Unsubscribe(utils.BaseHandler):
    """API handler that removes an e-mail subscription for a person record."""
    https_required = True
    def post(self):
        if not (self.auth and self.auth.subscribe_permission):
            return self.error(403, 'Missing or invalid authorization key')
        subscription = model.Subscription.get(
            self.repo, self.params.id, self.params.subscribe_email)
        # The action is logged whether or not a subscription existed.
        self.response.set_status(200)
        utils.log_api_action(self, ApiActionLog.UNSUBSCRIBE)
        if not subscription:
            return self.info(200, 'Not subscribed')
        subscription.delete()
        return self.info(200, 'Successfully unsubscribed')
def fetch_all(query):
    """Return every entity matching `query`, paging through results 500 at a
    time using the query's cursor."""
    collected = []
    while True:
        page = query.fetch(500)
        if not page:
            break
        collected += page
        # Resume from where the previous fetch stopped.
        query = query.with_cursor(query.cursor())
    return collected
class Stats(utils.BaseHandler):
    """API handler exposing per-repository person/note counts as JSON."""
    def get(self):
        if not (self.auth and self.auth.stats_permission):
            self.info(
                403,
                message='Missing or invalid authorization key',
                style='plain')
            return
        person_counts = model.Counter.get_all_counts(self.repo, 'person')
        note_counts = model.Counter.get_all_counts(self.repo, 'note')
        # The three note buckets below are counted live with keys-only
        # queries rather than read from the maintained counters.
        # unreviewed
        note_counts['hidden=FALSE,reviewed=FALSE'] = len(fetch_all(
            model.Note.all(keys_only=True
                ).filter('repo =', self.repo
                ).filter('reviewed =', False
                ).filter('hidden =', False
                ).order('-entry_date')))
        # accepted
        note_counts['hidden=FALSE,reviewed=TRUE'] = len(fetch_all(
            model.Note.all(keys_only=True
                ).filter('repo =', self.repo
                ).filter('reviewed =', True
                ).filter('hidden =', False
                ).order('-entry_date')))
        # flagged
        note_counts['hidden=TRUE'] = len(fetch_all(
            model.Note.all(keys_only=True
                ).filter('repo =', self.repo
                ).filter('hidden =', True
                ).order('-entry_date')))
        self.response.headers['Content-Type'] = 'application/json'
        self.write(simplejson.dumps({'person': person_counts,
                                     'note': note_counts}))
class HandleSMS(utils.BaseHandler):
    """API handler for inbound SMS messages relayed by an SMS gateway.

    Supports two commands: "search <name>" looks up records in the repo
    mapped to the receiving phone number; "i am <name>" (when enabled via
    config) creates a found-person record plus an initial note.
    """
    https_required = True
    repo_required = False
    # Maximum number of search results included in one reply.
    MAX_RESULTS = 3
    def post(self):
        # The key must be a global ('*') write key with search permission,
        # since this endpoint can both query and create records.
        if not (self.auth and self.auth.search_permission
                and self.auth.domain_write_permission == '*'):
            self.info(
                403,
                message=
                    '"key" URL parameter is either missing, invalid or '
                    'lacks required permissions. The key\'s repo must be "*", '
                    'search_permission must be True, and it must have write '
                    'permission.',
                style='plain')
            return
        body = self.request.body_file.read()
        doc = xml.dom.minidom.parseString(body)
        message_text = self.get_element_text(doc, 'message_text')
        receiver_phone_number = self.get_element_text(
            doc, 'receiver_phone_number')
        if message_text is None:
            self.info(
                400,
                message='message_text element is required.',
                style='plain')
            return
        if receiver_phone_number is None:
            self.info(
                400,
                message='receiver_phone_number element is required.',
                style='plain')
            return
        # The receiving phone number selects the repository to operate on.
        repo = (
            self.config.sms_number_to_repo and
            self.config.sms_number_to_repo.get(receiver_phone_number))
        if not repo:
            self.info(
                400,
                message=
                    'The given receiver_phone_number is not found in '
                    'sms_number_to_repo config.',
                style='plain')
            return
        responses = []
        search_m = re.search(r'^search\s+(.+)$', message_text.strip(), re.I)
        add_self_m = re.search(r'^i am\s+(.+)$', message_text.strip(), re.I)
        if search_m:
            query_string = search_m.group(1).strip()
            query = TextQuery(query_string)
            persons = indexing.search(repo, query, HandleSMS.MAX_RESULTS)
            if persons:
                for person in persons:
                    responses.append(self.render_person(person))
            else:
                responses.append('No results found for: %s' % query_string)
            responses.append(
                'More at: google.org/personfinder/%s?ui=light' % repo)
            responses.append(
                'All data entered in Person Finder is available to the public '
                'and usable by anyone. Google does not review or verify the '
                'accuracy of this data google.org/personfinder/global/tos.html')
        elif self.config.enable_sms_record_input and add_self_m:
            # "I am <name>": create a person record and an initial note
            # authored by that person, then sync the person from the note.
            name_string = add_self_m.group(1).strip()
            person = Person.create_original(
                repo,
                entry_date=utils.get_utcnow(),
                full_name=name_string,
                family_name='',
                given_name='')
            person.update_index(['old', 'new'])
            note = Note.create_original(
                repo,
                entry_date=utils.get_utcnow(),
                source_date=utils.get_utcnow(),
                person_record_id=person.record_id,
                author_name=name_string,
                author_made_contact=True,
                status='is_note_author',
                text=message_text)
            db.put(note)
            model.UserActionLog.put_new('add', note, copy_properties=False)
            person.update_from_note(note)
            db.put(person)
            model.UserActionLog.put_new('add', person, copy_properties=False)
            responses.append('Added record for found person: %s' % name_string)
        else:
            usage_str = 'Usage: "Search John"'
            if self.config.enable_sms_record_input:
                usage_str += ' OR "I am John"'
            responses.append(usage_str)
        self.response.headers['Content-Type'] = 'application/xml'
        self.write(
            '<?xml version="1.0" encoding="utf-8"?>\n'
            '<response>\n'
            '  <message_text>%s</message_text>\n'
            '</response>\n'
            % django.utils.html.escape(' ## '.join(responses)))
    def render_person(self, person):
        """Format one search result as a single-line ' / '-joined summary."""
        fields = []
        fields.append(person.full_name)
        if person.latest_status:
            # The result of utils.get_person_status_text() may be a Django's
            # proxy object for lazy translation. Use unicode() to convert it
            # into a unicode object. We must not specify an encoding for
            # unicode() in this case.
            fields.append(unicode(
                utils.get_person_status_text(person)))
        if person.sex: fields.append(person.sex)
        if person.age: fields.append(person.age)
        if person.home_city or person.home_state:
            fields.append(
                'From: ' +
                ' '.join(filter(None, [person.home_city, person.home_state])))
        return ' / '.join(fields)
    def get_element_text(self, doc, tag_name):
        """Return the UTF-8 encoded text content of the first `tag_name`
        element in `doc`, or None when no such element exists."""
        elems = doc.getElementsByTagName(tag_name)
        if elems:
            text = u''
            for node in elems[0].childNodes:
                if node.nodeType == node.TEXT_NODE:
                    text += node.data
            return text.encode('utf-8')
        else:
            return None
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import *
from cairis.daemon.CairisHTTPError import ARMHTTPError, MalformedJSONHTTPError, ObjectNotFoundHTTPError, MissingParameterHTTPError
from cairis.core.Environment import Environment
from cairis.core.EnvironmentParameters import EnvironmentParameters
from cairis.data.CairisDAO import CairisDAO
from cairis.tools.JsonConverter import json_serialize, json_deserialize
from cairis.tools.ModelDefinitions import EnvironmentModel
from cairis.tools.PseudoClasses import EnvironmentTensionModel
from cairis.tools.SessionValidator import check_required_keys
__author__ = 'Robin Quetin, Shamal Faily'
class EnvironmentDAO(CairisDAO):
    """Data-access object for CAIRIS Environment objects.

    Translates between web/JSON representations and Environment model
    objects, and converts database-layer exceptions (DatabaseProxyException,
    ARMException) into ARMHTTPError responses.
    """
    def __init__(self, session_id):
        CairisDAO.__init__(self, session_id, 'environment')
    def get_objects(self, constraint_id=-1):
        """Return all environments (optionally constrained), simplified and
        sorted by name."""
        try:
            environments = self.db_proxy.getEnvironments(constraint_id)
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        envKeys = sorted(environments.keys())
        envList = []
        for key in envKeys:
            value = environments[key]
            envList.append(self.simplify(value))
        return envList
    def get_object_by_name(self, name, simplify=True):
        """Return the environment called `name`; raise 404 if absent."""
        found_environment = None
        try:
            environments = self.db_proxy.getEnvironments()
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        if environments is not None:
            found_environment = environments.get(name)
        if found_environment is None:
            self.close()
            raise ObjectNotFoundHTTPError('The provided environment name')
        if simplify:
            found_environment = self.simplify(found_environment)
        return found_environment
    def get_environment_names(self,pathValues = []):
        """Return the names of all environments."""
        try:
            environment_names = self.db_proxy.getEnvironmentNames()
            return environment_names
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)
    def get_environment_names_by_vulnerability_threat(self, vulnerability_name, threat_name, pathValues = []):
        """Argument-order convenience wrapper for the threat/vulnerability lookup."""
        return self.get_environment_names_by_threat_vulnerability(threat_name, vulnerability_name, pathValues)
    def get_environment_names_by_threat_vulnerability(self, threat_name, vulnerability_name, pathValues = []):
        """Return names of environments where the threat/vulnerability pair applies."""
        try:
            environments = self.db_proxy.riskEnvironments(threat_name, vulnerability_name)
            return environments
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)
    def get_environment_names_by_risk(self, risk_name, pathValues = []):
        """Return names of environments associated with the named risk."""
        try:
            environments = self.db_proxy.riskEnvironmentsByRisk(risk_name)
            return environments
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)
    def add_object(self, environment):
        """Persist a new environment; error if the name already exists."""
        env_params = self.to_environment_parameters(environment)
        try:
            if not self.check_existing_environment(environment.theName):
                self.db_proxy.addEnvironment(env_params)
            else:
                self.close()
                # Raised (and immediately re-wrapped below) as an HTTP error.
                raise DatabaseProxyException('Environment name already exists within the database.')
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)
    def update_object(self, environment, name):
        """Update the environment currently named `name` with new values."""
        env_params = self.to_environment_parameters(environment)
        try:
            envId = self.db_proxy.getDimensionId(name,'environment')
            env_params.setId(envId)
            self.db_proxy.updateEnvironment(env_params)
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)
    def delete_object(self, name):
        """Delete the environment named `name`."""
        try:
            envId = self.db_proxy.getDimensionId(name,'environment')
            self.db_proxy.deleteEnvironment(envId)
        except DatabaseProxyException as ex:
            self.close()
            raise ARMHTTPError(ex)
        except ARMException as ex:
            self.close()
            raise ARMHTTPError(ex)
    def check_existing_environment(self, environment_name):
        """Return True when an environment with this name already exists.

        nameCheck raises when the name is taken; that specific message is
        translated into a True result instead of an HTTP error.
        """
        try:
            self.db_proxy.nameCheck(environment_name, 'environment')
            return False
        except DatabaseProxyException as ex:
            if str(ex.value).find(' already exists') > -1:
                return True
            else:
                self.close()
                raise ARMHTTPError(ex)
        except ARMException as ex:
            if str(ex.value).find(' already exists') > -1:
                return True
            else:
                self.close()
                raise ARMHTTPError(ex)
    def to_environment_parameters(self, environment):
        """Convert an Environment into EnvironmentParameters for the DB layer."""
        assert isinstance(environment, Environment)
        env_params = EnvironmentParameters(
            conName=environment.theName,
            conSc=environment.theShortCode,
            conDesc=environment.theDescription,
            environments=environment.theEnvironments,
            duplProperty=environment.theDuplicateProperty,
            overridingEnvironment=environment.theOverridingEnvironment,
            envTensions=environment.theTensions
        )
        return env_params
    def from_json(self, request):
        """Deserialize an Environment from a JSON request body.

        Tension entries arrive as a list of dicts and are re-keyed into the
        {(base_attr_id, attr_id): (value, rationale)} mapping the model uses.
        Raises MalformedJSONHTTPError when the body cannot be parsed or does
        not deserialize into an Environment.
        """
        json = request.get_json(silent=True)
        if json is False or json is None:
            self.close()
            raise MalformedJSONHTTPError(data=request.get_data())
        json_dict = json['object']
        assert isinstance(json_dict, dict)
        check_required_keys(json_dict, EnvironmentModel.required)
        json_dict['__python_obj__'] = Environment.__module__+'.'+Environment.__name__
        if 'theTensions' in json_dict:
            assert isinstance(json_dict['theTensions'], list)
            tensions = json_dict['theTensions']
            json_dict['theTensions'] = {}
            for tension in tensions:
                check_required_keys(tension, EnvironmentTensionModel.required)
                key = tuple([tension['base_attr_id'], tension['attr_id']])
                value = tuple([tension['value'], tension['rationale']])
                json_dict['theTensions'][key] = value
        json_dict['theId'] = -1
        new_json_environment = json_serialize(json_dict)
        environment = json_deserialize(new_json_environment)
        if not isinstance(environment, Environment):
            self.close()
            raise MalformedJSONHTTPError(data=request.get_data())
        else:
            return environment
    def simplify(self, obj):
        """Prepare an Environment for serialization: drop the internal id and
        flatten the tensions dict into EnvironmentTensionModel entries."""
        assert isinstance(obj, Environment)
        del obj.theId
        the_tensions = obj.theTensions
        assert isinstance(the_tensions, dict)
        obj.theTensions = []
        for key, value in list(the_tensions.items()):
            obj.theTensions.append(EnvironmentTensionModel(key=key, value=value))
        return obj
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import life_event
from google.ads.googleads.v9.services.types import life_event_service
from .base import LifeEventServiceTransport, DEFAULT_CLIENT_INFO
class LifeEventServiceGrpcTransport(LifeEventServiceTransport):
    """gRPC backend transport for LifeEventService.
    Service to fetch Google Ads Life Events.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            # A falsy (non-None) value here stops the base constructor from
            # resolving application default credentials.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = google.auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Cache of RPC stub callables, keyed by method name.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            address (Optionsl[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    def close(self):
        """Close the underlying gRPC channel."""
        self.grpc_channel.close()
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_life_event(
        self,
    ) -> Callable[
        [life_event_service.GetLifeEventRequest], life_event.LifeEvent
    ]:
        r"""Return a callable for the get life event method over gRPC.
        Returns the requested life event in full detail.
        Returns:
            Callable[[~.GetLifeEventRequest],
                    ~.LifeEvent]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_life_event" not in self._stubs:
            self._stubs["get_life_event"] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v9.services.LifeEventService/GetLifeEvent",
                request_serializer=life_event_service.GetLifeEventRequest.serialize,
                response_deserializer=life_event.LifeEvent.deserialize,
            )
        return self._stubs["get_life_event"]
# Names exported when this module is star-imported.
__all__ = ("LifeEventServiceGrpcTransport",)
| |
#(C) Copyright Syd Logan 2020
#(C) Copyright Thousand Smiles Foundation 2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from rest_framework.views import APIView
from rest_framework.exceptions import APIException, NotFound
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from clinic.models import *
from patient.models import *
from dentalstate.models import *
from datetime import *
from django.core import serializers
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound
from common.decorators import *
import traceback
import sys
import json
import logging
LOG = logging.getLogger("tscharts")
class DentalStateView(APIView):
    """REST endpoint for DentalState records (per-tooth dental findings).

    The field lists below drive generic request parsing: each GET filter or
    POST value is validated/translated according to the list it appears in.
    """
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    # Request fields parsed and validated as integers.
    integerFields = [
        "tooth",
    ]
    # Request fields parsed as booleans (currently none).
    booleanFields = [
    ]
    # Free-text fields; GET filters on these use case-insensitive "contains".
    textFields = [
        "username",
        "comment",
    ]
    # Fields holding a DentalState state choice (wire value is the label).
    stateFields = [
        "state",
    ]
    # Fields holding a DentalState location choice.
    locationFields = [
        "location",
    ]
    # Fields holding a DentalState surface choice.
    surfaceFields = [
        "surface",
    ]
def stateToString(self, val):
ret = None
for x in DentalState.DENTAL_STATE_CHOICES:
if x[0] == val:
ret = x[1]
break
return ret
def stringToState(self, val):
ret = None
for x in DentalState.DENTAL_STATE_CHOICES:
if x[1] == val:
ret = x[0]
break
return ret
def locationToString(self, val):
ret = None
for x in DentalState.DENTAL_LOCATION_CHOICES:
if x[0] == val:
ret = x[1]
break
return ret
def stringToLocation(self, val):
ret = None
for x in DentalState.DENTAL_LOCATION_CHOICES:
if x[1] == val:
ret = x[0]
break
return ret
def surfaceToString(self, val):
ret = None
for x in DentalState.DENTAL_SURFACE_CHOICES:
if x[0] == val:
ret = x[1]
break
return ret
def surfaceToCSV(self, val):
ret = None
s = ""
val = "".join(val.split())
for x in val:
v = self.surfaceToString(x)
if v == None:
s = None
break
if s and len(s) > 0:
s += ","
s += v
ret = s
return ret
def stringToSurface(self, val):
ret = None
for x in DentalState.DENTAL_SURFACE_CHOICES:
if x[1] == val:
ret = x[0]
break
return ret
def CSVToSurface(self, val):
ret = None
s = ""
x = val.split(',')
for y in x:
y = "".join(y.split())
v = self.stringToSurface(y)
if v == None:
s = None
break
s += v
ret = s
return ret
def stringToBoolean(self, val):
ret = None
if val == "true" or val == "True":
ret = True
elif val == "false" or val == "False":
ret = False
return ret
def booleanToString(self, val):
ret = None
if val == True:
ret = "true"
elif val == False:
ret = "false"
return ret
    def serialize(self, entry):
        """Convert a DentalState model instance into a JSON-serializable dict.

        Choice-valued columns (state, location, surface) are translated from
        their stored codes to their display strings.
        """
        m = {}
        m["id"] = entry.id
        m["clinic"] = entry.clinic_id
        m["patient"] = entry.patient_id
        m["username"] = entry.username
        m["time"] = entry.time
        m["tooth"] = entry.tooth
        m["code"] = entry.code_id
        m["state"] = self.stateToString(entry.state)
        m["location"] = self.locationToString(entry.location)
        m["surface"] = self.surfaceToCSV(entry.surface)
        m["comment"] = entry.comment
        return m
    @log_request
    def get(self, request, dental_state_id=None, format=None):
        """Return one DentalState (by id) or a filtered list of them.

        With `dental_state_id`, a single serialized record is returned.
        Otherwise the optional query parameters (patient, clinic, code plus
        the declared field lists) are collected into ORM filter kwargs.
        Responds 400 on an invalid filter value, 404 when nothing matches.
        """
        dental_state = None
        badRequest = False
        aPatient = None
        aClinic = None
        aCode = None
        kwargs = {}
        if dental_state_id:
            try:
                dental_state = DentalState.objects.get(id = dental_state_id)
            except:
                dental_state = None
        else:
            # look for optional arguments
            try:
                patientid = request.GET.get('patient', '')
                if patientid != '':
                    try:
                        aPatient = Patient.objects.get(id=patientid)
                        if not aPatient:
                            badRequest = True
                        else:
                            kwargs["patient"] = aPatient
                    except:
                        badRequest = True
            except:
                pass # no patient ID
            try:
                clinicid = request.GET.get('clinic', '')
                if clinicid != '':
                    try:
                        aClinic = Clinic.objects.get(id=clinicid)
                        if not aClinic:
                            badRequest = True
                        else:
                            kwargs["clinic"] = aClinic
                    except:
                        badRequest = True
            except:
                pass # no clinic ID
            try:
                codeid = request.GET.get('code', '')
                if codeid != '':
                    try:
                        aCode = DentalCDT.objects.get(id=codeid)
                        if not aCode:
                            badRequest = True
                        else:
                            kwargs["code"] = aCode
                    except:
                        badRequest = True
            except:
                pass # no code ID
            # Each choice-valued query arg is translated from its wire string
            # to the stored code; an unknown string marks the request invalid.
            for x in self.locationFields:
                try:
                    val = request.GET.get(x, '')
                    if val != '':
                        val = self.stringToLocation(val)
                        if val == None:
                            badRequest = True
                        else:
                            kwargs[x] = val
                except:
                    pass
            for x in self.stateFields:
                try:
                    val = request.GET.get(x, '')
                    if val != '':
                        val = self.stringToState(val)
                        if val == None:
                            badRequest = True
                        else:
                            kwargs[x] = val
                except:
                    pass
            for x in self.surfaceFields:
                try:
                    val = request.GET.get(x, '')
                    if val != '':
                        val = self.stringToSurface(val)
                        if val == None:
                            badRequest = True
                        else:
                            kwargs[x] = val
                except:
                    pass
            for x in self.integerFields:
                try:
                    val = request.GET.get(x, '')
                    if val != '':
                        val = int(val)
                        if val == None:
                            badRequest = True
                        else:
                            kwargs[x] = val
                except:
                    pass
            # Text fields filter with case-insensitive substring match.
            for x in self.textFields:
                try:
                    val = request.GET.get(x, '')
                    if val != '':
                        if x == "comment":
                            kwargs["comment__icontains"] = val
                        elif x == "username":
                            kwargs["username__icontains"] = val
                except:
                    pass
            if not badRequest:
                try:
                    dental_state = DentalState.objects.filter(**kwargs)
                except:
                    dental_state = None
        if not dental_state and not badRequest:
            raise NotFound
        elif not badRequest:
            if dental_state_id:
                ret = self.serialize(dental_state)
            else:
                ret = []
                for x in dental_state:
                    m = self.serialize(x)
                    ret.append(m)
        if badRequest:
            return HttpResponseBadRequest()
        else:
            return Response(ret)
def validatePostArgs(self, data):
valid = True
kwargs = {}
kwargs = data
for k, v in data.items():
if not k in self.locationFields and not k in self.surfaceFields and not k in self.stateFields and not k in self.booleanFields and not k in self.textFields and not k in self.integerFields and k != "patient" and k != "clinic" and k != "code":
valid = False
LOG.warning("validatePostArgs: Failed to validate key {} value {}".format(k, v))
break
try:
val = self.stringToState(data["state"])
if val == None:
LOG.warning("validatePostArgs: Failed to validate key state val {}".format(data["state"]))
valid = False
else:
kwargs["state"] = val
except:
LOG.warning("validatePostArgs: Failed to locate key {}: {}".format("state", sys.exc_info()[0]))
valid = False
try:
val = self.stringToLocation(data["location"])
if val == None:
LOG.warning("validatePostArgs: Failed to validate key location val {}".format(data["location"]))
valid = False
else:
kwargs["location"] = val
except:
LOG.warning("validatePostArgs: Failed to locate key {}: {}".format("location", sys.exc_info()[0]))
valid = False
try:
val = self.CSVToSurface(data["surface"])
if val == None:
LOG.warning("validatePostArgs: Failed to validate key surface val {}".format(data["surface"]))
valid = False
else:
kwargs["surface"] = val
except:
LOG.warning("validatePostArgs: Failed to locate key {}: {}".format("surface", sys.exc_info()[0]))
valid = False
try:
if not ("username" in data and len(data["username"]) > 0):
valid = False
LOG.warning("validatePostArgs: Failed to validate key username")
else:
kwargs["username"] = data["username"]
except:
LOG.warning("validatePostArgs: Exception: Failed to validate key username")
valid = False
for x in self.booleanFields:
try:
val = self.stringToBoolean(data[x])
if val == None:
LOG.warning("validatePostArgs: Failed to validate key x {} val {}".format(x, data[x]))
valid = False
break
else:
kwargs[x] = val
except:
LOG.warning("validatePostArgs: Failed to locate key x {}".format(x))
valid = False
for x in self.integerFields:
try:
LOG.warning("validatePostArgs: validating key x {} val {}".format(x, data[x]))
val = int(data[x])
if val == None:
LOG.warning("validatePostArgs: Failed to validate key x {} val {}".format(x, data[x]))
valid = False
break
else:
kwargs[x] = val
except:
LOG.warning("validatePostArgs: Failed to locate key x {} {}".format(x, sys.exc_info()[0]))
valid = False
for x in self.textFields:
try:
val = str(data[x])
if val == False:
LOG.warning("validatePostArgs: Failed to validate key x {} val {}".format(x, data[x]))
valid = False
break
else:
kwargs[x] = data[x]
except:
LOG.warning("validatePostArgs: Failed to locate key x {}".format(x))
valid = False
return valid, kwargs
def validatePutArgs(self, data, dental_state):
valid = True
found = False
# first check to see if we have at least one item, and what
# we have is paired with a valid value
for k, v in data.items():
if k in self.stateFields:
found = True
try:
z = self.stringToState(v)
if z == None:
LOG.warning("validatePutArgs: invalid k {} v {}".format(k, v))
valid = False
except:
LOG.warning("validatePutArgs: exception invalid k {} v {}".format(k, v))
valid = False
elif k in self.surfaceFields:
found = True
try:
z = self.CSVToSurface(v)
if z == None:
LOG.warning("validatePutArgs: invalid k {} v {}".format(k, v))
valid = False
except:
LOG.warning("validatePutArgs: exception invalid k {} v {}".format(k, v))
valid = False
elif k in self.locationFields:
found = True
try:
z = self.stringToLocation(v)
if z == None:
LOG.warning("validatePutArgs: invalid k {} v {}".format(k, v))
valid = False
except:
LOG.warning("validatePutArgs: exception invalid k {} v {}".format(k, v))
valid = False
elif k in self.booleanFields:
found = True
try:
z = self.stringToBoolean(v)
if z == None:
LOG.warning("validatePutArgs: invalid k {} v {}".format(k, v))
valid = False
except:
LOG.warning("validatePutArgs: exception invalid k {} v {}".format(k, v))
valid = False
elif k in self.integerFields:
found = True
try:
z = int(v)
if z == None:
LOG.warning("validatePutArgs: invalid k {} v {}".format(k, v))
valid = False
except:
LOG.warning("validatePutArgs: exception invalid k {} v {}".format(k, v))
valid = False
elif k in self.textFields:
found = True
try:
x = str(v)
if x == None:
LOG.warning("validatePutArgs: invalid text field k {} v {}".format(k, v))
valid = False
except:
LOG.warning("validatePutArgs: exception invalid text field k {} v {}".format(k, v))
valid = False
elif k in ["clinic", "patient", "id", "code"]:
found = True
else:
LOG.warning("validatePutArgs: unknown key k {} v {}".format(k, v))
valid = False # unknown key
# now, build up the dental state object
if found == True and valid == True:
for k, v in data.items():
LOG.warning("validatePutArgs: bottom loop k {} v {}".format(k, v))
if k == "tooth":
dental_state.tooth = int(v)
elif k == "state":
dental_state.state = self.stringToState(v)
elif k == "location":
dental_state.location = self.stringToLocation(v)
elif k == "surface":
dental_state.surface = self.CSVToSurface(v)
elif k == "username":
dental_state.username = str(v)
elif k == "comment":
dental_state.comment = str(v)
try:
if "clinic" in data:
aClinic = Clinic.objects.get(id=int(data["clinic"]))
dental_state.clinic = aClinic
except:
LOG.warning("validatePutArgs: invalid clinic {}".format(data["clinic"]))
valid = False
try:
if "patient" in data:
aPatient = Patient.objects.get(id=int(data["patient"]))
dental_state.patient = aPatient
except:
LOG.warning("validatePutArgs: invalid patient {}".format(data["patient"]))
valid = False
try:
if "code" in data:
aCode = DentalCDT.objects.get(id=int(data["code"]))
dental_state.code = aCode
except:
LOG.warning("validatePutArgs: invalid patient {}".format(data["patient"]))
valid = False
return valid, dental_state
    @log_request
    def post(self, request, format=None):
        """Create a new DentalState from a JSON body.

        The body must carry integer "patient", "clinic" and "code" ids
        plus the field values checked by validatePostArgs. Responds 400
        on a bad request, raises NotFound when a referenced object does
        not exist, and returns {'id': ...} on success.
        """
        badRequest = False
        # NOTE(review): implError is never set True anywhere below, so the
        # HttpResponseServerError branch at the end is unreachable as
        # written; the exception handler sets badRequest instead.
        implError = False
        implMsg = ""
        data = json.loads(request.body)
        # The three object references are mandatory and must be integers.
        try:
            patientid = int(data["patient"])
        except:
            badRequest = True
        try:
            clinicid = int(data["clinic"])
        except:
            badRequest = True
        try:
            codeid = int(data["code"])
        except:
            badRequest = True
        # validate the post data, and get a kwargs dict for
        # creating the object
        valid, kwargs = self.validatePostArgs(data)
        if not valid:
            LOG.warning("post: Failed to validate!!")
            badRequest = True
        if not badRequest and not implError:
            # get the instances
            try:
                aPatient = Patient.objects.get(id=patientid)
            except:
                aPatient = None
            try:
                aClinic = Clinic.objects.get(id=clinicid)
            except:
                aClinic = None
            try:
                aCode = DentalCDT.objects.get(id=codeid)
            except:
                aCode = None
            if not aPatient or not aClinic or not aCode:
                raise NotFound
        if not badRequest and not implError:
            try:
                kwargs["patient"] = aPatient
                kwargs["clinic"] = aClinic
                kwargs["code"] = aCode
                dental_state = DentalState(**kwargs)
                if dental_state:
                    dental_state.save()
                else:
                    LOG.warning("post: unable to create DentalState object!!")
                    badRequest = True
            except Exception as e:
                # NOTE(review): implMsg is set here but badRequest (not
                # implError) is raised, so the message is never sent.
                badRequest = True
                LOG.warning("post: exception!! {}".format(traceback.format_exc()))
                implMsg = sys.exc_info()[0]
        if badRequest:
            return HttpResponseBadRequest()
        if implError:
            return HttpResponseServerError(implMsg)
        else:
            return Response({'id': dental_state.id})
    @log_request
    def put(self, request, dental_state_id=None, format=None):
        """Update an existing DentalState record.

        The URL must carry dental_state_id; the JSON body holds the
        fields to change (validated by validatePutArgs). Responds 400 on
        a bad request, 404 when the id does not resolve, 500 when
        parsing/validation raises unexpectedly, and {} on success.
        """
        badRequest = False
        implError = False
        notFound = False
        # The object id is mandatory for a PUT.
        if not dental_state_id:
            LOG.warning("put: missing ID arg")
            badRequest = True
        if not badRequest:
            dental_state = None
            try:
                dental_state = DentalState.objects.get(id=dental_state_id)
            except:
                LOG.warning("DentalState put exception!!")
            if not dental_state:
                notFound = True
            else:
                try:
                    data = json.loads(request.body)
                    valid, dental_state = self.validatePutArgs(data, dental_state)
                    if valid:
                        dental_state.save()
                    else:
                        LOG.warning("put: validate put args failed")
                        badRequest = True
                except:
                    # implMsg is only bound on this path, which is the only
                    # path that sets implError — safe at the return below.
                    implError = True
                    implMsg = sys.exc_info()[0]
                    LOG.warning("DentalState exception {}".format(implMsg))
        if badRequest:
            return HttpResponseBadRequest()
        if notFound:
            return HttpResponseNotFound()
        if implError:
            return HttpResponseServerError(implMsg)
        else:
            return Response({})
@log_request
def delete(self, request, dental_state_id=None, format=None):
dental_state = None
# see if the object exists
if not dental_state_id:
return HttpResponseBadRequest()
try:
dental_state = DentalState.objects.get(id=dental_state_id)
except:
dental_state = None
if not dental_state:
raise NotFound
else:
dental_state.delete()
return Response({})
| |
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import logging
import sys
import threading
import time
import traceback
import bson
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.util import retry_until_ok
from pymongo import MongoClient
import pymongo
try:
import Queue as queue
except ImportError:
import queue
class OplogThread(threading.Thread):
"""OplogThread gathers the updates for a single oplog.
"""
def __init__(self, primary_conn, main_address, oplog_coll, is_sharded,
doc_manager, oplog_progress, namespace_set, auth_key,
auth_username, repl_set=None, collection_dump=True,
batch_size=DEFAULT_BATCH_SIZE, fields=None,
dest_mapping={}, continue_on_error=False, oplog_name=None):
"""Initialize the oplog thread.
"""
super(OplogThread, self).__init__()
self.batch_size = batch_size
# The connection to the primary for this replicaSet.
self.primary_connection = primary_conn
# Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self.collection_dump = collection_dump
# The mongos for sharded setups
# Otherwise the same as primary_connection.
# The value is set later on.
self.main_connection = None
# The connection to the oplog collection
self.oplog = oplog_coll
self.oplog_name = oplog_name or str(self.oplog)
# Boolean describing whether the cluster is sharded or not
self.is_sharded = is_sharded
# A document manager for each target system.
# These are the same for all threads.
if type(doc_manager) == list:
self.doc_managers = doc_manager
else:
self.doc_managers = [doc_manager]
# Boolean describing whether or not the thread is running.
self.running = True
# A dictionary that stores OplogThread/timestamp pairs.
# Represents the last checkpoint for a OplogThread.
self.checkpoint = oplog_progress
# The set of namespaces to process from the mongo cluster.
self.namespace_set = namespace_set
# The dict of source namespaces to destination namespaces
self.dest_mapping = dest_mapping
# Whether the collection dump gracefully handles exceptions
self.continue_on_error = continue_on_error
# If authentication is used, this is an admin password.
self.auth_key = auth_key
# This is the username used for authentication.
self.auth_username = auth_username
# Set of fields to export
self.fields = fields
logging.info('OplogThread: Initializing oplog thread')
if is_sharded:
self.main_connection = MongoClient(main_address)
else:
self.main_connection = MongoClient(main_address,
replicaSet=repl_set)
self.oplog = self.main_connection['local']['oplog.rs']
if auth_key is not None:
# Authenticate for the whole system
self.primary_connection['admin'].authenticate(
auth_username, auth_key)
self.main_connection['admin'].authenticate(
auth_username, auth_key)
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
logging.warning('%s %s' % (err_msg, self.primary_connection))
    @property
    def fields(self):
        """Set of field names to replicate, or None to replicate all."""
        return self._fields
@fields.setter
def fields(self, value):
if value:
self._fields = set(value)
# Always include _id field
self._fields.add('_id')
else:
self._fields = None
    def run(self):
        """Start the oplog worker.

        Main loop: obtain a cursor positioned at the last checkpoint,
        replay each oplog entry (remove/insert/update) into every
        DocManager, and record the timestamp of the last processed entry
        as the new checkpoint. Loops until join() clears self.running or
        the thread falls irrecoverably behind the oplog.
        """
        logging.debug("OplogThread: Run thread started")
        while self.running is True:
            logging.debug("OplogThread: Getting cursor")
            cursor, cursor_len = self.init_cursor()
            # we've fallen too far behind
            if cursor is None and self.checkpoint is not None:
                err_msg = "OplogThread: Last entry no longer in oplog"
                effect = "cannot recover!"
                logging.error('%s %s %s' % (err_msg, effect, self.oplog))
                self.running = False
                continue
            if cursor_len == 0:
                logging.debug("OplogThread: Last entry is the one we "
                              "already processed. Up to date. Sleeping.")
                time.sleep(1)
                continue
            logging.debug("OplogThread: Got the cursor, count is %d"
                          % cursor_len)
            last_ts = None
            err = False
            remove_inc = 0
            upsert_inc = 0
            update_inc = 0
            try:
                logging.debug("OplogThread: about to process new oplog "
                              "entries")
                while cursor.alive and self.running:
                    logging.debug("OplogThread: Cursor is still"
                                  " alive and thread is still running.")
                    for n, entry in enumerate(cursor):
                        logging.debug("OplogThread: Iterating through cursor,"
                                      " document number in this cursor is %d"
                                      % n)
                        # Break out if this thread should stop
                        if not self.running:
                            break
                        # Don't replicate entries resulting from chunk moves
                        if entry.get("fromMigrate"):
                            continue
                        # Take fields out of the oplog entry that
                        # shouldn't be replicated. This may nullify
                        # the document if there's nothing to do.
                        if not self.filter_oplog_entry(entry):
                            continue
                        # sync the current oplog operation
                        operation = entry['op']
                        ns = entry['ns']
                        # skip namespaces without a collection component
                        if '.' not in ns:
                            continue
                        coll = ns.split('.', 1)[1]
                        # never replicate system collections
                        if coll.startswith("system."):
                            continue
                        # use namespace mapping if one exists
                        ns = self.dest_mapping.get(entry['ns'], ns)
                        for docman in self.doc_managers:
                            try:
                                logging.debug("OplogThread: Operation for this "
                                              "entry is %s" % str(operation))
                                # Remove
                                if operation == 'd':
                                    entry['_id'] = entry['o']['_id']
                                    entry['ns'] = ns
                                    entry['_ts'] = util.bson_ts_to_long(
                                        entry['ts'])
                                    docman.remove(entry)
                                    remove_inc += 1
                                # Insert
                                elif operation == 'i':  # Insert
                                    # Retrieve inserted document from
                                    # 'o' field in oplog record
                                    doc = entry.get('o')
                                    # Extract timestamp and namespace
                                    doc['_ts'] = util.bson_ts_to_long(
                                        entry['ts'])
                                    doc['ns'] = ns
                                    docman.upsert(doc)
                                    upsert_inc += 1
                                # Update
                                elif operation == 'u':
                                    doc = {"_id": entry['o2']['_id'],
                                           "_ts": util.bson_ts_to_long(
                                               entry['ts']),
                                           "ns": ns}
                                    # 'o' field contains the update spec
                                    docman.update(doc, entry.get('o', {}))
                                    update_inc += 1
                            except errors.OperationFailed:
                                logging.exception(
                                    "Unable to process oplog document %r"
                                    % entry)
                            except errors.ConnectionFailed:
                                logging.exception(
                                    "Connection failed while processing oplog "
                                    "document %r" % entry)
                        if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
                            logging.debug(
                                "OplogThread: Documents removed: %d, "
                                "inserted: %d, updated: %d so far" % (
                                    remove_inc, upsert_inc, update_inc))
                        logging.debug("OplogThread: Doc is processed.")
                        last_ts = entry['ts']
                        # update timestamp per batch size
                        # n % -1 (default for self.batch_size) == 0 for all n
                        # NOTE(review): the test below compares against 1, so
                        # with the default batch_size of -1 it never fires and
                        # the checkpoint only advances after the inner loop —
                        # confirm whether == 1 (rather than == 0) is intended.
                        if n % self.batch_size == 1 and last_ts is not None:
                            self.checkpoint = last_ts
                    # update timestamp after running through oplog
                    if last_ts is not None:
                        logging.debug("OplogThread: updating checkpoint after"
                                      "processing new oplog entries")
                        self.checkpoint = last_ts
            except (pymongo.errors.AutoReconnect,
                    pymongo.errors.OperationFailure,
                    pymongo.errors.ConfigurationError):
                logging.exception(
                    "Cursor closed due to an exception. "
                    "Will attempt to reconnect.")
                err = True
            # Re-authenticate before retrying after a dropped cursor.
            if err is True and self.auth_key is not None:
                self.primary_connection['admin'].authenticate(
                    self.auth_username, self.auth_key)
                self.main_connection['admin'].authenticate(
                    self.auth_username, self.auth_key)
                err = False
            # update timestamp before attempting to reconnect to MongoDB,
            # after being join()'ed, or if the cursor closes
            if last_ts is not None:
                logging.debug("OplogThread: updating checkpoint after an "
                              "Exception, cursor closing, or join() on this"
                              "thread.")
                self.checkpoint = last_ts
            logging.debug("OplogThread: Sleeping. Documents removed: %d, "
                          "upserted: %d, updated: %d"
                          % (remove_inc, upsert_inc, update_inc))
            time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
logging.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
def filter_oplog_entry(self, entry):
"""Remove fields from an oplog entry that should not be replicated."""
if not self._fields:
return entry
def pop_excluded_fields(doc):
for key in set(doc) - self._fields:
doc.pop(key)
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
pop_excluded_fields(entry['o'])
# 'u' indicates an update. 'o' field is the update spec.
elif entry['op'] == 'u':
pop_excluded_fields(entry['o'].get("$set", {}))
pop_excluded_fields(entry['o'].get("$unset", {}))
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry['o'] and not entry['o']['$set']:
entry['o'].pop("$set")
if "$unset" in entry['o'] and not entry['o']['$unset']:
entry['o'].pop("$unset")
if not entry['o']:
return None
return entry
def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, filtering
entries not in the namespace set.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {}
if self.namespace_set:
query['ns'] = {'$in': self.namespace_set}
if timestamp is None:
cursor = self.oplog.find(
query,
tailable=True, await_data=True)
else:
query['ts'] = {'$gte': timestamp}
cursor = self.oplog.find(
query, tailable=True, await_data=True)
# Applying 8 as the mask to the cursor enables OplogReplay
cursor.add_option(8)
return cursor
    def dump_collection(self):
        """Dumps collection into the target system.

        This method is called when we're initializing the cursor and have no
        configs i.e. when we're starting for the first time.

        Returns the oplog timestamp captured before the dump began, or
        None when the oplog is empty or the dump failed (in which case
        self.running is also cleared).
        """
        dump_set = self.namespace_set or []
        logging.debug("OplogThread: Dumping set of collections %s " % dump_set)
        # no namespaces specified
        if not self.namespace_set:
            db_list = retry_until_ok(self.main_connection.database_names)
            for database in db_list:
                if database == "config" or database == "local":
                    continue
                coll_list = retry_until_ok(
                    self.main_connection[database].collection_names)
                for coll in coll_list:
                    if coll.startswith("system"):
                        continue
                    namespace = "%s.%s" % (database, coll)
                    dump_set.append(namespace)
        timestamp = util.retry_until_ok(self.get_last_oplog_timestamp)
        if timestamp is None:
            return None
        long_ts = util.bson_ts_to_long(timestamp)
        # Generator over every document in every namespace; resumes after
        # the last seen _id when the connection drops (AutoReconnect).
        def docs_to_dump():
            for namespace in dump_set:
                logging.info("OplogThread: dumping collection %s"
                             % namespace)
                database, coll = namespace.split('.', 1)
                last_id = None
                attempts = 0
                # Loop to handle possible AutoReconnect
                while attempts < 60:
                    target_coll = self.main_connection[database][coll]
                    if not last_id:
                        cursor = util.retry_until_ok(
                            target_coll.find,
                            fields=self._fields,
                            sort=[("_id", pymongo.ASCENDING)]
                        )
                    else:
                        cursor = util.retry_until_ok(
                            target_coll.find,
                            {"_id": {"$gt": last_id}},
                            fields=self._fields,
                            sort=[("_id", pymongo.ASCENDING)]
                        )
                    try:
                        for doc in cursor:
                            if not self.running:
                                raise StopIteration
                            doc["ns"] = self.dest_mapping.get(
                                namespace, namespace)
                            doc["_ts"] = long_ts
                            last_id = doc["_id"]
                            yield doc
                        break
                    except pymongo.errors.AutoReconnect:
                        attempts += 1
                        time.sleep(1)
        # Upsert one document at a time; optionally tolerate failures.
        def upsert_each(dm):
            num_inserted = 0
            num_failed = 0
            for num, doc in enumerate(docs_to_dump()):
                if num % 10000 == 0:
                    logging.debug("Upserted %d docs." % num)
                try:
                    dm.upsert(doc)
                    num_inserted += 1
                except Exception:
                    if self.continue_on_error:
                        logging.exception(
                            "Could not upsert document: %r" % doc)
                        num_failed += 1
                    else:
                        raise
            logging.debug("Upserted %d docs" % num_inserted)
            if num_failed > 0:
                logging.error("Failed to upsert %d docs" % num_failed)
        # Prefer the DocManager's bulk path, falling back to per-document
        # upserts when the bulk upsert raises and continue_on_error is set.
        def upsert_all(dm):
            try:
                dm.bulk_upsert(docs_to_dump())
            except Exception as e:
                if self.continue_on_error:
                    logging.exception("OplogThread: caught exception"
                                      " during bulk upsert, re-upserting"
                                      " documents serially")
                    upsert_each(dm)
                else:
                    raise
        def do_dump(dm, error_queue):
            try:
                # Bulk upsert if possible
                if hasattr(dm, "bulk_upsert"):
                    logging.debug("OplogThread: Using bulk upsert function for "
                                  "collection dump")
                    upsert_all(dm)
                else:
                    logging.debug(
                        "OplogThread: DocManager %s has no "
                        "bulk_upsert method. Upserting documents "
                        "serially for collection dump." % str(dm))
                    upsert_each(dm)
            except:
                # Likely exceptions:
                # pymongo.errors.OperationFailure,
                # mongo_connector.errors.ConnectionFailed
                # mongo_connector.errors.OperationFailed
                error_queue.put(sys.exc_info())
        # Extra threads (if any) that assist with collection dumps
        dumping_threads = []
        # Did the dump succeed for all target systems?
        dump_success = True
        # Holds any exceptions we can't recover from
        # NOTE(review): this local name shadows the imported
        # mongo_connector `errors` module for the rest of this function.
        errors = queue.Queue()
        if len(self.doc_managers) == 1:
            do_dump(self.doc_managers[0], errors)
        else:
            # Slight performance gain breaking dump into separate
            # threads if > 1 replication target
            for dm in self.doc_managers:
                t = threading.Thread(target=do_dump, args=(dm, errors))
                dumping_threads.append(t)
                t.start()
            # cleanup
            for t in dumping_threads:
                t.join()
        # Print caught exceptions
        try:
            while True:
                klass, value, trace = errors.get_nowait()
                dump_success = False
                traceback.print_exception(klass, value, trace)
        except queue.Empty:
            pass
        if not dump_success:
            err_msg = "OplogThread: Failed during dump collection"
            effect = "cannot recover!"
            logging.error('%s %s %s' % (err_msg, effect, self.oplog))
            self.running = False
            return None
        return timestamp
def get_last_oplog_timestamp(self):
"""Return the timestamp of the latest entry in the oplog.
"""
if not self.namespace_set:
curr = self.oplog.find().sort(
'$natural', pymongo.DESCENDING
).limit(1)
else:
curr = self.oplog.find(
{'ns': {'$in': self.namespace_set}}
).sort('$natural', pymongo.DESCENDING).limit(1)
if curr.count(with_limit_and_skip=True) == 0:
return None
logging.debug("OplogThread: Last oplog entry has timestamp %d."
% curr[0]['ts'].time)
return curr[0]['ts']
    def init_cursor(self):
        """Position the cursor appropriately.

        The cursor is set to either the beginning of the oplog, or
        wherever it was last left off.
        Returns the cursor and the number of documents left in the cursor.
        Returns (None, 0) when the oplog is empty or this thread has
        fallen behind the oldest retained oplog entry.
        """
        timestamp = self.checkpoint
        if self.checkpoint is None:
            if self.collection_dump:
                # dump collection and update checkpoint
                timestamp = self.dump_collection()
                if timestamp is None:
                    return None, 0
            else:
                # Collection dump disabled:
                # return cursor to beginning of oplog.
                cursor = self.get_oplog_cursor()
                self.checkpoint = self.get_last_oplog_timestamp()
                return cursor, retry_until_ok(cursor.count)
        # Retry for up to ~60s: the cursor may be invalidated between the
        # count() call and the first read.
        for i in range(60):
            cursor = self.get_oplog_cursor(timestamp)
            cursor_len = retry_until_ok(cursor.count)
            if cursor_len == 0:
                # rollback, update checkpoint, and retry
                logging.debug("OplogThread: Initiating rollback from "
                              "get_oplog_cursor")
                self.checkpoint = self.rollback()
                return self.init_cursor()
            # try to get the first oplog entry
            try:
                first_oplog_entry = next(cursor)
            except StopIteration:
                # It's possible for the cursor to become invalid
                # between the cursor.count() call and now
                time.sleep(1)
                continue
            # first entry should be last oplog entry processed
            cursor_ts_long = util.bson_ts_to_long(
                first_oplog_entry.get("ts"))
            given_ts_long = util.bson_ts_to_long(timestamp)
            if cursor_ts_long > given_ts_long:
                # first entry in oplog is beyond timestamp
                # we've fallen behind
                return None, 0
            # first entry has been consumed
            return cursor, cursor_len - 1
        # for/else: all 60 attempts ended in StopIteration without a
        # usable cursor, so give up.
        else:
            raise errors.MongoConnectorError(
                "Could not initialize oplog cursor.")
    def rollback(self):
        """Rollback target system to consistent state.
        The strategy is to find the latest timestamp in the target system and
        the largest timestamp in the oplog less than the latest target system
        timestamp. This defines the rollback window and we just roll these
        back until the oplog and target system are in consistent states.

        Returns the rollback cutoff timestamp (the new checkpoint), or
        None when there is nothing to roll back.
        """
        # Find the most recently inserted document in each target system
        logging.debug("OplogThread: Initiating rollback sequence to bring "
                      "system into a consistent state.")
        last_docs = []
        for dm in self.doc_managers:
            dm.commit()
            last_docs.append(dm.get_last_doc())
        # Of these documents, which is the most recent?
        last_inserted_doc = max(last_docs,
                                key=lambda x: x["_ts"] if x else float("-inf"))
        # Nothing has been replicated. No need to rollback target systems
        if last_inserted_doc is None:
            return None
        # Find the oplog entry that touched the most recent document.
        # We'll use this to figure where to pick up the oplog later.
        target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
        last_oplog_entry = util.retry_until_ok(
            self.oplog.find_one,
            {'ts': {'$lte': target_ts}},
            sort=[('$natural', pymongo.DESCENDING)]
        )
        logging.debug("OplogThread: last oplog entry is %s"
                      % str(last_oplog_entry))
        # The oplog entry for the most recent document doesn't exist anymore.
        # If we've fallen behind in the oplog, this will be caught later
        if last_oplog_entry is None:
            return None
        # rollback_cutoff_ts happened *before* the rollback
        rollback_cutoff_ts = last_oplog_entry['ts']
        start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
        # timestamp of the most recent document on any target system
        end_ts = last_inserted_doc['_ts']
        for dm in self.doc_managers:
            rollback_set = {}   # this is a dictionary of ns:list of docs
            # group potentially conflicted documents by namespace
            for doc in dm.search(start_ts, end_ts):
                if doc['ns'] in rollback_set:
                    rollback_set[doc['ns']].append(doc)
                else:
                    rollback_set[doc['ns']] = [doc]
            # retrieve these documents from MongoDB, either updating
            # or removing them in each target system
            for namespace, doc_list in rollback_set.items():
                # Get the original namespace
                original_namespace = namespace
                for source_name, dest_name in self.dest_mapping.items():
                    if dest_name == namespace:
                        original_namespace = source_name
                database, coll = original_namespace.split('.', 1)
                bson_obj_id_list = [doc['_id'] for doc in doc_list]
                to_update = util.retry_until_ok(
                    self.main_connection[database][coll].find,
                    {'_id': {'$in': bson_obj_id_list}},
                    fields=self._fields
                )
                # doc list are docs in target system, to_update are
                # docs in mongo
                doc_hash = {doc['_id']: doc for doc in doc_list}  # hash by _id
                to_index = []
                # Partition: docs still in MongoDB are re-indexed; docs left
                # in doc_hash exist only in the target and must be removed.
                def collect_existing_docs():
                    for doc in to_update:
                        if doc['_id'] in doc_hash:
                            del doc_hash[doc['_id']]
                            to_index.append(doc)
                retry_until_ok(collect_existing_docs)
                # delete the inconsistent documents
                logging.debug("OplogThread: Rollback, removing inconsistent "
                              "docs.")
                remov_inc = 0
                for doc in doc_hash.values():
                    try:
                        dm.remove(doc)
                        remov_inc += 1
                        logging.debug("OplogThread: Rollback, removed %s " %
                                      str(doc))
                    except errors.OperationFailed:
                        logging.warning(
                            "Could not delete document during rollback: %s "
                            "This can happen if this document was already "
                            "removed by another rollback happening at the "
                            "same time." % str(doc)
                        )
                logging.debug("OplogThread: Rollback, removed %d docs." %
                              remov_inc)
                # insert the ones from mongo
                logging.debug("OplogThread: Rollback, inserting documents "
                              "from mongo.")
                insert_inc = 0
                fail_insert_inc = 0
                for doc in to_index:
                    doc['_ts'] = util.bson_ts_to_long(rollback_cutoff_ts)
                    doc['ns'] = self.dest_mapping.get(namespace, namespace)
                    try:
                        insert_inc += 1
                        dm.upsert(doc)
                    except errors.OperationFailed as e:
                        fail_insert_inc += 1
                        logging.error("OplogThread: Rollback, Unable to "
                                      "insert %s with exception %s"
                                      % (doc, str(e)))
                logging.debug("OplogThread: Rollback, Successfully inserted %d "
                              " documents and failed to insert %d"
                              " documents. Returning a rollback cutoff time of %s "
                              % (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
        return rollback_cutoff_ts
| |
# Copyright 2012 NetApp
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Share driver module."""
import time
import ddt
import mock
from mock import PropertyMock
from manila import exception
from manila import network
from manila.share import configuration
from manila.share import driver
from manila import test
from manila.tests import utils as test_utils
from manila import utils
def fake_execute_with_raise(*cmd, **kwargs):
    """Stub for utils.execute that always fails with ProcessExecutionError."""
    raise exception.ProcessExecutionError
def fake_sleep(duration):
    """No-op replacement for time.sleep so retry loops finish instantly."""
    pass
class ShareDriverWithExecuteMixin(driver.ShareDriver, driver.ExecuteMixin):
    """ShareDriver combined with ExecuteMixin, for testing _try_execute."""
    pass
@ddt.ddt
class ShareDriverTestCase(test.TestCase):
_SNAPSHOT_METHOD_NAMES = ["create_snapshot", "delete_snapshot"]
    def setUp(self):
        """Stub out execute and sleep, and enable share-server handling."""
        super(ShareDriverTestCase, self).setUp()
        self.utils = utils
        # utils.execute always raises ProcessExecutionError in these tests
        self.mock_object(self.utils, 'execute', fake_execute_with_raise)
        self.time = time
        # time.sleep becomes a no-op so retry loops finish immediately
        self.mock_object(self.time, 'sleep', fake_sleep)
        driver.CONF.set_default('driver_handles_share_servers', True)
def test__try_execute(self):
execute_mixin = ShareDriverWithExecuteMixin(
True, configuration=configuration.Configuration(None))
self.assertRaises(exception.ProcessExecutionError,
execute_mixin._try_execute)
    def test_verify_share_driver_mode_option_type(self):
        """String 'True' from a config file is coerced to boolean True."""
        data = {'DEFAULT': {'driver_handles_share_servers': 'True'}}
        with test_utils.create_temp_config_with_opts(data):
            share_driver = driver.ShareDriver([True, False])
            self.assertTrue(share_driver.driver_handles_share_servers)
    def _instantiate_share_driver(self, network_config_group,
                                  driver_handles_share_servers,
                                  admin_network_config_group=None):
        """Instantiate ShareDriver and verify its network API wiring.

        Asserts that network.API is constructed with the expected config
        group (and 'admin' label) exactly when the driver handles share
        servers, and that the network attributes exist accordingly.

        :returns: the instantiated driver for further assertions.
        """
        self.mock_object(network, 'API')
        config = mock.Mock()
        config.append_config_values = mock.Mock()
        config.config_group = 'fake_config_group'
        config.network_config_group = network_config_group
        if admin_network_config_group:
            config.admin_network_config_group = admin_network_config_group
        config.safe_get = mock.Mock(return_value=driver_handles_share_servers)
        share_driver = driver.ShareDriver([True, False], configuration=config)
        self.assertTrue(hasattr(share_driver, 'configuration'))
        config.append_config_values.assert_called_once_with(driver.share_opts)
        if driver_handles_share_servers:
            # Explicit network_config_group wins over the plain config_group.
            calls = []
            if network_config_group:
                calls.append(mock.call(
                    config_group_name=config.network_config_group))
            else:
                calls.append(mock.call(
                    config_group_name=config.config_group))
            if admin_network_config_group:
                calls.append(mock.call(
                    config_group_name=config.admin_network_config_group,
                    label='admin'))
            network.API.assert_has_calls(calls)
            self.assertTrue(hasattr(share_driver, 'network_api'))
            self.assertTrue(hasattr(share_driver, 'admin_network_api'))
            self.assertIsNotNone(share_driver.network_api)
            self.assertIsNotNone(share_driver.admin_network_api)
        else:
            # Without share-server handling no network API is created.
            self.assertFalse(hasattr(share_driver, 'network_api'))
            self.assertTrue(hasattr(share_driver, 'admin_network_api'))
            self.assertIsNone(share_driver.admin_network_api)
            self.assertFalse(network.API.called)
        return share_driver
    def test_instantiate_share_driver(self):
        """Default network config group, share servers handled."""
        self._instantiate_share_driver(None, True)
    def test_instantiate_share_driver_another_config_group(self):
        """Explicit network config group is passed to network.API."""
        self._instantiate_share_driver("fake_network_config_group", True)
    def test_instantiate_share_driver_with_admin_network(self):
        """Admin network config group creates a second, labeled API."""
        self._instantiate_share_driver(
            "fake_network_config_group", True,
            "fake_admin_network_config_group")
    def test_instantiate_share_driver_no_configuration(self):
        """With configuration=None the network API gets a None group name."""
        self.mock_object(network, 'API')
        share_driver = driver.ShareDriver(True, configuration=None)
        self.assertIsNone(share_driver.configuration)
        network.API.assert_called_once_with(config_group_name=None)
def test_get_share_stats_refresh_false(self):
share_driver = driver.ShareDriver(True, configuration=None)
share_driver._stats = {'fake_key': 'fake_value'}
result = share_driver.get_share_stats(False)
self.assertEqual(share_driver._stats, result)
def test_get_share_stats_refresh_true(self):
    """refresh=True recomputes stats and reports all standard keys."""
    conf = configuration.Configuration(None)
    expected_keys = (
        'qos', 'driver_version', 'share_backend_name',
        'free_capacity_gb', 'total_capacity_gb',
        'driver_handles_share_servers',
        'reserved_percentage', 'vendor_name', 'storage_protocol',
        'snapshot_support', 'mount_snapshot_support',
    )
    drv = driver.ShareDriver(True, configuration=conf)
    stale_stats = {'fake_key': 'fake_value'}
    drv._stats = stale_stats
    result = drv.get_share_stats(True)
    # The stale cache must have been replaced, not returned.
    self.assertNotEqual(stale_stats, result)
    for key in expected_keys:
        self.assertIn(key, result)
    self.assertEqual('Open Source', result['vendor_name'])
@ddt.data(
    {'opt': True, 'allowed': True},
    {'opt': True, 'allowed': (True, False)},
    {'opt': True, 'allowed': [True, False]},
    {'opt': True, 'allowed': set([True, False])},
    {'opt': False, 'allowed': False},
    {'opt': False, 'allowed': (True, False)},
    {'opt': False, 'allowed': [True, False]},
    {'opt': False, 'allowed': set([True, False])})
@ddt.unpack
def test__verify_share_server_handling_valid_cases(self, opt, allowed):
    """A DHSS option matching the driver's allowed values is accepted.

    Bug fix: the assertion previously read ``conf.safe_get.celled``
    (typo). Accessing an undefined attribute on a Mock auto-creates a
    truthy child Mock, so the assertion could never fail; assert the
    real ``called`` flag instead.
    """
    conf = configuration.Configuration(None)
    self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt))
    share_driver = driver.ShareDriver(allowed, configuration=conf)
    self.assertTrue(conf.safe_get.called)
    self.assertEqual(opt, share_driver.driver_handles_share_servers)
@ddt.data(
    {'opt': False, 'allowed': True},
    {'opt': True, 'allowed': False},
    {'opt': None, 'allowed': True},
    {'opt': 'True', 'allowed': True},
    {'opt': 'False', 'allowed': False},
    {'opt': [], 'allowed': True},
    {'opt': True, 'allowed': []},
    {'opt': True, 'allowed': ['True']},
    {'opt': False, 'allowed': ['False']})
@ddt.unpack
def test__verify_share_server_handling_invalid_cases(self, opt, allowed):
    """Mismatched or non-boolean DHSS combinations raise ManilaException.

    Bug fix: ``conf.safe_get.celled`` (typo) always evaluated to a
    truthy auto-created child Mock; assert ``called`` instead.
    """
    conf = configuration.Configuration(None)
    self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt))
    self.assertRaises(
        exception.ManilaException,
        driver.ShareDriver, allowed, configuration=conf)
    self.assertTrue(conf.safe_get.called)
def test_setup_server_handling_disabled(self):
    """setup_server is a silent no-op when DHSS is disabled."""
    drv = self._instantiate_share_driver(None, False)
    # Should not raise; there is nothing else to assert.
    drv.setup_server('Nothing is expected to happen.')
def test_setup_server_handling_enabled(self):
    """setup_server must be implemented by DHSS=True drivers."""
    drv = self._instantiate_share_driver(None, True)
    self.assertRaises(NotImplementedError,
                      drv.setup_server, 'fake_network_info')
def test_teardown_server_handling_disabled(self):
    """teardown_server is a silent no-op when DHSS is disabled."""
    drv = self._instantiate_share_driver(None, False)
    # Should not raise; there is nothing else to assert.
    drv.teardown_server('Nothing is expected to happen.')
def test_teardown_server_handling_enabled(self):
    """teardown_server must be implemented by DHSS=True drivers."""
    drv = self._instantiate_share_driver(None, True)
    self.assertRaises(NotImplementedError,
                      drv.teardown_server, 'fake_share_server_details')
def _assert_is_callable(self, obj, attr):
    """Assert that attribute *attr* of *obj* is callable."""
    target = getattr(obj, attr)
    self.assertTrue(callable(target))
@ddt.data('manage_existing', 'unmanage')
def test_drivers_methods_needed_by_manage_functionality(self, method):
    """The base driver exposes the manage/unmanage hooks."""
    drv = self._instantiate_share_driver(None, False)
    self._assert_is_callable(drv, method)
@ddt.data('manage_existing_snapshot', 'unmanage_snapshot')
def test_drivers_methods_needed_by_manage_snapshot_functionality(
        self, method):
    """The base driver exposes the snapshot manage/unmanage hooks."""
    drv = self._instantiate_share_driver(None, False)
    self._assert_is_callable(drv, method)
@ddt.data('revert_to_snapshot', 'revert_to_replicated_snapshot')
def test_drivers_methods_needed_by_share_revert_to_snapshot_functionality(
        self, method):
    """The base driver exposes the revert-to-snapshot hooks."""
    drv = self._instantiate_share_driver(None, False)
    self._assert_is_callable(drv, method)
@ddt.data(True, False)
def test_get_share_server_pools(self, value):
    """Default share-server pool listing is empty regardless of DHSS."""
    driver.CONF.set_default('driver_handles_share_servers', value)
    drv = driver.ShareDriver(value)
    self.assertEqual([], drv.get_share_server_pools('fake_server'))
@ddt.data(0.8, 1.0, 10.5, 20.0, None, '1', '1.1')
def test_check_for_setup_error(self, value):
    """Config values below 1.0 (or unset) fail the setup check."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    drv.configuration = configuration.Configuration(None)
    self.mock_object(drv.configuration, 'safe_get',
                     mock.Mock(return_value=value))
    if value and float(value) >= 1.0:
        # Valid value: no exception expected.
        drv.check_for_setup_error()
    else:
        self.assertRaises(exception.InvalidParameterValue,
                          drv.check_for_setup_error)
def test_snapshot_support_exists(self):
    """Redefining both snapshot hooks turns snapshot_support on."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    noop = lambda *args, **kwargs: None
    instance = type(
        "NotRedefined", (driver.ShareDriver, ),
        {"create_snapshot": noop, "delete_snapshot": noop})(True)
    self.mock_object(instance, "configuration")
    instance._update_share_stats()
    self.assertTrue(instance._stats["snapshot_support"])
    self.assertTrue(instance.configuration.safe_get.called)
@ddt.data(
    ([], [], False),
    (_SNAPSHOT_METHOD_NAMES, [], True),
    (_SNAPSHOT_METHOD_NAMES, _SNAPSHOT_METHOD_NAMES, True),
    (_SNAPSHOT_METHOD_NAMES[0:1], _SNAPSHOT_METHOD_NAMES[1:], True),
    ([], _SNAPSHOT_METHOD_NAMES, True),
)
@ddt.unpack
def test_check_redefined_driver_methods(self, common_drv_meth_names,
                                        child_drv_meth_names,
                                        expected_result):
    """Redefinition detection works across an inheritance chain.

    Covers the case of drivers inheriting other drivers or common
    base classes.
    """
    driver.CONF.set_default('driver_handles_share_servers', True)

    def make_stubs(method_names):
        # One no-op stub per requested method name.
        return {method_name: lambda *args, **kwargs: None
                for method_name in method_names}

    common_cls = type("NotRedefinedCommon", (driver.ShareDriver, ),
                      make_stubs(common_drv_meth_names))
    child_instance = type("NotRedefined", (common_cls, ),
                          make_stubs(child_drv_meth_names))(True)
    self.assertEqual(
        expected_result,
        child_instance._has_redefined_driver_methods(
            self._SNAPSHOT_METHOD_NAMES))
@ddt.data(
    (),
    ("create_snapshot",),
    ("delete_snapshot",),
    ("create_snapshot", "delete_snapshotFOO"),
)
def test_snapshot_support_absent(self, methods):
    """snapshot_support stays False unless BOTH snapshot hooks exist.

    Bug fix: ``("create_snapshot")`` without a trailing comma is just a
    parenthesized string, so the loop below added one fake method per
    *character* ('c', 'r', ...) and the single-name cases never actually
    redefined a snapshot method. Trailing commas make them real
    one-element tuples, so the intended scenarios are now exercised.
    """
    driver.CONF.set_default('driver_handles_share_servers', True)
    fake_method = lambda *args, **kwargs: None
    child_methods = {method: fake_method for method in methods}
    child_class_instance = type(
        "NotRedefined", (driver.ShareDriver, ), child_methods)(True)
    self.mock_object(child_class_instance, "configuration")
    child_class_instance._update_share_stats()
    self.assertFalse(child_class_instance._stats["snapshot_support"])
    self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_snapshot_support_not_exists_and_set_explicitly(
        self, snapshots_are_supported):
    """An explicit snapshot_support override wins when no hooks exist."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    instance = type("NotRedefined", (driver.ShareDriver, ), {})(True)
    self.mock_object(instance, "configuration")
    instance._update_share_stats(
        {"snapshot_support": snapshots_are_supported})
    self.assertEqual(snapshots_are_supported,
                     instance._stats["snapshot_support"])
    self.assertTrue(instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_snapshot_support_exists_and_set_explicitly(
        self, snapshots_are_supported):
    """An explicit override wins even when both snapshot hooks exist."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    noop = lambda *args, **kwargs: None
    instance = type(
        "NotRedefined", (driver.ShareDriver, ),
        {"create_snapshot": noop, "delete_snapshot": noop})(True)
    self.mock_object(instance, "configuration")
    instance._update_share_stats(
        {"snapshot_support": snapshots_are_supported})
    self.assertEqual(snapshots_are_supported,
                     instance._stats["snapshot_support"])
    self.assertTrue(instance.configuration.safe_get.called)
def test_create_share_from_snapshot_support_exists(self):
    """Redefining the snapshot-clone hooks enables the capability."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    noop = lambda *args, **kwargs: None
    instance = type(
        "NotRedefined", (driver.ShareDriver, ),
        {"create_share_from_snapshot": noop,
         "create_snapshot": noop,
         "delete_snapshot": noop})(True)
    self.mock_object(instance, "configuration")
    instance._update_share_stats()
    self.assertTrue(
        instance._stats["create_share_from_snapshot_support"])
    self.assertTrue(instance.configuration.safe_get.called)
@ddt.data(
    (),
    ("create_snapshot",),
    ("create_share_from_snapshotFOO",),
)
def test_create_share_from_snapshot_support_absent(self, methods):
    """Capability stays False without a real create_share_from_snapshot.

    Bug fix: ``("create_snapshot")`` / ``("create_share_from_snapshotFOO")``
    lacked the trailing comma, so they were plain strings and the loop
    added per-character fake methods instead of the intended single
    method. Trailing commas restore the intended one-element tuples.
    """
    driver.CONF.set_default('driver_handles_share_servers', True)
    fake_method = lambda *args, **kwargs: None
    child_methods = {method: fake_method for method in methods}
    child_class_instance = type(
        "NotRedefined", (driver.ShareDriver, ), child_methods)(True)
    self.mock_object(child_class_instance, "configuration")
    child_class_instance._update_share_stats()
    self.assertFalse(
        child_class_instance._stats["create_share_from_snapshot_support"])
    self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_create_share_from_snapshot_not_exists_and_set_explicitly(
        self, creating_shares_from_snapshot_is_supported):
    """Explicit capability override applies when the hook is absent."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    instance = type("NotRedefined", (driver.ShareDriver, ), {})(True)
    self.mock_object(instance, "configuration")
    instance._update_share_stats({
        "create_share_from_snapshot_support":
            creating_shares_from_snapshot_is_supported,
    })
    self.assertEqual(
        creating_shares_from_snapshot_is_supported,
        instance._stats["create_share_from_snapshot_support"])
    self.assertTrue(instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_create_share_from_snapshot_exists_and_set_explicitly(
        self, create_share_from_snapshot_supported):
    """Explicit capability override wins even when the hook exists."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    noop = lambda *args, **kwargs: None
    instance = type(
        "NotRedefined", (driver.ShareDriver, ),
        {"create_share_from_snapshot": noop})(True)
    self.mock_object(instance, "configuration")
    instance._update_share_stats({
        "create_share_from_snapshot_support":
            create_share_from_snapshot_supported,
    })
    self.assertEqual(
        create_share_from_snapshot_supported,
        instance._stats["create_share_from_snapshot_support"])
    self.assertTrue(instance.configuration.safe_get.called)
def test_get_periodic_hook_data(self):
    """Default periodic hook passes share instances through unchanged."""
    drv = self._instantiate_share_driver(None, False)
    share_instances = ["list", "of", "share", "instances"]
    self.assertEqual(
        share_instances,
        drv.get_periodic_hook_data("fake_context", share_instances))
def test_get_admin_network_allocations_number(self):
    """Base driver requests zero admin network allocations."""
    drv = self._instantiate_share_driver(None, True)
    self.assertEqual(0, drv.get_admin_network_allocations_number())
def test_allocate_admin_network_count_None(self):
    """count=None with zero required allocations skips the network API."""
    drv = self._instantiate_share_driver(None, True)
    mock_count = self.mock_object(
        drv, 'get_admin_network_allocations_number',
        mock.Mock(return_value=0))
    # Would blow up the test if the driver (incorrectly) called it.
    self.mock_object(
        drv.admin_network_api, 'allocate_network',
        mock.Mock(side_effect=Exception('ShouldNotBeRaised')))
    drv.allocate_admin_network('fake_context', 'fake_share_server')
    mock_count.assert_called_once_with()
    self.assertFalse(drv.admin_network_api.allocate_network.called)
def test_allocate_admin_network_count_0(self):
    """Explicit count=0 short-circuits before counting allocations."""
    drv = self._instantiate_share_driver(None, True)
    self.mock_object(
        drv, 'get_admin_network_allocations_number',
        mock.Mock(return_value=0))
    # Would blow up the test if the driver (incorrectly) called it.
    self.mock_object(
        drv.admin_network_api, 'allocate_network',
        mock.Mock(side_effect=Exception('ShouldNotBeRaised')))
    drv.allocate_admin_network('fake_context', 'fake_share_server',
                               count=0)
    self.assertFalse(drv.get_admin_network_allocations_number.called)
    self.assertFalse(drv.admin_network_api.allocate_network.called)
def test_allocate_admin_network_count_1_api_initialized(self):
    """One required allocation is delegated to the admin network API."""
    drv = self._instantiate_share_driver(None, True)
    mock_count = self.mock_object(
        drv, 'get_admin_network_allocations_number',
        mock.Mock(return_value=1))
    self.mock_object(
        drv.admin_network_api, 'allocate_network', mock.Mock())
    drv.allocate_admin_network('fake_context', 'fake_share_server')
    mock_count.assert_called_once_with()
    drv.admin_network_api.allocate_network.assert_called_once_with(
        'fake_context', 'fake_share_server', count=1)
def test_allocate_admin_network_count_1_api_not_initialized(self):
    """Needing admin allocations without an admin API is a config error."""
    drv = self._instantiate_share_driver(None, True, None)
    drv._admin_network_api = None
    mock_count = self.mock_object(
        drv, 'get_admin_network_allocations_number',
        mock.Mock(return_value=1))
    self.assertRaises(
        exception.NetworkBadConfigurationException,
        drv.allocate_admin_network, 'fake_context', 'fake_share_server')
    mock_count.assert_called_once_with()
def test_migration_start(self):
    """Base driver leaves migration_start unimplemented."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    self.assertRaises(NotImplementedError, drv.migration_start,
                      *([None] * 7))
def test_migration_continue(self):
    """Base driver leaves migration_continue unimplemented."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    self.assertRaises(NotImplementedError, drv.migration_continue,
                      *([None] * 7))
def test_migration_complete(self):
    """Base driver leaves migration_complete unimplemented."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    self.assertRaises(NotImplementedError, drv.migration_complete,
                      *([None] * 7))
def test_migration_cancel(self):
    """Base driver leaves migration_cancel unimplemented."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    self.assertRaises(NotImplementedError, drv.migration_cancel,
                      *([None] * 7))
def test_migration_get_progress(self):
    """Base driver leaves migration_get_progress unimplemented."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    self.assertRaises(NotImplementedError, drv.migration_get_progress,
                      *([None] * 7))
@ddt.data(True, False)
def test_connection_get_info(self, admin):
    """Default connection info is built from the NFS export location."""
    fake_share = {
        'id': 'fake_id',
        'share_proto': 'nfs',
        'export_locations': [{
            'path': '/fake/fake_id',
            'is_admin_only': admin,
        }],
    }
    expected = {
        'mount': 'mount -vt nfs %(options)s /fake/fake_id %(path)s',
        'unmount': 'umount -v %(path)s',
        'access_mapping': {'ip': ['nfs']},
    }
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    drv.configuration = configuration.Configuration(None)
    connection_info = drv.connection_get_info(
        None, fake_share, "fake_server")
    self.assertEqual(expected, connection_info)
def test_migration_check_compatibility(self):
    """Default migration compatibility report denies every capability."""
    driver.CONF.set_default('driver_handles_share_servers', False)
    drv = driver.ShareDriver(False)
    drv.configuration = configuration.Configuration(None)
    expected = {key: False for key in (
        'compatible', 'writable', 'preserve_metadata',
        'nondisruptive', 'preserve_snapshots')}
    result = drv.migration_check_compatibility(
        None, None, None, None, None)
    self.assertEqual(expected, result)
def test_update_access(self):
    """update_access is abstract in the base driver."""
    drv = driver.ShareDriver(True, configuration=None)
    self.assertRaises(
        NotImplementedError, drv.update_access,
        'ctx', 'fake_share', 'fake_access_rules',
        'fake_add_rules', 'fake_delete_rules')
def test_create_replica(self):
    """create_replica is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, True)
    self.assertRaises(NotImplementedError, drv.create_replica,
                      'fake_context', ['r1', 'r2'],
                      'fake_new_replica', [], [])
def test_delete_replica(self):
    """delete_replica is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, True)
    self.assertRaises(NotImplementedError, drv.delete_replica,
                      'fake_context', ['r1', 'r2'], 'fake_replica', [])
def test_promote_replica(self):
    """promote_replica is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, True)
    self.assertRaises(NotImplementedError, drv.promote_replica,
                      'fake_context', [], 'fake_replica', [])
def test_update_replica_state(self):
    """update_replica_state is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, True)
    self.assertRaises(NotImplementedError, drv.update_replica_state,
                      'fake_context', ['r1', 'r2'], 'fake_replica',
                      [], [])
def test_create_replicated_snapshot(self):
    """create_replicated_snapshot is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, False)
    self.assertRaises(NotImplementedError, drv.create_replicated_snapshot,
                      'fake_context', ['r1', 'r2'], ['s1', 's2'])
def test_delete_replicated_snapshot(self):
    """delete_replicated_snapshot is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, False)
    self.assertRaises(NotImplementedError, drv.delete_replicated_snapshot,
                      'fake_context', ['r1', 'r2'], ['s1', 's2'])
def test_update_replicated_snapshot(self):
    """update_replicated_snapshot is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, False)
    self.assertRaises(NotImplementedError, drv.update_replicated_snapshot,
                      'fake_context', ['r1', 'r2'], 'r1',
                      ['s1', 's2'], 's1')
@ddt.data(True, False)
def test_share_group_snapshot_support_exists_and_equals_snapshot_support(
        self, snapshots_are_supported):
    """Reported snapshot_support mirrors _snapshots_are_supported."""
    driver.CONF.set_default('driver_handles_share_servers', True)
    drv = driver.ShareDriver(True)
    drv._snapshots_are_supported = snapshots_are_supported
    self.mock_object(drv, "configuration")
    drv._update_share_stats()
    self.assertEqual(snapshots_are_supported,
                     drv._stats["snapshot_support"])
    self.assertTrue(drv.configuration.safe_get.called)
def test_create_share_group_from_share_group_snapshot(self):
    """Each group member is recreated via create_share_from_snapshot."""
    fake_shares = [
        {'id': 'fake_share_%d' % i,
         'source_share_group_snapshot_member_id': 'fake_member_%d' % i}
        for i in (1, 2)]
    fake_share_group_dict = {
        'source_share_group_snapshot_id': 'some_fake_uuid_abc',
        'shares': fake_shares,
        'id': 'some_fake_uuid_def',
    }
    fake_share_group_snapshot_dict = {
        'share_group_snapshot_members': [
            {'id': 'fake_member_1'}, {'id': 'fake_member_2'}],
        'id': 'fake_share_group_snapshot_id',
    }
    drv = self._instantiate_share_driver(None, False)
    mock_create = self.mock_object(
        drv, 'create_share_from_snapshot',
        mock.Mock(side_effect=['fake_export1', 'fake_export2']))
    share_group_update, share_update = (
        drv.create_share_group_from_share_group_snapshot(
            'fake_context', fake_share_group_dict,
            fake_share_group_snapshot_dict))
    # One creation call per member, matched to its snapshot member.
    mock_create.assert_has_calls([
        mock.call(
            'fake_context',
            {'id': 'fake_share_%d' % i,
             'source_share_group_snapshot_member_id': 'fake_member_%d' % i},
            {'id': 'fake_member_%d' % i})
        for i in (1, 2)])
    self.assertIsNone(share_group_update)
    self.assertEqual(
        [{'id': 'fake_share_1', 'export_locations': 'fake_export1'},
         {'id': 'fake_share_2', 'export_locations': 'fake_export2'}],
        share_update)
def test_create_share_group_from_share_group_snapshot_dhss(self):
    """The share_server argument is forwarded to each member creation."""
    drv = self._instantiate_share_driver(None, True)
    mock_share_server = mock.Mock()
    fake_shares = [
        {'id': 'fake_share_%d' % i,
         'source_share_group_snapshot_member_id': 'foo_member_%d' % i}
        for i in (1, 2)]
    fake_share_group_dict = {
        'source_share_group_snapshot_id': 'some_fake_uuid',
        'shares': fake_shares,
        'id': 'eda52174-0442-476d-9694-a58327466c14',
    }
    fake_share_group_snapshot_dict = {
        'share_group_snapshot_members': [
            {'id': 'foo_member_1'}, {'id': 'foo_member_2'}],
        'id': 'fake_share_group_snapshot_id',
    }
    mock_create = self.mock_object(
        drv, 'create_share_from_snapshot',
        mock.Mock(side_effect=['fake_export1', 'fake_export2']))
    share_group_update, share_update = (
        drv.create_share_group_from_share_group_snapshot(
            'fake_context', fake_share_group_dict,
            fake_share_group_snapshot_dict,
            share_server=mock_share_server))
    mock_create.assert_has_calls([
        mock.call(
            'fake_context',
            {'id': 'fake_share_%d' % i,
             'source_share_group_snapshot_member_id': 'foo_member_%d' % i},
            {'id': 'foo_member_%d' % i},
            share_server=mock_share_server)
        for i in (1, 2)])
    self.assertIsNone(share_group_update)
    self.assertEqual(
        [{'id': 'fake_share_1', 'export_locations': 'fake_export1'},
         {'id': 'fake_share_2', 'export_locations': 'fake_export2'}],
        share_update)
def test_create_share_group_from_sg_snapshot_with_no_members(self):
    """An empty member list yields no group or share updates."""
    drv = self._instantiate_share_driver(None, False)
    share_group_update, share_update = (
        drv.create_share_group_from_share_group_snapshot(
            'fake_context', {}, {'share_group_snapshot_members': []}))
    self.assertIsNone(share_group_update)
    self.assertIsNone(share_update)
def test_create_share_group_snapshot(self):
    """Group snapshot creation fans out to create_snapshot per member."""
    fake_snap_member_1 = {
        'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
        'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
        'share_group_snapshot_id': 'fake_share_group_snapshot_id',
        'share_instance_id': 'fake_share_instance_id_1',
        'provider_location': 'should_not_be_used_1',
        'share': {
            'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
            'size': 3,
            'share_proto': 'fake_share_proto',
        },
    }
    fake_snap_member_2 = {
        'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89',
        'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
        'share_group_snapshot_id': 'fake_share_group_snapshot_id',
        'share_instance_id': 'fake_share_instance_id_2',
        'provider_location': 'should_not_be_used_2',
        'share': {
            'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
            'size': '2',
            'share_proto': 'fake_share_proto',
        },
    }
    fake_snap_dict = {
        'status': 'available',
        'project_id': '13c0be6290934bd98596cfa004650049',
        'user_id': 'a0314a441ca842019b0952224aa39192',
        'description': None,
        'deleted': '0',
        'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
        'share_group_snapshot_members': [
            fake_snap_member_1, fake_snap_member_2],
        'deleted_at': None,
        'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
        'name': None,
    }
    drv = self._instantiate_share_driver(None, False)
    drv._stats['snapshot_support'] = True
    mock_create_snap = self.mock_object(
        drv, 'create_snapshot',
        mock.Mock(side_effect=lambda *args, **kwargs: {
            'foo_k': 'foo_v', 'bar_k': 'bar_v_%s' % args[1]['id']}))
    share_group_snapshot_update, member_update_list = (
        drv.create_share_group_snapshot('fake_context', fake_snap_dict))

    def expected_member_snapshot(member):
        # The driver synthesizes a per-member snapshot dict; the stored
        # provider_location must be replaced with None on creation.
        return {'snapshot_id': member['share_group_snapshot_id'],
                'share_id': member['share_id'],
                'share_instance_id': member['share']['id'],
                'id': member['id'],
                'share': member['share'],
                'size': member['share']['size'],
                'share_size': member['share']['size'],
                'share_proto': member['share']['share_proto'],
                'provider_location': None}

    mock_create_snap.assert_has_calls([
        mock.call('fake_context', expected_member_snapshot(member),
                  share_server=None)
        for member in (fake_snap_member_1, fake_snap_member_2)])
    self.assertIsNone(share_group_snapshot_update)
    self.assertEqual(
        [{'id': member['id'], 'foo_k': 'foo_v',
          'bar_k': 'bar_v_%s' % member['id']}
         for member in (fake_snap_member_1, fake_snap_member_2)],
        member_update_list)
def test_create_share_group_snapshot_failed_snapshot(self):
    """A member failure propagates and rolls back earlier snapshots."""
    fake_snap_member_1 = {
        'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
        'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
        'share_group_snapshot_id': 'fake_share_group_snapshot_id',
        'share_instance_id': 'fake_share_instance_id_1',
        'provider_location': 'should_not_be_used_1',
        'share': {
            'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
            'size': 3,
            'share_proto': 'fake_share_proto',
        },
    }
    fake_snap_member_2 = {
        'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89',
        'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
        'share_group_snapshot_id': 'fake_share_group_snapshot_id',
        'share_instance_id': 'fake_share_instance_id_2',
        'provider_location': 'should_not_be_used_2',
        'share': {
            'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
            'size': '2',
            'share_proto': 'fake_share_proto',
        },
    }
    fake_snap_dict = {
        'status': 'available',
        'project_id': '13c0be6290934bd98596cfa004650049',
        'user_id': 'a0314a441ca842019b0952224aa39192',
        'description': None,
        'deleted': '0',
        'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
        'share_group_snapshot_members': [
            fake_snap_member_1, fake_snap_member_2],
        'deleted_at': None,
        'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
        'name': None,
    }
    expected_exception = exception.ManilaException
    drv = self._instantiate_share_driver(None, False)
    drv._stats['snapshot_support'] = True
    # First member succeeds, second raises.
    mock_create_snap = self.mock_object(
        drv, 'create_snapshot',
        mock.Mock(side_effect=[None, expected_exception]))
    mock_delete_snap = self.mock_object(drv, 'delete_snapshot')
    self.assertRaises(
        expected_exception,
        drv.create_share_group_snapshot,
        'fake_context', fake_snap_dict)

    def expected_member_snapshot(member):
        # Synthesized per-member snapshot dict; provider_location is
        # always None on creation.
        return {'snapshot_id': member['share_group_snapshot_id'],
                'share_id': member['share_id'],
                'share_instance_id': member['share']['id'],
                'id': member['id'],
                'share': member['share'],
                'size': member['share']['size'],
                'share_size': member['share']['size'],
                'share_proto': member['share']['share_proto'],
                'provider_location': None}

    mock_create_snap.assert_has_calls([
        mock.call('fake_context', expected_member_snapshot(member),
                  share_server=None)
        for member in (fake_snap_member_1, fake_snap_member_2)])
    # Only the successfully created first member gets rolled back.
    mock_delete_snap.assert_called_with(
        'fake_context', expected_member_snapshot(fake_snap_member_1),
        share_server=None)
def test_create_share_group_snapshot_no_support(self):
    """Without snapshot support, group snapshot creation is rejected."""
    fake_snap_dict = {
        'status': 'available',
        'project_id': '13c0be6290934bd98596cfa004650049',
        'user_id': 'a0314a441ca842019b0952224aa39192',
        'description': None,
        'deleted': '0',
        'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
        'share_group_snapshot_members': [
            {
                'status': 'available',
                'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f',
                'user_id': 'a0314a441ca842019b0952224aa39192',
                'deleted': 'False',
                'share_proto': 'NFS',
                'project_id': '13c0be6290934bd98596cfa004650049',
                'share_group_snapshot_id':
                    'f6aa3b59-57eb-421e-965c-4e182538e36a',
                'deleted_at': None,
                'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
                'size': 1,
            },
        ],
        'deleted_at': None,
        'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
        'name': None,
    }
    drv = self._instantiate_share_driver(None, False)
    drv._stats['snapshot_support'] = False
    self.assertRaises(
        exception.ShareGroupSnapshotNotSupported,
        drv.create_share_group_snapshot,
        'fake_context', fake_snap_dict)
def test_create_share_group_snapshot_no_members(self):
    """A group snapshot with no members produces no updates."""
    fake_snap_dict = {
        'status': 'available',
        'project_id': '13c0be6290934bd98596cfa004650049',
        'user_id': 'a0314a441ca842019b0952224aa39192',
        'description': None,
        'deleted': '0',
        'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
        'share_group_snapshot_members': [],
        'deleted_at': None,
        'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
        'name': None,
    }
    drv = self._instantiate_share_driver(None, False)
    drv._stats['snapshot_support'] = True
    share_group_snapshot_update, member_update_list = (
        drv.create_share_group_snapshot('fake_context', fake_snap_dict))
    self.assertIsNone(share_group_snapshot_update)
    self.assertIsNone(member_update_list)
def test_delete_share_group_snapshot(self):
    """Group snapshot deletion fans out to delete_snapshot per member."""
    fake_snap_member_1 = {
        'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
        'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
        'share_group_snapshot_id': 'fake_share_group_snapshot_id',
        'share_instance_id': 'fake_share_instance_id_1',
        'provider_location': 'fake_provider_location_2',
        'share': {
            'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
            'size': 3,
            'share_proto': 'fake_share_proto',
        },
    }
    fake_snap_member_2 = {
        'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89',
        'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
        'share_group_snapshot_id': 'fake_share_group_snapshot_id',
        'share_instance_id': 'fake_share_instance_id_2',
        'provider_location': 'fake_provider_location_2',
        'share': {
            'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
            'size': '2',
            'share_proto': 'fake_share_proto',
        },
    }
    fake_snap_dict = {
        'status': 'available',
        'project_id': '13c0be6290934bd98596cfa004650049',
        'user_id': 'a0314a441ca842019b0952224aa39192',
        'description': None,
        'deleted': '0',
        'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
        'share_group_snapshot_members': [
            fake_snap_member_1, fake_snap_member_2],
        'deleted_at': None,
        'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
        'name': None,
    }
    drv = self._instantiate_share_driver(None, False)
    drv._stats['share_group_snapshot_support'] = True
    mock_delete_snap = self.mock_object(drv, 'delete_snapshot')
    share_group_snapshot_update, member_update_list = (
        drv.delete_share_group_snapshot('fake_context', fake_snap_dict))
    # Unlike creation, deletion passes the stored provider_location on.
    mock_delete_snap.assert_has_calls([
        mock.call(
            'fake_context',
            {'snapshot_id': member['share_group_snapshot_id'],
             'share_id': member['share_id'],
             'share_instance_id': member['share']['id'],
             'id': member['id'],
             'share': member['share'],
             'size': member['share']['size'],
             'share_size': member['share']['size'],
             'share_proto': member['share']['share_proto'],
             'provider_location': member['provider_location']},
            share_server=None)
        for member in (fake_snap_member_1, fake_snap_member_2)])
    self.assertIsNone(share_group_snapshot_update)
    self.assertIsNone(member_update_list)
def test_snapshot_update_access(self):
    """snapshot_update_access is unimplemented in the base driver."""
    drv = self._instantiate_share_driver(None, False)
    self.assertRaises(NotImplementedError, drv.snapshot_update_access,
                      'fake_context', 'fake_snapshot', ['r1', 'r2'],
                      [], [])
@ddt.data({'user_networks': set([4]), 'conf': [4],
           'expected': {'ipv4': True, 'ipv6': False}},
          {'user_networks': set([6]), 'conf': [4],
           'expected': {'ipv4': False, 'ipv6': False}},
          {'user_networks': set([4, 6]), 'conf': [4],
           'expected': {'ipv4': True, 'ipv6': False}},
          {'user_networks': set([4]), 'conf': [6],
           'expected': {'ipv4': False, 'ipv6': False}},
          {'user_networks': set([6]), 'conf': [6],
           'expected': {'ipv4': False, 'ipv6': True}},
          {'user_networks': set([4, 6]), 'conf': [6],
           'expected': {'ipv4': False, 'ipv6': True}},
          {'user_networks': set([4]), 'conf': [4, 6],
           'expected': {'ipv4': True, 'ipv6': False}},
          {'user_networks': set([6]), 'conf': [4, 6],
           'expected': {'ipv4': False, 'ipv6': True}},
          {'user_networks': set([4, 6]), 'conf': [4, 6],
           'expected': {'ipv4': True, 'ipv6': True}},
          )
@ddt.unpack
def test_add_ip_version_capability_if_dhss_true(self,
                                                user_networks,
                                                conf,
                                                expected):
    """With DHSS, capability = configured versions AND user networks."""
    drv = self._instantiate_share_driver(None, True)
    self.mock_object(drv, 'get_configured_ip_versions',
                     mock.Mock(return_value=conf))
    type(drv.network_api).enabled_ip_versions = PropertyMock(
        return_value=user_networks)
    result = drv.add_ip_version_capability(
        {'share_backend_name': 'fake_backend'})
    for version in ('ipv4', 'ipv6'):
        key = '%s_support' % version
        self.assertIsNotNone(result[key])
        self.assertEqual(expected[version], result[key])
@ddt.data({'conf': [4],
'expected': {'ipv4': True, 'ipv6': False}},
{'conf': [6],
'expected': {'ipv4': False, 'ipv6': True}},
{'conf': [4, 6],
'expected': {'ipv4': True, 'ipv6': True}},
)
@ddt.unpack
def test_add_ip_version_capability_if_dhss_false(self, conf, expected):
share_driver = self._instantiate_share_driver(None, False)
self.mock_object(share_driver, 'get_configured_ip_versions',
mock.Mock(return_value=conf))
data = {'share_backend_name': 'fake_backend'}
result = share_driver.add_ip_version_capability(data)
self.assertIsNotNone(result['ipv4_support'])
self.assertEqual(expected['ipv4'], result['ipv4_support'])
self.assertIsNotNone(result['ipv6_support'])
self.assertEqual(expected['ipv6'], result['ipv6_support'])
| |
#!/usr/bin/python3
import random;
import math;
import numpy;
import os;
import json;
# Sizes of the simulated networks to generate (one config set per entry).
NumberofDevice=[30]
# How many random deployments / traffic models to emit per network size.
NumberofDeploy= 1
NumberofTrafficModel=1
# Field dimensions and radio range used when rolling device positions.
rangeX = NumberofDevice[0];
rangeY = NumberofDevice[0];
rangeZ = 1;
rangeTxRx = 10;
ScalingFactor = 10;
# Candidate packet-transmission periods; one picked at random per device.
ValidFreq = [1,1,1,1]
TimeScaleFac = 1
# Fractions of mains / rechargeable / non-rechargeable devices (sum to 1).
#DeploymentChar =[0.15,0.15,0.7]
#DeploymentChar =[0.25,0.25,0.5]
DeploymentChar =[0.35,0.35,0.3]
DeviceType=["redhawk_ble_mesh.BleMeshDevice","redhawk_ble_mesh.BleMeshDeviceRSM","redhawk_ble_mesh.BleMeshDeviceRSMN"]
devProperties =[[]];
ComputeClass = ["DEV_COMPUTE_CLASS_SENSOR", "DEV_COMPUTE_CLASS_APPLIANCE", "DEV_COMPUTE_CLASS_PC"]
MemoryClass = ["DEV_MEMORY_CLASS_SENSOR", "DEV_MEMORY_CLASS_APPLIANCE", "DEV_MEMORY_CLASS_PC"]
EnergySrc = ["BLE_DEV_BATTRERY_NON_RECHARGEABLE", "BLE_DEV_BATTRERY_RECHARGEABLE", "BLE_DEV_CONNECTED_SUPPLY"]
BatterySize = [.0250, 0.2500,0] #Scaled BatterySize by factor of 10000
# Accumulates the per-device JSON records written by RSD_Function.
ConfigDump = {};
def RSD_Function(devNum, file, devProperties):
    """Emit per-device simulator settings for the first ``devNum`` devices.

    Each device's fields are recorded twice: once into the module-level
    ``ConfigDump`` dict (later serialized to JSON by the caller) and once as
    ``sim.devices[...]`` assignment lines printed into the open ``file``.

    Parameters
    ----------
    devNum : int
        Number of devices to emit (indexes 0..devNum-1 of devProperties).
    file
        Writable text file receiving the .rsd lines.
    devProperties : list
        Per-device property lists; indexes 1..14 are consumed here.
    """
    for i in range(devNum):
        props = devProperties[i]
        # Assume every transmission triggers three receptions.
        dissipation = 0.1 + (props[10] * 3 * 0.001)
        ConfigDump[i] = {
            "address": props[14],
            "x": props[1],
            "y": props[2],
            "z": props[3],
            "maxDistance": props[4],
            "computeClass": props[5],
            "memoryClass": props[6],
            "energySourceType": props[7],
            "initEnergyLevel": props[8],
            "batterySize": props[9],
            "period": props[10],
            "destination": props[11],
            "nrofPackets": 1,
            "delay": props[13],
            "energyDissipitation": dissipation,
        }
        print("sim.devices[", i, "].devTrafficChar.address = ", props[14], file=file)
        print("sim.devices[", i, "].devMobilityChar.x = ", props[1], file=file)
        print("sim.devices[", i, "].devMobilityChar.y = ", props[2], file=file)
        print("sim.devices[", i, "].devMobilityChar.z = ", props[3], file=file)
        print("sim.devices[", i, "].devRadioChar.maxDistance = ", props[4], file=file)
        print("sim.devices[", i, "].devComputationChar.computeClass = ", props[5], file=file)
        print("sim.devices[", i, "].devComputationChar.memoryClass = ", props[6], file=file)
        print("sim.devices[", i, "].devEnergyChar.energySourceType = ", props[7], file=file)
        print("sim.devices[", i, "].devEnergyChar.initEnergyLevel = ", props[8], file=file)
        print("sim.devices[", i, "].devEnergyChar.batterySize = ", props[9], file=file)
        print("sim.devices[", i, "].trafficModel.period = ", props[10]/TimeScaleFac, file=file)
        print("sim.devices[", i, "].trafficModel.destination = ", props[11], file=file)
        print("sim.devices[", i, "].trafficModel.nrofPackets = ", 1, file=file)
        print("sim.devices[", i, "].trafficModel.delay = ", props[13], file=file)
        print("sim.devices[", i, "].devEnergyChar.energyDissipitation = ", dissipation, file=file)
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Phase 1: for each network size, build random deployments and dump
    # each as a .rsd parameter file plus a JSON config.
    # ------------------------------------------------------------------
    for num in NumberofDevice:
        DirName = "../Config/Config_"+str(DeploymentChar[2])+"_"+str(num)
        # Population split: DeploymentChar[2] of the devices are sensors,
        # the rest is divided evenly between mains-powered and rechargeable.
        TotalSenDev = math.ceil(num*DeploymentChar[2])
        TotalMainDev = math.ceil(num * ((1-DeploymentChar[2])/2))
        TotalRecDev = math.ceil(num * ((1-DeploymentChar[2])/2))
        devProperties = num * [None]
        # NOTE(review): DT is shuffled but never read afterwards; the energy
        # sources are hard-coded per branch below instead.
        DT = ['BLE_DEV_CONNECTED_SUPPLY'] * int(num * DeploymentChar[0]+1) + ['BLE_DEV_BATTRERY_RECHARGEABLE'] * int(num * DeploymentChar[1]+1) +['BLE_DEV_BATTRERY_NON_RECHARGEABLE'] * int(num * DeploymentChar[2]+1)
        random.shuffle(DT)
        for dep in range(0,NumberofDeploy):
            # Uniform random (x, y) coordinates in a 30x30 area per group.
            SensorLoc = numpy.random.uniform(0,30,size=(TotalSenDev,2))
            MainLoc = numpy.random.uniform(0,30,size=(TotalMainDev,2))
            RecLoc = numpy.random.uniform(0,30,size=(TotalRecDev,2))
            print("////////////////////")
            print(SensorLoc)
            print("////////////////////")
            print(MainLoc)
            print("////////////////////")
            print(RecLoc)
            # Random permutation of device addresses 0..num-1.
            AddressAssignment = [i for i in range(num)]
            random.shuffle(AddressAssignment)
            for i in range(0,num):
                if (i < TotalSenDev):
                    # Sensors: non-rechargeable battery devices.
                    devProperties[i]=["Random",
                                SensorLoc[i][0], #X-Pos
                                SensorLoc[i][1], #Y-Pos
                                random.randint(1, rangeZ), #Z-Pos
                                random.randint(rangeTxRx, rangeTxRx), #RxTx-Range (degenerate: bounds equal, always rangeTxRx)
                                ComputeClass[random.randint(0, 2)], #Compute Class
                                MemoryClass[random.randint(0, 2)], #Memory Class
                                'BLE_DEV_BATTRERY_NON_RECHARGEABLE', #Energy Source
                                random.randint(50, 99), #Battery Percentage
                                random.randint(50, 99), #Battery Size
                                ValidFreq[random.randint(0,3)], #Period of Packet Tx
                                AddressAssignment[TotalSenDev + i % (num - TotalSenDev)], #Peer Address (always a non-sensor)
                                random.randint(1, num-1)*1000, #No of Packets to be Tx
                                7, #Delay for the first packet
                                AddressAssignment[i]];
                    # Re-roll energy-dependent fields so battery devices get
                    # the low-end compute/memory classes and a fixed level.
                    devProperties[i][5] = ComputeClass[random.randint(0, 1)] if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else ComputeClass[random.randint(0, 2)]
                    devProperties[i][6] = MemoryClass[random.randint(0, 1)] if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else MemoryClass[random.randint(0, 2)]
                    devProperties[i][8] = 99 if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else random.randint(50, 99)
                    devProperties[i][9] = BatterySize[ 0 if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else 1]
                    devProperties[i][13] = 7
                elif (i >= TotalSenDev and (i - TotalSenDev) < TotalMainDev):
                    # Mains-powered devices.
                    devProperties[i]=["Random",
                                MainLoc[i-TotalSenDev][0], #X-Pos
                                MainLoc[i-TotalSenDev][1], #Y-Pos
                                random.randint(1, rangeZ), #Z-Pos
                                random.randint(rangeTxRx, rangeTxRx), #RxTx-Range
                                ComputeClass[random.randint(0, 2)], #Compute Class
                                MemoryClass[random.randint(0, 2)], #Memory Class
                                'BLE_DEV_CONNECTED_SUPPLY', #Energy Source
                                random.randint(50, 99), #Battery Percentage
                                random.randint(50, 99), #Battery Size
                                ValidFreq[random.randint(0,3)], #Period of Packet Tx
                                AddressAssignment[TotalSenDev + i % (num - TotalSenDev)], #Peer Address
                                random.randint(1, num-1)*1000, #No of Packets to be Tx
                                7, #Delay for the first packet
                                AddressAssignment[i]];
                    devProperties[i][5] = ComputeClass[random.randint(0, 1)] if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else ComputeClass[random.randint(0, 2)]
                    devProperties[i][6] = MemoryClass[random.randint(0, 1)] if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else MemoryClass[random.randint(0, 2)]
                    devProperties[i][8] = 99 if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else random.randint(50, 99)
                    devProperties[i][9] = BatterySize[ 0 if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else 1]
                    devProperties[i][13] = 7
                elif (i >= (TotalSenDev + TotalMainDev) and (i - TotalSenDev - TotalMainDev) < TotalRecDev):
                    # Rechargeable-battery devices.
                    devProperties[i]=["Random",
                                RecLoc[i-(TotalSenDev + TotalMainDev)][0], #X-Pos
                                RecLoc[i-(TotalSenDev + TotalMainDev)][1], #Y-Pos
                                random.randint(1, rangeZ), #Z-Pos
                                random.randint(rangeTxRx, rangeTxRx), #RxTx-Range
                                ComputeClass[random.randint(0, 2)], #Compute Class
                                MemoryClass[random.randint(0, 2)], #Memory Class
                                'BLE_DEV_BATTRERY_RECHARGEABLE', #Energy Source
                                random.randint(50, 99), #Battery Percentage
                                random.randint(50, 99), #Battery Size
                                ValidFreq[random.randint(0,3)], #Period of Packet Tx
                                AddressAssignment[TotalSenDev + i % (num - TotalSenDev)], #Peer Address
                                random.randint(1, num-1)*1000, #No of Packets to be Tx
                                7, #Delay for the first packet
                                AddressAssignment[i]];
                    devProperties[i][5] = ComputeClass[random.randint(0, 1)] if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else ComputeClass[random.randint(0, 2)]
                    devProperties[i][6] = MemoryClass[random.randint(0, 1)] if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else MemoryClass[random.randint(0, 2)]
                    devProperties[i][8] = 99 if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else random.randint(50, 99)
                    devProperties[i][9] = BatterySize[ 0 if devProperties[i][7] == "BLE_DEV_BATTRERY_NON_RECHARGEABLE" else 1]
                    devProperties[i][13] = 7
            # Write one .rsd + .json pair per traffic model for this deployment.
            for traf in range(0,NumberofTrafficModel):
                if not os.path.exists(DirName):
                    os.makedirs(DirName)
                fileName = DirName+"/"+str(num)+"_Deployment_"+str(dep)+"_Traf_"+str(traf)+".rsd"
                file = open(fileName,'w+')
                print("sim.depId = ",dep,file=file)
                RSD_Function(num,file,devProperties)
                fileJSON=open(DirName+"/"+str(num)+"_Deployment_"+str(dep)+"_Traf_"+str(traf)+".json",'w+')
                json.dump(ConfigDump,fileJSON,sort_keys=True, indent=4, separators=(',', ': '))
                fileJSON.close()
                file.close()
    # ------------------------------------------------------------------
    # Phase 2: per device type, the wrapper .rsd/.cfg emitters (currently
    # disabled - everything but the loop skeleton is commented out).
    # ------------------------------------------------------------------
    for device in DeviceType:
        for num in NumberofDevice:
            DirName = "../Config/Config_"+str(DeploymentChar[2])+"_"+str(num)
            for dep in range(0,NumberofDeploy):
                for traf in range(0,NumberofTrafficModel):
                    filename = device.split('.')[1]+"_" + str(num)+"_" + str(dep)+ str(traf)+".rsd"
                    filename1 = device.split('.')[1]+"_" + str(num)+"_" + str(dep)+ str(traf)+".cfg"
                    #file = open(DirName+"\\"+filename,'w+')
                    #print("redhawkFileVersion=2",file=file)
                    #print("# A test scenario for BLE Mesh simulator",file=file)
                    #print("sim: redhawk_ble_mesh.BleMeshSimulator",file=file)
                    #print("# create devices",file=file)
                    #print("sim.devices[]: ",device,file=file)
                    #print("# Load Deployment model with Device characteristic",file=file)
                    #print(">",str(num)+"_Deployment_"+str(dep)+"_Traf_"+str(traf)+ ".rsd",file=file)
                    #print("sim.txManager.mode = simple",file=file)
                    #print("sim.endtime = 100.0",file=file)
                    #file1 = open(DirName+"\\"+filename1,'w+')
                    #print("-parfile",filename,file=file1)
                    #print("-iterate sim.devices[].gatewaySelRule={1;2;3;4}",file=file1)
                    #print("-seeds {1}",file=file1)
                    #print("-logConfig BleMesh.lcf",file=file1)
                    #print("-logFormat MAT2",file=file1)
                    #file1.close()
                    #file.close()
                    # NOTE(review): 'file' here is the (already closed) handle
                    # left over from phase 1; this close() is a redundant no-op
                    # and raises NameError if phase 1 opened no files.
                    file.close()
| |
'''Import/Export of QCArchive data
'''
from dataclasses import dataclass
import typing
from qcexport_extra import extra_children_map
from sqlalchemy.orm import make_transient, Load
from sqlalchemy import inspect
from qcfractal.storage_sockets.models import (
AccessLogORM,
BaseResultORM,
CollectionORM,
DatasetORM,
GridOptimizationProcedureORM,
MoleculeORM,
KeywordsORM,
KVStoreORM,
OptimizationProcedureORM,
QueueManagerLogORM,
QueueManagerORM,
ResultORM,
ServerStatsLogORM,
ServiceQueueORM,
QueueManagerORM,
TaskQueueORM,
TorsionDriveProcedureORM,
Trajectory,
VersionsORM,
WavefunctionStoreORM,
)
from qcfractal.storage_sockets.models.collections_models import DatasetEntryORM
from qcfractal.storage_sockets.models.results_models import GridOptimizationAssociation, TorsionInitMol
# All ORM classes that participate in export/import.  Association and entry
# tables (DatasetEntryORM, GridOptimizationAssociation, TorsionInitMol) are
# included so their rows can be looked up by table name as well.
# (A duplicate QueueManagerORM entry was removed; the mapping below keys on
# __tablename__ and deduplicated it anyway, so behavior is unchanged.)
_all_orm = [
    AccessLogORM,
    BaseResultORM,
    CollectionORM,
    DatasetORM,
    DatasetEntryORM,
    GridOptimizationProcedureORM,
    GridOptimizationAssociation,
    MoleculeORM,
    KeywordsORM,
    KVStoreORM,
    OptimizationProcedureORM,
    QueueManagerLogORM,
    QueueManagerORM,
    ResultORM,
    ServerStatsLogORM,
    ServiceQueueORM,
    TaskQueueORM,
    TorsionDriveProcedureORM,
    TorsionInitMol,
    Trajectory,
    VersionsORM,
    WavefunctionStoreORM,
]

# Maps table names to sqlalchemy ORM objects
_table_orm_map = {orm.__tablename__: orm for orm in _all_orm}
class RowKeyValues:
    '''Generates and stores information about primary and foreign keys of a table
    '''

    @dataclass(order=True)
    class PKInfo:
        '''Holds information about a row's primary key.

        Holds the column names and the values of the primary key columns.
        These are lists in order to handle composite primary keys
        '''
        table: str      # table the key belongs to
        columns: list   # primary key column names (sorted)
        values: list    # values of those columns for one row

    @dataclass(order=True)
    class FKInfo:
        '''Holds information about a row's foreign key.

        For a single foreign key, holds the source and destination/foreign table names and columns. Also
        holds the value in the source row.
        '''
        src_table: str
        src_column: str
        dest_table: str
        dest_column: str
        value: 'typing.Any'

    def __init__(self, orm_obj):
        '''Generates primary and foreign key info given an ORM object'''
        self.orm_type = type(orm_obj)
        insp = inspect(self.orm_type)

        ###########################################################
        # First, get which columns are primary and foreign keys
        ###########################################################

        # Handle if this is a derived class (polymorphic?)
        # This seems poorly documented. But get the table name of the
        # base class (if there is one)
        base_class = insp.inherits.entity if insp.inherits else None
        base_table = base_class.__tablename__ if base_class else None

        # Get the columns comprising the primary key
        primary_key_columns = [x.name for x in insp.primary_key]

        # Now foreign keys. Loop over all the columns.
        # Each column has a set() (which may be empty) stored in foreign_keys
        foreign_key_info = []
        for col in insp.columns:
            for fk in sorted(list(col.foreign_keys)):
                # Remove foreign keys to base class
                # The purpose of this function is to get foreign keys that we need to
                # load. But if it is part of the base class, then no need to do that
                if not (base_table and fk.column.table.name == base_table):
                    new_fk = self.FKInfo(col.table.name, col.name, fk.column.table.name, fk.column.name, None)
                    foreign_key_info.append(new_fk)

        # Not sure if order is always preserved, but sort just in case
        # so that things are always consistent
        primary_key_columns = sorted(primary_key_columns)
        foreign_key_info = sorted(foreign_key_info)

        # Now store in this class
        self.primary_key = self.PKInfo(self.orm_type.__tablename__, primary_key_columns, None)
        self.foreign_keys = foreign_key_info

        #######################################################
        # Obtain values for the primary and foreign key columns
        #######################################################
        self.primary_key.values = [getattr(orm_obj, column) for column in self.primary_key.columns]
        for fk in self.foreign_keys:
            fk.value = getattr(orm_obj, fk.src_column)

    def is_composite_primary(self):
        '''Returns True if this represents a composite primary key'''
        return len(self.primary_key.columns) > 1

    def as_lookup_key(self):
        '''Return a unique string representing the primary key

        This is used as a key to a dictionary to store already-copied data.
        '''
        return repr(self.orm_type) + repr(self.primary_key)

    def remove_primary_key(self, orm_obj):
        '''Remove primary key values that are integers and not part of
        a composite primary key'''
        if type(orm_obj) != self.orm_type:
            # BUG FIX: the original message was missing the f-prefix, so it
            # printed the literal placeholder text instead of the actual types.
            raise RuntimeError(f"Removing primary keys of type {type(orm_obj)} but I can only handle {self.orm_type}")

        # Don't touch composite primary
        if self.is_composite_primary():
            return

        for pk, old_value in zip(self.primary_key.columns, self.primary_key.values):
            if isinstance(old_value, int):
                setattr(orm_obj, pk, None)
def _add_children(orm_obj, session_dest, session_src, new_pk_map, options, row_key_info, indent=''):
    '''Given an ORM object, adds the dependent data (through foreign keys)

    Finds all the foreign keys for the object, and adds the dependent data to the DB.
    It then fixes the values of the foreign keys in the ORM object to match the newly-inserted data.

    Parameters
    ----------
    orm_obj
        An ORM object to add the children of
    session_dest
        SQLAlchemy session to write data to
    session_src
        SQLAlchemy session to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    row_key_info : RowKeyValues
        Information about the row's primary and foreign keys
    indent : str
        Prefix to add to all printed output lines
    '''
    for fk in row_key_info.foreign_keys:
        # A null foreign key references nothing, so there is nothing to copy
        if fk.value is None:
            continue

        print(indent + "+ Handling child: ")
        print(indent + f"  - {fk.src_table}.{fk.src_column}:{fk.value} -> {fk.dest_table}.{fk.dest_column}")

        # Copy the single row the foreign key points at.
        # NOTE: This requires going to the source db for info. It is possible that
        # we can check new_pk_map here using the info from the foreign key to see if it
        # was already done. However, the hit rate would generally be low, and might be error
        # prone, especially with esoteric cases.
        child_info = _general_copy(table_name=fk.dest_table,
                                   session_dest=session_dest,
                                   session_src=session_src,
                                   new_pk_map=new_pk_map,
                                   options=options,
                                   filter_by={fk.dest_column: fk.value},
                                   single=True,
                                   indent=indent + '  ')

        # Repoint the foreign-key column at the newly inserted row
        setattr(orm_obj, fk.src_column, child_info[fk.dest_column])
def _add_tasks_and_services(base_result_id, session_dest, session_src, new_pk_map, options, indent):
    '''Adds entries in the task_queue and service_queue given something deriving from base_result

    Should only be called after adding the result or procedure.

    Parameters
    ----------
    base_result_id
        ID of the base_result (result, procedure, ...)
    session_dest
        SQLAlchemy session to write data to
    session_src
        SQLAlchemy session to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    indent : str
        Prefix to add to all printed output lines
    '''
    print(indent + f"$ Adding task & service queue entries for base_result_id = {base_result_id}")

    # task_queue rows reference the result via 'base_result_id' (the task is
    # deleted once the calculation completes, so there may be none), while
    # service_queue rows reference it via 'procedure_id'.
    for queue_table, id_column in (('task_queue', 'base_result_id'),
                                   ('service_queue', 'procedure_id')):
        _general_copy(table_name=queue_table,
                      session_dest=session_dest,
                      session_src=session_src,
                      new_pk_map=new_pk_map,
                      options=options,
                      filter_by={id_column: base_result_id},
                      indent=indent + '  ')
def _general_copy(table_name,
                  session_dest,
                  session_src,
                  new_pk_map,
                  options,
                  filter_by=None,
                  filter_in=None,
                  order_by=None,
                  limit=None,
                  single=False,
                  indent=''):
    '''
    Given queries, copies all results of the query from session_src to session_dest

    Adds data to session_dest, keeping a map of newly-added info and fixing foreign keys
    to match newly-inserted data.

    Called recursively to add dependent data through foreign keys.

    Parameters
    ----------
    table_name : str
        Name of the table to copy data from/to
    session_dest
        SQLAlchemy session to write data to
    session_src
        SQLAlchemy session to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data
    options : dict
        Various options to be passed into the internal functions
    filter_by : dict
        Filters (column: value) to add to the query. ie, {'id': 123}
    filter_in : dict
        Filters (column: list(values)) to add to the query using 'in'. ie, {'id': [123,456]}
    order_by: dict
        How to order the results of the query. ie {'id': 'desc'}
    limit : int
        Limit the number of records returned
    single : bool
        If true, expect only one returned record. If not, raise an exception
    indent : str
        Prefix to add to all printed output lines

    Returns
    -------
    dict or list of dict
        to_dict() data of the inserted (or previously copied) row when
        single=True, otherwise a list of such dicts.
    '''
    orm_type = _table_orm_map[table_name]

    # Build the query based on filtering, etc
    query = session_src.query(orm_type)
    if filter_by is not None:
        query = query.filter_by(**filter_by)
    if filter_in is not None:
        for key, values in filter_in.items():
            query = query.filter(getattr(orm_type, key).in_(values))
    if order_by:
        for column, order in order_by.items():
            # Gets, for example, Trajectory.opt_id.desc
            # opt_id = column, desc = bound function
            o = getattr(orm_type, column)
            o = getattr(o, order)
            query = query.order_by(o())
    if limit is not None:
        if single and limit != 1:
            raise RuntimeError(f'Limit = {limit} but single return is specified')
        query = query.limit(limit)
    elif single:
        # NOTE(review): this assignment has no effect on the query (no
        # query.limit() is applied here); the single-record guarantee is
        # enforced by the length check at the end instead.
        limit = 1

    # Disable all relationship loading
    query = query.options(Load(orm_type).noload('*'))
    data = query.all()
    return_info = []

    # We have to expunge and make transient everything first
    # If not, sqlalchemy tries to be smart. After you add the entries found
    # through foreign keys, the rest of the objects in the data list may change.
    # But then you will have parts of objects in session_src and parts in session_dest
    for d in data:
        session_src.expunge(d)
        make_transient(d)

    for d in data:
        # Obtain primary/foreign key columns and values
        src_rck = RowKeyValues(d)

        # The type of the object may not be the same as we queried (due to polymorphic types)
        real_orm_type = type(d)
        real_table_name = real_orm_type.__tablename__

        # real_orm_type should never be BaseResultORM
        assert real_orm_type != BaseResultORM

        print(indent +
              f'* Copying {table_name} {str(src_rck.primary_key.columns)} = {str(src_rck.primary_key.values)}')

        if real_orm_type != orm_type:
            print(indent + f'& But actually using table {real_table_name}')

        ############################################################
        ############################################################
        ## TODO - If working with an existing db, do lookups here ##
        ##        (this is for future capability of importing     ##
        ##        into an existing db)                            ##
        ############################################################
        ############################################################

        # Skip rows copied earlier in this run (memoized in new_pk_map)
        src_lookup_key = src_rck.as_lookup_key()
        if src_lookup_key in new_pk_map:
            print(indent + f'  - Already previously done')
            return_info.append(new_pk_map[src_lookup_key])
            continue

        # Save src information for laters. When adding extra children, old ids and stuff may be needed
        src_info = d.to_dict()

        # Loop through foreign keys and recursively add those
        _add_children(d, session_dest, session_src, new_pk_map, options, src_rck, indent + '  ')

        # Remove the primary key. We will generate a new one on adding
        src_rck.remove_primary_key(d)

        # Truncate KV store entries by default
        # (but can be overridden)
        if table_name == 'kv_store':
            truncate_kv_store = options.get('truncate_kv_store', True)
            if truncate_kv_store:
                d.value = str(d.value)[:2000]

        # Now add it to the session
        # and obtain the key info
        session_dest.add(d)
        session_dest.commit()

        dest_rck = RowKeyValues(d)
        print(indent + f'! adding {real_table_name} {str(src_rck.primary_key.values)} = {str(dest_rck.primary_key.values)}')

        # Store the info for the entire row
        # (exception: kvstore)
        dest_info = d.to_dict()

        # Don't store kvstore data in the dictionary (not needed)
        if table_name == 'kv_store':
            dest_info.pop('value')

        # We can't just use primary key, since foreign keys may
        # reference non-primary-keys of other tables (as long as they are unique)
        new_pk_map[src_lookup_key] = dest_info
        return_info.append(dest_info)

        ########################################################################
        # Now handle children that are not specified by foreign keys
        # This includes decoupled data like datasets, as well as when foreign
        # keys are specified in json
        #
        # We do that here after adding. Some of these have foreign keys
        # to this object, so we need the new id (retrieved through new_pk_map)
        ########################################################################
        if real_orm_type in extra_children_map:
            # The function called in extra_children_map may modify the object.
            # We let the called function do that, then merge it back into the db
            extra_children_map[real_orm_type](d, src_info, session_dest, session_src, new_pk_map, options, indent + '  ')
            session_dest.commit()

        ########################################################################
        # Now add tasks/services if this is a result/procedure
        ########################################################################
        if issubclass(real_orm_type, BaseResultORM):
            _add_tasks_and_services(src_info['id'], session_dest, session_src, new_pk_map, options, indent + '  ')

    # If the caller specified single=True, should only be one record
    if single:
        if len(return_info) != 1:
            raise RuntimeError(f'Wanted single record but got {len(return_info)} instead')
        return return_info[0]
    else:
        return return_info
def general_copy(table_name,
                 storage_dest,
                 storage_src,
                 new_pk_map=None,
                 options=None,
                 filter_by=None,
                 order_by=None,
                 limit=None,
                 indent=''):
    ''' Copies data from the source db to the destination db

    Given queries, copies all results of the query from session_src to session_dest
    Handles copying of data required by foreign keys as well.

    Parameters
    ----------
    table_name : str
        Name of the table to copy data from/to
    storage_dest
        Storage object to write data to
    storage_src
        Storage object to read data from
    new_pk_map : dict
        Where to store the mapping of old to new data (a fresh dict if None)
    options : dict
        Various options to be passed into the internal functions
        (treated as empty if None)
    filter_by : dict
        Filters (column: value) to add to the query. ie, {'id': 123}
        (treated as empty if None)
    order_by: dict
        How to order the results of the query. ie {'id': 'desc'}
    limit : int
        Limit the number of records returned
    indent : str
        Prefix to add to all printed output lines
    '''
    # BUG FIX: 'options' and 'filter_by' previously defaulted to {} - a
    # mutable default shared across calls.  None sentinels give each call a
    # fresh dict while keeping the behavior for explicit arguments identical.
    if new_pk_map is None:
        new_pk_map = dict()
    if options is None:
        options = {}
    if filter_by is None:
        filter_by = {}

    with storage_src.session_scope() as session_src:
        with storage_dest.session_scope() as session_dest:
            _general_copy(table_name,
                          session_dest,
                          session_src,
                          new_pk_map=new_pk_map,
                          options=options,
                          filter_by=filter_by,
                          order_by=order_by,
                          limit=limit,
                          indent=indent)
| |
import xml.dom.minidom as Dom
import time
import os
import signal
from xen.xend import XendDomain, XendNode, XendAPIStore, XendPIFMetrics
import logging
# Module-level logger: DEBUG-and-above records go to a dedicated log file.
log = logging.getLogger("performance")
log.setLevel(logging.DEBUG)
file_handle = logging.FileHandler("/var/log/xen/performance.log")
log.addHandler(file_handle)
# Fixed, well-known UUID of domain 0 (the Xen control domain).
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
class Performance1:
#file_path = "/opt/xen/performance/" + step + ".xml"
def __init__(self):
self.step = 15
self.file_path = "/opt/xen/performance/15sec.xml"
#self.file_path="/tmp/per.xml"
#self.domain = Domain()
#self.host = Host()
def collect(self):
self.collect_host()
self.collect_vms()
self.timestamp = int(time.time() * 1000)
def collect_vms(self):
self.domain = Domain()
self.vms = self.domain.get_running_domains()
self.vm_records = []
for vm in self.vms:
record = {}
record['uuid'] = self.domain.get_uuid(vm)
record['vcpus_num'] = self.domain.get_vcpus_num(vm)
record['vcpus_util'] = self.domain.get_vcpus_util(vm)
record['vifs_record'] = []
vif_number = 0
for vif in self.domain.get_vifs(vm):
vif_record = {}
vif_record['number'] = vif_number
vif_number += 1
vif_record['io_read_kbs'] = vm.get_dev_property('vif', vif, 'io_read_kbs')
vif_record['io_write_kbs'] = vm.get_dev_property('vif', vif, 'io_write_kbs')
record['vifs_record'].append(vif_record)
print record['vifs_record']
record['vbds_record'] = []
for vbd in self.domain.get_vbds(vm):
vbd_record = {}
vbd_record['device'] = vm.get_dev_property('vbd', vbd, 'device')
vbd_record['io_read_kbs'] = vm.get_dev_property('vbd', vbd, 'io_read_kbs')
vbd_record['io_write_kbs'] = vm.get_dev_property('vbd', vbd, 'io_write_kbs')
record['vbds_record'].append(vbd_record)
# memory
record['mem_cur'] = self.domain.get_memory_current(vm)
record['mem_max'] = self.domain.get_memory_max(vm)
try:
mem_free_file_path = "/opt/xen/performance/guest/"+record['uuid']+"/memory_free"
f = open(mem_free_file_path)
record['mem_free'] = float(f.readline())
f.close()
except:
record['mem_free'] = 100
# app type
app_type_dir = "/opt/xen/performance/guest/%s/apps/" % record['uuid']
shell_cmd = "ls -t %s | head -1" % app_type_dir
log.debug(shell_cmd)
#shell_cmd = "ls -t /opt/xen/performance/guest/%s/apps | head -1 | xargs cat" % record['uuid']
import subprocess
output = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).communicate()
app_type_file = output[0].strip()
if app_type_file:
app_type_path = app_type_dir + app_type_file
log.debug(app_type_path)
record['app_type'] = open(app_type_path).readline().strip()
else:
record['app_type'] = "UNKNOWN,UNRECOGNIZED"
self.vm_records.append(record)
#print self.vm_records
def collect_host(self):
self.host = Host()
self.host_uuid = self.host.get_uuid()
self.host_memory_total = self.host.get_memory_total()
self.host_memory_free = self.host.get_memory_free()
self.host_pifs = self.host.get_pifs()
self.host_pifs_devices = [self.host.get_pif_device(pif) for pif in self.host_pifs]
self.host_pifs_metrics = [self.host.get_pif_metrics(pif) for pif in self.host_pifs]
self.cpus = self.host.get_cpus()
self.cpu_utils = [self.host.get_cpu_util(cpu) for cpu in self.cpus]
    def write(self):
        """Serialize the last collected sample into the rolling XML file.

        Builds "name:value" metric strings for the host and every VM, appends
        them as a new <row> element, keeps at most the latest 100 rows, and
        replaces self.file_path via a temp file + mv.
        (Python 2 module - uses print statements.)

        NOTE(review): self.format(...) is defined elsewhere in this class -
        presumably numeric formatting; confirm before relying on its output.
        """
        # host
        host_metrics = []
        host_memory_total_str = "memory_total_kib:" + self.format(self.host_memory_total)
        host_memory_free_str = "memory_free_kib:" + self.format(self.host_memory_free)
        host_metrics.append(host_memory_total_str)
        host_metrics.append(host_memory_free_str)
        # Per-PIF rx/tx rates, e.g. "pif_<dev>_rx:<kbs>".
        for i in range(len(self.host_pifs)):
            host_pif_r_str = "pif_" + self.host_pifs_devices[i] + "_rx:" + \
                             self.format(self.host_pifs_metrics[i].get_io_read_kbs())
            host_pif_w_str = "pif_" + self.host_pifs_devices[i] + "_tx:" + \
                             self.format(self.host_pifs_metrics[i].get_io_write_kbs())
            host_metrics.append(host_pif_r_str)
            host_metrics.append(host_pif_w_str)
        for i in range(len(self.cpu_utils)):
            host_cpu_util_str = "cpu" + str(i) + ":" + self.format(self.cpu_utils[i])
            host_metrics.append(host_cpu_util_str)
        # vms
        vm_metrics_map = {}
        for vm_record in self.vm_records:
            vm_metrics = []
            # NOTE(review): vm_prefix_str is built but never used.
            vm_prefix_str = "VM:" + vm_record['uuid']
            for i in range(vm_record['vcpus_num']):
                vm_cpu_str = "cpu" + str(i) + ":" + \
                             self.format(vm_record['vcpus_util'][str(i)])
                vm_metrics.append(vm_cpu_str)
            for vif_record in vm_record['vifs_record']:
                vm_vif_r_str = "vif_" + str(vif_record['number']) + "_rx:" + \
                               self.format(vif_record['io_read_kbs'])
                vm_vif_w_str = "vif_" + str(vif_record['number']) + "_tx:" + \
                               self.format(vif_record['io_write_kbs'])
                vm_metrics.append(vm_vif_r_str)
                vm_metrics.append(vm_vif_w_str)
            for vbd_record in vm_record['vbds_record']:
                vm_vbd_r_str = "vbd_" + str(vbd_record['device']) + "_read:" + \
                               self.format(vbd_record['io_read_kbs'])
                vm_vbd_w_str = "vbd_" + str(vbd_record['device']) + "_write:" + \
                               self.format(vbd_record['io_write_kbs'])
                vm_metrics.append(vm_vbd_r_str)
                vm_metrics.append(vm_vbd_w_str)
            vm_memory_cur_str = "memory:" + self.format(vm_record['mem_cur'])
            vm_memory_max_str = "memory_target:" + self.format(vm_record['mem_max'])
            vm_memory_free_str = "memory_internal_free:" + self.format(vm_record['mem_free'])
            vm_metrics.append(vm_memory_cur_str)
            vm_metrics.append(vm_memory_max_str)
            vm_metrics.append(vm_memory_free_str)
            vm_app_type_str = "app_type:" + vm_record['app_type']
            vm_metrics.append(vm_app_type_str)
            vm_metrics_map[vm_record['uuid']] = vm_metrics
        import pprint
        pprint.pprint(host_metrics)
        pprint.pprint(vm_metrics_map)
        import datetime
        d0 = datetime.datetime.now()
        # write to xml
        doc = Dom.Document()
        # create a row: <row><t>timestamp</t><host_UUID>...<vm_UUID>...</row>
        row_node = doc.createElement('row')
        time_node = doc.createElement('t')
        time_text = doc.createTextNode(str(self.timestamp))
        time_node.appendChild(time_text)
        row_node.appendChild(time_node)
        host_node = doc.createElement('host_'+self.host_uuid)
        #host_id_node = doc.createElement("uuid")
        #host_id_text = doc.createTextNode(self.host_uuid)
        #host_id_node.appendChild(host_id_text)
        #host_node.appendChild(host_id_node)
        for value in host_metrics:
            valueNode = doc.createElement('v')
            valueText = doc.createTextNode(value)
            valueNode.appendChild(valueText)
            host_node.appendChild(valueNode)
        row_node.appendChild(host_node)
        for vm_uuid, vm_metrics in vm_metrics_map.items():
            vm_node = doc.createElement('vm_'+vm_uuid)
            #vm_id_node = doc.createElement("uuid")
            #vm_id_text = doc.createTextNode(vm_uuid)
            #vm_id_node.appendChild(vm_id_text)
            #vm_node.appendChild(vm_id_node)
            for value in vm_metrics:
                valueNode = doc.createElement('v')
                valueText = doc.createTextNode(value)
                valueNode.appendChild(valueText)
                vm_node.appendChild(valueNode)
            row_node.appendChild(vm_node)
        # create rows: reload previous rows (if any) and cap the window at 100.
        # NOTE(review): rows from old_doc are appended without importNode -
        # this works with minidom in practice but mixes owner documents.
        if os.path.isfile(self.file_path):
            old_doc = Dom.parse(self.file_path)
            root_node = old_doc.documentElement
            row_nodes = root_node.getElementsByTagName("row")
        else:
            row_nodes = []
        row_nodes.append(row_node)
        if len(row_nodes) > 100:
            row_nodes = row_nodes[1:]
        # create dom tree: <data><length>N</length><row>...</row>...</data>
        root_node = doc.createElement('data')
        doc.appendChild(root_node)
        len_node = doc.createElement("length")
        len_text = doc.createTextNode(str(len(row_nodes)))
        len_node.appendChild(len_text)
        root_node.appendChild(len_node)
        for node in row_nodes:
            root_node.appendChild(node)
        d1 = datetime.datetime.now()
        print "Time " + str(d1-d0)
        # write to file, then mv over the live file so readers never see a
        # partially written document.
        f = open(self.file_path+"tmp", "w")
        f.write(doc.toprettyxml(indent = "", newl = "", encoding = "utf-8"))
        f.close()
        d1 = datetime.datetime.now()
        os.system("mv %s %s" % (self.file_path+"tmp", self.file_path))
        d2 = datetime.datetime.now()
        print "time" + str(d2-d1)
    def writeone(self):
        """Collect the current host and VM metric strings and write them
        as a single-sample XML document to
        /opt/xen/performance/s<timestamp>.xml.

        Unlike the rolling-history writer above, no previous file is read
        and no history is kept: the emitted document is exactly one <row>.
        The commented-out code below is the retained history/aggregation
        logic from that writer.
        """
        # Host metrics: memory totals, per-PIF rx/tx rates, per-CPU load,
        # each encoded as a "name:value" string.
        host_metrics = []
        host_memory_total_str = "memory_total_kib:" + self.format(self.host_memory_total)
        host_memory_free_str = "memory_free_kib:" + self.format(self.host_memory_free)
        host_metrics.append(host_memory_total_str)
        host_metrics.append(host_memory_free_str)
        for i in range(len(self.host_pifs)):
            host_pif_r_str = "pif_" + self.host_pifs_devices[i] + "_rx:" + \
                self.format(self.host_pifs_metrics[i].get_io_read_kbs())
            host_pif_w_str = "pif_" + self.host_pifs_devices[i] + "_tx:" + \
                self.format(self.host_pifs_metrics[i].get_io_write_kbs())
            host_metrics.append(host_pif_r_str)
            host_metrics.append(host_pif_w_str)
        for i in range(len(self.cpu_utils)):
            host_cpu_util_str = "cpu" + str(i) + ":" + self.format(self.cpu_utils[i])
            host_metrics.append(host_cpu_util_str)
        # Per-VM metrics keyed by VM uuid: vCPU loads, vif/vbd I/O rates,
        # memory figures and the application type tag.
        vm_metrics_map = {}
        for vm_record in self.vm_records:
            vm_metrics = []
            # NOTE(review): vm_prefix_str is assigned but never used.
            vm_prefix_str = "VM:" + vm_record['uuid']
            for i in range(vm_record['vcpus_num']):
                vm_cpu_str = "cpu" + str(i) + ":" + \
                    self.format(vm_record['vcpus_util'][str(i)])
                vm_metrics.append(vm_cpu_str)
            for vif_record in vm_record['vifs_record']:
                vm_vif_r_str = "vif_" + str(vif_record['number']) + "_rx:" + \
                    self.format(vif_record['io_read_kbs'])
                vm_vif_w_str = "vif_" + str(vif_record['number']) + "_tx:" + \
                    self.format(vif_record['io_write_kbs'])
                vm_metrics.append(vm_vif_r_str)
                vm_metrics.append(vm_vif_w_str)
            for vbd_record in vm_record['vbds_record']:
                vm_vbd_r_str = "vbd_" + str(vbd_record['device']) + "_read:" + \
                    self.format(vbd_record['io_read_kbs'])
                vm_vbd_w_str = "vbd_" + str(vbd_record['device']) + "_write:" + \
                    self.format(vbd_record['io_write_kbs'])
                vm_metrics.append(vm_vbd_r_str)
                vm_metrics.append(vm_vbd_w_str)
            vm_memory_cur_str = "memory:" + self.format(vm_record['mem_cur'])
            vm_memory_max_str = "memory_target:" + self.format(vm_record['mem_max'])
            vm_memory_free_str = "memory_internal_free:" + self.format(vm_record['mem_free'])
            vm_metrics.append(vm_memory_cur_str)
            vm_metrics.append(vm_memory_max_str)
            vm_metrics.append(vm_memory_free_str)
            vm_app_type_str = "app_type:" + vm_record['app_type']
            vm_metrics.append(vm_app_type_str)
            vm_metrics_map[vm_record['uuid']] = vm_metrics
        # NOTE(review): debug output and timing left in place; remove for
        # production use.
        import pprint
        pprint.pprint(host_metrics)
        pprint.pprint(vm_metrics_map)
        import datetime
        d0 = datetime.datetime.now()
        # Build the XML document: one <row> containing the timestamp, a
        # host_<uuid> element and one vm_<uuid> element per VM, each
        # holding its metrics as <v> text elements.
        doc = Dom.Document()
        row_node = doc.createElement('row')
        time_node = doc.createElement('t')
        time_text = doc.createTextNode(str(self.timestamp))
        time_node.appendChild(time_text)
        row_node.appendChild(time_node)
        host_node = doc.createElement('host_'+self.host_uuid)
        #host_id_node = doc.createElement("uuid")
        #host_id_text = doc.createTextNode(self.host_uuid)
        #host_id_node.appendChild(host_id_text)
        #host_node.appendChild(host_id_node)
        for value in host_metrics:
            valueNode = doc.createElement('v')
            valueText = doc.createTextNode(value)
            valueNode.appendChild(valueText)
            host_node.appendChild(valueNode)
        row_node.appendChild(host_node)
        for vm_uuid, vm_metrics in vm_metrics_map.items():
            vm_node = doc.createElement('vm_'+vm_uuid)
            #vm_id_node = doc.createElement("uuid")
            #vm_id_text = doc.createTextNode(vm_uuid)
            #vm_id_node.appendChild(vm_id_text)
            #vm_node.appendChild(vm_id_node)
            for value in vm_metrics:
                valueNode = doc.createElement('v')
                valueText = doc.createTextNode(value)
                valueNode.appendChild(valueText)
                vm_node.appendChild(valueNode)
            row_node.appendChild(vm_node)
        # Retained (disabled) history logic: read previous rows, append,
        # cap at 100 and wrap in a <data> root with a <length> element.
        #if os.path.isfile(self.file_path):
            #old_doc = Dom.parse(self.file_path)
            #root_node = old_doc.documentElement
            #row_nodes = root_node.getElementsByTagName("row")
        #else:
            #row_nodes = []
        #row_nodes.append(row_node)
        #if len(row_nodes) > 100:
            #row_nodes = row_nodes[1:]
        #root_node = doc.createElement('data')
        doc.appendChild(row_node)
        #len_node = doc.createElement("length")
        #len_text = doc.createTextNode(str(len(row_nodes)))
        #len_node.appendChild(len_text)
        #root_node.appendChild(len_node)
        #for node in row_nodes:
            #root_node.appendChild(row_node)
        d1 = datetime.datetime.now()
        print "Time " + str(d1-d0)
        # One file per sample, named by its timestamp.
        singlepath = "/opt/xen/performance/s"+str(self.timestamp)+".xml"
        f = open(singlepath, "w")
        f.write(doc.toprettyxml(indent = "", newl = "", encoding = "utf-8"))
        f.close()
        #d1 = datetime.datetime.now()
        #os.system("mv %s %s" % (self.file_path+"tmp", self.file_path))
        #d2 = datetime.datetime.now()
        #print "time" + str(d2-d1)
    def output(self):
        """Debug helper: dump the collected host metrics (memory, PIF
        I/O rates and counters, per-CPU load) and the sample timestamp
        to stdout."""
        print self.host_memory_total
        print self.host_memory_free
        for pif_devices in self.host_pifs_devices:
            print pif_devices
        for pif_metrics in self.host_pifs_metrics:
            print pif_metrics.get_io_read_kbs()
            print pif_metrics.get_io_write_kbs()
            print pif_metrics.get_last_updated()
        for cpu_util in self.cpu_utils:
            print cpu_util
        print "current_time"
        print self.timestamp
def run(self):
# create xml
doc = Dom.Document()
# root
xport_node = doc.createElement("xport")
doc.appendChild(xport_node)
# meta and data
meta_node = doc.createElement("meta")
data_node = doc.createElement("data")
xport_node.appendChild(meta_node)
xport_node.appendChild(data_node)
# fill content
self.make_data_node(doc, data_node)
self.make_meta_node(doc, meta_node)
# write to file
f = open(self.file_path, "w")
f.write(doc.toprettyxml(indent = "", newl = "", encoding = "utf-8"))
f.close()
def make_meta_node(self, doc, meta):
start_node = doc.createElement("start")
start_text = doc.createTextNode(str(self.start_time / 1000))
start_node.appendChild(start_text)
step_node = doc.createElement("step")
step_text = doc.createTextNode(str(self.step))
step_node.appendChild(step_text)
end_node = doc.createElement("end")
end_text = doc.createTextNode(str(self.end_time / 1000))
end_node.appendChild(end_text)
meta.appendChild(start_node)
meta.appendChild(step_node)
meta.appendChild(end_node)
# entrys
legend_node = doc.createElement("legend")
self.make_legend_entrys(doc, legend_node)
rows_node = doc.createElement("rows")
rows_text = doc.createTextNode(str(len(legend_node.childNodes)))
rows_node.appendChild(rows_text)
meta.appendChild(rows_node)
columns_node = doc.createElement("columns")
columns_text = doc.createTextNode(str(self.columns_num))
columns_node.appendChild(columns_text)
meta.appendChild(columns_node)
meta.appendChild(legend_node)
def make_legend_entrys(self, doc, legend):
# Host
entry = self.make_entry(doc,"Host:" + self.host_uuid + ":memory_total_kib")
legend.appendChild(entry)
entry = self.make_entry(doc,"Host:" + self.host_uuid + ":memory_free_kib")
legend.appendChild(entry)
for pif_device in self.host_pifs_devices:
entry = self.make_entry(doc, "Host:" + self.host_uuid + ":pif_" + pif_device + "_rx")
legend.appendChild(entry)
entry = self.make_entry(doc, "Host:" + self.host_uuid + ":pif_" + pif_device + "_tx")
legend.appendChild(entry)
for i in range(len(self.cpus))[::-1]:
entry = self.make_entry(doc, "Host:" + self.host_uuid + ":cpu" + str(i))
legend.appendChild(entry)
# VM
for vm_record in self.vm_records:
for i in range(vm_record['vcpus_num'])[::-1]:
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":cpu" + str(i))
legend.appendChild(entry)
for vif_record in vm_record["vifs_record"]:
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":vif_" + str(vif_record['number']) + "_rx")
legend.appendChild(entry)
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":vif_" + str(vif_record['number']) + "_tx")
legend.appendChild(entry)
for vbd_record in vm_record["vbds_record"]:
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":vbd_" + str(vbd_record['device']) + "_read")
legend.appendChild(entry)
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":vbd_" + str(vbd_record['device']) + "_write")
legend.appendChild(entry)
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":memory")
legend.appendChild(entry)
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":memory_target")
legend.appendChild(entry)
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":memory_internal_free")
legend.appendChild(entry)
entry = self.make_entry(doc, "VM:" + vm_record['uuid'] + ":app_type")
legend.appendChild(entry)
def make_entry(self, doc, name):
entry_node = doc.createElement("entry")
entry_text = doc.createTextNode(name)
entry_node.appendChild(entry_text)
return entry_node
def make_data_node(self, doc, data):
if os.path.isfile(self.file_path):
old_doc = Dom.parse(self.file_path)
old_root = old_doc.documentElement
data_rows = old_root.getElementsByTagName("row")
else:
data_rows = []
data_rows.append(self.make_data_row(doc))
#print "data rows ", len(data_rows)
if len(data_rows) > 100:
del data_rows[0]
self.columns_num = len(data_rows)
self.start_time = self.get_timestamp(data_rows[0])
self.end_time = self.get_timestamp(data_rows[-1])
for row in data_rows:
data.appendChild(row)
def get_timestamp(self, row):
timestamp_node = row.getElementsByTagName("t")[0]
#print timestamp_node.childNodes
timestamp_text = timestamp_node.childNodes[0].nodeValue
return int(timestamp_text)
def make_data_row(self, doc):
row = doc.createElement("row")
time_node = doc.createElement("t")
time_text = doc.createTextNode(str(self.timestamp))
time_node.appendChild(time_text)
row.appendChild(time_node)
memory_total_node = doc.createElement("v")
memory_total_text = doc.createTextNode(self.format(self.host_memory_total))
memory_total_node.appendChild(memory_total_text)
row.appendChild(memory_total_node)
memory_free_node = doc.createElement("v")
memory_free_text = doc.createTextNode(self.format(self.host_memory_free))
memory_free_node.appendChild(memory_free_text)
row.appendChild(memory_free_node)
for pif_metrics in self.host_pifs_metrics:
pif_rx_node = doc.createElement("v")
pif_rx_text = doc.createTextNode(self.format(pif_metrics.get_io_read_kbs()))
pif_rx_node.appendChild(pif_rx_text)
pif_tx_node = doc.createElement("v")
pif_tx_text = doc.createTextNode(self.format(pif_metrics.get_io_write_kbs()))
pif_tx_node.appendChild(pif_tx_text)
row.appendChild(pif_rx_node)
row.appendChild(pif_tx_node)
#print len(self.cpu_utils)
for cpu_util in self.cpu_utils[::-1]:
cpu_util_node = doc.createElement("v")
cpu_util_text = doc.createTextNode(self.format(cpu_util))
cpu_util_node.appendChild(cpu_util_text)
row.appendChild(cpu_util_node)
for vm_record in self.vm_records:
for i in range(vm_record['vcpus_num'])[::-1]:
vcpu_util_node = doc.createElement("v")
vcpu_util_text = doc.createTextNode(self.format(vm_record['vcpus_util'][str(i)]))
vcpu_util_node.appendChild(vcpu_util_text)
row.appendChild(vcpu_util_node)
for vif_record in vm_record['vifs_record']:
vif_rx_node = doc.createElement("v")#vif_record['io_read_kbs']
vif_rx_text = doc.createTextNode(self.format(vif_record['io_read_kbs']))
vif_rx_node.appendChild(vif_rx_text)
vif_tx_node = doc.createElement("v")#vif_record['io_read_kbs']
vif_tx_text = doc.createTextNode(self.format(vif_record['io_write_kbs']))
vif_tx_node.appendChild(vif_tx_text)
row.appendChild(vif_rx_node)
row.appendChild(vif_tx_node)
for vbd_record in vm_record['vbds_record']:
vbd_rx_node = doc.createElement("v")
vbd_rx_text = doc.createTextNode(self.format(vbd_record['io_read_kbs']))
vbd_rx_node.appendChild(vbd_rx_text)
vbd_tx_node = doc.createElement("v")
vbd_tx_text = doc.createTextNode(self.format(vbd_record['io_write_kbs']))
vbd_tx_node.appendChild(vbd_tx_text)
row.appendChild(vbd_rx_node)
row.appendChild(vbd_tx_node)
memory_cur_node = doc.createElement("v")
memory_cur_text = doc.createTextNode(self.format(vm_record['mem_cur']))
memory_cur_node.appendChild(memory_cur_text)
row.appendChild(memory_cur_node)
memory_max_node = doc.createElement("v")
memory_max_text = doc.createTextNode(self.format(vm_record['mem_max']))
memory_max_node.appendChild(memory_max_text)
row.appendChild(memory_max_node)
memory_inter_free_node = doc.createElement("v")
memory_inter_free_text = doc.createTextNode(self.format(vm_record['mem_free']))
memory_inter_free_node.appendChild(memory_inter_free_text)
row.appendChild(memory_inter_free_node)
app_type_node = doc.createElement("v")
app_type_text = doc.createTextNode(vm_record['app_type'])
app_type_node.appendChild(app_type_text)
row.appendChild(app_type_node)
return row
def format(self, value):
return "%.4f" % value
class Host:
    """Read-only facade over XendNode/XendAPIStore for host-level
    metrics: CPUs, physical interfaces (PIFs) and memory."""
    def __init__(self):
        self.host_instance = XendNode.instance()
        self.host_cpus = self.host_instance.get_host_cpu_refs()
        # Resolve PIF references to PIF objects once, up front.
        pif_refs = self.host_instance.get_PIF_refs()
        self.host_pifs = []
        for pif_ref in pif_refs:
            self.host_pifs.append(XendAPIStore.get(pif_ref, "PIF"))
    def get_uuid(self):
        return self.host_instance.uuid
    def get_cpus(self):
        return self.host_cpus
    def get_cpu_util(self, cpu):
        return self.host_instance.get_host_cpu_load(cpu)
    def get_pifs(self):
        return self.host_pifs
    def get_pif_device(self, pif):
        return pif.get_device()
    def get_pif_metrics(self, pif):
        return XendAPIStore.get(pif.get_metrics(), "PIF_metrics")
    # BUG FIX: an unfinished stub "def get_ovs_:" was removed here. It
    # was a syntax error that prevented this module from being imported
    # at all, and it had no callers in this file.
    def get_memory_total(self):
        # Total physical memory as reported by xc.physinfo()
        # (presumably KiB -- TODO confirm against the hypervisor API).
        return self.host_instance.xc.physinfo()['total_memory']
    def get_memory_free(self):
        """Return host memory not reserved by any guest domain other
        than dom0.

        Sums get_memory_dynamic_max() over all non-dom0 domains and
        subtracts that from physical memory.
        """
        xendom = XendDomain.instance()
        doms_mem_total = 0
        for dom in xendom.list():
            # dom0's allocation is deliberately excluded.
            if dom.get_uuid() == DOM0_UUID:
                continue
            dominfo = xendom.get_vm_by_uuid(dom.get_uuid())
            doms_mem_total += dominfo.get_memory_dynamic_max()
        # physinfo total is scaled by 1024 before subtracting, so
        # dynamic_max is assumed to be in bytes -- TODO confirm.
        return (self.host_instance.xc.physinfo()['total_memory'] * 1024 - doms_mem_total)/1024
class Domain:
    """Read-only facade over XendDomain/XendAPIStore for per-VM
    metrics."""
    def __init__(self):
        self.domain_instance = XendDomain.instance()
    def get_running_domains(self):
        """Return the managed domains, excluding the first list entry
        (dom0)."""
        vms = self.domain_instance.list()
        # The first entry of XendDomain.list() is skipped; callers only
        # want guest VMs (presumably dom0 is first -- TODO confirm).
        return vms[1:]
    def get_uuid(self, vm):
        return vm.get_uuid()
    def get_vcpus_num(self, vm):
        return XendAPIStore.get(vm.get_metrics(),"VM_metrics").get_VCPUs_number()
    def get_vcpus_util(self, vm):
        return vm.get_vcpus_util()
    def get_vifs(self, vm):
        # BUG FIX: removed a leftover debug "print vifs" that spammed
        # stdout on every collection pass.
        return vm.get_vifs()
    def get_vbds(self, vm):
        return vm.get_vbds()
    def get_memory_current(self, vm):
        # Scaled by 1024: assumes dynamic_max is in bytes and callers
        # want KiB -- TODO confirm.
        return vm.get_memory_dynamic_max() / 1024
    def get_memory_max(self, vm):
        return vm.get_memory_static_max() / 1024
import threading
import time
class RunPerformance1(threading.Thread):
    """Background thread that collects performance data and writes one
    single-sample XML file roughly every 14 seconds, forever."""
    def run(self):
        # Intentional infinite loop: the thread runs for the lifetime of
        # the process.
        while True:
            # Disabled guard below allowed collection to be toggled via
            # the /etc/xen/per flag file.
           # if int(open("/etc/xen/per", "r").readline()) == 0:
           #     time.sleep(3)
           #     continue
            p = Performance1()
            p.collect()
            p.writeone()
            time.sleep(14)
def main():
    """Start the background performance-collection thread."""
    collector = RunPerformance1()
    collector.start()
if __name__ == '__main__':
main()
| |
import datetime
import hashlib
import random
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from registration.mail import send_templated_mail
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
    """
    Custom manager for the ``RegistrationProfile`` model.
    The methods defined here provide shortcuts for account creation
    and activation (including generation and emailing of activation
    keys), and for cleaning out expired inactive accounts.
    """
    def activate_user(self, activation_key):
        """
        Validate an activation key and activate the corresponding
        ``User`` if valid.
        If the key is valid and has not expired, return the ``User``
        after activating.
        If the key is not valid or has expired, return ``False``.
        If the key is valid but the ``User`` is already active,
        return ``False``.
        To prevent reactivation of an account which has been
        deactivated by site administrators, the activation key is
        reset to the string constant ``RegistrationProfile.ACTIVATED``
        after successful activation.
        """
        # Only well-formed SHA1 hex strings can possibly match; skip the
        # database lookup for anything else.
        if SHA1_RE.search(activation_key):
            try:
                profile = self.get(activation_key=activation_key)
            except self.model.DoesNotExist:
                return False
            if not profile.activation_key_expired():
                user = profile.user
                user.is_active = True
                user.save()
                # Consume the key so the account cannot be re-activated
                # after an administrator deactivates it.
                profile.activation_key = self.model.ACTIVATED
                profile.save()
                return user
        return False
    def create_inactive_user(self, username, email, password,
                             site, send_email=True):
        """
        Create a new, inactive ``User``, generate a
        ``RegistrationProfile`` and email its activation key to the
        ``User``, returning the new ``User``.
        By default, an activation email will be sent to the new
        user. To disable this, pass ``send_email=False``.
        """
        new_user = User.objects.create_user(username, email, password)
        new_user.is_active = False
        new_user.save()
        registration_profile = self.create_profile(new_user)
        if send_email:
            registration_profile.send_activation_email(site)
        return new_user
    # Wrap in a transaction so a failure cannot leave a User without its
    # RegistrationProfile.
    create_inactive_user = transaction.commit_on_success(create_inactive_user)
    def create_profile(self, user):
        """
        Create a ``RegistrationProfile`` for a given
        ``User``, and return the ``RegistrationProfile``.
        The activation key for the ``RegistrationProfile`` will be a
        SHA1 hash, generated from a combination of the ``User``'s
        username and a random salt.
        """
        # SECURITY FIX: the salt feeds an account-activation credential,
        # so it must come from a cryptographically secure source.
        # random.random() is a seeded Mersenne Twister and predictable;
        # SystemRandom draws from os.urandom.
        salt = hashlib.sha1(str(random.SystemRandom().random())).hexdigest()[:5]
        username = user.username
        if isinstance(username, unicode):
            # Python 2: hashlib requires byte strings.
            username = username.encode('utf-8')
        activation_key = hashlib.sha1(salt + username).hexdigest()
        return self.create(user=user,
                           activation_key=activation_key)
    def delete_expired_users(self):
        """
        Remove expired instances of ``RegistrationProfile`` and their
        associated ``User``s.
        Accounts to be deleted are identified by searching for
        instances of ``RegistrationProfile`` with expired activation
        keys, and then checking to see if their associated ``User``
        instances have the field ``is_active`` set to ``False``; any
        ``User`` who is both inactive and has an expired activation
        key will be deleted.
        It is recommended that this method be executed regularly as
        part of your routine site maintenance; this application
        provides a custom management command which will call this
        method, accessible as ``manage.py cleanupregistration``.
        Regularly clearing out accounts which have never been
        activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
           ``RegistrationProfile`` and/or re-send an activation email
           when a user does not receive or does not act upon the
           initial activation email; since the account will be
           deleted, the user will be able to simply re-register and
           receive a new activation key.
        2. It prevents the possibility of a malicious user registering
           one or more accounts and never activating them (thus
           denying the use of those usernames to anyone else); since
           those accounts will be deleted, the usernames will become
           available for use again.
        If you have a troublesome ``User`` and wish to disable their
        account while keeping it in the database, simply delete the
        associated ``RegistrationProfile``; an inactive ``User`` which
        does not have an associated ``RegistrationProfile`` will not
        be deleted.
        """
        for profile in self.all():
            try:
                if profile.activation_key_expired():
                    user = profile.user
                    if not user.is_active:
                        user.delete()
                        profile.delete()
            except User.DoesNotExist:
                # Orphaned profile (user already gone): clean it up too.
                profile.delete()
class RegistrationProfile(models.Model):
    """
    A simple profile which stores an activation key for use during
    user account registration.
    Generally, you will not want to interact directly with instances
    of this model; the provided manager includes methods
    for creating and activating new accounts, as well as for cleaning
    out accounts which have never been activated.
    While it is possible to use this model as the value of the
    ``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
    so. This model's sole purpose is to store data temporarily during
    account registration and activation.
    """
    # Sentinel stored in activation_key once the account is activated;
    # it can never match a real SHA1 hex digest.
    ACTIVATED = u"ALREADY_ACTIVATED"
    # One profile per user; the key is a 40-char SHA1 hex digest.
    user = models.ForeignKey(User, unique=True, verbose_name=_('user'))
    activation_key = models.CharField(_('activation key'), max_length=40)
    objects = RegistrationManager()
    class Meta:
        verbose_name = _('registration profile')
        verbose_name_plural = _('registration profiles')
    def __unicode__(self):
        return u"Registration information for %s" % self.user
    def activation_key_expired(self):
        """
        Determine whether this ``RegistrationProfile``'s activation
        key has expired, returning a boolean -- ``True`` if the key
        has expired.
        Key expiration is determined by a two-step process:
        1. If the user has already activated, the key will have been
           reset to the string constant ``ACTIVATED``. Re-activating
           is not permitted, and so this method returns ``True`` in
           this case.
        2. Otherwise, the date the user signed up is incremented by
           the number of days specified in the setting
           ``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
           days after signup during which a user is allowed to
           activate their account); if the result is less than or
           equal to the current date, the key has expired and this
           method returns ``True``.
        """
        expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
        return self.activation_key == self.ACTIVATED or \
           (self.user.date_joined + expiration_date <= datetime_now())
    # Lets the Django admin render this method as a boolean icon.
    activation_key_expired.boolean = True
    def send_activation_email(self, site):
        """
        Send an activation email to the user associated with this
        ``RegistrationProfile``.
        The activation email uses the template
        ``registration/activation_letter.html`` for both the subject
        and body of the email, rendered with the following context
        variables:
        ``activation_key``
            The activation key for the new account.
        ``expiration_days``
            The number of days remaining during which the account may
            be activated.
        ``site``
            An object representing the site on which the user
            registered; depending on whether ``django.contrib.sites``
            is installed, this may be an instance of either
            ``django.contrib.sites.models.Site`` (if the sites
            application is installed) or
            ``django.contrib.sites.models.RequestSite`` (if
            not). Consult the documentation for the Django sites
            framework for details regarding these objects' interfaces.
        """
        ctx_dict = {'activation_key': self.activation_key,
                    'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
                    'site': site}
        send_templated_mail(email_template='registration/activation_letter.html',
                            from_email=settings.DEFAULT_FROM_EMAIL,
                            recipient_list=[self.user.email],
                            context=ctx_dict)
| |
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017,2018
"""
Testing support for streaming applications.
********
Overview
********
Allows testing of a streaming application by creating conditions
on streams that are expected to become valid during the processing.
`Tester` is designed to be used with Python's `unittest` module.
A complete application may be tested or fragments of it, for example a sub-graph can be tested
in isolation that takes input data and scores it using a model.
Supports execution of the application on
:py:const:`~streamsx.topology.context.ContextTypes.STREAMING_ANALYTICS_SERVICE`,
:py:const:`~streamsx.topology.context.ContextTypes.DISTRIBUTED`
or :py:const:`~streamsx.topology.context.ContextTypes.STANDALONE`.
A :py:class:`Tester` instance is created and associated with the :py:class:`Topology` to be tested.
Conditions are then created against streams, such as a stream must receive 10 tuples using
:py:meth:`~Tester.tuple_count`.
Here is a simple example that tests a filter correctly only passes tuples with values greater than 5::
import unittest
from streamsx.topology.topology import Topology
from streamsx.topology.tester import Tester
class TestSimpleFilter(unittest.TestCase):
def setUp(self):
# Sets self.test_ctxtype and self.test_config
Tester.setup_streaming_analytics(self)
def test_filter(self):
# Declare the application to be tested
topology = Topology()
s = topology.source([5, 7, 2, 4, 9, 3, 8])
s = s.filter(lambda x : x > 5)
# Create tester and assign conditions
tester = Tester(topology)
tester.contents(s, [7, 9, 8])
# Submit the application for test
# If it fails an AssertionError will be raised.
tester.test(self.test_ctxtype, self.test_config)
A stream may have any number of conditions and any number of streams may be tested.
A :py:meth:`~Tester.local_check` is supported where a method of the
unittest class is executed once the job becomes healthy. This performs
checks from the context of the Python unittest class, such as
checking external effects of the application or using the REST api to
monitor the application.
A test fails-fast if any of the following occur:
* Any condition fails. E.g. a tuple failing a :py:meth:`~Tester.tuple_check`.
* The :py:meth:`~Tester.local_check` (if set) raises an error.
* The job for the test:
* Fails to become healthy.
* Becomes unhealthy during the test run.
* Any processing element (PE) within the job restarts.
A test times out if it does not fail but its conditions do not become valid.
The timeout is not fixed as an absolute test run time, but as a time since "progress"
was made. This can allow tests to pass when healthy runs are run in a constrained
environment that slows execution. For example with a tuple count condition of ten,
progress is indicated by tuples arriving on a stream, so that as long as gaps
between tuples are within the timeout period the test remains running until ten tuples appear.
.. note:: The test timeout value is not configurable.
.. note:: The submitted job (application under test) has additional elements (streams & operators) inserted to implement the conditions. These are visible through various APIs including the Streams console raw graph view. Such elements are put into the `Tester` category.
.. note::
The package `streamsx.testing <https://pypi.org/project/streamsx.testing/>`_ provides `nose <https://pypi.org/project/nose>`_ plugins to provide control over tests without having to modify their source code.
.. versionchanged:: 1.9 - Python 2.7 supported (except with Streaming Analytics service).
"""
__all__ = ['Tester']
import streamsx.ec as ec
import streamsx.topology.context as stc
import csv
import os
import unittest
import logging
import collections
import pkg_resources
import platform
import threading
from streamsx.rest import StreamsConnection
from streamsx.rest import StreamingAnalyticsConnection
import streamsx.rest_primitives
from streamsx.topology.context import ConfigParams
import time
import json
import sys
import warnings
import streamsx.topology.tester_runtime as sttrt
import streamsx._streams._version
__version__ = streamsx._streams._version.__version__
_logger = logging.getLogger('streamsx.topology.test')
class _TestConfig(dict):
def __init__(self, test, entries=None):
super(_TestConfig, self).__init__()
self._test = test
if entries:
self.update(entries)
class Tester(object):
"""Testing support for a Topology.
Allows testing of a Topology by creating conditions against the contents
of its streams.
Conditions may be added to a topology at any time before submission.
If a topology is submitted directly to a context then the graph
is not modified. This allows testing code to be inserted while
the topology is being built, but not acted upon unless the topology
is submitted in test mode.
If a topology is submitted through the test method then the topology
may be modified to include operations to ensure the conditions are met.
.. warning::
For future compatibility applications under test should not include intended failures that cause
a processing element to stop or restart. Thus, currently testing is against expected application behavior.
Args:
topology: Topology to be tested.
"""
def __init__(self, topology):
self.topology = topology
topology.tester = self
self._conditions = {}
self.local_check = None
self._run_for = 0
@staticmethod
def _log_env(test, verbose):
streamsx._streams._version._mismatch_check(__name__)
if verbose:
_logger.propogate = False
_logger.setLevel(logging.DEBUG)
_logger.addHandler(logging.StreamHandler())
_logger.debug("Test:%s: PYTHONHOME=%s", test.id(), os.environ.get('PYTHONHOME', '<notset>'))
_logger.debug("Test:%s: sys.path=%s", test.id(), sys.path)
_logger.debug("Test:%s: tester.__file__=%s", test.id(), __file__)
srp = pkg_resources.working_set.find(pkg_resources.Requirement.parse('streamsx'))
if srp is None:
_logger.debug("Test:%s: streamsx not installed.", test.id())
else:
_logger.debug("Test:%s: %s installed at %s.", test.id(), srp, srp.location)
@staticmethod
def setup_standalone(test, verbose=None):
"""
Set up a unittest.TestCase to run tests using IBM Streams standalone mode.
Requires a local IBM Streams install define by the ``STREAMS_INSTALL``
environment variable. If ``STREAMS_INSTALL`` is not set, then the
test is skipped.
A standalone application under test will run until a condition
fails or all the streams are finalized or when the
:py:meth:`run_for` time (if set) elapses.
Applications that include infinite streams must include set a
run for time using :py:meth:`run_for` to ensure the test completes
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config- Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
verbose(bool): If `true` then the ``streamsx.topology.test`` logger is configured at ``DEBUG`` level with output sent to standard error.
Returns: None
"""
if not 'STREAMS_INSTALL' in os.environ:
raise unittest.SkipTest("Skipped due to no local IBM Streams install")
Tester._log_env(test, verbose)
test.test_ctxtype = stc.ContextTypes.STANDALONE
test.test_config = _TestConfig(test)
test.addCleanup(Tester._cleanup_config, test)
@staticmethod
def _cleanup_config(test):
if hasattr(test, 'test_ctxtype'): del test.test_ctxtype
if hasattr(test, 'test_config'): del test.test_config
@staticmethod
def get_streams_version(test):
""" Returns IBM Streams product version string for a test.
Returns the product version corresponding to the test's setup.
For ``STANDALONE`` and ``DISTRIBUTED`` the product version
corresponds to the version defined by the environment variable
``STREAMS_INSTALL``.
Args:
test(unittest.TestCase): Test case setup to run IBM Streams tests.
.. versionadded: 1.11
"""
if hasattr(test, 'test_ctxtype'):
if test.test_ctxtype == stc.ContextTypes.STANDALONE or test.test_ctxtype == stc.ContextTypes.DISTRIBUTED:
return Tester._get_streams_product_version()
if test.test_ctxtype == stc.ContextTypes.STREAMING_ANALYTICS_SERVICE:
sas = Tester._get_sas_conn(test.test_config)
return sas.get_instances()[0].activeVersion['productVersion']
raise ValueError('Tester has not been setup.')
@staticmethod
def _get_streams_product_version():
pvf = os.path.join(os.environ['STREAMS_INSTALL'], '.product')
vers={}
with open(pvf, "r") as cf:
eqc = b'=' if sys.version_info.major == 2 else '='
reader = csv.reader(cf, delimiter=eqc, quoting=csv.QUOTE_NONE)
for row in reader:
vers[row[0]] = row[1]
return vers['Version']
@staticmethod
def _minimum_streams_version(product_version, required_version):
rvrmf = required_version.split('.')
pvrmf = product_version.split('.')
for i in range(len(rvrmf)):
if i >= len(pvrmf):
return False
pi = int(pvrmf[i])
ri = int(rvrmf[i])
if pi < ri:
return False
if pi > ri:
return True
return True
@staticmethod
def minimum_streams_version(test, required_version):
    """ Checks test setup matches a minimum required IBM Streams version.

    Args:
        test(unittest.TestCase): Test case setup to run IBM Streams tests.
        required_version(str): VRMF of the minimum version the test requires. Examples are ``'4.3'``, ``4.2.4``.

    Returns:
        bool: True if the setup fulfills the minimum required version, false otherwise.

    .. versionadded: 1.11
    """
    product_version = Tester.get_streams_version(test)
    return Tester._minimum_streams_version(product_version, required_version)
@staticmethod
def require_streams_version(test, required_version):
    """Require a test has minimum IBM Streams version.

    Raises ``unittest.SkipTest`` (skipping the test) when the test's
    setup does not provide at least `required_version`.

    Args:
        test(unittest.TestCase): Test case setup to run IBM Streams tests.
        required_version(str): VRMF of the minimum version the test requires. Examples are ``'4.3'``, ``4.2.4``.

    .. versionadded: 1.11
    """
    if Tester.minimum_streams_version(test, required_version):
        return
    raise unittest.SkipTest(
        "Skipped as test requires IBM Streams {0} but {1} is setup for {2}.".format(
            required_version, Tester.get_streams_version(test), test.test_ctxtype))
@staticmethod
def setup_distributed(test, verbose=None):
    """
    Set up a unittest.TestCase to run tests using IBM Streams distributed mode.

    Two attributes are set in the test case:

    * test_ctxtype - Context type the test will be run in.
    * test_config - Test configuration.

    Args:
        test(unittest.TestCase): Test case to be set up to run tests using Tester
        verbose(bool): If `true` then the ``streamsx.topology.test`` logger is configured at ``DEBUG`` level with output sent to standard error.

    Returns: None

    .. rubric:: Cloud Pak for Data integrated instance configuration

    These environment variables define how the test is built and submitted.

    * ``CP4D_URL`` - Cloud Pak for Data deployment URL, e.g. `https://cp4d_server:31843`.
    * ``STREAMS_INSTANCE_ID`` - Streams service instance name.
    * ``STREAMS_USERNAME`` - (optional) User name to submit the test as, defaulting to the current operating system user name.
    * ``STREAMS_PASSWORD`` - Password for authentication.

    .. rubric:: Cloud Pak for Data standalone instance configuration

    These environment variables define how the test is built and submitted.

    * ``STREAMS_BUILD_URL`` - Endpoint for the Streams build service.
    * ``STREAMS_REST_URL`` - Endpoint for the Streams SWS (REST) service.
    * ``STREAMS_USERNAME`` - (optional) User name to submit the test as, defaulting to the current operating system user name.
    * ``STREAMS_PASSWORD`` - Password for authentication.

    .. rubric:: Streams 4.2 & 4.3 instance configuration

    Requires a local IBM Streams install define by the ``STREAMS_INSTALL``
    environment variable. If ``STREAMS_INSTALL`` is not set then the
    test is skipped.

    The Streams instance to use is defined by the environment variables:

    * ``STREAMS_ZKCONNECT`` - Zookeeper connection string (optional)
    * ``STREAMS_DOMAIN_ID`` - Domain identifier
    * ``STREAMS_INSTANCE_ID`` - Instance identifier

    The user used to submit and monitor the job is set by the
    optional environment variables:

    * ``STREAMS_USERNAME`` - User name defaulting to `streamsadmin`.
    * ``STREAMS_PASSWORD`` - User password defaulting to `passw0rd`.

    The defaults match the setup for testing on a IBM Streams Quick
    Start Edition (QSE) virtual machine.

    .. warning::
        ``streamtool`` is used to submit the job and requires that ``streamtool`` does not prompt for authentication. This is achieved by using ``streamtool genkey``.

    .. seealso::
        `Generating authentication keys for IBM Streams <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.1/com.ibm.streams.cfg.doc/doc/ibminfospherestreams-user-security-authentication-rsa.html>`_
    """
    # Unlike setup_standalone, no environment validation happens here.
    Tester._log_env(test, verbose)
    test.test_ctxtype = stc.ContextTypes.DISTRIBUTED
    test.test_config = _TestConfig(test)
    # Distributed setup check is delayed until the test is run
    # as the connection information can be in the service definition.
    # See _check_setup_distributed for the validation performed at test time.
@staticmethod
def _check_setup_distributed(cfg):
    """Verify a usable DISTRIBUTED setup exists, otherwise skip the test.

    Any one of the following satisfies the check:

    * a Streams service definition present in `cfg`,
    * a Streams 4.2/4.3 domain/instance (requires a local install),
    * a Cloud Pak for Data integrated instance,
    * a Cloud Pak for Data standalone instance.

    Raises:
        unittest.SkipTest: When no instance definition is available.
    """
    if streamsx.rest_primitives.Instance._find_service_def(cfg):
        return
    env = os.environ
    if 'STREAMS_INSTANCE_ID' in env and 'STREAMS_DOMAIN_ID' in env:
        # Streams 4.2/4.3: submission goes through a local install.
        if 'STREAMS_INSTALL' not in env:
            raise unittest.SkipTest("Skipped due to no local IBM Streams install")
        return
    if 'CP4D_URL' in env and 'STREAMS_INSTANCE_ID' in env and 'STREAMS_PASSWORD' in env:
        return  # CP4D integrated instance
    if 'STREAMS_BUILD_URL' in env and 'STREAMS_REST_URL' in env and 'STREAMS_PASSWORD' in env:
        return  # CP4D standalone instance
    raise unittest.SkipTest("No IBM Streams instance definition for DISTRIBUTED")
@staticmethod
def setup_streaming_analytics(test, service_name=None, force_remote_build=False, verbose=None):
    """
    Set up a unittest.TestCase to run tests using Streaming Analytics service on IBM Cloud.

    The service to use is defined by:

    * VCAP_SERVICES environment variable containing `streaming_analytics` entries.
    * service_name which defaults to the value of STREAMING_ANALYTICS_SERVICE_NAME environment variable.

    If VCAP_SERVICES is not set or a service name is not defined, then the test is skipped.

    Two attributes are set in the test case:

    * test_ctxtype - Context type the test will be run in.
    * test_config - Test configuration.

    Args:
        test(unittest.TestCase): Test case to be set up to run tests using Tester
        service_name(str): Name of Streaming Analytics service to use. Must exist as an
            entry in the VCAP services. Defaults to value of STREAMING_ANALYTICS_SERVICE_NAME environment variable.
        force_remote_build(bool): Force use of the Streaming Analytics build service. If `false` and ``STREAMS_INSTALL`` is set then a local build will be used if the local environment is suitable for the service, otherwise the Streams application bundle is built using the build service.
        verbose(bool): If `true` then the ``streamsx.topology.test`` logger is configured at ``DEBUG`` level with output sent to standard error.

    If run with Python 2 the test is skipped.

    Returns: None
    """
    if sys.version_info.major == 2:
        raise unittest.SkipTest('Skipped due to running with Python 2')
    if 'VCAP_SERVICES' not in os.environ:
        raise unittest.SkipTest("Skipped due to VCAP_SERVICES environment variable not set")
    test.test_ctxtype = stc.ContextTypes.STREAMING_ANALYTICS_SERVICE
    if service_name is None:
        service_name = os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME')
    if service_name is None:
        raise unittest.SkipTest("Skipped due to no service name supplied")
    Tester._log_env(test, verbose)
    test.test_config = _TestConfig(test, {'topology.service.name': service_name})
    if force_remote_build:
        test.test_config['topology.forceRemoteBuild'] = True
def add_condition(self, stream, condition):
    """Add a condition to a stream.

    Conditions are normally added through :py:meth:`tuple_count`, :py:meth:`contents` or :py:meth:`tuple_check`.

    This allows additional conditions that are implementations of
    :py:class:`Condition` to be attached directly.

    Args:
        stream(Stream): Stream to be tested.
        condition(Condition): Arbitrary condition.

    Returns:
        Stream: stream
    """
    # Conditions are keyed by their name; a later condition with the
    # same name replaces the earlier one.
    entry = (stream, condition)
    self._conditions[condition.name] = entry
    return stream
def tuple_count(self, stream, count, exact=True):
    """Test that a stream contains a number of tuples.

    If `exact` is `True`, then condition becomes valid when `count`
    tuples are seen on `stream` during the test. Subsequently if additional
    tuples are seen on `stream` then the condition fails and can never
    become valid.

    If `exact` is `False`, then the condition becomes valid once `count`
    tuples are seen on `stream` and remains valid regardless of
    any additional tuples.

    Args:
        stream(Stream): Stream to be tested.
        count(int): Number of tuples expected.
        exact(bool): `True` if the stream must contain exactly `count`
            tuples, `False` if the stream must contain at least `count` tuples.

    Returns:
        Stream: stream
    """
    _logger.debug("Adding tuple count (%d) condition to stream %s.", count, stream)
    name = stream.name + '_count'
    if exact:
        cond = sttrt._TupleExactCount(count, name)
        # Quote the stream name for consistency with every other
        # condition description produced by this class.
        cond._desc = "'{0}' stream expects tuple count equal to {1}.".format(stream.name, count)
    else:
        cond = sttrt._TupleAtLeastCount(count, name)
        cond._desc = "'{0}' stream expects tuple count of at least {1}.".format(stream.name, count)
    return self.add_condition(stream, cond)
def punct_count(self, stream, count, exact=True):
    """Test that a stream contains a number of window punctuations.

    If `exact` is `True`, then condition becomes valid when `count`
    punctuations are seen on `stream` during the test. Subsequently if additional
    punctuations are seen on `stream` then the condition fails and can never
    become valid.

    If `exact` is `False`, then the condition becomes valid once `count`
    punctuations are seen on `stream` and remains valid regardless of
    any additional punctuations.

    .. note::
        Punctuation marks are in-band signals that are inserted between tuples in a stream. If sources or stream transforms insert window markers at all, and when they insert them depends on the source or the semantic of the stream transformation. One example is the :py:meth:`~Window.aggregate`, which inserts a window marker into the output stream after each aggregation.

    Args:
        stream(Stream): Stream to be tested.
        count(int): Number of punctuations expected.
        exact(bool): `True` if the stream must contain exactly `count`
            punctuations, `False` if the stream must contain at least `count` punctuations.

    Returns:
        Stream: stream
    """
    _logger.debug("Adding punctuations count (%d) condition to stream %s.", count, stream)
    name = stream.name + '_punct_count'
    if exact:
        cond = sttrt._PunctExactCount(count, name)
        # Quote the stream name for consistency with every other
        # condition description produced by this class.
        cond._desc = "'{0}' stream expects punctuations count equal to {1}.".format(stream.name, count)
    else:
        cond = sttrt._PunctAtLeastCount(count, name)
        cond._desc = "'{0}' stream expects punctuations count of at least {1}.".format(stream.name, count)
    return self.add_condition(stream, cond)
def contents(self, stream, expected, ordered=True):
    """Test that a stream contains the expected tuples.

    Args:
        stream(Stream): Stream to be tested.
        expected(list): Sequence of expected tuples.
        ordered(bool): True if the ordering of received tuples must match expected.

    Returns:
        Stream: stream
    """
    name = stream.name + '_contents'
    if ordered:
        cond = sttrt._StreamContents(expected, name)
        desc_fmt = "'{0}' stream expects tuple ordered contents: {1}."
    else:
        cond = sttrt._UnorderedStreamContents(expected, name)
        desc_fmt = "'{0}' stream expects tuple unordered contents: {1}."
    cond._desc = desc_fmt.format(stream.name, expected)
    return self.add_condition(stream, cond)
def resets(self, minimum_resets=10):
    """Create a condition that randomly resets consistent regions.

    The condition becomes valid when each consistent region in the
    application under test has been reset `minimum_resets` times
    by the tester.

    The resets are performed at arbitrary intervals scaled to the
    period of the region (if it is periodically triggered).

    .. note::
        A region is reset by initiating a request though the Job Control Plane. The reset is not driven by any injected failure, such as a PE restart.

    Args:
        minimum_resets(int): Minimum number of resets for each region.

    .. versionadded:: 1.11
    """
    # A resetter is a topology-wide condition, so it is not bound to
    # any particular stream.
    region_resetter = sttrt._Resetter(self.topology, minimum_resets=minimum_resets)
    self.add_condition(None, region_resetter)
def tuple_check(self, stream, checker):
    """Check each tuple on a stream.

    For each tuple ``t`` on `stream` ``checker(t)`` is called.
    If the return evaluates to `False` then the condition fails.
    Once the condition fails it can never become valid.
    Otherwise the condition becomes or remains valid. The first
    tuple on the stream makes the condition valid if the checker
    callable evaluates to `True`.

    The condition can be combined with :py:meth:`tuple_count` with
    ``exact=False`` to test a stream map or filter with random input data.

    An example of combining `tuple_count` and `tuple_check` to test a filter followed
    by a map is working correctly across a random set of values::

        def rands():
            r = random.Random()
            while True:
                yield r.random()

        class TestFilterMap(unittest.testCase):
            # Set up omitted

            def test_filter(self):
                # Declare the application to be tested
                topology = Topology()
                r = topology.source(rands())
                r = r.filter(lambda x : x > 0.7)
                r = r.map(lambda x : x + 0.2)

                # Create tester and assign conditions
                tester = Tester(topology)
                # Ensure at least 1000 tuples pass through the filter.
                tester.tuple_count(r, 1000, exact=False)
                tester.tuple_check(r, lambda x : x > 0.9)

                # Submit the application for test
                # If it fails an AssertionError will be raised.
                tester.test(self.test_ctxtype, self.test_config)

    Args:
        stream(Stream): Stream to be tested.
        checker(callable): Callable that must evaluate to True for each tuple.
    """
    cond = sttrt._TupleCheck(checker, stream.name + '_check')
    # The checker runs inside the Streams job, so its code (and anything
    # it closes over) must be shipped with the application bundle.
    self.topology.graph.add_dependency(checker)
    return self.add_condition(stream, cond)
def eventual_result(self, stream, checker):
    """Test a stream reaches a known result or state.

    Creates a test condition that the tuples on a stream
    eventually reach a known result or state. Each tuple
    on `stream` results in a call to ``checker(tuple_)``.

    The return from `checker` is handled as:

    * ``None`` - The condition requires more tuples to become valid.
    * `true value` - The condition has become valid.
    * `false value` - The condition has failed. Once a condition has failed it can never become valid.

    Thus `checker` is typically stateful and allows ensuring that
    condition becomes valid from a set of input tuples. For example
    in a financial application the application under test may need
    to achieve a final known balance, but due to timings of windows the
    number of tuples required to set the final balance may be variable.

    Once the condition becomes valid any false value,
    except ``None``, returned by processing of subsequent
    tuples will cause the condition to fail.
    Returning ``None`` effectively never changes the state of the condition.

    Args:
        stream(Stream): Stream to be tested.
        checker(callable): Callable that evaluates the state of the stream for each tuple.

    .. versionadded:: 1.11
    """
    cond = sttrt._EventualResult(checker, stream.name + '_eventual')
    # The checker runs inside the Streams job, so its code must be
    # shipped with the application bundle.
    self.topology.graph.add_dependency(checker)
    return self.add_condition(stream, cond)
def local_check(self, callable):
    """Perform local check while the application is being tested.

    A call to `callable` is made after the application under test is submitted and becomes healthy.
    The check is in the context of the Python runtime executing the unittest case,
    typically the callable is a method of the test case.

    The application remains running until all the conditions are met
    and `callable` returns. If `callable` raises an error, typically
    through an assertion method from `unittest` then the test will fail.

    Used for testing side effects of the application, typically with `STREAMING_ANALYTICS_SERVICE`
    or `DISTRIBUTED`. The callable may also use the REST api for context types that support
    it to dynamically monitor the running application.

    The callable can use `submission_result` and `streams_connection` attributes from :py:class:`Tester` instance
    to interact with the job or the running Streams instance.

    These REST binding classes can be obtained as follows:

    * :py:class:`~streamsx.rest_primitives.Job` - ``tester.submission_result.job``
    * :py:class:`~streamsx.rest_primitives.Instance` - ``tester.submission_result.job.get_instance()``
    * :py:class:`~streamsx.rest.StreamsConnection` - ``tester.streams_connection``

    Simple example of checking the job is healthy::

        import unittest
        from streamsx.topology.topology import Topology
        from streamsx.topology.tester import Tester

        class TestLocalCheckExample(unittest.TestCase):
            def setUp(self):
                Tester.setup_distributed(self)

            def test_job_is_healthy(self):
                topology = Topology()
                s = topology.source(['Hello', 'World'])

                self.tester = Tester(topology)
                self.tester.tuple_count(s, 2)

                # Add the local check
                self.tester.local_check = self.local_checks

                # Run the test
                self.tester.test(self.test_ctxtype, self.test_config)

            def local_checks(self):
                job = self.tester.submission_result.job
                self.assertEqual('healthy', job.health)

    .. warning::
        A local check must not cancel the job (application under test).

    .. warning::
        A local check is not supported in standalone mode.

    Args:
        callable: Callable object.
    """
    # Registering a check replaces this bound method with the callable
    # on the instance; _start_local_check invokes it on its own thread.
    self.local_check = callable
def run_for(self, duration):
    """Run the test for a minimum number of seconds.

    Creates a test wide condition that becomes `valid` when the
    application under test has been running for `duration` seconds.

    May be called multiple times; the test runs for the maximum
    duration provided across all calls.

    Can be used to test applications without any externally visible
    streams, or streams that do not have testable conditions. For
    example a complete application may be tested by running it for
    ten minutes and using :py:meth:`local_check` to test
    any external impacts, such as messages published to a
    message queue system.

    Args:
        duration(float): Minimum number of seconds the test will run for.

    .. versionadded: 1.9
    """
    duration = float(duration)
    if duration > self._run_for:
        self._run_for = duration
def test(self, ctxtype, config=None, assert_on_fail=True, username=None, password=None, always_collect_logs=False):
    """Test the topology.

    Submits the topology for testing and verifies the test conditions are met and the job remained healthy through its execution.

    The submitted application (job) is monitored for the test conditions and
    will be canceled when all the conditions are valid or at least one failed.
    In addition if a local check was specified using :py:meth:`local_check` then
    that callable must complete before the job is cancelled.

    The test passes if all conditions became valid and the local check callable (if present) completed without
    raising an error.

    The test fails if the job is unhealthy, any condition fails or the local check callable (if present) raised an exception.

    In the event that the test fails when submitting to the `STREAMING_ANALYTICS_SERVICE` context, the application logs are retrieved as
    a tar file and are saved to the current working directory. The filesystem path to the application logs is saved in the
    tester's result object under the `application_logs` key, i.e. `tester.result['application_logs']`

    Args:
        ctxtype(str): Context type for submission.
        config: Configuration for submission.
        assert_on_fail(bool): True to raise an assertion if the test fails, False to return the passed status.
        username(str): **Deprecated**
        password(str): **Deprecated**
        always_collect_logs(bool): True to always collect the console log and PE trace files of the test.

    Attributes:
        result: The result of the test. This can contain exit codes, application log paths, or other relevant test information.
        submission_result: Result of the application submission from :py:func:`~streamsx.topology.context.submit`.
        streams_connection(StreamsConnection): Connection object that can be used to interact with the REST API of
            the Streaming Analytics service or instance.

    Returns:
        bool: `True` if test passed, `False` if test failed if `assert_on_fail` is `False`.

    .. deprecated:: 1.8.3
        ``username`` and ``password`` parameters. When required for
        a distributed test use the environment variables
        ``STREAMS_USERNAME`` and ``STREAMS_PASSWORD`` to define
        the Streams user.
    """
    if username or password:
        warnings.warn("Set username and password with environment variables", DeprecationWarning, stacklevel=2)
    if config is None:
        config = {}
    config['topology.alwaysCollectLogs'] = always_collect_logs
    # Tag the submission with the tester and Python versions.
    config['originator'] = 'tester-' + __version__ + ':python-' + platform.python_version()

    # Look for streamsx.testing plugins
    # Each action that plugin attached to the test is
    # called passing Tester, TestCase, context type and config
    if isinstance(config, _TestConfig):
        test_ = config._test
        actions = test_._streamsx_testing_actions if hasattr(test_, '_streamsx_testing_actions') else None
        if actions:
            for action in actions:
                _logger.debug("Adding nose plugin action %s to topology %s.", str(action), self.topology.name)
                action(self, test_, ctxtype, config)

    if stc.ContextTypes.DISTRIBUTED == ctxtype:
        # Validation is deferred to here (not setup_distributed) because
        # the connection information can come from the service definition.
        Tester._check_setup_distributed(config)

    # Add the conditions into the graph as sink operators
    _logger.debug("Adding conditions to topology %s.", self.topology.name)
    for ct in self._conditions.values():
        condition = ct[1]
        stream = ct[0]
        condition._attach(stream)

    # Standalone uses --kill-after parameter.
    if self._run_for and stc.ContextTypes.STANDALONE != ctxtype:
        # Attach a hidden source operator that keeps the job running
        # for at least self._run_for seconds.
        rfn = 'run_for_' + str(int(self._run_for)) + 's'
        run_cond = sttrt._RunFor(self._run_for, rfn)
        self.add_condition(None, run_cond)
        cond_run_time = self.topology.source(run_cond, name=rfn)
        cond_run_time.category = 'Tester'
        cond_run_time._op()._layout(hidden=True)

    _logger.debug("Starting test topology %s context %s.", self.topology.name, ctxtype)

    # Dispatch on context type; each branch submits the job and blocks
    # until the test completes, returning the pass/fail status.
    if stc.ContextTypes.STANDALONE == ctxtype:
        passed = self._standalone_test(config)
    elif stc.ContextTypes.DISTRIBUTED == ctxtype:
        passed = self._distributed_test(config, username, password)
    elif stc.ContextTypes.STREAMING_ANALYTICS_SERVICE == ctxtype:
        passed = self._streaming_analytics_test(ctxtype, config)
    else:
        raise NotImplementedError("Tester context type not implemented:", ctxtype)

    # Log the final state of every condition at a level reflecting
    # its outcome (Fail=error, NotValid=warning, Valid=info).
    if hasattr(self, 'result') and self.result.get('conditions'):
        for cn,cnr in self.result['conditions'].items():
            c = self._conditions[cn][1]
            cdesc = cn
            if hasattr(c, '_desc'):
                cdesc = c._desc
            if 'Fail' == cnr:
                _logger.error("Condition: %s : %s", cnr, cdesc)
            elif 'NotValid' == cnr:
                _logger.warning("Condition: %s : %s", cnr, cdesc)
            elif 'Valid' == cnr:
                _logger.info("Condition: %s : %s", cnr, cdesc)

    if assert_on_fail:
        assert passed, "Test failed for topology: " + self.topology.name
    if passed:
        _logger.info("Test topology %s passed for context:%s", self.topology.name, ctxtype)
    else:
        _logger.error("Test topology %s failed for context:%s", self.topology.name, ctxtype)
    return passed
def _standalone_test(self, config):
    """ Test using STANDALONE.
    Success is solely indicated by the process completing and returning zero.
    """
    if self._run_for:
        # Copy so the caller's config is not mutated; give the
        # standalone run a grace period beyond the requested duration.
        config = config.copy()
        config['topology.standaloneRunTime'] = self._run_for + 5.0
    sr = stc.submit(stc.ContextTypes.STANDALONE, self.topology, config)
    self.submission_result = sr
    passed = sr['return_code'] == 0
    # Store a boolean under 'passed'. Previously the raw return code was
    # stored, which was falsy (0) on success and inconsistent with the
    # boolean 'passed' recorded by the distributed test path.
    self.result = {'passed': passed, 'submission_result': sr}
    return passed
def _distributed_test(self, config, username, password):
    """Submit the topology to a distributed instance and wait for the result.

    Returns False immediately when submission fails; otherwise polls
    the job via _distributed_wait_for_result.
    """
    submission = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
    self.submission_result = submission
    if submission['return_code'] != 0:
        _logger.error("Failed to submit job to distributed instance.")
        return False
    # Prefer an explicitly configured connection, falling back to the
    # one the submitted job's REST client carries.
    sc = config.get(ConfigParams.STREAMS_CONNECTION)
    if sc is None:
        sc = self.submission_result.job.rest_client._sc
    self.streams_connection = sc
    return self._distributed_wait_for_result(stc.ContextTypes.DISTRIBUTED, config)
def _streaming_analytics_test(self, ctxtype, config):
    """Submit the topology to Streaming Analytics and wait for the result."""
    submission = stc.submit(ctxtype, self.topology, config)
    self.submission_result = submission
    # Prefer an explicitly configured connection, falling back to one
    # built from the service credentials in the configuration.
    sc = config.get(ConfigParams.STREAMS_CONNECTION)
    self.streams_connection = sc if sc is not None else Tester._get_sas_conn(config)
    if submission['return_code'] != 0:
        _logger.error("Failed to submit job to Streaming Analytics instance")
        return False
    return self._distributed_wait_for_result(ctxtype, config)
@staticmethod
def _get_sas_conn(config):
    """Create a Streaming Analytics connection from the test configuration."""
    return StreamingAnalyticsConnection(
        config.get(ConfigParams.VCAP_SERVICES),
        config.get(ConfigParams.SERVICE_NAME))
def _distributed_wait_for_result(self, ctxtype, config):
    """Monitor the submitted job until the test completes.

    Waits for the job to become healthy, runs the optional local check
    on a background thread, polls conditions to completion, fetches
    application logs when required, cancels the job and re-raises any
    exception raised by the local check.

    Returns:
        bool: True if the test passed.
    """
    cc = _ConditionChecker(self, self.streams_connection, self.submission_result)
    # Wait for the job to be healthy before calling the local check.
    if cc._wait_for_healthy():
        self._start_local_check()
        self.result = cc._complete()
        if self.local_check is not None:
            # The job is not cancelled until the local check finishes.
            self._local_thread.join()
    else:
        _logger.error ("Job %s Wait for healthy failed", cc._job_id)
        self.result = cc._end(False, _ConditionChecker._UNHEALTHY)
    self.result['submission_result'] = self.submission_result
    # Collect logs on failure, or always when configured to do so.
    if not self.result['passed'] or config['topology.alwaysCollectLogs']:
        path = self._fetch_application_logs(ctxtype)
        self.result['application_logs'] = path
    cc._canceljob(self.result)
    # Surface any error raised by the local check as the test failure.
    if hasattr(self, 'local_check_exception') and self.local_check_exception is not None:
        raise self.local_check_exception
    return self.result['passed']
def _fetch_application_logs(self, ctxtype):
    """Retrieve the job's console log and PE traces as a tar file.

    Only supported for STREAMING_ANALYTICS_SERVICE and DISTRIBUTED
    context types.

    Returns:
        str: Path to the retrieved log archive, or None when log
        retrieval is unsupported (other context types, or a Streams
        version without log retrieval support).
    """
    # Initialize to None so other context types return None instead of
    # raising UnboundLocalError at the return statement.
    application_logs = None
    if stc.ContextTypes.STREAMING_ANALYTICS_SERVICE == ctxtype or stc.ContextTypes.DISTRIBUTED == ctxtype:
        application_logs = self.submission_result.job.retrieve_log_trace()
        if application_logs is not None:
            _logger.info("Application logs have been fetched to " + application_logs)
        else:
            _logger.warning("Fetching job application logs is not supported in this version of Streams.")
    return application_logs
def _start_local_check(self):
self.local_check_exception = None
if self.local_check is None:
return
self._local_thread = threading.Thread(target=self._call_local_check)
self._local_thread.start()
def _call_local_check(self):
try:
self.local_check_value = self.local_check()
except Exception as e:
self.local_check_value = None
self.local_check_exception = e
# Stop nose from seeing that Tester.test is a test (#1266)
Tester.__test__ = False
#######################################
# Internal functions
#######################################
def _result_to_dict(passed, t):
result = {}
result['passed'] = passed
result['valid'] = t[0]
result['fail'] = t[1]
result['progress'] = t[2]
result['conditions'] = t[3]
return result
class _ConditionChecker(object):
    """Polls a submitted job and evaluates the tester's conditions.

    Conditions publish their state ('seq', 'fail', 'valid') as operator
    metrics; this class repeatedly fetches those metrics and the job/PE
    health to decide whether the test passed, failed or timed out.
    """

    # Return from _check_once
    # (valid, fail, progress, condition_states)
    _UNHEALTHY = (False, True, False, None)

    def __init__(self, tester, sc, sjr):
        self.tester = tester
        self._sc = sc
        self._sjr = sjr
        self._job_id = sjr['jobId']
        # Last seen 'seq' metric per condition; -1 until first observed.
        self._sequences = {}
        for cn in tester._conditions:
            self._sequences[cn] = -1
        self.delay = 1.0
        self.timeout = 30.0
        self.waits = 0
        # Extra passes once everything is valid, to catch late failures
        # (e.g. an exact tuple count being exceeded).
        self.additional_checks = 2
        self.job = self._sjr.job

    # Wait for job to be healthy. Returns True
    # if the job became healthy, False if not.
    def _wait_for_healthy(self):
        ok_pes = 0
        while (self.waits * self.delay) < self.timeout:
            ok_ = self._check_job_health(start=True)
            if ok_ is True:
                self.waits = 0
                return True
            if ok_ is False: # actually failed
                _logger.error("Job %s wait for healthy actually failed", self._job_id)
                return False
            # ok_ is the number of healthy PEs seen so far.
            if ok_ <= ok_pes:
                self.waits += 1
            else:
                # making progress so don't move towards
                # the timeout
                self.waits = 0
                ok_pes = ok_
            time.sleep(self.delay)
        else:
            # Timed out: one last (verbose) health check decides.
            _logger.error("Job %s Timed out waiting for healthy", self._job_id)
            return self._check_job_health(verbose=True)

    def _complete(self):
        """Poll conditions until pass, fail or timeout; return a result dict."""
        while (self.waits * self.delay) < self.timeout:
            check = self._check_once()
            if check[1]:
                # At least one condition failed.
                return self._end(False, check)
            if check[0]:
                # All conditions valid: re-check a few more times to
                # catch late failures before declaring success.
                if self.additional_checks == 0:
                    return self._end(True, check)
                self.additional_checks -= 1
                continue
            if check[2]:
                # Progress was made, so reset the timeout counter.
                self.waits = 0
            else:
                self.waits += 1
            time.sleep(self.delay)
        else:
            _logger.error("Job %s Timed out waiting for test to complete", self._job_id)
            return self._end(False, check)

    def _end(self, passed, check):
        """Convert the final check tuple into the test result dict."""
        result = _result_to_dict(passed, check)
        return result

    def _canceljob(self, result):
        # Force-cancel on failure so a hung job does not block cleanup.
        if self.job is not None:
            self.job.cancel(force=not result['passed'])

    def _check_once(self):
        """Perform one health + condition-metrics check.

        Returns:
            tuple: (valid, fail, progress, condition_states) where
            condition_states maps condition name to
            'Valid'/'Fail'/'NotValid'.
        """
        if not self._check_job_health(verbose=True):
            return _ConditionChecker._UNHEALTHY
        cms = self._get_job_metrics()
        valid = True
        progress = False
        fail = False
        condition_states = {}
        for cn in self._sequences:
            condition_states[cn] = 'NotValid'
            seq_mn = sttrt.Condition._mn('seq', cn)
            # If the metrics are missing then the operator
            # is probably still starting up, cannot be valid.
            if not seq_mn in cms:
                valid = False
                continue
            seq_m = cms[seq_mn]
            if seq_m.value != self._sequences[cn]:
                # At least one condition making progress
                progress = True
                self._sequences[cn] = seq_m.value
            fail_mn = sttrt.Condition._mn('fail', cn)
            if not fail_mn in cms:
                valid = False
                continue
            fail_m = cms[fail_mn]
            if fail_m.value != 0:
                fail = True
                condition_states[cn] = 'Fail'
                continue
            valid_mn = sttrt.Condition._mn('valid', cn)
            if not valid_mn in cms:
                valid = False
                continue
            valid_m = cms[valid_mn]
            if valid_m.value == 0:
                valid = False
            else:
                condition_states[cn] = 'Valid'
        return valid, fail, progress, condition_states

    def _check_job_health(self, start=False, verbose=False):
        """Check job and PE health.

        Returns True when the job is healthy. When `start` is False an
        unhealthy job returns False. When `start` is True (startup
        phase) an unhealthy-but-progressing job returns the count of
        healthy PEs so the caller can track startup progress; a PE that
        restarted (launch count > 1) is a hard failure.
        """
        self.job.refresh()
        ok_ = self.job.health == 'healthy'
        if not ok_:
            if verbose:
                _logger.error("Job %s (%s) health:%s", self.job.name, self._job_id, self.job.health)
            if not start:
                return False
        ok_pes = 0
        pes = self.job.get_pes()
        if verbose:
            _logger.info("Job %s health:%s PE count:%d", self.job.name, self.job.health, len(pes))
        for pe in pes:
            if pe.launchCount == 0:
                if verbose:
                    # warning() - warn() is deprecated in the logging module.
                    _logger.warning("Job %s PE %s launch count == 0", self._job_id, pe.id)
                continue # not a test failure, but not an ok_pe either
            if pe.launchCount > 1:
                # A restarted PE means the application crashed at least once.
                if verbose or start:
                    _logger.error("Job %s PE %s launch count > 1: %s", self._job_id, pe.id, pe.launchCount)
                return False
            if pe.health != 'healthy':
                if verbose:
                    _logger.error("Job %s PE %s health: %s", self._job_id, pe.id, pe.health)
                if not start:
                    return False
            else:
                if verbose:
                    _logger.info("Job %s PE %s health: %s", self._job_id, pe.id, pe.health)
                ok_pes += 1
        return True if ok_ else ok_pes

    def _get_job_metrics(self):
        """Fetch all the condition metrics for a job.

        We refetch the metrics each time to ensure that we don't miss
        any being added, e.g. if an operator is slow to start.
        """
        cms = {}
        for op in self.job.get_operators():
            metrics = op.get_metrics(name=sttrt.Condition._METRIC_PREFIX + '*')
            for m in metrics:
                cms[m.name] = m
        return cms
| |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
from ambari_commons import OSCheck
from mock.mock import MagicMock, patch
from resource_management.core import shell
class TestZkfc(RMFTestCase):
  """Unit tests for the HDFS ZKFailoverController command script (zkfc_slave.py).

  Each test replays a single Ambari command ("start"/"stop") against the
  script using a canned JSON configuration, then asserts the exact ordered
  sequence of resources (Directory/File/Link/XmlConfig/Execute) the script
  declares. The octal modes (0644/0755) are Python 2 literals, matching the
  rest of this test suite.
  """
  # Path of the HDFS common-services package relative to the stacks root.
  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
  STACK_VERSION = "2.0.6"

  def test_start_default(self):
    """START on an unsecured HA cluster: snappy links, limits.d config,
    hdfs/core-site configs, run/log directories, stale-pid cleanup and
    finally `hadoop-daemon.sh start zkfc` guarded by a pid-file check."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                       classname = "ZkfcSlave",
                       command = "start",
                       config_file = "ha_default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    # Native snappy library directories and symlinks for both architectures.
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    # On an unsecured cluster the slaves file is owned by the hdfs user.
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'hdfs',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    # Remove a stale pid file (only if no live process matches it) before start.
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

  def test_stop_default(self):
    """STOP on an unsecured cluster: daemon stop (only when the pid file
    points at a live process), then pid-file removal."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                       classname = "ZkfcSlave",
                       command = "stop",
                       config_file = "ha_default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid")
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid', action = ['delete'])
    self.assertNoMoreResources()

  def test_start_secured(self):
    """START on a Kerberos-secured cluster: same as the default start but
    additionally writes the dn/nn/jn JAAS configs, and the slaves file is
    owned by root rather than hdfs."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                       classname = "ZkfcSlave",
                       command = "start",
                       config_file = "ha_secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    # Kerberos JAAS configurations for datanode, namenode and journalnode.
    self.assertResourceCalled('File', '/etc/hadoop/conf/hdfs_dn_jaas.conf',
        content = Template('hdfs_dn_jaas.conf.j2'),
        owner = 'hdfs',
        group = 'hadoop',
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/hdfs_nn_jaas.conf',
        content = Template('hdfs_nn_jaas.conf.j2'),
        owner = 'hdfs',
        group = 'hadoop',
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/hdfs_jn_jaas.conf',
        content = Template('hdfs_jn_jaas.conf.j2'),
        owner = 'hdfs',
        group = 'hadoop',
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    # Secured cluster: slaves file is root-owned (differs from the default case).
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'root',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

  def test_stop_secured(self):
    """STOP on a secured cluster: identical resource sequence to the
    unsecured stop (stopping does not need the JAAS configs)."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                       classname = "ZkfcSlave",
                       command = "stop",
                       config_file = "ha_secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid")
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid', action = ['delete'])
    self.assertNoMoreResources()

  def test_start_with_ha_active_namenode_bootstrap(self):
    """START while bootstrapping HA with this host as the active NameNode;
    resource sequence matches the default start for this config."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                       classname = "ZkfcSlave",
                       command = "start",
                       config_file="ha_bootstrap_active_node.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'hdfs',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    # TODO: verify that the znode initialization occurs prior to ZKFC startup
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

  def test_start_with_ha_standby_namenode_bootstrap(self):
    """START while bootstrapping HA with this host as the standby NameNode;
    resource sequence matches the active-bootstrap case."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                       classname = "ZkfcSlave",
                       command = "start",
                       config_file="ha_bootstrap_standby_node.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'hdfs',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    # TODO: verify that the znode initialization occurs prior to ZKFC startup
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        group = 'hadoop',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()
| |
# -*- coding: utf-8 -*-
import datetime
import os
import six
from .model_base import AccessControlledModel
from girder.constants import AccessType
from girder.exceptions import ValidationException
from girder.settings import SettingKey
from girder.utility.progress import noProgress
class Collection(AccessControlledModel):
    """
    Collections are the top level roots of the data hierarchy. They are used
    to group and organize data that is meant to be shared amongst users.
    """

    def initialize(self):
        """Configure indices and the publicly exposed fields for this model."""
        self.name = 'collection'
        self.ensureIndices(['name'])
        # Weighted text index: matches on the name rank higher than matches
        # on the description.
        self.ensureTextIndex({
            'name': 10,
            'description': 1
        })

        self.exposeFields(level=AccessType.READ, fields={
            '_id',
            'name',
            'description',
            'public',
            'publicFlags',
            'created',
            'updated',
            'size',
            'meta'
        })

    def validate(self, doc):
        """
        Normalize and validate a collection document prior to saving.

        Strips surrounding whitespace from the name and description, requires
        a non-empty, globally unique name, and maintains the derived
        ``lowerName`` field used for case-insensitive sorting.

        :param doc: The collection document being saved.
        :type doc: dict
        :returns: The validated document.
        :raises ValidationException: If the name is empty or already in use.
        """
        doc['name'] = doc['name'].strip()
        if doc['description']:
            doc['description'] = doc['description'].strip()

        if not doc['name']:
            raise ValidationException(
                'Collection name must not be empty.', 'name')

        # Ensure unique name for the collection
        q = {
            'name': doc['name']
        }
        if '_id' in doc:
            # On update, exclude this document itself from the duplicate check.
            q['_id'] = {'$ne': doc['_id']}
        duplicate = self.findOne(q, fields=['_id'])
        if duplicate is not None:
            raise ValidationException('A collection with that name already '
                                      'exists.', 'name')

        doc['lowerName'] = doc['name'].lower()

        return doc

    def remove(self, collection, progress=None, **kwargs):
        """
        Delete a collection recursively.

        All top-level folders under the collection (and, through the folder
        model, everything beneath them) are removed before the collection
        document itself.

        :param collection: The collection document to delete.
        :type collection: dict
        :param progress: A progress context to record progress on.
        :type progress: girder.utility.progress.ProgressContext or None.
        """
        # Local import to avoid a circular dependency with the folder model.
        from .folder import Folder

        folderModel = Folder()
        folders = folderModel.find({
            'parentId': collection['_id'],
            'parentCollection': 'collection'
        })
        for folder in folders:
            folderModel.remove(folder, progress=progress, **kwargs)

        # Delete this collection
        AccessControlledModel.remove(self, collection)
        if progress:
            progress.update(increment=1, message='Deleted collection ' + collection['name'])

    def createCollection(self, name, creator=None, description='', public=True,
                         reuseExisting=False):
        """
        Create a new collection.

        :param name: The name of the collection. Must be unique.
        :type name: str
        :param description: Description for the collection.
        :type description: str
        :param public: Public read access flag.
        :type public: bool
        :param creator: The user who is creating this collection.
        :type creator: dict
        :param reuseExisting: If a collection with the given name already exists
            return that collection rather than creating a new one.
        :type reuseExisting: bool
        :returns: The collection document that was created.
        """
        if reuseExisting:
            existing = self.findOne({
                'name': name
            })
            if existing:
                return existing

        now = datetime.datetime.utcnow()

        collection = {
            'name': name,
            'description': description,
            'creatorId': creator['_id'] if creator else None,
            'created': now,
            'updated': now,
            'size': 0,
            'meta': {}
        }

        self.setPublic(collection, public, save=False)
        if creator:
            # The creator is granted full admin rights on the new collection.
            self.setUserAccess(
                collection, user=creator, level=AccessType.ADMIN, save=False)

        return self.save(collection)

    def updateCollection(self, collection):
        """
        Updates a collection.

        :param collection: The collection document to update
        :type collection: dict
        :returns: The collection document that was edited.
        """
        collection['updated'] = datetime.datetime.utcnow()

        # Validate and save the collection
        return self.save(collection)

    def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
             force=False, fields=None, exc=False):
        """
        Calls AccessControlMixin.load, and if no meta field is present,
        adds an empty meta field and saves.

        Takes the same parameters as
        :py:func:`girder.models.model_base.AccessControlMixin.load`.
        """
        doc = super(Collection, self).load(
            id=id, level=level, user=user, objectId=objectId, force=force, fields=fields,
            exc=exc)

        if doc is not None:
            if 'meta' not in doc:
                # Lazily migrate pre-meta documents: persist an empty meta
                # dict so downstream code can rely on the field existing.
                doc['meta'] = {}
                self.update({'_id': doc['_id']}, {'$set': {
                    'meta': doc['meta']
                }})

        return doc

    def filter(self, doc, user=None, additionalKeys=None):
        """
        Overrides the parent ``filter`` method to add an empty meta field
        (if it doesn't exist) to the returned collection.
        """
        filteredDoc = super(Collection, self).filter(doc, user, additionalKeys=additionalKeys)
        if 'meta' not in filteredDoc:
            filteredDoc['meta'] = {}

        return filteredDoc

    def setMetadata(self, collection, metadata, allowNull=False):
        """
        Set metadata on a collection. A `ValidationException` is thrown in the
        cases where the metadata JSON object is badly formed, or if any of the
        metadata keys contains a period ('.').

        :param collection: The collection to set the metadata on.
        :type collection: dict
        :param metadata: A dictionary containing key-value pairs to add to
                     the collection's meta field
        :type metadata: dict
        :param allowNull: Whether to allow `null` values to be set in the collection's
                     metadata. If set to `False` or omitted, a `null` value will cause that
                     metadata field to be deleted.
        :returns: the collection document
        """
        if 'meta' not in collection:
            collection['meta'] = {}

        # Add new metadata to existing metadata
        collection['meta'].update(six.viewitems(metadata))

        # Remove metadata fields that were set to null (use items in py3)
        if not allowNull:
            toDelete = [k for k, v in six.viewitems(metadata) if v is None]
            for key in toDelete:
                del collection['meta'][key]

        self.validateKeys(collection['meta'])

        collection['updated'] = datetime.datetime.utcnow()

        # Validate and save the collection
        return self.save(collection)

    def deleteMetadata(self, collection, fields):
        """
        Delete metadata on a collection. A `ValidationException` is thrown if the
        metadata field names contain a period ('.') or begin with a dollar sign
        ('$').

        :param collection: The collection to delete metadata from.
        :type collection: dict
        :param fields: An array containing the field names to delete from the
            collection's meta field
        :type fields: list
        :returns: the collection document
        """
        self.validateKeys(fields)

        if 'meta' not in collection:
            collection['meta'] = {}

        for field in fields:
            # pop with a default so deleting a missing key is a no-op.
            collection['meta'].pop(field, None)

        collection['updated'] = datetime.datetime.utcnow()

        return self.save(collection)

    def fileList(self, doc, user=None, path='', includeMetadata=False,
                 subpath=True, mimeFilter=None, data=True):
        """
        This function generates a list of 2-tuples whose first element is the
        relative path to the file from the collection's root and whose second
        element depends on the value of the `data` flag. If `data=True`, the
        second element will be a generator that will generate the bytes of the
        file data as stored in the assetstore. If `data=False`, the second
        element is the file document itself.

        :param doc: the collection to list.
        :param user: a user used to validate data that is returned.
        :param path: a path prefix to add to the results.
        :param includeMetadata: if True and there is any metadata, include a
                                result which is the JSON string of the
                                metadata. This is given a name of
                                metadata[-(number).json that is distinct from
                                any file within the item.
        :param subpath: if True, add the collection's name to the path.
        :param mimeFilter: Optional list of MIME types to filter by. Set to
            None to include all files.
        :type mimeFilter: `list or tuple`
        :param data: If True return raw content of each file as stored in the
            assetstore, otherwise return file document.
        :type data: bool
        """
        from .folder import Folder

        if subpath:
            path = os.path.join(path, doc['name'])

        folderModel = Folder()
        # Eagerly evaluate this list, as the MongoDB cursor can time out on long requests
        childFolders = list(folderModel.childFolders(
            parentType='collection', parent=doc, user=user,
            fields=['name'] + (['meta'] if includeMetadata else [])
        ))
        for folder in childFolders:
            for (filepath, file) in folderModel.fileList(
                    folder, user, path, includeMetadata, subpath=True,
                    mimeFilter=mimeFilter, data=data):
                yield (filepath, file)

    def subtreeCount(self, doc, includeItems=True, user=None, level=None):
        """
        Return the size of the folders within the collection. The collection
        is counted as well.

        :param doc: The collection.
        :param includeItems: Whether items should be included in the count.
        :type includeItems: bool
        :param user: If filtering by permission, the user to filter against.
        :param level: If filtering by permission, the required permission level.
        :type level: AccessLevel
        """
        from .folder import Folder

        count = 1  # start at 1 to count the collection itself
        folderModel = Folder()
        folders = folderModel.findWithPermissions({
            'parentId': doc['_id'],
            'parentCollection': 'collection'
        }, fields='access', user=user, level=level)

        count += sum(folderModel.subtreeCount(
            folder, includeItems=includeItems, user=user, level=level)
            for folder in folders)
        return count

    def setAccessList(self, doc, access, save=False, recurse=False, user=None,
                      progress=noProgress, setPublic=None, publicFlags=None, force=False):
        """
        Overrides AccessControlledModel.setAccessList to add a recursive
        option. When `recurse=True`, this will set the access list on all
        subfolders to which the given user has ADMIN access level. Any
        subfolders that the given user does not have ADMIN access on will be
        skipped.

        :param doc: The collection to set access settings on.
        :type doc: collection
        :param access: The access control list.
        :type access: dict
        :param save: Whether the changes should be saved to the database.
        :type save: bool
        :param recurse: Whether this access list should be propagated to all
            folders underneath this collection.
        :type recurse: bool
        :param user: The current user (for recursive mode filtering).
        :param progress: Progress context to update.
        :type progress: :py:class:`girder.utility.progress.ProgressContext`
        :param setPublic: Pass this if you wish to set the public flag on the
            resources being updated.
        :type setPublic: bool or None
        :param publicFlags: Pass this if you wish to set the public flag list on
            resources being updated.
        :type publicFlags: flag identifier str, or list/set/tuple of them, or None
        :param force: Set this to True to set the flags regardless of the passed in
            user's permissions.
        :type force: bool
        """
        progress.update(increment=1, message='Updating ' + doc['name'])
        if setPublic is not None:
            self.setPublic(doc, setPublic, save=False)

        if publicFlags is not None:
            doc = self.setPublicFlags(doc, publicFlags, user=user, save=False, force=force)

        doc = AccessControlledModel.setAccessList(
            self, doc, access, user=user, save=save, force=force)

        if recurse:
            from .folder import Folder

            folderModel = Folder()
            # Only descend into folders the acting user can administer.
            folders = folderModel.findWithPermissions({
                'parentId': doc['_id'],
                'parentCollection': 'collection'
            }, user=user, level=AccessType.ADMIN)

            for folder in folders:
                folderModel.setAccessList(
                    folder, access, save=True, recurse=True, user=user,
                    progress=progress, setPublic=setPublic, publicFlags=publicFlags)

        return doc

    def hasCreatePrivilege(self, user):
        """
        Tests whether a given user has the authority to create collections on
        this instance. This is based on the collection creation policy settings.
        By default, only admins are allowed to create collections.

        :param user: The user to test.
        :returns: bool
        """
        from .setting import Setting

        if user['admin']:
            return True

        policy = Setting().get(SettingKey.COLLECTION_CREATE_POLICY)

        if policy['open'] is True:
            return True

        if user['_id'] in policy.get('users', ()):
            return True

        # Any overlap between the user's groups and the policy's groups grants access.
        if set(policy.get('groups', ())) & set(user.get('groups', ())):
            return True

        return False

    def countFolders(self, collection, user=None, level=None):
        """
        Returns the number of top level folders under this collection. Access
        checking is optional; to circumvent access checks, pass ``level=None``.

        :param collection: The collection.
        :type collection: dict
        :param user: If performing access checks, the user to check against.
        :type user: dict or None
        :param level: The required access level, or None to return the raw
            top-level folder count.
        """
        from .folder import Folder

        # Only fetch the access fields when permission filtering is requested.
        fields = () if level is None else ('access', 'public')

        folderModel = Folder()
        folders = folderModel.findWithPermissions({
            'parentId': collection['_id'],
            'parentCollection': 'collection'
        }, fields=fields, user=user, level=level)

        return folders.count()

    def updateSize(self, doc):
        """
        Recursively recomputes the size of this collection and its underlying
        folders and fixes the sizes as needed.

        :param doc: The collection.
        :type doc: dict
        :returns: Tuple of (recomputed size, number of size fixes applied).
        """
        from .folder import Folder

        size = 0
        fixes = 0
        folderModel = Folder()
        folders = folderModel.find({
            'parentId': doc['_id'],
            'parentCollection': 'collection'
        })
        for folder in folders:
            # fix folder size if needed
            _, f = folderModel.updateSize(folder)
            fixes += f
            # get total recursive folder size
            folder = folderModel.load(folder['_id'], force=True)
            size += folderModel.getSizeRecursive(folder)
        # fix value if incorrect
        if size != doc.get('size'):
            self.update({'_id': doc['_id']}, update={'$set': {'size': size}})
            fixes += 1
        return size, fixes
| |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.expr.types import ArrayExpr, TableExpr, RelationError
from ibis.common import ExpressionError
import ibis.expr.analysis as L
import ibis.expr.api as api
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis
from ibis.compat import unittest
from ibis.expr.tests.mocks import MockConnection, BasicTestCase
import ibis.common as com
import ibis.config as config
from ibis.tests.util import assert_equal
class TestTableExprBasics(BasicTestCase, unittest.TestCase):
def test_empty_schema(self):
table = api.table([], 'foo')
assert len(table.schema()) == 0
def test_columns(self):
t = self.con.table('alltypes')
result = t.columns
expected = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
assert result == expected
def test_view_new_relation(self):
# For assisting with self-joins and other self-referential operations
# where we need to be able to treat instances of the same TableExpr as
# semantically distinct
#
# This thing is not exactly a projection, since it has no semantic
# meaning when it comes to execution
tview = self.table.view()
roots = tview._root_tables()
assert len(roots) == 1
assert roots[0] is tview.op()
def test_get_type(self):
for k, v in self.schema_dict.iteritems():
assert self.table._get_type(k) == v
def test_getitem_column_select(self):
for k, v in self.schema_dict.iteritems():
col = self.table[k]
# Make sure it's the right type
assert isinstance(col, ArrayExpr)
assert isinstance(col, ir.array_type(v))
# Ensure we have a field selection with back-reference to the table
parent = col.parent()
assert isinstance(parent, ops.TableColumn)
assert parent.parent() is self.table
def test_getitem_attribute(self):
result = self.table.a
assert_equal(result, self.table['a'])
assert 'a' in dir(self.table)
# Project and add a name that conflicts with a TableExpr built-in
# attribute
view = self.table[[self.table, self.table['a'].name('schema')]]
assert not isinstance(view.schema, ArrayExpr)
def test_projection(self):
cols = ['f', 'a', 'h']
proj = self.table[cols]
assert isinstance(proj, TableExpr)
assert isinstance(proj.op(), ops.Projection)
assert proj.schema().names == cols
for c in cols:
expr = proj[c]
assert type(expr) == type(self.table[c])
def test_projection_no_list(self):
expr = (self.table.f * 2).name('bar')
result = self.table.select(expr)
expected = self.table.projection([expr])
assert_equal(result, expected)
def test_projection_with_exprs(self):
# unnamed expr to test
mean_diff = (self.table['a'] - self.table['c']).mean()
col_exprs = [self.table['b'].log().name('log_b'),
mean_diff.name('mean_diff')]
proj = self.table[col_exprs + ['g']]
schema = proj.schema()
assert schema.names == ['log_b', 'mean_diff', 'g']
assert schema.types == ['double', 'double', 'string']
# Test with unnamed expr
self.assertRaises(ExpressionError, self.table.projection,
['g', self.table['a'] - self.table['c']])
def test_projection_duplicate_names(self):
self.assertRaises(com.IntegrityError, self.table.projection,
[self.table.c, self.table.c])
def test_projection_invalid_root(self):
schema1 = {
'foo': 'double',
'bar': 'int32'
}
left = api.table(schema1, name='foo')
right = api.table(schema1, name='bar')
exprs = [right['foo'], right['bar']]
self.assertRaises(RelationError, left.projection, exprs)
def test_projection_unnamed_literal_interactive_blowup(self):
# #147 and #153 alike
table = self.con.table('functional_alltypes')
with config.option_context('interactive', True):
try:
table.select([table.bigint_col, ibis.literal(5)])
except Exception as e:
assert 'named' in e.message
    def test_projection_of_aggregated(self):
        # Fully-formed aggregations "block"; in a projection, column
        # expressions referencing table expressions below the aggregation are
        # invalid.
        # TODO: placeholder -- no assertions implemented yet.
        pass
def test_projection_with_star_expr(self):
new_expr = (self.table['a'] * 5).name('bigger_a')
t = self.table
# it lives!
proj = t[t, new_expr]
repr(proj)
ex_names = self.table.schema().names + ['bigger_a']
assert proj.schema().names == ex_names
# cannot pass an invalid table expression
t2 = t.aggregate([t['a'].sum().name('sum(a)')], by=['g'])
self.assertRaises(RelationError, t.__getitem__, [t2])
# TODO: there may be some ways this can be invalid
def test_projection_convenient_syntax(self):
proj = self.table[self.table, self.table['a'].name('foo')]
proj2 = self.table[[self.table, self.table['a'].name('foo')]]
assert_equal(proj, proj2)
    def test_add_column(self):
        # Creates a projection with a select-all on top of a non-projection
        # TableExpr
        new_expr = (self.table['a'] * 5).name('bigger_a')
        t = self.table
        result = t.add_column(new_expr)
        expected = t[[t, new_expr]]
        assert_equal(result, expected)
        # Optional second argument renames the added column.
        result = t.add_column(new_expr, 'wat')
        expected = t[[t, new_expr.name('wat')]]
        assert_equal(result, expected)

    def test_add_column_scalar_expr(self):
        # Check literals, at least
        pass

    def test_add_column_aggregate_crossjoin(self):
        # A new column that depends on a scalar value produced by this or some
        # other table.
        #
        # For example:
        # SELECT *, b - VAL
        # FROM table1
        #
        # Here, VAL could be something produced by aggregating table1 or any
        # other table for that matter.
        pass

    def test_add_column_existing_projection(self):
        # The "blocking" predecessor table is a projection; we can simply add
        # the column to the existing projection
        foo = (self.table.f * 2).name('foo')
        bar = (self.table.f * 4).name('bar')
        t2 = self.table.add_column(foo)
        t3 = t2.add_column(bar)
        expected = self.table[self.table, foo, bar]
        assert_equal(t3, expected)

    def test_mutate(self):
        """mutate accepts positional named exprs, keyword exprs, and literals."""
        one = self.table.f * 2
        foo = (self.table.a + self.table.b).name('foo')
        expr = self.table.mutate(foo, one=one, two=2)
        expected = self.table[self.table, foo, one.name('one'),
                              ibis.literal(2).name('two')]
        assert_equal(expr, expected)

    def test_mutate_alter_existing_columns(self):
        """mutate with an existing column name replaces that column in place."""
        new_f = self.table.f * 2
        foo = self.table.d * 2
        expr = self.table.mutate(f=new_f, foo=foo)
        expected = self.table['a', 'b', 'c', 'd', 'e',
                              new_f.name('f'), 'g', 'h',
                              foo.name('foo')]
        assert_equal(expr, expected)

    def test_replace_column(self):
        """set_column swaps one column while preserving column order."""
        tb = api.table([
            ('a', 'int32'),
            ('b', 'double'),
            ('c', 'string')
        ])
        expr = tb.b.cast('int32')
        tb2 = tb.set_column('b', expr)
        expected = tb[tb.a, expr.name('b'), tb.c]
        assert_equal(tb2, expected)
    def test_filter_no_list(self):
        """filter accepts a single predicate, not only a list (#150-style API)."""
        pred = self.table.a > 5
        result = self.table.filter(pred)
        expected = self.table[pred]
        assert_equal(result, expected)

    def test_add_predicate(self):
        """Indexing a table with a boolean expr yields a Filter operation."""
        pred = self.table['a'] > 5
        result = self.table[pred]
        assert isinstance(result.op(), ops.Filter)

    def test_filter_root_table_preserved(self):
        """The filtered expression keeps the original table as its root."""
        result = self.table[self.table['a'] > 5]
        roots = result.op().root_tables()
        assert roots[0] is self.table.op()

    def test_invalid_predicate(self):
        """Predicates rooted in a different (lookalike) table are rejected."""
        # a lookalike
        table2 = api.table(self.schema, name='bar')
        self.assertRaises(RelationError, self.table.__getitem__,
                          table2['a'] > 5)

    def test_add_predicate_coalesce(self):
        # Successive predicates get combined into one rather than nesting. This
        # is mainly to enhance readability since we could handle this during
        # expression evaluation anyway.
        pred1 = self.table['a'] > 5
        pred2 = self.table['b'] > 0
        result = self.table[pred1][pred2]
        expected = self.table.filter([pred1, pred2])
        assert_equal(result, expected)
        # 59, if we are not careful, we can obtain broken refs
        interm = self.table[pred1]
        result = interm.filter([interm['b'] > 0])
        assert_equal(result, expected)
    def test_rewrite_expr_with_parent(self):
        """substitute_parents rewrites column refs past a filter, but not past
        a projection that introduces a new derived column."""
        table = self.con.table('test1')
        table2 = table[table['f'] > 0]
        expr = table2['c'] == 2
        result = L.substitute_parents(expr)
        expected = table['c'] == 2
        assert_equal(result, expected)
        # Substitution not fully possible if we depend on a new expr in a
        # projection
        table4 = table[['c', (table['c'] * 2).name('foo')]]
        expr = table4['c'] == table4['foo']
        result = L.substitute_parents(expr)
        expected = table['c'] == table4['foo']
        assert_equal(result, expected)

    def test_rewrite_distinct_but_equal_objects(self):
        """Substitution works when equal-but-distinct table objects are mixed."""
        t = self.con.table('test1')
        t_copy = self.con.table('test1')
        table2 = t[t_copy['f'] > 0]
        expr = table2['c'] == 2
        result = L.substitute_parents(expr)
        expected = t['c'] == 2
        assert_equal(result, expected)

    def test_repr_same_but_distinct_objects(self):
        """repr should show a single DatabaseTable for equal table objects."""
        t = self.con.table('test1')
        t_copy = self.con.table('test1')
        table2 = t[t_copy['f'] > 0]
        result = repr(table2)
        assert result.count('DatabaseTable') == 1

    def test_filter_fusion_distinct_table_objects(self):
        """Filter fusion treats equal-but-distinct table handles identically."""
        t = self.con.table('test1')
        tt = self.con.table('test1')
        expr = t[t.f > 0][t.c > 0]
        expr2 = t[t.f > 0][tt.c > 0]
        expr3 = t[tt.f > 0][tt.c > 0]
        expr4 = t[tt.f > 0][t.c > 0]
        assert_equal(expr, expr2)
        assert repr(expr) == repr(expr2)
        assert_equal(expr, expr3)
        assert_equal(expr, expr4)

    def test_rewrite_substitute_distinct_tables(self):
        """sub_for substitutes a structurally-equal subexpression from a
        distinct table handle."""
        t = self.con.table('test1')
        tt = self.con.table('test1')
        expr = t[t.c > 0]
        expr2 = tt[tt.c > 0]
        metric = t.f.sum().name('metric')
        expr3 = expr.aggregate(metric)
        result = L.sub_for(expr3, [(expr2, t)])
        expected = t.aggregate(metric)
        assert_equal(result, expected)

    def test_rewrite_join_projection_without_other_ops(self):
        # Drop out filters and other commutative table operations. Join
        # predicates are "lifted" to reference the base, unmodified join roots
        # Star schema with fact table
        table = self.con.table('star1')
        table2 = self.con.table('star2')
        table3 = self.con.table('star3')
        filtered = table[table['f'] > 0]
        pred1 = table['foo_id'] == table2['foo_id']
        pred2 = filtered['bar_id'] == table3['bar_id']
        j1 = filtered.left_join(table2, [pred1])
        j2 = j1.inner_join(table3, [pred2])
        # Project out the desired fields
        view = j2[[filtered, table2['value1'], table3['value2']]]
        # Construct the thing we expect to obtain
        ex_pred2 = table['bar_id'] == table3['bar_id']
        ex_expr = (table.left_join(table2, [pred1])
                   .inner_join(table3, [ex_pred2]))
        rewritten_proj = L.substitute_parents(view)
        op = rewritten_proj.op()
        assert_equal(op.table, ex_expr)
        # Ensure that filtered table has been substituted with the base table
        assert op.selections[0] is table

    def test_rewrite_past_projection(self):
        table = self.con.table('test1')
        # Rewrite past a projection
        table3 = table[['c', 'f']]
        expr = table3['c'] == 2
        result = L.substitute_parents(expr)
        expected = table['c'] == 2
        assert_equal(result, expected)
        # Unsafe to rewrite past projection
        table5 = table[(table.f * 2).name('c'), table.f]
        expr = table5['c'] == 2
        result = L.substitute_parents(expr)
        assert result is expr
    def test_projection_predicate_pushdown(self):
        # Probably test this during the evaluation phase. In SQL, "fusable"
        # table operations will be combined together into a single select
        # statement
        #
        # see ibis #71 for more on this
        t = self.table
        proj = t['a', 'b', 'c']
        # Rewrite a little more aggressively here
        result = proj[t.a > 0]
        # at one point these yielded different results
        filtered = t[t.a > 0]
        expected = filtered[t.a, t.b, t.c]
        expected2 = filtered.projection(['a', 'b', 'c'])
        assert_equal(result, expected)
        assert_equal(result, expected2)

    def test_projection_with_join_pushdown_rewrite_refs(self):
        # Observed this expression IR issue in a TopK-rewrite context
        table1 = api.table([
            ('a_key1', 'string'),
            ('a_key2', 'string'),
            ('a_value', 'double')
        ], 'foo')
        table2 = api.table([
            ('b_key1', 'string'),
            ('b_name', 'string'),
            ('b_value', 'double')
        ], 'bar')
        table3 = api.table([
            ('c_key2', 'string'),
            ('c_name', 'string')
        ], 'baz')
        proj = (table1.inner_join(table2, [('a_key1', 'b_key1')])
                .inner_join(table3, [(table1.a_key2, table3.c_key2)])
                [table1, table2.b_name.name('b'), table3.c_name.name('c'),
                 table2.b_value])
        # (predicate on projection, equivalent predicate on base table)
        cases = [
            (proj.a_value > 0, table1.a_value > 0),
            (proj.b_value > 0, table2.b_value > 0)
        ]
        for higher_pred, lower_pred in cases:
            result = proj.filter([higher_pred])
            op = result.op()
            assert isinstance(op, ops.Projection)
            filter_op = op.table.op()
            assert isinstance(filter_op, ops.Filter)
            new_pred = filter_op.predicates[0]
            # Predicate must have been rewritten to reference the base table
            assert_equal(new_pred, lower_pred)
    def test_limit(self):
        """limit stores n and offset on the resulting operation."""
        limited = self.table.limit(10, offset=5)
        assert limited.op().n == 10
        assert limited.op().offset == 5

    def test_sort_by(self):
        # Commit to some API for ascending and descending
        #
        # table.sort_by(['g', expr1, desc(expr2), desc(expr3)])
        #
        # Default is ascending for anything coercable to an expression,
        # and we'll have ascending/descending wrappers to help.
        result = self.table.sort_by(['f'])
        sort_key = result.op().keys[0]
        assert_equal(sort_key.expr, self.table.f)
        assert sort_key.ascending
        # non-list input. per #150
        result2 = self.table.sort_by('f')
        assert_equal(result, result2)
        # Descending may be spelled as False, 'descending', or 0
        result2 = self.table.sort_by([('f', False)])
        result3 = self.table.sort_by([('f', 'descending')])
        result4 = self.table.sort_by([('f', 0)])
        key2 = result2.op().keys[0]
        key3 = result3.op().keys[0]
        key4 = result4.op().keys[0]
        assert not key2.ascending
        assert not key3.ascending
        assert not key4.ascending
        assert_equal(result2, result3)

    def test_sort_by_desc_deferred_sort_key(self):
        """ibis.desc('name') resolves against the grouped/aggregated result."""
        result = (self.table.group_by('g')
                  .size()
                  .sort_by(ibis.desc('count')))
        tmp = self.table.group_by('g').size()
        expected = tmp.sort_by((tmp['count'], False))
        expected2 = tmp.sort_by(ibis.desc(tmp['count']))
        assert_equal(result, expected)
        assert_equal(result, expected2)

    def test_slice_convenience(self):
        """Slices map to limit/offset; steps and negative bounds are invalid."""
        expr = self.table[:5]
        expr2 = self.table[:5:1]
        assert_equal(expr, self.table.limit(5))
        assert_equal(expr, expr2)
        expr = self.table[2:7]
        expr2 = self.table[2:7:1]
        assert_equal(expr, self.table.limit(5, offset=2))
        assert_equal(expr, expr2)
        self.assertRaises(ValueError, self.table.__getitem__, slice(2, 15, 2))
        self.assertRaises(ValueError, self.table.__getitem__, slice(5, None))
        self.assertRaises(ValueError, self.table.__getitem__, slice(None, -5))
        self.assertRaises(ValueError, self.table.__getitem__, slice(-10, -5))
class TestAggregation(BasicTestCase, unittest.TestCase):
    """Tests for reduction expressions and table.aggregate/group_by APIs."""

    def test_count(self):
        """Column count yields an Int64 scalar Count op."""
        result = self.table['a'].count()
        assert isinstance(result, api.Int64Scalar)
        assert isinstance(result.op(), ops.Count)

    def test_table_count(self):
        """Table count is an Int64 scalar named 'count'."""
        result = self.table.count()
        assert isinstance(result, api.Int64Scalar)
        assert isinstance(result.op(), ops.Count)
        assert result.get_name() == 'count'

    def test_sum_expr_basics(self):
        # Impala gives bigint for all integer types
        ex_class = api.Int64Scalar
        for c in self.int_cols + self.bool_cols:
            result = self.table[c].sum()
            assert isinstance(result, ex_class)
            assert isinstance(result.op(), ops.Sum)
        # Impala gives double for all floating point types
        ex_class = api.DoubleScalar
        for c in self.float_cols:
            result = self.table[c].sum()
            assert isinstance(result, ex_class)
            assert isinstance(result.op(), ops.Sum)

    def test_mean_expr_basics(self):
        """mean() always produces a double scalar, regardless of input type."""
        cols = self.int_cols + self.float_cols + self.bool_cols
        for c in cols:
            result = self.table[c].mean()
            assert isinstance(result, api.DoubleScalar)
            assert isinstance(result.op(), ops.Mean)

    def test_aggregate_no_keys(self):
        agg_exprs = [self.table['a'].sum().name('sum(a)'),
                     self.table['c'].mean().name('mean(c)')]
        # A TableExpr, which in SQL at least will yield a table with a single
        # row
        result = self.table.aggregate(agg_exprs)
        assert isinstance(result, TableExpr)

    def test_aggregate_keys_basic(self):
        agg_exprs = [self.table['a'].sum().name('sum(a)'),
                     self.table['c'].mean().name('mean(c)')]
        # A TableExpr, which in SQL at least will yield a table with a single
        # row
        result = self.table.aggregate(agg_exprs, by=['g'])
        assert isinstance(result, TableExpr)
        # it works!
        repr(result)

    def test_aggregate_non_list_inputs(self):
        # per #150
        metric = self.table.f.sum().name('total')
        by = 'g'
        having = self.table.c.sum() > 10
        result = self.table.aggregate(metric, by=by, having=having)
        expected = self.table.aggregate([metric], by=[by], having=[having])
        assert_equal(result, expected)

    def test_summary_expand_list(self):
        """A summary expression expands into its component metrics."""
        summ = self.table.f.summary()
        metric = self.table.g.group_concat().name('bar')
        result = self.table.aggregate([metric, summ])
        expected = self.table.aggregate([metric] + summ.exprs())
        assert_equal(result, expected)

    def test_aggregate_invalid(self):
        # Pass a non-aggregation or non-scalar expr
        pass

    def test_filter_aggregate_pushdown_predicate(self):
        # In the case where we want to add a predicate to an aggregate
        # expression after the fact, rather than having to backpedal and add it
        # before calling aggregate.
        #
        # TODO (design decision): This could happen automatically when adding a
        # predicate originating from the same root table; if an expression is
        # created from field references from the aggregated table then it
        # becomes a filter predicate applied on top of a view
        pred = self.table.f > 0
        metrics = [self.table.a.sum().name('total')]
        agged = self.table.aggregate(metrics, by=['g'])
        filtered = agged.filter([pred])
        expected = self.table[pred].aggregate(metrics, by=['g'])
        assert_equal(filtered, expected)

    def test_filter_aggregate_partial_pushdown(self):
        pass

    def test_aggregate_post_predicate(self):
        # Test invalid having clause
        metrics = [self.table.f.sum().name('total')]
        by = ['g']
        invalid_having_cases = [
            self.table.f.sum(),
            self.table.f > 2
        ]
        for case in invalid_having_cases:
            self.assertRaises(com.ExpressionError, self.table.aggregate,
                              metrics, by=by, having=[case])

    def test_group_by_having_api(self):
        # #154, add a HAVING post-predicate in a composable way
        metric = self.table.f.sum().name('foo')
        postp = self.table.d.mean() > 1
        expr = (self.table
                .group_by('g')
                .having(postp)
                .aggregate(metric))
        expected = self.table.aggregate(metric, by='g', having=postp)
        assert_equal(expr, expected)

    def test_aggregate_root_table_internal(self):
        pass

    def test_compound_aggregate_expr(self):
        # See ibis #24
        compound_expr = (self.table['a'].sum() /
                         self.table['a'].mean()).name('foo')
        assert ops.is_reduction(compound_expr)
        # Validates internally
        self.table.aggregate([compound_expr])

    def test_groupby_convenience(self):
        """group_by(...).aggregate(...) equals aggregate(..., by=...)."""
        metrics = [self.table.f.sum().name('total')]
        expr = self.table.group_by('g').aggregate(metrics)
        expected = self.table.aggregate(metrics, by=['g'])
        assert_equal(expr, expected)
        group_expr = self.table.g.cast('double').name('g')
        expr = self.table.group_by(group_expr).aggregate(metrics)
        expected = self.table.aggregate(metrics, by=[group_expr])
        assert_equal(expr, expected)

    def test_group_by_count_size(self):
        # #148, convenience for interactive use, and so forth
        result1 = self.table.group_by('g').size()
        result2 = self.table.group_by('g').count()
        expected = (self.table.group_by('g')
                    .aggregate([self.table.count().name('count')]))
        assert_equal(result1, expected)
        assert_equal(result2, expected)
        result = self.table.group_by('g').count('foo')
        expected = (self.table.group_by('g')
                    .aggregate([self.table.count().name('foo')]))
        assert_equal(result, expected)

    def test_group_by_column_select_api(self):
        """grouped.<column>.<reduction>() is shorthand for aggregate."""
        grouped = self.table.group_by('g')
        result = grouped.f.sum()
        expected = grouped.aggregate(self.table.f.sum().name('sum(f)'))
        assert_equal(result, expected)
        supported_functions = ['sum', 'mean', 'count', 'size', 'max', 'min']
        # make sure they all work
        for fn in supported_functions:
            getattr(grouped.f, fn)()

    def test_value_counts_convenience(self):
        # #152
        result = self.table.g.value_counts()
        expected = (self.table.group_by('g')
                    .aggregate(self.table.count().name('count')))
        assert_equal(result, expected)

    def test_isin_value_counts(self):
        # #157, this code path was untested before
        bool_clause = self.table.g.notin(['1', '4', '7'])
        # it works!
        bool_clause.name('notin').value_counts()

    def test_value_counts_unnamed_expr(self):
        """Unnamed exprs get an 'unnamed' default name in value_counts."""
        nation = self.con.table('tpch_nation')
        expr = nation.n_name.lower().value_counts()
        expected = nation.n_name.lower().name('unnamed').value_counts()
        assert_equal(expr, expected)

    def test_aggregate_unnamed_expr(self):
        """Grouping by an unnamed derived expr raises ExpressionError."""
        nation = self.con.table('tpch_nation')
        expr = nation.n_name.lower().left(1)
        self.assertRaises(com.ExpressionError, nation.group_by(expr).aggregate,
                          nation.count().name('metric'))

    def test_default_reduction_names(self):
        """Each reduction carries a sensible default name."""
        d = self.table.f
        cases = [
            (d.count(), 'count'),
            (d.sum(), 'sum'),
            (d.mean(), 'mean'),
            (d.approx_nunique(), 'approx_nunique'),
            (d.approx_median(), 'approx_median'),
            (d.min(), 'min'),
            (d.max(), 'max')
        ]
        for expr, ex_name in cases:
            assert expr.get_name() == ex_name
class TestJoinsUnions(BasicTestCase, unittest.TestCase):
    """Tests for join construction, join predicates, unions, and schema merging."""

    def test_join_no_predicate_list(self):
        """A single predicate (non-list) is accepted by inner_join."""
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        pred = region.r_regionkey == nation.n_regionkey
        joined = region.inner_join(nation, pred)
        expected = region.inner_join(nation, [pred])
        assert_equal(joined, expected)

    def test_equijoin_schema_merge(self):
        """Materialized equijoins concatenate left and right schemas."""
        table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
        table2 = ibis.table([('key2', 'string'), ('stuff', 'int32')])
        pred = table1['key1'] == table2['key2']
        join_types = ['inner_join', 'left_join', 'outer_join']
        ex_schema = api.Schema(['key1', 'value1', 'key2', 'stuff'],
                               ['string', 'double', 'string', 'int32'])
        for fname in join_types:
            f = getattr(table1, fname)
            joined = f(table2, [pred]).materialize()
            assert_equal(joined.schema(), ex_schema)

    def test_join_combo_with_projection(self):
        # Test a case where there is column name overlap, but the projection
        # passed makes it a non-issue. Highly relevant with self-joins
        #
        # For example, where left/right have some field names in common:
        # SELECT left.*, right.a, right.b
        # FROM left join right on left.key = right.key
        t = self.table
        t2 = t.add_column(t['f'] * 2, 'foo')
        t2 = t2.add_column(t['f'] * 4, 'bar')
        # this works
        joined = t.left_join(t2, [t['g'] == t2['g']])
        proj = joined.projection([t, t2['foo'], t2['bar']])
        repr(proj)

    def test_self_join(self):
        # Self-joins are problematic with this design because column
        # expressions may reference either the left or right self. For example:
        #
        # SELECT left.key, sum(left.value - right.value) as total_deltas
        # FROM table left
        # INNER JOIN table right
        # ON left.current_period = right.previous_period + 1
        # GROUP BY 1
        #
        # One way around the self-join issue is to force the user to add
        # prefixes to the joined fields, then project using those. Not that
        # satisfying, though.
        left = self.table
        right = self.table.view()
        metric = (left['a'] - right['b']).mean().name('metric')
        joined = left.inner_join(right, [right['g'] == left['g']])
        # basic check there's no referential problems
        result_repr = repr(joined)
        assert 'ref_0' in result_repr
        assert 'ref_1' in result_repr
        # Cannot be immediately materialized because of the schema overlap
        self.assertRaises(RelationError, joined.materialize)
        # Project out left table schema
        proj = joined[[left]]
        assert_equal(proj.schema(), left.schema())
        # Try aggregating on top of joined
        aggregated = joined.aggregate([metric], by=[left['g']])
        ex_schema = api.Schema(['g', 'metric'], ['string', 'double'])
        assert_equal(aggregated.schema(), ex_schema)

    def test_self_join_no_view_convenience(self):
        # #165, self joins ought to be possible when the user specifies the
        # column names to join on rather than referentially-valid expressions
        result = self.table.join(self.table, [('g', 'g')])
        t2 = self.table.view()
        expected = self.table.join(t2, self.table.g == t2.g)
        assert_equal(result, expected)

    def test_materialized_join_reference_bug(self):
        # GH#403
        orders = self.con.table('tpch_orders')
        customer = self.con.table('tpch_customer')
        lineitem = self.con.table('tpch_lineitem')
        items = (orders
                 .join(lineitem, orders.o_orderkey == lineitem.l_orderkey)
                 [lineitem, orders.o_custkey, orders.o_orderpriority]
                 .join(customer, [('o_custkey', 'c_custkey')])
                 .materialize())
        items['o_orderpriority'].value_counts()

    def test_join_project_after(self):
        # e.g.
        #
        # SELECT L.foo, L.bar, R.baz, R.qux
        # FROM table1 L
        # INNER JOIN table2 R
        # ON L.key = R.key
        #
        # or
        #
        # SELECT L.*, R.baz
        # ...
        #
        # The default for a join is selecting all fields if possible
        table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
        table2 = ibis.table([('key2', 'string'), ('stuff', 'int32')])
        pred = table1['key1'] == table2['key2']
        joined = table1.left_join(table2, [pred])
        projected = joined.projection([table1, table2['stuff']])
        assert projected.schema().names == ['key1', 'value1', 'stuff']
        projected = joined.projection([table2, table1['key1']])
        assert projected.schema().names == ['key2', 'stuff', 'key1']

    def test_semi_join_schema(self):
        # A left semi join discards the schema of the right table
        table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
        table2 = ibis.table([('key2', 'string'), ('stuff', 'double')])
        pred = table1['key1'] == table2['key2']
        semi_joined = table1.semi_join(table2, [pred]).materialize()
        result_schema = semi_joined.schema()
        assert_equal(result_schema, table1.schema())

    def test_cross_join(self):
        """Cross join with scalar aggregates appends the aggregate schema."""
        agg_exprs = [self.table['a'].sum().name('sum_a'),
                     self.table['b'].mean().name('mean_b')]
        scalar_aggs = self.table.aggregate(agg_exprs)
        joined = self.table.cross_join(scalar_aggs).materialize()
        agg_schema = api.Schema(['sum_a', 'mean_b'], ['int64', 'double'])
        ex_schema = self.table.schema().append(agg_schema)
        assert_equal(joined.schema(), ex_schema)

    def test_join_compound_boolean_predicate(self):
        # The user might have composed predicates through logical operations
        pass

    def test_multiple_join_deeper_reference(self):
        # Join predicates down the chain might reference one or more root
        # tables in the hierarchy.
        table1 = ibis.table({'key1': 'string', 'key2': 'string',
                             'value1': 'double'})
        table2 = ibis.table({'key3': 'string', 'value2': 'double'})
        table3 = ibis.table({'key4': 'string', 'value3': 'double'})
        joined = table1.inner_join(table2, [table1['key1'] == table2['key3']])
        joined2 = joined.inner_join(table3, [table1['key2'] == table3['key4']])
        # it works, what more should we test here?
        materialized = joined2.materialize()
        repr(materialized)

    def test_filter_join_unmaterialized(self):
        """Filtering an unmaterialized join by a left-table predicate works."""
        table1 = ibis.table({'key1': 'string', 'key2': 'string',
                             'value1': 'double'})
        table2 = ibis.table({'key3': 'string', 'value2': 'double'})
        # It works!
        joined = table1.inner_join(table2, [table1['key1'] == table2['key3']])
        filtered = joined.filter([table1.value1 > 0])
        repr(filtered)

    def test_filter_on_projected_field(self):
        # See #173. Impala and other SQL engines do not allow filtering on a
        # just-created alias in a projection
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        customer = self.con.table('tpch_customer')
        orders = self.con.table('tpch_orders')
        fields_of_interest = [customer,
                              region.r_name.name('region'),
                              orders.o_totalprice.name('amount'),
                              orders.o_orderdate
                              .cast('timestamp').name('odate')]
        all_join = (
            region.join(nation, region.r_regionkey == nation.n_regionkey)
            .join(customer, customer.c_nationkey == nation.n_nationkey)
            .join(orders, orders.o_custkey == customer.c_custkey))
        tpch = all_join[fields_of_interest]
        # Correlated subquery, yikes!
        t2 = tpch.view()
        conditional_avg = t2[(t2.region == tpch.region)].amount.mean()
        # `amount` is part of the projection above as an aliased field
        amount_filter = tpch.amount > conditional_avg
        result = tpch.filter([amount_filter])
        # Now then! Predicate pushdown here is inappropriate, so we check that
        # it didn't occur.
        # If filter were pushed below projection, the top-level operator type
        # would be Projection instead.
        assert type(result.op()) == ops.Filter

    def test_join_can_rewrite_errant_predicate(self):
        # Join predicate references a derived table, but we can salvage and
        # rewrite it to get the join semantics out
        # see ibis #74
        table = ibis.table([
            ('c', 'int32'),
            ('f', 'double'),
            ('g', 'string')
        ], 'foo_table')
        table2 = ibis.table([
            ('key', 'string'),
            ('value', 'double')
        ], 'bar_table')
        filter_pred = table['f'] > 0
        table3 = table[filter_pred]
        result = table.inner_join(table2, [table3['g'] == table2['key']])
        expected = table.inner_join(table2, [table['g'] == table2['key']])
        assert_equal(result, expected)

    def test_non_equijoins(self):
        # Move non-equijoin predicates to WHERE during SQL translation if
        # possible, per #107
        pass

    def test_join_overlapping_column_names(self):
        pass

    def test_join_key_alternatives(self):
        """Join keys may be name tuples, expr tuples, or a single expression."""
        t1 = self.con.table('star1')
        t2 = self.con.table('star2')
        # Join with tuples
        joined = t1.inner_join(t2, [('foo_id', 'foo_id')])
        joined2 = t1.inner_join(t2, [(t1.foo_id, t2.foo_id)])
        # Join with single expr
        joined3 = t1.inner_join(t2, t1.foo_id == t2.foo_id)
        expected = t1.inner_join(t2, [t1.foo_id == t2.foo_id])
        assert_equal(joined, expected)
        assert_equal(joined2, expected)
        assert_equal(joined3, expected)
        # 3-tuples are malformed join keys
        self.assertRaises(com.ExpressionError, t1.inner_join, t2,
                          [('foo_id', 'foo_id', 'foo_id')])

    def test_join_invalid_refs(self):
        """Predicates referencing a table not in the join are rejected."""
        t1 = self.con.table('star1')
        t2 = self.con.table('star2')
        t3 = self.con.table('star3')
        predicate = t1.bar_id == t3.bar_id
        self.assertRaises(com.RelationError, t1.inner_join, t2, [predicate])

    def test_join_non_boolean_expr(self):
        t1 = self.con.table('star1')
        t2 = self.con.table('star2')
        # oops
        predicate = t1.f * t2.value1
        self.assertRaises(com.ExpressionError, t1.inner_join, t2, [predicate])

    def test_unravel_compound_equijoin(self):
        """AND-ed equality predicates unravel into a list of equijoin keys."""
        t1 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value1', 'double')
        ], 'foo_table')
        t2 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value2', 'double')
        ], 'bar_table')
        p1 = t1.key1 == t2.key1
        p2 = t1.key2 == t2.key2
        p3 = t1.key3 == t2.key3
        joined = t1.inner_join(t2, [p1 & p2 & p3])
        expected = t1.inner_join(t2, [p1, p2, p3])
        assert_equal(joined, expected)

    def test_join_add_prefixes(self):
        pass

    def test_join_nontrivial_exprs(self):
        pass

    def test_union(self):
        """Unions require matching schemas and record the distinct flag."""
        schema1 = [
            ('key', 'string'),
            ('value', 'double')
        ]
        schema2 = [
            ('key', 'string'),
            ('key2', 'string'),
            ('value', 'double')
        ]
        t1 = ibis.table(schema1, 'foo')
        t2 = ibis.table(schema1, 'bar')
        t3 = ibis.table(schema2, 'baz')
        result = t1.union(t2)
        assert isinstance(result.op(), ops.Union)
        assert not result.op().distinct
        result = t1.union(t2, distinct=True)
        assert isinstance(result.op(), ops.Union)
        assert result.op().distinct
        # Mismatched schemas cannot be unioned
        self.assertRaises(ir.RelationError, t1.union, t3)

    def test_column_ref_on_projection_rename(self):
        """Aggregating by a renamed projected column resolves correctly."""
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        customer = self.con.table('tpch_customer')
        joined = (region.inner_join(
            nation, [region.r_regionkey == nation.n_regionkey])
            .inner_join(
                customer, [customer.c_nationkey == nation.n_nationkey]))
        proj_exprs = [customer, nation.n_name.name('nation'),
                      region.r_name.name('region')]
        joined = joined.projection(proj_exprs)
        metrics = [joined.c_acctbal.sum().name('metric')]
        # it works!
        joined.aggregate(metrics, by=['region'])
class TestSemiAntiJoinPredicates(unittest.TestCase):
    """Tests for existence (ANY/NOT ANY) predicates used as semi/anti joins."""

    def setUp(self):
        # Two tables sharing key columns, used to form existence predicates
        self.con = MockConnection()
        self.t1 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string'),
            ('value1', 'double')
        ], 'foo')
        self.t2 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string')
        ], 'bar')

    def test_simple_existence_predicate(self):
        """(a == b).any() produces a boolean Any usable as a filter."""
        cond = (self.t1.key1 == self.t2.key1).any()
        assert isinstance(cond, ir.BooleanArray)
        op = cond.op()
        assert isinstance(op, ops.Any)
        # it works!
        expr = self.t1[cond]
        assert isinstance(expr.op(), ops.Filter)

    def test_cannot_use_existence_expression_in_join(self):
        # Join predicates must consist only of comparisons
        pass

    def test_not_exists_predicate(self):
        """Negating an Any yields a NotAny (anti-join) predicate."""
        cond = -(self.t1.key1 == self.t2.key1).any()
        assert isinstance(cond.op(), ops.NotAny)
| |
import os
import re
import time
import yaml
from . import command
class CloudFoundryObjectDoesNotExistError(Exception):
    """Raised when a queried Cloud Foundry service instance or app does not exist."""
class CloudFoundry(object):
def __init__(self, context):
self._context = context
def login(self, api_url, username, password, org, space):
"""
Login to Cloud Foundry
:type api_url: str
:type username: str
:type password: str
:type org: str
:type space: str
"""
self._context.log.info('logging into Cloud Foundry')
cmd_s = 'cf login -a {} -u {} -p {} -o {} -s {}'.format(api_url, username, password, org, space)
command.Command(self._context, cmd_s).run()
def get_api_endpoint(self):
self._context.log.info('getting Cloud Foundry target')
cmd_s = 'cf target'
cmd = command.Command(self._context, cmd_s)
cmd.run()
m = re.search(r'^api endpoint:\s*(.*)', cmd.stdout, re.MULTILINE)
if not m:
raise Exception("couldn't guess domain; cf target did not return api endpoint")
return m.group(1)
def create_space(self, name):
"""
:type name: str
"""
self._context.log.info('creating Cloud Foundry space "{}"'.format(name))
cmd_s = 'cf create-space {}'.format(name)
command.Command(self._context, cmd_s).run()
def target_space(self, name):
"""
:type name: str
"""
self._context.log.info('targeting Cloud Foundry space "{}"'.format(name))
cmd_s = 'cf target -s {}'.format(name)
command.Command(self._context, cmd_s).run()
def create_service(self, service, plan, service_instance, args=None):
"""
:type service: str
:type plan: str
:type service_instance: str
:type args: list
"""
if args is None:
args = []
self._context.log.info('creating Cloud Foundry service "{}:{}" instance "{}"'.format(service, plan, service_instance))
cmd_s = 'cf create-service {} {} {}'.format(service, plan, service_instance)
if args:
cmd_s += ' ' + ' '.join(args)
cmd = command.Command(self._context, cmd_s)
cmd.run()
if cmd.rc != 0:
raise Exception('create service instance failed: {}'.format(service_instance))
self._context.log.info('waiting for service instance "{}" to become available'.format(service_instance))
attempts = 0
while True:
attempts += 1
if self._context.options.cf.max_attempts >= 0:
if attempts > self._context.options.cf.max_attempts:
assert False, "maximum attempts exceeded ({})".format(self._context.options.cf.max_attempts)
self._context.log.info("attempt {}/{}".format(attempts, self._context.options.cf.max_attempts))
else:
self._context.log.info("attempt {}".format(attempts))
status = self.get_service_status(service_instance)
if (status == 'create succeeded') or (status == 'update succeeded'):
break
if status is None:
self._context.log.info('service instance "{}" status not yet available'.format(service_instance))
else:
self._context.log.info('service instance "{}" status: "{}"'.format(service_instance, status))
time.sleep(1)
def create_user_provided_service(self, service_instance, credentials=None):
"""
:type service_instance: str
:type credentials: str
"""
cmd_s = 'cf create-user-provided-service {}'.format(service_instance)
if credentials:
cmd_s += ' -p {}'.format(credentials)
cmd = command.Command(self._context, cmd_s)
cmd.run()
if cmd.rc != 0:
raise Exception('create user provided service instance failed: {}'.format(service_instance))
def delete_service(self, service_instance):
"""
:type service_instance: str
"""
self._context.log.info('deleting Cloud Foundry service instance "{}"'.format(service_instance))
cmd_s = 'cf delete-service -f {}'.format(service_instance)
command.Command(self._context, cmd_s).run()
def service_exists(self, service_instance):
"""
:type service_instance: str
"""
cmd_s = 'cf service {}'.format(service_instance)
cmd = command.Command(self._context, cmd_s)
try:
cmd.run()
return True
except command.CommandException as e:
if 'Service instance {} not found'.format(service_instance) in str(e):
return False
raise e
def get_service_status(self, service_instance):
"""
:type service_instance: str
"""
cmd_s = 'cf service {}'.format(service_instance)
cmd = command.Command(self._context, cmd_s)
try:
cmd.run()
except command.CommandException as e:
if 'Service instance {} not found'.format(service_instance) in str(e):
raise CloudFoundryObjectDoesNotExistError()
raise e
match = re.search(r'^status:\s+(.*)', cmd.stdout, re.MULTILINE)
if match:
return match.group(1)
match = re.search(r'^service:\s+(.*)', cmd.stdout, re.MULTILINE)
return match.group(1)
def push_app(self, manifest):
"""
:type manifest: str
"""
manifest_yaml = yaml.safe_load(open(os.path.join(self._context.project_dir, manifest), 'r'))
app_name = manifest_yaml['applications'][0]['name']
self._context.log.info('pushing Cloud Foundry app "{}" ({})'.format(app_name, manifest))
cmd_s = 'cf push -f {}'.format(manifest)
cmd = command.Command(self._context, cmd_s)
cmd.run()
if cmd.rc != 0:
raise Exception('push app failed: "{}" ({})'.format(app_name, manifest))
attempts = 0
while True:
attempts += 1
if self._context.options.cf.max_attempts >= 0:
if attempts > self._context.options.cf.max_attempts:
assert False, "maximum attempts exceeded ({})".format(self._context.options.cf.max_attempts)
self._context.log.info("attempt {}/{}".format(attempts, self._context.options.cf.max_attempts))
else:
self._context.log.info("attempt {}".format(attempts))
status = self.get_app_status(app_name)
if status == 'running':
break
if status is None:
self._context.log.info('app "{}" status not yet available'.format(app_name))
else:
self._context.log.info('app "{}" status: "{}"'.format(app_name, status))
time.sleep(1)
def delete_app(self, app_name):
    """Delete a Cloud Foundry app without prompting for confirmation.

    :type app_name: str
    """
    self._context.log.info('deleting Cloud Foundry app "{}"'.format(app_name))
    remover = command.Command(self._context, 'cf delete -f {}'.format(app_name))
    remover.run()
def app_exists(self, app_name):
    """Return True when the named app exists, False otherwise.

    :type app_name: str
    """
    probe = command.Command(self._context, 'cf app {}'.format(app_name))
    try:
        probe.run()
    except command.CommandException as e:
        # A missing app is reported via the command's error text.
        if "App '{}' not found".format(app_name) in str(e):
            return False
        raise e
    return True
def get_app_status(self, app_name):
    """Return the status of app instance #0, or None when not yet reported.

    :type app_name: str
    :raises CloudFoundryObjectDoesNotExistError: when the app is absent.
    """
    probe = command.Command(self._context, 'cf app {}'.format(app_name))
    try:
        probe.run()
    except command.CommandException as e:
        if "App '{}' not found".format(app_name) in str(e):
            raise CloudFoundryObjectDoesNotExistError()
        raise e
    found = re.search(r'^#0\s+(\S+)', probe.stdout, re.MULTILINE)
    return found.group(1) if found else None
| |
"""
Module for parsing Makefile syntax.
Makefiles use a line-based parsing system. Continuations and substitutions are handled differently based on the
type of line being parsed:
Lines with makefile syntax condense continuations to a single space, no matter the actual trailing whitespace
of the first line or the leading whitespace of the continuation. In other situations, trailing whitespace is
relevant.
Lines with command syntax do not condense continuations: the backslash and newline are part of the command.
(GNU Make is buggy in this regard, at least on mac).
Lines with an initial tab are commands if they can be (there is a rule or a command immediately preceding).
Otherwise, they are parsed as makefile syntax.
This file parses into the data structures defined in the parserdata module. Those classes are what actually
do the dirty work of "executing" the parsed data into a data.Makefile.
Three iterator functions are available:
* iterdata
* itermakefilechars
* itercommandchars
The iterators handle line continuations and comments in different ways, but share a common calling
convention:
Called with (data, startoffset, tokenlist, finditer)
yield 4-tuples (flatstr, token, tokenoffset, afteroffset)
flatstr is data, guaranteed to have no tokens (may be '')
token, tokenoffset, afteroffset *may be None*. That means there is more text
coming.
"""
import logging, re, os, sys
import data, functions, util, parserdata
from pymake import errors
# Module-wide logger used for parser diagnostics.
_log = logging.getLogger('pymake.parser')

# Matches the first non-whitespace character; used by Data.skipwhitespace.
_skipws = re.compile('\S')
class Data(object):
    """
    A single virtual "line", which can be multiple source lines joined with
    continuations.
    """

    # s is the whole underlying buffer; [lstart, lend) delimits this virtual
    # line within it, and loc is the source location of lstart.
    __slots__ = ('s', 'lstart', 'lend', 'loc')

    def __init__(self, s, lstart, lend, loc):
        self.s = s
        self.lstart = lstart
        self.lend = lend
        self.loc = loc

    @staticmethod
    def fromstring(s, path):
        # Convenience constructor: treat the entire string as one virtual line.
        return Data(s, 0, len(s), parserdata.Location(path, 1, 0))

    def getloc(self, offset):
        # Map an absolute offset within this line back to a source Location.
        assert offset >= self.lstart and offset <= self.lend
        return self.loc.offset(self.s, self.lstart, offset)

    def skipwhitespace(self, offset):
        """
        Return the offset of the first non-whitespace character in data
        starting at offset, or lend if only whitespace characters remain.

        NOTE(review): at least one caller checks this result against None,
        but the code below returns lend, never None — confirm intent.
        """
        m = _skipws.search(self.s, offset, self.lend)
        if m is None:
            return self.lend
        return m.start(0)
# Matches a newline together with all immediately preceding backslashes.
_linere = re.compile(r'\\*\n')

def enumeratelines(s, filename):
    """
    Enumerate lines in a string as Data objects, joining line
    continuations.
    """
    off = 0
    lineno = 1
    curlines = 0
    for m in _linere.finditer(s):
        curlines += 1
        start, end = m.span(0)

        # The span covers "<backslashes>\n", so its length is
        # (#backslashes + 1): an even span length means an odd backslash
        # count, i.e. the newline itself is escaped.
        if (start - end) % 2 == 0:
            # odd number of backslashes is a continuation
            continue

        # end - 1 excludes the (unescaped) newline from the virtual line.
        yield Data(s, off, end - 1, parserdata.Location(filename, lineno, 0))

        lineno += curlines
        curlines = 0
        off = end

    # Final (possibly newline-less) chunk of the buffer.
    yield Data(s, off, len(s), parserdata.Location(filename, lineno, 0))
_alltokens = re.compile(r'''\\*\# | # hash mark preceeded by any number of backslashes
:= |
\+= |
\?= |
:: |
(?:\$(?:$|[\(\{](?:%s)\s+|.)) | # dollar sign followed by EOF, a function keyword with whitespace, or any character
:(?![\\/]) | # colon followed by anything except a slash (Windows path detection)
[=#{}();,|'"]''' % '|'.join(functions.functionmap.keys()), re.VERBOSE)
def iterdata(d, offset, tokenlist, it):
    """
    Iterate over flat data without line continuations, comments, or any special escaped characters.
    Typically used to parse recursively-expanded variables.
    """
    assert len(tokenlist), "Empty tokenlist passed to iterdata is meaningless!"
    assert offset >= d.lstart and offset <= d.lend, "offset %i should be between %i and %i" % (offset, d.lstart, d.lend)

    if offset == d.lend:
        return

    buf = d.s
    for match in it:
        tstart, tend = match.span(0)
        tok = buf[tstart:tend]
        if tok in tokenlist or (tok.startswith('$') and '$' in tokenlist):
            # Emit the flat text before the token, then the token itself.
            yield buf[offset:tstart], tok, tstart, tend
        else:
            # Uninteresting token: fold it into the flat text.
            yield buf[offset:tend], None, None, tend
        offset = tend

    # Trailing flat text; None token signals end of data.
    yield buf[offset:d.lend], None, None, None
# multiple backslashes before a newline are unescaped, halving their total number
_makecontinuations = re.compile(r'(?:\s*|((?:\\\\)+))\\\n\s*')
def _replacemakecontinuations(m):
start, end = m.span(1)
if start == -1:
return ' '
return ' '.rjust((end - start) // 2 + 1, '\\')
def itermakefilechars(d, offset, tokenlist, it, ignorecomments=False):
    """
    Iterate over data in makefile syntax. Comments are found at unescaped # characters, and escaped newlines
    are converted to single-space continuations.
    """
    assert offset >= d.lstart and offset <= d.lend, "offset %i should be between %i and %i" % (offset, d.lstart, d.lend)

    if offset == d.lend:
        return

    s = d.s
    for m in it:
        mstart, mend = m.span(0)
        token = s[mstart:mend]

        # Collapse escaped newlines in the flat text preceding the token.
        starttext = _makecontinuations.sub(_replacemakecontinuations, s[offset:mstart])

        if token[-1] == '#' and not ignorecomments:
            # The token is (l - 1) backslashes followed by '#'.
            l = mend - mstart
            # multiple backslashes before a hash are unescaped, halving their total number
            if l % 2:
                # found a comment: odd token length means an even number of
                # backslashes, so the hash is unescaped — emit half the
                # backslashes and stop at the comment
                yield starttext + token[:(l - 1) // 2], None, None, None
                return
            else:
                # escaped hash: emit half the backslashes plus a literal '#'
                yield starttext + token[-l // 2:], None, None, mend
        elif token in tokenlist or (token[0] == '$' and '$' in tokenlist):
            yield starttext, token, mstart, mend
        else:
            yield starttext + token, None, None, mend
        offset = mend

    # Trailing flat text; None token signals end of data.
    yield _makecontinuations.sub(_replacemakecontinuations, s[offset:d.lend]), None, None, None
# A hash mark together with all immediately preceding backslashes.
_findcomment = re.compile(r'\\*\#')

def flattenmakesyntax(d, offset):
    """
    A shortcut method for flattening line continuations and comments in makefile syntax without
    looking for other tokens.
    """
    assert offset >= d.lstart and offset <= d.lend, "offset %i should be between %i and %i" % (offset, d.lstart, d.lend)
    if offset == d.lend:
        return ''

    # First collapse escaped newlines, then strip comments from the result.
    s = _makecontinuations.sub(_replacemakecontinuations, d.s[offset:d.lend])
    elements = []
    offset = 0
    for m in _findcomment.finditer(s):
        mstart, mend = m.span(0)
        elements.append(s[offset:mstart])
        if (mend - mstart) % 2:
            # even number of backslashes... it's a comment: keep half the
            # backslashes and drop everything from the hash onwards
            elements.append(''.ljust((mend - mstart - 1) // 2, '\\'))
            return ''.join(elements)

        # odd number of backslashes: the hash is escaped — keep half the
        # backslashes plus a literal '#'
        elements.append(''.ljust((mend - mstart - 2) // 2, '\\') + '#')
        offset = mend

    elements.append(s[offset:])
    return ''.join(elements)
def itercommandchars(d, offset, tokenlist, it):
    """
    Iterate over command syntax. # comment markers are not special, and escaped newlines are included
    in the output text.
    """
    assert offset >= d.lstart and offset <= d.lend, "offset %i should be between %i and %i" % (offset, d.lstart, d.lend)

    if offset == d.lend:
        return

    buf = d.s
    for match in it:
        tstart, tend = match.span(0)
        tok = buf[tstart:tend]
        # A tab following a newline is command-prefix syntax, not command text.
        flat = buf[offset:tstart].replace('\n\t', '\n')
        if tok in tokenlist or (tok.startswith('$') and '$' in tokenlist):
            yield flat, tok, tstart, tend
        else:
            yield flat + tok, None, None, tend
        offset = tend

    yield buf[offset:d.lend].replace('\n\t', '\n'), None, None, None
# Lines opening or closing a (possibly nested) define block.
# NOTE(review): this also matches lines merely *prefixed* with
# define/endef (e.g. "defines = 1"), which would bump definecount —
# confirm against GNU make behavior.
_redefines = re.compile('\s*define|\s*endef')

def iterdefinelines(it, startloc):
    """
    Process the insides of a define. Most characters are included literally. Escaped newlines are treated
    as they would be in makefile syntax. Internal define/endef pairs are ignored.
    """
    results = []

    # Depth of nested define blocks; we entered one to get here.
    definecount = 1
    for d in it:
        m = _redefines.match(d.s, d.lstart, d.lend)
        if m is not None:
            directive = m.group(0).strip()
            if directive == 'endef':
                definecount -= 1
                if definecount == 0:
                    # Join the collected lines and collapse escaped newlines.
                    return _makecontinuations.sub(_replacemakecontinuations, '\n'.join(results))
            else:
                definecount += 1

        results.append(d.s[d.lstart:d.lend])

    # Falling off the end is an unterminated define!
    raise errors.SyntaxError("define without matching endef", startloc)
def _ensureend(d, offset, msg):
    """
    Ensure that only whitespace remains in this data; raise a SyntaxError
    carrying *msg* otherwise.
    """
    remainder = flattenmakesyntax(d, offset)
    if remainder and not remainder.isspace():
        raise errors.SyntaxError(msg, d.getloc(offset))
# Characters that may open the arguments of an ifeq/ifneq directive.
_eqargstokenlist = ('(', "'", '"')

def ifeq(d, offset):
    """Parse an 'ifeq' directive body into a parserdata.EqCondition.

    Accepts both the parenthesized form ifeq (a,b) and the quoted form
    ifeq "a" "b" / ifeq 'a' 'b'.
    """
    if offset > d.lend - 1:
        raise errors.SyntaxError("No arguments after conditional", d.getloc(offset))

    # the variety of formats for this directive is rather maddening
    token = d.s[offset]
    if token not in _eqargstokenlist:
        raise errors.SyntaxError("No arguments after conditional", d.getloc(offset))

    offset += 1

    if token == '(':
        # Parenthesized form: two comma-separated arguments.
        arg1, t, offset = parsemakesyntax(d, offset, (',',), itermakefilechars)
        if t is None:
            raise errors.SyntaxError("Expected two arguments in conditional", d.getloc(d.lend))

        arg1.rstrip()

        offset = d.skipwhitespace(offset)
        arg2, t, offset = parsemakesyntax(d, offset, (')',), itermakefilechars)
        if t is None:
            raise errors.SyntaxError("Unexpected text in conditional", d.getloc(offset))

        _ensureend(d, offset, "Unexpected text after conditional")
    else:
        # Quoted form: each argument terminated by its own quote character.
        arg1, t, offset = parsemakesyntax(d, offset, (token,), itermakefilechars)
        if t is None:
            raise errors.SyntaxError("Unexpected text in conditional", d.getloc(d.lend))

        offset = d.skipwhitespace(offset)
        if offset == d.lend:
            raise errors.SyntaxError("Expected two arguments in conditional", d.getloc(offset))

        token = d.s[offset]
        if token not in '\'"':
            raise errors.SyntaxError("Unexpected text in conditional", d.getloc(offset))

        arg2, t, offset = parsemakesyntax(d, offset + 1, (token,), itermakefilechars)

        _ensureend(d, offset, "Unexpected text after conditional")

    return parserdata.EqCondition(arg1, arg2)
def ifneq(d, offset):
    """Parse an 'ifneq' directive: an 'ifeq' with the expected result inverted."""
    cond = ifeq(d, offset)
    cond.expected = False
    return cond
def ifdef(d, offset):
    """Parse an 'ifdef' directive body into a parserdata.IfdefCondition."""
    name, _, _ = parsemakesyntax(d, offset, (), itermakefilechars)
    name.rstrip()
    return parserdata.IfdefCondition(name)
def ifndef(d, offset):
    """Parse an 'ifndef' directive: an 'ifdef' with the expected result inverted."""
    cond = ifdef(d, offset)
    cond.expected = False
    return cond
# Dispatch table from conditional directive keyword to its parser function.
_conditionkeywords = {
    'ifeq': ifeq,
    'ifneq': ifneq,
    'ifdef': ifdef,
    'ifndef': ifndef
    }

_conditiontokens = tuple(_conditionkeywords.keys())
# A directive keyword must be followed by end-of-line or whitespace.
_conditionre = re.compile(r'(%s)(?:$|\s+)' % '|'.join(_conditiontokens))

# Every directive keyword recognized at the start of a makefile line.
_directivestokenlist = _conditiontokens + \
    ('else', 'endif', 'define', 'endef', 'override', 'include', '-include', 'includedeps', '-includedeps', 'vpath', 'export', 'unexport')

_directivesre = re.compile(r'(%s)(?:$|\s+)' % '|'.join(_directivestokenlist))

# Variable-assignment operators, in the order they are tried.
_varsettokens = (':=', '+=', '?=', '=')
def _parsefile(pathname):
    """Parse *pathname* into a StatementList, recording the file's mtime so
    the cache can later detect staleness.

    NOTE(review): mode "rU" was removed in Python 3.11 — drop the flag if
    this module is ever ported forward.
    """
    # Context manager: the original leaked the handle if parsestring raised.
    with open(pathname, "rU") as fd:
        stmts = parsestring(fd.read(), pathname)
        stmts.mtime = os.fstat(fd.fileno()).st_mtime
    return stmts
def _checktime(path, stmts):
    """Cache-validation hook: True when *path*'s on-disk mtime still matches
    the mtime recorded on *stmts*."""
    if os.path.getmtime(path) == stmts.mtime:
        return True
    _log.debug("Re-parsing makefile '%s': mtimes differ", path)
    return False

# Keep up to 50 parsed makefiles, validated against on-disk mtimes.
_parsecache = util.MostUsedCache(50, _parsefile, _checktime)
def parsefile(pathname):
    """
    Parse a filename into a parserdata.StatementList. A cache is used to avoid re-parsing
    makefiles that have already been parsed and have not changed.
    """
    # Canonicalize so cache keys are stable across equivalent paths.
    return _parsecache.get(os.path.realpath(pathname))
# colon followed by anything except a slash (Windows path detection);
# splits "target: deps" without breaking drive-letter paths like C:\foo
_depfilesplitter = re.compile(r':(?![\\/])')

# simple variable references of the form $(NAME)
_vars = re.compile('\$\((\w+)\)')
def parsedepfile(pathname):
    """
    Parse a filename listing only dependencies into a parserdata.StatementList.
    Simple variable references are allowed in such files.
    """
    def continuation_iter(lines):
        # Join trailing-backslash continuations and skip empty lines.
        current_line = []
        for line in lines:
            line = line.rstrip()
            if line.endswith("\\"):
                current_line.append(line.rstrip("\\"))
                continue
            if not len(line):
                continue
            current_line.append(line)
            yield ''.join(current_line)
            current_line = []
        if current_line:
            yield ''.join(current_line)

    def get_expansion(s):
        # Turn $(VAR) references into VariableRefs; plain text stays a string.
        if '$' in s:
            expansion = data.Expansion()
            # for an input like e.g. "foo $(bar) baz",
            # _vars.split returns ["foo", "bar", "baz"]
            # every other element is a variable name.
            for i, element in enumerate(_vars.split(s)):
                if i % 2:
                    expansion.appendfunc(functions.VariableRef(None,
                                         data.StringExpansion(element, None)))
                elif element:
                    expansion.appendstr(element)
            return expansion

        return data.StringExpansion(s, None)

    pathname = os.path.realpath(pathname)
    stmts = parserdata.StatementList()
    # Context manager: the original never closed the dep file handle.
    with open(pathname) as depfile:
        for line in continuation_iter(depfile.readlines()):
            target, deps = _depfilesplitter.split(line, 1)
            stmts.append(parserdata.Rule(get_expansion(target),
                                         get_expansion(deps), False))
    return stmts
def parsestring(s, filename):
    """
    Parse a string containing makefile data into a parserdata.StatementList.

    condstack tracks open conditional blocks; currule is True while command
    lines (tab-prefixed) may still belong to the most recent rule.
    """
    currule = False
    condstack = [parserdata.StatementList()]

    fdlines = enumeratelines(s, filename)
    for d in fdlines:
        assert len(condstack) > 0

        offset = d.lstart

        # A tab-prefixed line immediately following a rule is a command.
        if currule and offset < d.lend and d.s[offset] == '\t':
            e, token, offset = parsemakesyntax(d, offset + 1, (), itercommandchars)
            assert token is None
            assert offset is None
            condstack[-1].append(parserdata.Command(e))
            continue

        # To parse Makefile syntax, we first strip leading whitespace and
        # look for initial keywords. If there are no keywords, it's either
        # setting a variable or writing a rule.
        offset = d.skipwhitespace(offset)
        # NOTE(review): skipwhitespace returns d.lend, not None — confirm
        # this blank-line check ever fires.
        if offset is None:
            continue

        m = _directivesre.match(d.s, offset, d.lend)
        if m is not None:
            kword = m.group(1)
            offset = m.end(0)

            if kword == 'endif':
                _ensureend(d, offset, "Unexpected data after 'endif' directive")
                if len(condstack) == 1:
                    raise errors.SyntaxError("unmatched 'endif' directive",
                                             d.getloc(offset))

                condstack.pop().endloc = d.getloc(offset)
                continue

            if kword == 'else':
                if len(condstack) == 1:
                    raise errors.SyntaxError("unmatched 'else' directive",
                                             d.getloc(offset))

                # 'else' may carry a follow-on condition ("else ifeq ...").
                m = _conditionre.match(d.s, offset, d.lend)
                if m is None:
                    _ensureend(d, offset, "Unexpected data after 'else' directive.")
                    condstack[-1].addcondition(d.getloc(offset), parserdata.ElseCondition())
                else:
                    kword = m.group(1)
                    if kword not in _conditionkeywords:
                        raise errors.SyntaxError("Unexpected condition after 'else' directive.",
                                                 d.getloc(offset))

                    startoffset = offset
                    offset = d.skipwhitespace(m.end(1))
                    c = _conditionkeywords[kword](d, offset)
                    condstack[-1].addcondition(d.getloc(startoffset), c)
                continue

            if kword in _conditionkeywords:
                # Open a new conditional block; statements nest inside it.
                c = _conditionkeywords[kword](d, offset)
                cb = parserdata.ConditionBlock(d.getloc(d.lstart), c)
                condstack[-1].append(cb)
                condstack.append(cb)
                continue

            if kword == 'endef':
                raise errors.SyntaxError("endef without matching define", d.getloc(offset))

            if kword == 'define':
                currule = False
                vname, t, i = parsemakesyntax(d, offset, (), itermakefilechars)
                vname.rstrip()

                startloc = d.getloc(d.lstart)
                # Consume following lines up to the matching endef.
                value = iterdefinelines(fdlines, startloc)
                condstack[-1].append(parserdata.SetVariable(vname, value=value, valueloc=startloc, token='=', targetexp=None))
                continue

            if kword in ('include', '-include', 'includedeps', '-includedeps'):
                # Leading '-' means a missing include file is not an error.
                if kword.startswith('-'):
                    required = False
                    kword = kword[1:]
                else:
                    required = True

                deps = kword == 'includedeps'

                currule = False
                incfile, t, offset = parsemakesyntax(d, offset, (), itermakefilechars)
                condstack[-1].append(parserdata.Include(incfile, required, deps))

                continue

            if kword == 'vpath':
                currule = False
                e, t, offset = parsemakesyntax(d, offset, (), itermakefilechars)
                condstack[-1].append(parserdata.VPathDirective(e))
                continue

            if kword == 'override':
                currule = False
                vname, token, offset = parsemakesyntax(d, offset, _varsettokens, itermakefilechars)
                vname.lstrip()
                vname.rstrip()

                if token is None:
                    raise errors.SyntaxError("Malformed override directive, need =", d.getloc(d.lstart))

                value = flattenmakesyntax(d, offset).lstrip()

                condstack[-1].append(parserdata.SetVariable(vname, value=value, valueloc=d.getloc(offset), token=token, targetexp=None, source=data.Variables.SOURCE_OVERRIDE))
                continue

            if kword == 'export':
                currule = False
                e, token, offset = parsemakesyntax(d, offset, _varsettokens, itermakefilechars)
                e.lstrip()
                e.rstrip()

                # 'export NAME' vs 'export NAME = value' (export + assign).
                if token is None:
                    condstack[-1].append(parserdata.ExportDirective(e, concurrent_set=False))
                else:
                    condstack[-1].append(parserdata.ExportDirective(e, concurrent_set=True))

                    value = flattenmakesyntax(d, offset).lstrip()
                    condstack[-1].append(parserdata.SetVariable(e, value=value, valueloc=d.getloc(offset), token=token, targetexp=None))

                continue

            if kword == 'unexport':
                e, token, offset = parsemakesyntax(d, offset, (), itermakefilechars)
                condstack[-1].append(parserdata.UnexportDirective(e))
                continue

        # No directive: this is a variable assignment or a rule.
        e, token, offset = parsemakesyntax(d, offset, _varsettokens + ('::', ':'), itermakefilechars)
        if token is None:
            e.rstrip()
            e.lstrip()
            if not e.isempty():
                condstack[-1].append(parserdata.EmptyDirective(e))
            continue

        # if we encountered real makefile syntax, the current rule is over
        currule = False

        if token in _varsettokens:
            e.lstrip()
            e.rstrip()

            value = flattenmakesyntax(d, offset).lstrip()

            condstack[-1].append(parserdata.SetVariable(e, value=value, valueloc=d.getloc(offset), token=token, targetexp=None))
        else:
            doublecolon = token == '::'

            # `e` is targets or target patterns, which can end up as
            # * a rule
            # * an implicit rule
            # * a static pattern rule
            # * a target-specific variable definition
            # * a pattern-specific variable definition
            # any of the rules may have order-only prerequisites
            # delimited by |, and a command delimited by ;
            targets = e

            e, token, offset = parsemakesyntax(d, offset,
                                               _varsettokens + (':', '|', ';'),
                                               itermakefilechars)
            if token in (None, ';'):
                condstack[-1].append(parserdata.Rule(targets, e, doublecolon))
                currule = True

                # ';' introduces an inline command on the rule line.
                if token == ';':
                    offset = d.skipwhitespace(offset)
                    e, t, offset = parsemakesyntax(d, offset, (), itercommandchars)
                    condstack[-1].append(parserdata.Command(e))
            elif token in _varsettokens:
                # Target-specific variable assignment.
                e.lstrip()
                e.rstrip()

                value = flattenmakesyntax(d, offset).lstrip()
                condstack[-1].append(parserdata.SetVariable(e, value=value, valueloc=d.getloc(offset), token=token, targetexp=targets))
            elif token == '|':
                raise errors.SyntaxError('order-only prerequisites not implemented', d.getloc(offset))
            else:
                assert token == ':'
                # static pattern rule
                pattern = e

                deps, token, offset = parsemakesyntax(d, offset, (';',), itermakefilechars)
                condstack[-1].append(parserdata.StaticPatternRule(targets, pattern, deps, doublecolon))
                currule = True
                if token == ';':
                    offset = d.skipwhitespace(offset)
                    e, token, offset = parsemakesyntax(d, offset, (), itercommandchars)
                    condstack[-1].append(parserdata.Command(e))

    if len(condstack) != 1:
        raise errors.SyntaxError("Condition never terminated with endif", condstack[-1].loc)

    return condstack[0]
# States of the parsemakesyntax stack machine.
_PARSESTATE_TOPLEVEL = 0 # at the top level
_PARSESTATE_FUNCTION = 1 # expanding a function call
_PARSESTATE_VARNAME = 2 # expanding a variable expansion.
_PARSESTATE_SUBSTFROM = 3 # expanding a variable expansion substitution "from" value
_PARSESTATE_SUBSTTO = 4 # expanding a variable expansion substitution "to" value
_PARSESTATE_PARENMATCH = 5 # inside nested parentheses/braces that must be matched

class ParseStackFrame(object):
    """One nesting level in parsemakesyntax: the expansion being built and
    the tokens that are significant at this level."""

    __slots__ = ('parsestate', 'parent', 'expansion', 'tokenlist', 'openbrace', 'closebrace', 'function', 'loc', 'varname', 'substfrom')

    def __init__(self, parsestate, parent, expansion, tokenlist, openbrace, closebrace, function=None, loc=None):
        self.parsestate = parsestate
        self.parent = parent
        self.expansion = expansion
        self.tokenlist = tokenlist
        self.openbrace = openbrace
        self.closebrace = closebrace
        self.function = function
        self.loc = loc

    def __str__(self):
        return "<state=%i expansion=%s tokenlist=%s openbrace=%s closebrace=%s>" % (self.parsestate, self.expansion, self.tokenlist, self.openbrace, self.closebrace)

# Maps an opening brace character to its matching closer.
_matchingbrace = {
    '(': ')',
    '{': '}',
    }
def parsemakesyntax(d, offset, stopon, iterfunc):
    """
    Given Data, parse it into a data.Expansion.

    @param stopon (sequence)
        Indicate characters where toplevel parsing should stop.

    @param iterfunc (generator function)
        A function which is used to iterate over d, yielding (char, offset, loc)
        @see iterdata
        @see itermakefilechars
        @see itercommandchars

    @return a tuple (expansion, token, offset). If all the data is consumed,
        token and offset will be None
    """
    assert callable(iterfunc)

    # Stack of ParseStackFrames; the top frame holds the expansion currently
    # being built and the tokens significant at that nesting level.
    stacktop = ParseStackFrame(_PARSESTATE_TOPLEVEL, None, data.Expansion(loc=d.getloc(d.lstart)),
                               tokenlist=stopon + ('$',),
                               openbrace=None, closebrace=None)

    tokeniterator = _alltokens.finditer(d.s, offset, d.lend)

    di = iterfunc(d, offset, stacktop.tokenlist, tokeniterator)
    while True: # this is not a for loop because `di` changes during the function
        assert stacktop is not None
        try:
            s, token, tokenoffset, offset = next(di)
        except StopIteration:
            break

        stacktop.expansion.appendstr(s)
        if token is None:
            continue

        parsestate = stacktop.parsestate

        if token[0] == '$':
            if tokenoffset + 1 == d.lend:
                # an unterminated $ expands to nothing
                break

            loc = d.getloc(tokenoffset)
            c = token[1]
            if c == '$':
                # "$$" is a literal dollar sign.
                assert len(token) == 2
                stacktop.expansion.appendstr('$')
            elif c in ('(', '{'):
                closebrace = _matchingbrace[c]

                if len(token) > 2:
                    # "$(funcname " opens a function call.
                    fname = token[2:].rstrip()
                    fn = functions.functionmap[fname](loc)
                    e = data.Expansion()
                    # Once only one argument slot remains, ',' is no longer
                    # an argument separator.
                    if len(fn) + 1 == fn.maxargs:
                        tokenlist = (c, closebrace, '$')
                    else:
                        tokenlist = (',', c, closebrace, '$')

                    stacktop = ParseStackFrame(_PARSESTATE_FUNCTION, stacktop,
                                               e, tokenlist, function=fn,
                                               openbrace=c, closebrace=closebrace)
                else:
                    # "$(" opens a variable reference.
                    e = data.Expansion()
                    tokenlist = (':', c, closebrace, '$')
                    stacktop = ParseStackFrame(_PARSESTATE_VARNAME, stacktop,
                                               e, tokenlist,
                                               openbrace=c, closebrace=closebrace, loc=loc)
            else:
                # "$x" is a single-character variable reference.
                assert len(token) == 2
                e = data.Expansion.fromstring(c, loc)
                stacktop.expansion.appendfunc(functions.VariableRef(loc, e))
        elif token in ('(', '{'):
            # A bare open brace inside an expansion must be matched literally.
            assert token == stacktop.openbrace

            stacktop.expansion.appendstr(token)
            stacktop = ParseStackFrame(_PARSESTATE_PARENMATCH, stacktop,
                                       stacktop.expansion,
                                       (token, stacktop.closebrace, '$'),
                                       openbrace=token, closebrace=stacktop.closebrace, loc=d.getloc(tokenoffset))
        elif parsestate == _PARSESTATE_PARENMATCH:
            assert token == stacktop.closebrace
            stacktop.expansion.appendstr(token)
            stacktop = stacktop.parent
        elif parsestate == _PARSESTATE_TOPLEVEL:
            # Hit one of the caller's stop tokens: return what we have.
            assert stacktop.parent is None
            return stacktop.expansion.finish(), token, offset
        elif parsestate == _PARSESTATE_FUNCTION:
            if token == ',':
                # Argument separator: finish the current argument.
                stacktop.function.append(stacktop.expansion.finish())

                stacktop.expansion = data.Expansion()
                if len(stacktop.function) + 1 == stacktop.function.maxargs:
                    tokenlist = (stacktop.openbrace, stacktop.closebrace, '$')
                    stacktop.tokenlist = tokenlist
            elif token in (')', '}'):
                # Close of the function call: finalize and pop.
                fn = stacktop.function
                fn.append(stacktop.expansion.finish())
                fn.setup()

                stacktop = stacktop.parent
                stacktop.expansion.appendfunc(fn)
            else:
                assert False, "Not reached, _PARSESTATE_FUNCTION"
        elif parsestate == _PARSESTATE_VARNAME:
            if token == ':':
                # Start of a substitution reference $(VAR:from=to).
                stacktop.varname = stacktop.expansion
                stacktop.parsestate = _PARSESTATE_SUBSTFROM
                stacktop.expansion = data.Expansion()
                stacktop.tokenlist = ('=', stacktop.openbrace, stacktop.closebrace, '$')
            elif token in (')', '}'):
                fn = functions.VariableRef(stacktop.loc, stacktop.expansion.finish())
                stacktop = stacktop.parent
                stacktop.expansion.appendfunc(fn)
            else:
                assert False, "Not reached, _PARSESTATE_VARNAME"
        elif parsestate == _PARSESTATE_SUBSTFROM:
            if token == '=':
                stacktop.substfrom = stacktop.expansion
                stacktop.parsestate = _PARSESTATE_SUBSTTO
                stacktop.expansion = data.Expansion()
                stacktop.tokenlist = (stacktop.openbrace, stacktop.closebrace, '$')
            elif token in (')', '}'):
                # A substitution of the form $(VARNAME:.ee) is probably a mistake, but make
                # parses it. Issue a warning. Combine the varname and substfrom expansions to
                # make the compatible varname. See tests/var-substitutions.mk SIMPLE3SUBSTNAME
                _log.warning("%s: Variable reference looks like substitution without =", stacktop.loc)
                stacktop.varname.appendstr(':')
                stacktop.varname.concat(stacktop.expansion)
                fn = functions.VariableRef(stacktop.loc, stacktop.varname.finish())
                stacktop = stacktop.parent
                stacktop.expansion.appendfunc(fn)
            else:
                assert False, "Not reached, _PARSESTATE_SUBSTFROM"
        elif parsestate == _PARSESTATE_SUBSTTO:
            assert token in (')','}'), "Not reached, _PARSESTATE_SUBSTTO"

            fn = functions.SubstitutionRef(stacktop.loc, stacktop.varname.finish(),
                                           stacktop.substfrom.finish(), stacktop.expansion.finish())
            stacktop = stacktop.parent
            stacktop.expansion.appendfunc(fn)
        else:
            assert False, "Unexpected parse state %s" % stacktop.parsestate

        # The significant tokens changed with the stack: restart iteration
        # with the new tokenlist. Inside an expansion, command syntax
        # switches to makefile syntax with comments ignored.
        if stacktop.parent is not None and iterfunc == itercommandchars:
            di = itermakefilechars(d, offset, stacktop.tokenlist, tokeniterator,
                                   ignorecomments=True)
        else:
            di = iterfunc(d, offset, stacktop.tokenlist, tokeniterator)

    if stacktop.parent is not None:
        raise errors.SyntaxError("Unterminated function call", d.getloc(offset))

    assert stacktop.parsestate == _PARSESTATE_TOPLEVEL

    return stacktop.expansion.finish(), None, None
| |
"""
Flux for Home-Assistant.
The idea was taken from https://github.com/KpaBap/hue-flux/
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.flux/
"""
from datetime import time
import logging
import voluptuous as vol
from homeassistant.components.light import is_on, turn_on
from homeassistant.components.sun import next_setting, next_rising
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util.color import color_temperature_to_rgb as temp_to_rgb
from homeassistant.util.color import color_RGB_to_xy
from homeassistant.util.dt import now as dt_now
from homeassistant.util.dt import as_local
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['sun', 'light']
# Entity id of the sun component used for sunrise/sunset lookups.
SUN = "sun.sun"
_LOGGER = logging.getLogger(__name__)

# Configuration keys accepted by the platform schema below.
CONF_LIGHTS = 'lights'
CONF_START_TIME = 'start_time'
CONF_STOP_TIME = 'stop_time'
CONF_START_CT = 'start_colortemp'
CONF_SUNSET_CT = 'sunset_colortemp'
CONF_STOP_CT = 'stop_colortemp'
CONF_BRIGHTNESS = 'brightness'
CONF_MODE = 'mode'

# How the computed color temperature is sent to the lights.
MODE_XY = 'xy'
MODE_MIRED = 'mired'
MODE_KELVIN = 'kelvin'
DEFAULT_MODE = MODE_XY

PLATFORM_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): 'flux',
    vol.Required(CONF_LIGHTS): cv.entity_ids,
    vol.Optional(CONF_NAME, default="Flux"): cv.string,
    vol.Optional(CONF_START_TIME): cv.time,
    vol.Optional(CONF_STOP_TIME, default=time(22, 0)): cv.time,
    vol.Optional(CONF_START_CT, default=4000):
        vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
    vol.Optional(CONF_SUNSET_CT, default=3000):
        vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
    vol.Optional(CONF_STOP_CT, default=1900):
        vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
    vol.Optional(CONF_BRIGHTNESS):
        vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
    vol.Optional(CONF_MODE, default=DEFAULT_MODE):
        vol.Any(MODE_XY, MODE_MIRED, MODE_KELVIN)
})
def set_lights_xy(hass, lights, x_val, y_val, brightness):
    """Set color of array of lights."""
    for light in lights:
        if not is_on(hass, light):
            continue
        turn_on(hass, light,
                xy_color=[x_val, y_val],
                brightness=brightness,
                transition=30)
def set_lights_temp(hass, lights, kelvin, mode):
    """Set color of array of lights."""
    # Mired = 1,000,000 / Kelvin; otherwise send Kelvin directly.
    temp = 1000000 / kelvin if mode == MODE_MIRED else kelvin
    for light in lights:
        if not is_on(hass, light):
            continue
        turn_on(hass, light,
                color_temp=int(temp),
                transition=30)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Flux switches."""
    name = config.get(CONF_NAME)
    flux = FluxSwitch(name, hass, False,
                      config.get(CONF_LIGHTS),
                      config.get(CONF_START_TIME),
                      config.get(CONF_STOP_TIME),
                      config.get(CONF_START_CT),
                      config.get(CONF_SUNSET_CT),
                      config.get(CONF_STOP_CT),
                      config.get(CONF_BRIGHTNESS),
                      config.get(CONF_MODE))
    add_devices([flux])

    def update(call=None):
        """Update lights."""
        flux.flux_update()

    # Expose a "<name>_update" service to trigger an immediate refresh.
    hass.services.register(DOMAIN, name + '_update', update)
# pylint: disable=too-many-instance-attributes
class FluxSwitch(SwitchDevice):
    """Representation of a Flux switch."""

    # pylint: disable=too-many-arguments
    def __init__(self, name, hass, state, lights, start_time, stop_time,
                 start_colortemp, sunset_colortemp, stop_colortemp,
                 brightness, mode):
        """Initialize the Flux switch."""
        self._name = name
        self.hass = hass
        self._state = state
        self._lights = lights
        self._start_time = start_time
        self._stop_time = stop_time
        self._start_colortemp = start_colortemp
        self._sunset_colortemp = sunset_colortemp
        self._stop_colortemp = stop_colortemp
        self._brightness = brightness
        self._mode = mode
        # Listener-removal callback; set while the switch is on.
        self.unsub_tracker = None

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn on flux."""
        self._state = True
        # Re-evaluate the lights every 30 seconds while on.
        self.unsub_tracker = track_utc_time_change(self.hass, self.flux_update,
                                                   second=[0, 30])
        self.update_ha_state()

    def turn_off(self, **kwargs):
        """Turn off flux."""
        if self.unsub_tracker is not None:
            self.unsub_tracker()
            self.unsub_tracker = None
        self._state = False
        self.update_ha_state()

    # pylint: disable=too-many-locals
    def flux_update(self, now=None):
        """Update all the lights using flux."""
        if now is None:
            now = dt_now()
        # Today's sunset, derived from the sun component's next setting.
        sunset = next_setting(self.hass, SUN).replace(day=now.day,
                                                      month=now.month,
                                                      year=now.year)
        start_time = self.find_start_time(now)
        stop_time = now.replace(hour=self._stop_time.hour,
                                minute=self._stop_time.minute,
                                second=0)

        if start_time < now < sunset:
            # Daytime: interpolate between start and sunset color temps.
            time_state = 'day'
            temp_range = abs(self._start_colortemp - self._sunset_colortemp)
            day_length = int(sunset.timestamp() - start_time.timestamp())
            seconds_from_start = int(now.timestamp() - start_time.timestamp())
            percentage_complete = seconds_from_start / day_length
            temp_offset = temp_range * percentage_complete
            if self._start_colortemp > self._sunset_colortemp:
                temp = self._start_colortemp - temp_offset
            else:
                temp = self._start_colortemp + temp_offset
        else:
            # Nighttime: interpolate between sunset and stop color temps.
            time_state = 'night'
            # Outside the cycle the interpolation is pinned at stop_time.
            if now < stop_time and now > start_time:
                now_time = now
            else:
                now_time = stop_time
            temp_range = abs(self._sunset_colortemp - self._stop_colortemp)
            night_length = int(stop_time.timestamp() - sunset.timestamp())
            seconds_from_sunset = int(now_time.timestamp() -
                                      sunset.timestamp())
            percentage_complete = seconds_from_sunset / night_length
            temp_offset = temp_range * percentage_complete
            if self._sunset_colortemp > self._stop_colortemp:
                temp = self._sunset_colortemp - temp_offset
            else:
                temp = self._sunset_colortemp + temp_offset
        if self._mode == MODE_XY:
            x_val, y_val, b_val = color_RGB_to_xy(*temp_to_rgb(temp))
            # Configured brightness wins; otherwise use the computed value.
            brightness = self._brightness if self._brightness else b_val
            set_lights_xy(self.hass, self._lights, x_val,
                          y_val, brightness)
            _LOGGER.info("Lights updated to x:%s y:%s brightness:%s, %s%%"
                         " of %s cycle complete at %s", x_val, y_val,
                         brightness, round(
                             percentage_complete * 100), time_state,
                         as_local(now))
        else:
            set_lights_temp(self.hass, self._lights, temp, self._mode)
            _LOGGER.info("Lights updated to temp:%s, %s%%"
                         " of %s cycle complete at %s", temp,
                         round(percentage_complete * 100),
                         time_state, as_local(now))

    def find_start_time(self, now):
        """Return sunrise or start_time if given."""
        if self._start_time:
            sunrise = now.replace(hour=self._start_time.hour,
                                  minute=self._start_time.minute,
                                  second=0)
        else:
            sunrise = next_rising(self.hass, SUN).replace(day=now.day,
                                                          month=now.month,
                                                          year=now.year)
        return sunrise
| |
################################################################################
#
# Copyright 2016 Crown copyright (c)
# Land Information New Zealand and the New Zealand Government.
# All rights reserved
#
# This program is released under the terms of the 3 clause BSD license. See the
# LICENSE file for more information.
#
################################################################################
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
# DictionaryListView and DictionaryListModel.
class DictionaryListView( QTableView ):
    """Table view over a DictionaryListModel (a list of dictionaries).

    Adds row-level convenience signals and, when the model is a
    DictionaryListModel, remembers the selected item by id so that the
    selection survives model resets and re-sorts.
    """

    rowSelected = pyqtSignal( int, name="rowSelected" )
    rowDoubleClicked = pyqtSignal( int, name="rowDoubleClicked" )
    rowSelectionChanged = pyqtSignal( name="rowSelectionChanged" )
    modelReset = pyqtSignal( name="modelReset" )

    def __init__( self, parent=None ):
        QTableView.__init__( self, parent )
        # Change default settings: the address dialogs operate on several
        # rows at once, everything else selects a single row.
        if parent.__class__.__name__ in ('DelAddressDialog', 'MoveAddressDialog'):
            self.setSelectionMode(QAbstractItemView.MultiSelection)
        else:
            self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.horizontalHeader().setStretchLastSection(True)
        self.horizontalHeader().setHighlightSections(False)
        self.verticalHeader().setVisible(False)
        self.verticalHeader().setDefaultSectionSize(17)
        self.setSortingEnabled(True)
        self.setEditTriggers(QAbstractItemView.AllEditTriggers)
        self.setStyleSheet("* { gridline-color: gray }")
        # Own variables
        self._model = None
        self._dictionaryList = None   # the model, when it is a DictionaryListModel
        self._selectedId = None       # id saved before a reset/re-sort
        self._alternativeId = None    # fallback id (the row after the selection)
        self.doubleClicked.connect( self.onDoubleClicked )

    # Reimplemented QTableView functions
    def selectionChanged( self, selected, deselected ):
        QTableView.selectionChanged( self, selected, deselected )
        self.rowSelectionChanged.emit()
        row = self.selectedRow()
        if row != None:
            self.rowSelected.emit( row )

    def setList( self, list, columns=None, headers=None ):
        """Convenience wrapper: display a list of dicts via a new model."""
        self.setModel( DictionaryListModel(list,columns,headers))

    def setModel( self, model ):
        QTableView.setModel( self, model )
        # Disconnect the previous model before wiring up the new one.
        if self._model:
            self._model.modelReset.disconnect( self._onModelReset )
            self._model.layoutAboutToBeChanged.disconnect( self._saveSelectedRow )
            self._model.layoutChanged.disconnect( self._restoreSelectedRow )
            if self._dictionaryList:
                self._dictionaryList.resettingModel.disconnect( self._saveSelectedRow )
        self._model = model
        self._dictionaryList = self._model if isinstance(self._model,DictionaryListModel) else None
        if self._model:
            self._model.modelReset.connect( self._onModelReset )
            self._model.layoutAboutToBeChanged.connect( self._saveSelectedRow )
            self._model.layoutChanged.connect( self._restoreSelectedRow )
            if self._dictionaryList:
                self._dictionaryList.resettingModel.connect( self._saveSelectedRow )
            self._onModelReset()

    # Select first row by default
    def _saveSelectedRow( self ):
        if not self._dictionaryList:
            self._selectedId = None
            self._alternativeId = None
            return
        self._selectedId = self.selectedId()
        if self._selectedId != None:
            # Remember the following row's id too, in case the selected
            # item disappears during the reset.
            row = self.selectedRow() + 1
            self._alternativeId = self._dictionaryList.getId( row )

    def _restoreSelectedRow( self ):
        if not self.selectId(self._selectedId) and not self.selectId( self._alternativeId ):
            self.selectRow(0)

    def _onModelReset(self):
        self.modelReset.emit()
        if self.rowCount() > 0:
            self.resizeColumnsToContents()
            self._restoreSelectedRow()
        else:
            self.rowSelected.emit( -1 )

    def onDoubleClicked( self, index ):
        row = self.selectedRow()
        if row != None:
            self.rowDoubleClicked.emit( row )

    def selectId( self, id ):
        if self._dictionaryList and id != None:
            row = self._dictionaryList.getIdDisplayRow( id )
            if row != None:
                self.selectRow( row )
                return True
        return False

    def selectedRow( self ):
        # Returns None unless exactly one cell/row is selected.
        rows = self.selectionModel().selectedIndexes()
        if len(rows) == 1:
            return rows[0].row()

    def selectedId( self ):
        """Return the id of the single selected item, or None."""
        if not self._dictionaryList:
            return None
        # Bug fix: previously passed the list from selectedIndexes() to
        # getId(), which expects a row number and so always returned None.
        return self._dictionaryList.getId( self.selectedRow() )

    def selectedItem( self ):
        """Return the single selected item (dict), or None."""
        if not self._dictionaryList:
            return None
        # Bug fix: use the selected row number, not the index list.
        return self._dictionaryList.getItem( self.selectedRow() )

    def selectedRows( self ):
        return set([r.row() for r in self.selectionModel().selectedIndexes()])

    def selectedItems( self ):
        """Return the list of selected items (dicts)."""
        if self._dictionaryList:
            model = self._dictionaryList
            # Bug fix: getItem() takes row numbers; selectedIndexes() yields
            # one QModelIndex per selected cell, so map distinct rows instead.
            return [model.getItem(r) for r in sorted(self.selectedRows())]
        return []

    def confirmSelection(self):
        if self._dictionaryList:
            model = self._dictionaryList
            return [model.getItem(r) for r in self.selectedRows()]
        return []

    def rowCount( self ):
        model = self.model()
        if not model:
            return 0
        return model.rowCount(QModelIndex())
class DictionaryListModel( QAbstractTableModel ):
    """Qt table model over a list of dictionaries.

    Supports filtering, sorting, per-column editability, a configurable
    id column for stable row lookup, and an optional background brush for
    read-only cells.
    """

    itemUpdated = pyqtSignal( int, name="itemUpdated" )
    resettingModel = pyqtSignal( name="resettingModel" )

    def __init__( self, list=None, columns=None, headers=None, idColumn=None ):
        QAbstractTableModel.__init__(self)
        self._columns = []
        self._headers = []
        self._editCols = []
        self._editable = []
        # Bug fix: this used to assign the builtin id() function (and pass
        # it to setList below), silently discarding the idColumn parameter.
        self._idColumn = idColumn
        self._filter = None
        self._sortColumn = None
        self._sortReverse = False
        self._index = []       # display row -> list row
        self._lookup = None    # lazy inverse of _index
        self._idLookup = None  # lazy id -> list row
        self._readonlyBrush = None
        self.setList( list, columns, headers, idColumn )

    def list( self ):
        return self._list

    def setList( self, list, columns=None, headers=None, idColumn=None ):
        """Replace the underlying list (and optionally columns/headers/id)."""
        self.resettingModel.emit()
        self._list = list if list != None else []
        if not columns: columns = []
        self._createIndex()
        if idColumn:
            self._idColumn = idColumn
        self._setColumns( columns, headers )
        self._resetList()

    def setEditColumns( self, editColumns ):
        """Mark the named columns as editable."""
        self._editable = [False] * len(self._columns)
        self._editCols = editColumns
        if editColumns:
            for editCol in editColumns:
                for i, col in enumerate(self._columns):
                    if editCol == col:
                        self._editable[i] = True

    def setFilter( self, filter=None ):
        """Set a predicate item -> bool restricting the displayed rows."""
        self.resettingModel.emit()
        self._filter = filter
        self._createIndex()
        self._resetList()

    def resetFilter( self ):
        self.resettingModel.emit()
        self._createIndex()
        self._resetList()

    def setColumns( self, columns=None, headers=None ):
        self.resettingModel.emit()
        self._setColumns( columns, headers )
        self._resetList()

    def _setColumns( self, columns, headers ):
        if columns:
            self._columns = columns
            self._headers = headers
        if self._list and not self._columns:
            # Default to the (sorted) keys of the first item.
            self._columns = sorted(self._list[0].keys())
        if not self._headers or len(self._headers) != len(self._columns):
            self._headers = self._columns
        self._editable = [False] * len(self._columns)
        self.setIdColumn( self._idColumn )
        self.setEditColumns( self._editCols )

    def setIdColumn( self, idColumn ):
        self._idColumn = idColumn
        self._idLookup = None

    def setReadonlyColour( self, colour ):
        self._readonlyBrush = QBrush(colour)

    def _createIndex( self ):
        # Rebuild the display-row -> list-row mapping (honouring the filter).
        if self._filter:
            self._index = [i
                           for i in range(len(self._list))
                           if self._filter(self._list[i])]
        else:
            self._index = range( len( self._list) )
        self._sortIndex()
        self._lookup = None

    def getDisplayRow( self, row ):
        """Map a list row to its display row (None if hidden/invalid)."""
        if row == None:
            return None
        if row < 0 or row >= len(self._list):
            return None
        if self._lookup == None:
            lookup = [None] * len( self._list)
            for i in range(len(self._index)):
                lookup[self._index[i]] = i
            self._lookup = lookup
        return self._lookup[row]

    def _setupColumns( self ):
        if self._list and not self._columns:
            # Bug fix: previously indexed the builtin `list` type instead of
            # self._list, which raised TypeError whenever this ran.
            columns = [k for k in self._list[0].keys() if not k.startswith("_")]
            self._columns = sorted(columns)
        if not self._headers or len(self._headers) != len(self._columns):
            self._headers = self._columns

    def _resetList( self ):
        self.modelReset.emit()

    def count( self ):
        return len( self._index )

    def rowCount( self, parent ):
        return len(self._index) if not parent.isValid() else 0

    def columnCount( self, parent ):
        return len(self._columns) if not parent.isValid() else 0

    def getItem( self, row ):
        """Return the item (dict) at a display row, or None."""
        if row != None and row >= 0 and row < len( self._index ):
            return self._list[self._index[row]]
        return None

    def getItems( self, rows ):
        return [self.getItem(r) for r in rows]

    def getId( self, row ):
        item = self.getItem( row )
        return item.get(self._idColumn) if item else None

    def getIdRow( self, id ):
        """Return the list row holding the given id (builds a lazy lookup)."""
        if not self._idLookup:
            self._idLookup=dict()
            if self._idColumn:
                for i in range(len(self._list)):
                    itemid = self._list[i].get(self._idColumn)
                    if itemid:
                        self._idLookup[itemid] = i
        return self._idLookup.get(id)

    def getIdDisplayRow( self, id ):
        return self.getDisplayRow( self.getIdRow( id ))

    def flags( self, index ):
        flag = Qt.ItemIsEnabled | Qt.ItemIsSelectable
        if self._editable[index.column()]:
            flag |= Qt.ItemIsEditable
        return flag

    def data( self, index, role ):
        row = index.row()
        col = index.column()
        if role == Qt.DisplayRole or role == Qt.EditRole:
            return unicode(self._list[self._index[row]].get(self._columns[col],''))
        elif role == Qt.BackgroundRole and not self._editable[col] and self._readonlyBrush:
            return self._readonlyBrush
        return None

    def setData( self, index, value, role ):
        if not index.isValid() or role != Qt.EditRole:
            return False
        row = index.row()
        col = index.column()
        if not self._editable[col]:
            return False
        item = self.getItem( row )
        item[self._columns[col]] = str(value)
        self.dataChanged.emit(index,index)
        return True

    def headerData( self, section, orientation, role ):
        if role == Qt.DisplayRole:
            if orientation == Qt.Horizontal:
                if self._headers and section < len(self._headers):
                    return self._headers[section]
        return None

    def sort( self, column, order ):
        self.layoutAboutToBeChanged.emit()
        self._sortColumn = column
        self._sortReverse = order == Qt.DescendingOrder
        self._sortIndex()
        self.layoutChanged.emit()

    def _sortIndex( self ):
        if self._sortColumn == None:
            return
        key = self._columns[self._sortColumn]
        keyfunc = lambda x: self._list[x].get(key)
        # Python 2 list.sort(cmp, key, reverse) signature.
        self._index.sort( None, keyfunc, self._sortReverse )
        self._lookup = None

    def updateItem( self, index ):
        """Notify the view that the item at list row `index` changed.

        If the change alters the item's filter visibility the whole index
        is rebuilt; otherwise only the affected row is refreshed.
        """
        row = self.getDisplayRow(index)
        showing = True
        if self._filter:
            showing = row != None
            show = self._filter(self._list[index])
            if showing != show:
                self.resettingModel.emit()
                self._createIndex()
                self._resetList()
        elif showing:
            self.dataChanged.emit(self.index(row,0),self.index(row,len(self._columns)))
        self.itemUpdated.emit( index )
| |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module uprevs Chrome for cbuildbot.
After calling, it prints outs CHROME_VERSION_ATOM=(version atom string). A
caller could then use this atom with emerge to build the newly uprevved version
of Chrome e.g.
./cros_mark_chrome_as_stable tot
Returns chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
emerge-x86-generic =chrome-base/chromeos-chrome-8.0.552.0_alpha_r1
"""
from __future__ import print_function
import base64
import distutils.version
import filecmp
import optparse
import os
import re
import sys
import urlparse
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import gob_util
from chromite.lib import portage_util
from chromite.lib import timeout_util
from chromite.scripts import cros_mark_as_stable
# Helper regex's for finding ebuilds.
_CHROME_VERSION_REGEX = r'\d+\.\d+\.\d+\.\d+'
_NON_STICKY_REGEX = r'%s[(_rc.*)|(_alpha.*)]+' % _CHROME_VERSION_REGEX
# Dir where all the action happens.
_OVERLAY_DIR = '%(srcroot)s/third_party/chromiumos-overlay/'
_GIT_COMMIT_MESSAGE = ('Marking %(chrome_rev)s for %(chrome_pn)s ebuild '
'with version %(chrome_version)s as stable.')
# URLs that print lists of chrome revisions between two versions of the browser.
_CHROME_VERSION_URL = ('http://omahaproxy.appspot.com/changelog?'
'old_version=%(old)s&new_version=%(new)s')
# Only print links when we rev these types.
_REV_TYPES_FOR_LINKS = [constants.CHROME_REV_LATEST,
constants.CHROME_REV_STICKY]
# TODO(szager): This is inaccurate, but is it safe to change? I have no idea.
_CHROME_SVN_TAG = 'CROS_SVN_COMMIT'
def _GetVersionContents(chrome_version_info):
"""Returns the current Chromium version, from the contents of a VERSION file.
Args:
chrome_version_info: The contents of a chromium VERSION file.
"""
chrome_version_array = []
for line in chrome_version_info.splitlines():
chrome_version_array.append(line.rpartition('=')[2])
return '.'.join(chrome_version_array)
def _GetSpecificVersionUrl(git_url, revision, time_to_wait=600):
  """Fetch the Chromium version string for a given revision of a repository.

  Args:
    git_url: Repository URL for chromium.
    revision: the git revision we want to use.
    time_to_wait: the minimum period before abandoning our wait for the
      desired revision to be present.
  """
  parsed = urlparse.urlparse(git_url)
  host = parsed[1]
  path = '%s/+/%s/chrome/VERSION?format=text' % (parsed[2].rstrip('/'),
                                                 revision)

  # Replication to the git mirrors can lag behind the commit; poll until
  # the revision shows up or time_to_wait elapses.
  def _fetch():
    handle = gob_util.FetchUrl(host, path, ignore_404=True)
    if handle:
      return handle.read()
    return None

  def _wait_msg(_remaining):
    cros_build_lib.Info(
        'Repository does not yet have revision %s.  Sleeping...',
        revision)

  content = timeout_util.WaitForSuccess(
      retry_check=lambda x: not bool(x),
      func=_fetch,
      timeout=time_to_wait,
      period=30,
      side_effect_func=_wait_msg)
  # The gitiles 'text' format is base64 encoded.
  return _GetVersionContents(base64.b64decode(content))
def _GetTipOfTrunkVersionFile(root):
  """Read the current Chromium version from a local checkout's VERSION file.

  Args:
    root: path to the root of the chromium checkout.
  """
  version_file = os.path.join(root, 'src', 'chrome', 'VERSION')
  result = cros_build_lib.RunCommand(
      ['cat', version_file],
      redirect_stdout=True,
      error_message='Could not read version file at %s.' % version_file)
  return _GetVersionContents(result.output)
def CheckIfChromeRightForOS(deps_content):
  """Check whether a release buildspec DEPS targets Chrome OS.

  Looks for a variable called 'buildspec_platforms' and accepts the DEPS
  when its value mentions 'chromeos' or 'all'.

  Args:
    deps_content: Content of release buildspec DEPS file.

  Returns:
    True if DEPS is the right Chrome for Chrome OS.
  """
  match = re.search(r'buildspec_platforms.*\s.*\s', deps_content)
  if not match:
    return False
  platforms = match.group()
  return 'chromeos' in platforms or 'all' in platforms
def GetLatestRelease(git_url, branch=None):
  """Gets the latest release version from the release tags in the repository.

  Args:
    git_url: URL of git repository.
    branch: If set, gets the latest release for branch, otherwise latest
      release.

  Returns:
    Latest version string, or None when no tag has a Chrome OS DEPS.
  """
  # TODO(szager): This only works for public release buildspecs in the
  # chromium src repository.  Internal buildspecs are tracked differently.
  # At the time of writing, I can't find any callers that use this method to
  # scan for internal buildspecs.  But there may be something lurking...
  parsed_url = urlparse.urlparse(git_url)
  path = parsed_url[2].rstrip('/') + '/+refs/tags?format=JSON'
  j = gob_util.FetchUrlJson(parsed_url[1], path, ignore_404=False)
  if branch:
    chrome_version_re = re.compile(r'^%s\.\d+.*' % branch)
  else:
    chrome_version_re = re.compile(r'^[0-9]+\..*')
  matching_versions = [key for key in j.keys() if chrome_version_re.match(key)]
  matching_versions.sort(key=distutils.version.LooseVersion)
  # Walk from newest to oldest until we find a tag whose DEPS targets CrOS.
  for chrome_version in reversed(matching_versions):
    # Bug fix: was rstrip() (strips whitespace, not '/'), inconsistent with
    # the tags-list request above; a trailing '/' in the URL path yielded
    # a malformed '...//+/refs/...' request path.
    path = parsed_url[2].rstrip('/') + (
        '/+/refs/tags/%s/DEPS?format=text' % chrome_version)
    fh = gob_util.FetchUrl(parsed_url[1], path, ignore_404=False)
    content = fh.read() if fh else None
    if content:
      deps_content = base64.b64decode(content)
      if CheckIfChromeRightForOS(deps_content):
        return chrome_version
  return None
def _GetStickyEBuild(stable_ebuilds):
  """Return the sticky ebuild (version without an _rc/_alpha suffix)."""
  non_sticky_re = re.compile(_NON_STICKY_REGEX)
  sticky_ebuilds = [ebuild for ebuild in stable_ebuilds
                    if not non_sticky_re.match(ebuild.version)]
  if not sticky_ebuilds:
    raise Exception('No sticky ebuilds found')
  if len(sticky_ebuilds) > 1:
    cros_build_lib.Warning('More than one sticky ebuild found')
  return portage_util.BestEBuild(sticky_ebuilds)
class ChromeEBuild(portage_util.EBuild):
  """Thin sub-class of EBuild that adds a chrome_version field."""

  chrome_version_re = re.compile(r'.*-(%s|9999).*' % (
      _CHROME_VERSION_REGEX))
  # Parsed from the ebuild filename; '' when the name doesn't match.
  chrome_version = ''

  def __init__(self, path):
    portage_util.EBuild.__init__(self, path)
    match = self.chrome_version_re.match(self.ebuild_path_no_revision)
    if match:
      self.chrome_version = match.group(1)

  def __str__(self):
    return self.ebuild_path
def FindChromeCandidates(package_dir):
  """Return a tuple of chrome's unstable ebuild and stable ebuilds.

  Args:
    package_dir: The path to where the package ebuild is stored.

  Returns:
    Tuple [unstable_ebuild, stable_ebuilds].

  Raises:
    Exception: if no unstable ebuild exists for Chrome.
  """
  stable_ebuilds = []
  unstable_ebuilds = []
  for entry in os.listdir(package_dir):
    path = os.path.join(package_dir, entry)
    if not path.endswith('.ebuild'):
      continue
    ebuild = ChromeEBuild(path)
    if not ebuild.chrome_version:
      cros_build_lib.Warning('Poorly formatted ebuild found at %s' % path)
    elif '9999' in ebuild.version:
      unstable_ebuilds.append(ebuild)
    else:
      stable_ebuilds.append(ebuild)
  # Apply some sanity checks.
  if not unstable_ebuilds:
    raise Exception('Missing 9999 ebuild for %s' % package_dir)
  if not stable_ebuilds:
    cros_build_lib.Warning('Missing stable ebuild for %s' % package_dir)
  return portage_util.BestEBuild(unstable_ebuilds), stable_ebuilds
def FindChromeUprevCandidate(stable_ebuilds, chrome_rev, sticky_branch):
  """Finds the Chrome uprev candidate for the given chrome_rev.

  Using the pre-flight logic, this means the stable ebuild you are uprevving
  from.  The difference here is that the version could be different and in
  that case we want to find it to delete it.

  Args:
    stable_ebuilds: A list of stable ebuilds.
    chrome_rev: The chrome_rev designating which candidate to find.
    sticky_branch: The branch that is currently sticky with Major/Minor
      components.  For example: 9.0.553.  Can be None but not if chrome_rev
      is CHROME_REV_STICKY.

  Returns:
    The EBuild, otherwise None if none found.
  """
  if chrome_rev in [constants.CHROME_REV_LOCAL, constants.CHROME_REV_TOT,
                    constants.CHROME_REV_SPEC]:
    # These are labelled alpha, for historic reasons,
    # not just for the fun of confusion.
    pattern = r'%s.*_alpha.*' % _CHROME_VERSION_REGEX
  elif chrome_rev == constants.CHROME_REV_STICKY:
    assert sticky_branch is not None
    pattern = r'%s\..*' % sticky_branch
  else:
    pattern = r'%s.*_rc.*' % _CHROME_VERSION_REGEX

  chrome_branch_re = re.compile(pattern)
  candidates = [ebuild for ebuild in stable_ebuilds
                if chrome_branch_re.search(ebuild.version)]
  if candidates:
    return portage_util.BestEBuild(candidates)
  return None
def _AnnotateAndPrint(text, url):
"""Add buildbot trappings to print <a href='url'>text</a> in the waterfall.
Args:
text: Anchor text for the link
url: the URL to which to link
"""
print('\n@@@STEP_LINK@%(text)s@%(url)s@@@' % {'text': text, 'url': url},
file=sys.stderr)
def GetChromeRevisionLinkFromVersions(old_chrome_version, chrome_version):
  """Return appropriately formatted link to revision info, given versions

  Given two chrome version strings (e.g. 9.0.533.0), generate a link to a
  page that prints the Chromium revisions between those two versions.

  Args:
    old_chrome_version: version to diff from
    chrome_version: version to which to diff

  Returns:
    The desired URL.
  """
  substitutions = {'old': old_chrome_version, 'new': chrome_version}
  return _CHROME_VERSION_URL % substitutions
def GetChromeRevisionListLink(old_chrome, new_chrome, chrome_rev):
  """Returns a link to the list of revisions between two Chromium versions
  Given two ChromeEBuilds and the kind of rev we're doing, generate a
  link to a page that prints the Chromium changes between those two
  revisions, inclusive.
  Args:
    old_chrome: ebuild for the version to diff from
    new_chrome: ebuild for the version to which to diff
    chrome_rev: one of constants.VALID_CHROME_REVISIONS
  Returns:
    The desired URL.
  """
  # Only LATEST/STICKY revs get changelog links (see _REV_TYPES_FOR_LINKS);
  # any other rev type here is a programming error.
  assert chrome_rev in _REV_TYPES_FOR_LINKS
  return GetChromeRevisionLinkFromVersions(old_chrome.chrome_version,
                                           new_chrome.chrome_version)
def MarkChromeEBuildAsStable(stable_candidate, unstable_ebuild, chrome_pn,
                             chrome_rev, chrome_version, commit, package_dir):
  r"""Uprevs the chrome ebuild specified by chrome_rev.
  This is the main function that uprevs the chrome_rev from a stable candidate
  to its new version.
  Args:
    stable_candidate: ebuild that corresponds to the stable ebuild we are
      revving from.  If None, builds the a new ebuild given the version
      and logic for chrome_rev type with revision set to 1.
    unstable_ebuild: ebuild corresponding to the unstable ebuild for chrome.
    chrome_pn: package name.
    chrome_rev: one of constants.VALID_CHROME_REVISIONS or LOCAL
      constants.CHROME_REV_SPEC -  Requires commit value.  Revs the ebuild for
        the specified version and uses the portage suffix of _alpha.
      constants.CHROME_REV_TOT -  Requires commit value.  Revs the ebuild for
        the TOT version and uses the portage suffix of _alpha.
      constants.CHROME_REV_LOCAL - Requires a chrome_root. Revs the ebuild for
        the local version and uses the portage suffix of _alpha.
      constants.CHROME_REV_LATEST - This uses the portage suffix of _rc as they
        are release candidates for the next sticky version.
      constants.CHROME_REV_STICKY -  Revs the sticky version.
    chrome_version: The \d.\d.\d.\d version of Chrome.
    commit: Used with constants.CHROME_REV_TOT.  The git revision of chrome.
    package_dir: Path to the chromeos-chrome package dir.
  Returns:
    Full portage version atom (including rc's, etc) that was revved.
  """
  def IsTheNewEBuildRedundant(new_ebuild, stable_ebuild):
    """Returns True if the new ebuild is redundant.
    This is True if there if the current stable ebuild is the exact same copy
    of the new one.
    """
    if not stable_ebuild:
      return False
    # NOTE(review): this reads `stable_candidate` from the enclosing scope
    # rather than the `stable_ebuild` parameter; harmless at the single call
    # site below (same object), but worth confirming before reuse.
    if stable_candidate.chrome_version == new_ebuild.chrome_version:
      return filecmp.cmp(
          new_ebuild.ebuild_path, stable_ebuild.ebuild_path, shallow=False)
    # Implicitly returns None (falsy) when the versions differ.
  # Mark latest release and sticky branches as stable.
  mark_stable = chrome_rev not in [constants.CHROME_REV_TOT,
                                   constants.CHROME_REV_SPEC,
                                   constants.CHROME_REV_LOCAL]
  # Case where we have the last stable candidate with same version just rev.
  if stable_candidate and stable_candidate.chrome_version == chrome_version:
    new_ebuild_path = '%s-r%d.ebuild' % (
        stable_candidate.ebuild_path_no_revision,
        stable_candidate.current_revision + 1)
  else:
    # New version: build the filename from scratch; _rc marks a release
    # candidate (stable), _alpha marks a dev/unstable rev.
    suffix = 'rc' if mark_stable else 'alpha'
    pf = '%s-%s_%s-r1' % (chrome_pn, chrome_version, suffix)
    new_ebuild_path = os.path.join(package_dir, '%s.ebuild' % pf)
  chrome_variables = dict()
  if commit:
    # Record the git revision inside the generated ebuild.
    chrome_variables[_CHROME_SVN_TAG] = commit
  portage_util.EBuild.MarkAsStable(
      unstable_ebuild.ebuild_path, new_ebuild_path,
      chrome_variables, make_stable=mark_stable)
  new_ebuild = ChromeEBuild(new_ebuild_path)
  # Determine whether this is ebuild is redundant.
  if IsTheNewEBuildRedundant(new_ebuild, stable_candidate):
    msg = 'Previous ebuild with same version found and ebuild is redundant.'
    cros_build_lib.Info(msg)
    os.unlink(new_ebuild_path)
    return None
  # Print a changelog link on the waterfall for rev types that support it.
  if stable_candidate and chrome_rev in _REV_TYPES_FOR_LINKS:
    _AnnotateAndPrint('Chromium revisions',
                      GetChromeRevisionListLink(stable_candidate,
                                                new_ebuild,
                                                chrome_rev))
  # Stage the new ebuild and drop the superseded (non-sticky) one.
  git.RunGit(package_dir, ['add', new_ebuild_path])
  if stable_candidate and not stable_candidate.IsSticky():
    git.RunGit(package_dir, ['rm', stable_candidate.ebuild_path])
  portage_util.EBuild.CommitChange(
      _GIT_COMMIT_MESSAGE % {'chrome_pn': chrome_pn,
                             'chrome_rev': chrome_rev,
                             'chrome_version': chrome_version},
      package_dir)
  return '%s-%s' % (new_ebuild.package, new_ebuild.version)
def main(_argv):
  """Entry point: parse options, compute the target Chrome version for the
  requested rev type, and uprev the chrome (and related) ebuilds."""
  usage_options = '|'.join(constants.VALID_CHROME_REVISIONS)
  usage = '%s OPTIONS [%s]' % (__file__, usage_options)
  parser = optparse.OptionParser(usage)
  parser.add_option('-b', '--boards', default=None)
  parser.add_option('-c', '--chrome_url',
                    default=constants.CHROMIUM_GOB_URL)
  parser.add_option('-f', '--force_version', default=None,
                    help='Chrome version or git revision hash to use')
  parser.add_option('-s', '--srcroot', default=os.path.join(os.environ['HOME'],
                                                            'trunk', 'src'),
                    help='Path to the src directory')
  parser.add_option('-t', '--tracking_branch', default='cros/master',
                    help='Branch we are tracking changes against')
  (options, args) = parser.parse_args()
  # Exactly one positional arg: the rev type.
  if len(args) != 1 or args[0] not in constants.VALID_CHROME_REVISIONS:
    parser.error('Commit requires arg set to one of %s.'
                 % constants.VALID_CHROME_REVISIONS)
  # --force_version only makes sense for SPEC/LATEST revs.
  if options.force_version and args[0] not in (constants.CHROME_REV_SPEC,
                                               constants.CHROME_REV_LATEST):
    parser.error('--force_version is not compatible with the %r '
                 'option.' % (args[0],))
  overlay_dir = os.path.abspath(_OVERLAY_DIR % {'srcroot': options.srcroot})
  chrome_package_dir = os.path.join(overlay_dir, constants.CHROME_CP)
  chrome_rev = args[0]
  version_to_uprev = None
  commit_to_use = None
  sticky_branch = None
  (unstable_ebuild, stable_ebuilds) = FindChromeCandidates(chrome_package_dir)
  # Resolve the version (and possibly commit) to uprev to, per rev type.
  if chrome_rev == constants.CHROME_REV_LOCAL:
    if 'CHROME_ROOT' in os.environ:
      chrome_root = os.environ['CHROME_ROOT']
    else:
      chrome_root = os.path.join(os.environ['HOME'], 'chrome_root')
    version_to_uprev = _GetTipOfTrunkVersionFile(chrome_root)
    commit_to_use = 'Unknown'
    cros_build_lib.Info('Using local source, versioning is untrustworthy.')
  elif chrome_rev == constants.CHROME_REV_SPEC:
    # A dotted value is a version; anything else is a git revision
    # (optionally in 'branch@rev' form).
    if '.' in options.force_version:
      version_to_uprev = options.force_version
    else:
      commit_to_use = options.force_version
      if '@' in commit_to_use:
        commit_to_use = commit_to_use.rpartition('@')[2]
      version_to_uprev = _GetSpecificVersionUrl(options.chrome_url,
                                                commit_to_use)
  elif chrome_rev == constants.CHROME_REV_TOT:
    commit_to_use = gob_util.GetTipOfTrunkRevision(options.chrome_url)
    version_to_uprev = _GetSpecificVersionUrl(options.chrome_url,
                                              commit_to_use)
  elif chrome_rev == constants.CHROME_REV_LATEST:
    if options.force_version:
      if '.' not in options.force_version:
        parser.error('%s only accepts released Chrome versions, not SVN or '
                     'Git revisions.' % (chrome_rev,))
      version_to_uprev = options.force_version
    else:
      version_to_uprev = GetLatestRelease(options.chrome_url)
  else:
    # CHROME_REV_STICKY: latest release on the current sticky branch.
    sticky_ebuild = _GetStickyEBuild(stable_ebuilds)
    sticky_version = sticky_ebuild.chrome_version
    sticky_branch = sticky_version.rpartition('.')[0]
    version_to_uprev = GetLatestRelease(options.chrome_url, sticky_branch)
  stable_candidate = FindChromeUprevCandidate(stable_ebuilds, chrome_rev,
                                              sticky_branch)
  if stable_candidate:
    cros_build_lib.Info('Stable candidate found %s' % stable_candidate)
  else:
    cros_build_lib.Info('No stable candidate found.')
  tracking_branch = 'remotes/m/%s' % os.path.basename(options.tracking_branch)
  existing_branch = git.GetCurrentBranch(chrome_package_dir)
  work_branch = cros_mark_as_stable.GitBranch(constants.STABLE_EBUILD_BRANCH,
                                              tracking_branch,
                                              chrome_package_dir)
  work_branch.CreateBranch()
  # In the case of uprevving overlays that have patches applied to them,
  # include the patched changes in the stabilizing branch.
  if existing_branch:
    git.RunGit(chrome_package_dir, ['rebase', existing_branch])
  chrome_version_atom = MarkChromeEBuildAsStable(
      stable_candidate, unstable_ebuild, 'chromeos-chrome', chrome_rev,
      version_to_uprev, commit_to_use, chrome_package_dir)
  if chrome_version_atom:
    if options.boards:
      cros_mark_as_stable.CleanStalePackages(options.boards.split(':'),
                                             [chrome_version_atom])
    # If we did rev Chrome, now is a good time to uprev other packages.
    for other_ebuild in constants.OTHER_CHROME_PACKAGES:
      other_ebuild_name = os.path.basename(other_ebuild)
      other_package_dir = os.path.join(overlay_dir, other_ebuild)
      (other_unstable_ebuild, other_stable_ebuilds) = FindChromeCandidates(
          other_package_dir)
      other_stable_candidate = FindChromeUprevCandidate(other_stable_ebuilds,
                                                        chrome_rev,
                                                        sticky_branch)
      revved_atom = MarkChromeEBuildAsStable(other_stable_candidate,
                                             other_unstable_ebuild,
                                             other_ebuild_name,
                                             chrome_rev, version_to_uprev,
                                             commit_to_use, other_package_dir)
      if revved_atom and options.boards:
        cros_mark_as_stable.CleanStalePackages(options.boards.split(':'),
                                               [revved_atom])
  # Explicit print to communicate to caller.
  if chrome_version_atom:
    print('CHROME_VERSION_ATOM=%s' % chrome_version_atom)
| |
import datetime
import os
import logging
from django.contrib.auth.models import User
from hq.utils import build_url
from domain.models import Domain
from domain.decorators import login_and_domain_required
from requestlogger.models import RequestLog
from xformmanager.manager import readable_form, csv_dump
from buildmanager.exceptions import BuildError
from buildmanager.models import *
from buildmanager.forms import *
from buildmanager.jar import validate_jar
from buildmanager import xformvalidator
from rapidsms.webui.utils import render_to_response
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.http import *
from django.http import HttpResponse
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
import mimetypes
import urllib
@login_required()
def all_projects(request, template_name="buildmanager/all_projects.html"):
    """List projects, scoped to the user's selected domain when present."""
    if request.user.selected_domain:
        project_set = Project.objects.filter(domain=request.user.selected_domain)
    else:
        project_set = Project.objects.all()
    context = {'projects': project_set}
    return render_to_response(request, template_name, context)
@login_required()
def show_project(request, project_id, template_name="buildmanager/show_project.html"):
    """Display a single project with its build map and latest release.

    Raises Http404 when the project does not exist (or the id is malformed).
    """
    # Narrowed from a bare `except:` that converted *any* error raised while
    # building the context (template data bugs included) into a 404.
    try:
        project = Project.objects.get(id=project_id)
    except (Project.DoesNotExist, ValueError):
        raise Http404
    context = {
        'project': project,
        'build_map': _get_single_project_builds(project),
        'latest_build': project.get_latest_released_build(),
    }
    return render_to_response(request, template_name, context)
@login_and_domain_required
def all_builds(request, template_name="buildmanager/all_builds.html"):
    """Show every build of every project in the user's selected domain."""
    domain_projects = Project.objects.filter(domain=request.user.selected_domain)
    context = {'all_builds': _get_build_dictionary(domain_projects)}
    return render_to_response(request, template_name, context)
def _get_build_dictionary(projects):
    """Map each project to its {normal, release} builds dictionary."""
    return dict((project, _get_single_project_builds(project))
                for project in projects)
def _get_single_project_builds(project):
this_project_dict = {}
this_project_dict["normal"] = project.get_non_released_builds()
this_project_dict["release"] = project.get_released_builds()
return this_project_dict
def show_latest_build(request, project_id, template_name="buildmanager/show_build.html"):
    """Display the latest released build of a project (404 if no project)."""
    try:
        project = Project.objects.get(id=project_id)
    except Project.DoesNotExist:
        raise Http404
    context = {'build': project.get_latest_released_build()}
    return render_to_response(request, template_name, context)
@login_required()
def show_build(request, build_id, template_name="buildmanager/show_build.html"):
    """Display a single build; 404 when it does not exist.

    Narrowed from a bare `except:` that also masked programming errors.
    """
    try:
        build = ProjectBuild.objects.get(id=build_id)
    except (ProjectBuild.DoesNotExist, ValueError):
        raise Http404
    return render_to_response(request, template_name, {'build': build})
def get_buildfile(request, project_id, build_number, filename, template_name=None):
"""For a given build, we now have a direct and unique download URL for it
within a given project. This will directly stream the file to the
browser. This is because we want to track download counts
"""
try:
proj = Project.objects.get(id=project_id)
build = ProjectBuild.objects.filter(project=proj).get(build_number=build_number)
return _get_buildfile(request, proj, build, filename)
except Exception, e:
return _handle_error(request,
"problem accessing build/file: %s/%s for project: %s. error is: %s" %
(build_number, filename, project_id, e))
def get_latest_buildfile(request, project_id, filename, template_name=None):
    '''Gets the latest released build file for a given project'''
    try:
        project = Project.objects.get(id=project_id)
    except Project.DoesNotExist:
        raise Http404
    build = project.get_latest_released_build()
    if not build:
        # No released build yet for this project.
        raise Http404
    return _get_buildfile(request, project, build, filename)
def _get_buildfile(request, project, build, filename):
    """Stream one artifact of a build (.jar, .jad or .zip) as an HTTP response.

    Raises Http404 when the filename does not match the build's artifacts,
    the stream cannot be opened, or the extension is not recognized.
    """
    returndata = None
    mtype = None
    if filename.endswith('.jar'):
        if build.get_jar_filename() != filename:
            raise Http404
        mtype = mimetypes.guess_type(build.jar_file)[0]
        fin = build.get_jar_filestream()
        if not fin:
            raise Http404
        _log_build_download(request, build, "jar")
        returndata = fin.read()
        fin.close()
    elif filename.endswith('.jad'):
        if build.get_jad_filename() != filename:
            raise Http404
        mtype = mimetypes.guess_type(build.jad_file)[0]
        fin = build.get_jad_filestream()
        if not fin:
            raise Http404
        _log_build_download(request, build, "jad")
        returndata = fin.read()
        fin.close()
    elif filename.endswith('.zip'):
        mtype = "application/zip"
        # NOTE(review): unlike the jar/jad branches this writes the value
        # returned by get_zip_filestream() to the response without calling
        # .read() -- presumably it returns raw data; confirm before changing.
        returndata = build.get_zip_filestream()
        if not returndata:
            raise Http404
        _log_build_download(request, build, "zip")
    else:
        # Previously an unrecognized extension fell through to an unbound
        # local (`mtype`) and a confusing 500; surface it as a 404 instead.
        raise Http404
    if mtype is None:
        response = HttpResponse(mimetype='text/plain')
    else:
        response = HttpResponse(mimetype=mtype)
    response.write(returndata)
    return response
def _log_build_download(request, build, type):
    '''Record a build download: persist the request log and a download row.'''
    log = RequestLog.from_request(request)
    log.save()
    BuildDownload(type=type, build=build, log=log).save()
    # Little notifier so we can track build downloads as they happen.
    user_display = str(request.user) if hasattr(request, "user") else "An anonymous user"
    logging.error("Hey! %s just downloaded the %s file for build %s! The request was a %s"
                  % (user_display, type, build.get_display_string(), log))
def _log_build_upload(request, build):
    '''Record a build upload: persist the request log and an upload row.'''
    request_log = RequestLog.from_request(request)
    request_log.save()
    BuildUpload(build=build, log=request_log).save()
@login_required
def release(request, build_id, template_name="buildmanager/release_confirmation.html"):
    """Mark a build as released and render the confirmation page.

    404s when the build does not exist; release failures are logged and
    rendered on the standard error page.
    """
    try:
        build = ProjectBuild.objects.get(id=build_id)
    except ProjectBuild.DoesNotExist:
        raise Http404
    try:
        build.release(request.user)
        context = {}
        context["build"] = build
        context["jad_url"] = build_url(build.get_jad_downloadurl(), request)
        context["latest_url"] = build_url(build.project.get_latest_jad_url(), request)
        return render_to_response(request, template_name, context)
    except BuildError as e:
        # modernized from the Python-2-only `except BuildError, e` form
        error_string = "Problem releasing build: %s, the errors are as follows:<br><br>%s" % (build, e.get_error_string("<br><br>"))
        return _handle_error(request, error_string)
    except Exception as e:
        # we may want to differentiate from expected (BuildError) and unexpected
        # (everything else) errors down the road, for now we treat them the same.
        error_string = "Problem releasing build: %s, the error is: %s" % (build.get_display_string(), unicode(e))
        return _handle_error(request, error_string)
@login_required
def new_build(request, template_name="buildmanager/new_build.html"):
    """Upload a new build (jad + jar) via ProjectBuildForm.

    On success redirects to the all-builds listing; on failure re-renders
    the form with the error message.
    """
    context = {}
    form = ProjectBuildForm()
    if request.method == 'POST':
        form = ProjectBuildForm(request.POST, request.FILES)
        if form.is_valid():
            try:
                newbuild = form.save(commit=False)
                newbuild.uploaded_by = request.user
                newbuild.description = urllib.unquote(newbuild.description)
                newbuild.package_created = datetime.datetime.now()
                newbuild.set_jadfile(request.FILES['jad_file_upload'].name, request.FILES['jad_file_upload'])
                newbuild.set_jarfile(request.FILES['jar_file_upload'].name, request.FILES['jar_file_upload'])
                newbuild.save()
                _log_build_upload(request, newbuild)
                return HttpResponseRedirect(reverse('buildmanager.views.all_builds'))
            except Exception as e:
                # modernized from the Python-2-only `except Exception, e` form
                logging.error("buildmanager new ProjectBuild creation error.",
                              extra={'exception': e,
                                     'request.POST': request.POST,
                                     'request.FILES': request.FILES,
                                     'form': form})
                context['errors'] = "Could not commit build: " + str(e)
    context['form'] = form
    return render_to_response(request, template_name, context)
@login_required
def get_build_xform(request, id, template_name="buildmanager/display_xform.html"):
    """Show one of a build's xforms: inline by default, as a download on request."""
    form = BuildForm.objects.get(id=id)
    if "download" not in request.GET:
        # display it inline on HQ
        return render_to_response(request, template_name, {"xform": form})
    fin = form.as_filestream()
    # unfortunately, neither of these more correct displays actually look good in firefox
    #response = HttpResponse(fin.read(), mimetype='application/xhtml+xml')
    response = HttpResponse(fin.read(), mimetype='text/xml')
    response["content-disposition"] = 'attachment; filename=%s' % form.get_file_name()
    fin.close()
    return response
@login_required
def translation_csv(req, id):
    """Get csv of the translation file for an xform."""
    form = BuildForm.objects.get(id=id)
    xform_body = form.get_text()
    try:
        result, errors, has_error = csv_dump(xform_body)
        response = HttpResponse(result,
                                mimetype='application/ms-excel')
        response["content-disposition"] = 'attachment; filename="%s-translations.csv"' % ( form.get_file_name())
        return response
    except Exception as e:
        # str(e) replaces the deprecated, Python-2-only `e.message` attribute
        return _handle_error(req, str(e))
def readable_xform(req, template_name="buildmanager/readable_form_creator.html"):
    """Readable-xform tool: blank form on GET, conversion result on POST."""
    if req.method == "GET":
        return render_to_response(req, template_name, {})
    elif req.method == "POST":
        xform_body = req.POST["xform"]
        try:
            result, errors, has_error = readable_form(xform_body)
            context = {"success": True,
                       "message": "Your form was successfully validated!",
                       "xform": xform_body,
                       "readable_form": result}
        except Exception as e:
            context = {"success": False,
                       "message": "Failure to generate readable xform! %s" % e,
                       "xform": xform_body}
        return render_to_response(req, template_name, context)
def validator(req, template_name="buildmanager/validator.html"):
    """Validate an xform: blank form on GET, validation result on POST."""
    def get(req, template_name):
        return render_to_response(req, template_name, {})
    def post(req, template_name):
        xform_body = req.POST["xform"]
        # `in` already yields a bool -- the `True if ... else False` wrapper
        # was redundant
        hq_validation = "hq-validate" in req.POST
        try:
            xformvalidator.validate_xml(xform_body, do_hq_validation=hq_validation)
            return render_to_response(req, template_name, {"success": True,
                                                           "message": "Your form was successfully validated!",
                                                           "xform": xform_body
                                                           })
        except Exception as e:
            return render_to_response(req, template_name, {"success": False,
                                                           "message": "Validation Fail! %s" % e,
                                                           "xform": xform_body
                                                           })
    # invoke the correct function...
    # this should be abstracted away
    if req.method == "GET": return get(req, template_name)
    elif req.method == "POST": return post(req, template_name)
def _handle_error(request, error_message):
    """Log *error_message* and render the generic 500 page carrying it."""
    logging.error(error_message)
    context = {"error_message": error_message}
    return render_to_response(request, "500.html", context)
| |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# New functions
# <markdowncell>
# These are recently written functions that have not made it into the main documentation
# <headingcell level=2>
# Python Lesson: Errors and Exceptions
# <codecell>
# you would normally install eppy by doing
# python setup.py install
# or
# pip install eppy
# or
# easy_install eppy
# if you have not done so, uncomment the following three lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = '../'
sys.path.append(pathnameto_eppy)
# <markdowncell>
# When things go wrong in your eppy script, you get "Errors and Exceptions".
#
# To know more about how this works in python and eppy, take a look at [Python: Errors and Exceptions](http://docs.python.org/2/tutorial/errors.html)
# <headingcell level=2>
# Setting IDD name
# <markdowncell>
# When you work with Energyplus you are working with **idf** files (files that have the extension \*.idf). There is another file that is very important, called the **idd** file. This is the file that defines all the objects in Energyplus. Each version of Energyplus has a different **idd** file.
#
# So eppy needs to know which **idd** file to use. Only one **idd** file can be used in a script or program. This means that you cannot change the **idd** file once you have selected it. Of course you have to first select an **idd** file before eppy can work.
#
# If you use eppy and break the above rules, eppy will raise an exception. So let us use eppy incorrectly and make eppy raise the exception, just see how that happens.
#
# First let us try to open an **idf** file without setting an **idd** file.
# <codecell>
from eppy import modeleditor
from eppy.modeleditor import IDF
fname1 = "../eppy/resources/idffiles/V_7_2/smallfile.idf"
# <markdowncell>
# Now let us open file fname1 without setting the **idd** file
# <codecell>
try:
idf1 = IDF(fname1)
except modeleditor.IDDNotSetError as e:
print("raised eppy.modeleditor.IDDNotSetError")
# <markdowncell>
# OK. It does not let you do that and it raises an exception
#
# So let us set the **idd** file and then open the idf file
# <codecell>
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
IDF.setiddname(iddfile)
idf1 = IDF(fname1)
# <markdowncell>
# That worked without raising an exception
#
# Now let us try to change the **idd** file. Eppy should not let you do this and should raise an exception.
# <codecell>
try:
IDF.setiddname("anotheridd.idd")
except modeleditor.IDDAlreadySetError as e:
print("raised modeleditor.IDDAlreadySetError")
# <markdowncell>
# Excellent!! It raised the exception we were expecting.
# <headingcell level=2>
# Check range for fields
# <markdowncell>
# The fields of idf objects often have a range of legal values. The following functions will let you discover what that range is and test if your value lies within that range
# <markdowncell>
# demonstrate two new functions:
#
# - EpBunch.getrange(fieldname) # will return the ranges for that field
# - EpBunch.checkrange(fieldname) # will throw an exception if the value is outside the range
# <codecell>
from eppy import modeleditor
from eppy.modeleditor import IDF
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
fname1 = "../eppy/resources/idffiles/V_7_2/smallfile.idf"
# <codecell>
# IDF.setiddname(iddfile)# idd ws set further up in this page
idf1 = IDF(fname1)
# <codecell>
building = idf1.idfobjects['building'.upper()][0]
print(building)
# <codecell>
print(building.getrange("Loads_Convergence_Tolerance_Value"))
# <codecell>
print(building.checkrange("Loads_Convergence_Tolerance_Value"))
# <markdowncell>
# Let us set these values outside the range and see what happens
# <codecell>
building.Loads_Convergence_Tolerance_Value = 0.6
from eppy.bunch_subclass import RangeError
try:
print(building.checkrange("Loads_Convergence_Tolerance_Value"))
except RangeError as e:
print("raised range error")
# <markdowncell>
# So the Range Check works
# <headingcell level=2>
# Looping through all the fields in an idf object
# <markdowncell>
# We have seen how to check the range of field in the idf object. What if you want to do a *range check* on all the fields in an idf object ? To do this we will need a list of all the fields in the idf object. We can do this easily by the following line
# <codecell>
print(building.fieldnames)
# <markdowncell>
# So let us use this
# <codecell>
for fieldname in building.fieldnames:
print("%s = %s" % (fieldname, building[fieldname]))
# <markdowncell>
# Now let us test if the values are in the legal range. We know that "Loads_Convergence_Tolerance_Value" is out of range
# <codecell>
from eppy.bunch_subclass import RangeError
for fieldname in building.fieldnames:
try:
building.checkrange(fieldname)
print("%s = %s #-in range" % (fieldname, building[fieldname],))
except RangeError as e:
print("%s = %s #-****OUT OF RANGE****" % (fieldname, building[fieldname],))
# <markdowncell>
# You see, we caught the out of range value
# <headingcell level=2>
# Blank idf file
# <markdowncell>
# Until now in all our examples, we have been reading an idf file from disk:
#
# - How do I create a blank new idf file
# - give it a file name
# - Save it to the disk
#
# Here are the steps to do that
# <codecell>
# some initial steps
from eppy.modeleditor import IDF
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
# IDF.setiddname(iddfile) # Has already been set
# - Let us first open a file from the disk
fname1 = "../eppy/resources/idffiles/V_7_2/smallfile.idf"
idf_fromfilename = IDF(fname1) # initialize the IDF object with the file name
idf_fromfilename.printidf()
# <codecell>
# - now let us open a file from the disk differently
fname1 = "../eppy/resources/idffiles/V_7_2/smallfile.idf"
fhandle = open(fname1, 'r') # open the file for reading and assign it a file handle
idf_fromfilehandle = IDF(fhandle) # initialize the IDF object with the file handle
idf_fromfilehandle.printidf()
# <codecell>
# So IDF object can be initialized with either a file name or a file handle
# - How do I create a blank new idf file
idftxt = "" # empty string
from io import StringIO
fhandle = StringIO(idftxt) # we can make a file handle of a string
idf_emptyfile = IDF(fhandle) # initialize the IDF object with the file handle
idf_emptyfile.printidf()
# <markdowncell>
# It did not print anything. Why should it? It was empty.
#
# What if we give it a string that was not blank
# <codecell>
# - The string does not have to be blank
idftxt = "VERSION, 7.3;" # Not an empty string; has just the version number
fhandle = StringIO(idftxt) # we can make a file handle of a string
idf_notemptyfile = IDF(fhandle) # initialize the IDF object with the file handle
idf_notemptyfile.printidf()
# <markdowncell>
# Aha !
#
# Now let us give it a file name
# <codecell>
# - give it a file name
idf_notemptyfile.idfname = "notemptyfile.idf"
# - Save it to the disk
idf_notemptyfile.save()
# <markdowncell>
# Let us confirm that the file was saved to disk
# <codecell>
txt = open("notemptyfile.idf", 'r').read()# read the file from the disk
print(txt)
# <markdowncell>
# Yup ! that file was saved. Let us delete it since we were just playing
# <codecell>
import os
os.remove("notemptyfile.idf")
# <headingcell level=2>
# Deleting, copying/adding and making new idfobjects
# <headingcell level=3>
# Making a new idf object
# <markdowncell>
# Let us start with a blank idf file and make some new "MATERIAL" objects in it
# <codecell>
# making a blank idf object
blankstr = ""
from io import StringIO
idf = IDF(StringIO(blankstr))
# <markdowncell>
# To make and add a new idfobject object, we use the function IDF.newidfobject(). We want to make an object of type "MATERIAL"
# <codecell>
newobject = idf.newidfobject("material".upper()) # the key for the object type has to be in upper case
# .upper() makes it upper case
# <codecell>
print(newobject)
# <markdowncell>
# Let us give this a name, say "Shiny new material object"
# <codecell>
newobject.Name = "Shiny new material object"
print(newobject)
# <codecell>
anothermaterial = idf.newidfobject("material".upper())
anothermaterial.Name = "Lousy material"
thirdmaterial = idf.newidfobject("material".upper())
thirdmaterial.Name = "third material"
print(thirdmaterial)
# <markdowncell>
# Let us look at all the "MATERIAL" objects
# <codecell>
print(idf.idfobjects["MATERIAL"])
# <markdowncell>
# As we can see there are three MATERIAL idfobjects. They are:
#
# 1. Shiny new material object
# 2. Lousy material
# 3. third material
# <headingcell level=3>
# Deleting an idf object
# <markdowncell>
# Let us remove 2. Lousy material. It is the second material in the list. So let us remove the second material
# <codecell>
idf.popidfobject('MATERIAL', 1) # first material is '0', second is '1'
# <codecell>
print(idf.idfobjects['MATERIAL'])
# <markdowncell>
# You can see that the second material is gone ! Now let us remove the first material, but do it using a different function
# <codecell>
firstmaterial = idf.idfobjects['MATERIAL'][-1]
# <codecell>
idf.removeidfobject(firstmaterial)
# <codecell>
print(idf.idfobjects['MATERIAL'])
# <markdowncell>
# So we have two ways of deleting an idf object:
#
# 1. popidfobject -> give it the idf key: "MATERIAL", and the index number
# 2. removeidfobject -> give it the idf object to be deleted
# <headingcell level=3>
# Copying/Adding an idf object
# <markdowncell>
# Having deleted two "MATERIAL" objects, we have only one left. Let us make a copy of this object and add it to our idf file
# <codecell>
onlymaterial = idf.idfobjects["MATERIAL"][0]
# <codecell>
idf.copyidfobject(onlymaterial)
# <codecell>
print(idf.idfobjects["MATERIAL"])
# <markdowncell>
# So now we have a copy of the material. You can use this method to copy idf objects from other idf files too.
# <headingcell level=2>
# Making an idf object with named arguments
# <markdowncell>
# What if we wanted to make an idf object with values for it's fields? We can do that too.
# <headingcell level=2>
# Renaming an idf object
# <codecell>
gypboard = idf.newidfobject('MATERIAL', Name="G01a 19mm gypsum board",
Roughness="MediumSmooth",
Thickness=0.019,
Conductivity=0.16,
Density=800,
Specific_Heat=1090)
# <codecell>
print(gypboard)
# <markdowncell>
# newidfobject() also fills in the default values like "Thermal Absorptance", "Solar Absorptance", etc.
# <codecell>
print(idf.idfobjects["MATERIAL"])
# <headingcell level=2>
# Renaming an idf object
# <markdowncell>
# It is easy to rename an idf object. If we want to rename the gypboard object that we created above, we simply say:
# <rawcell>
# gypboard.Name = "a new name".
# <markdowncell>
# But this could create a problem. What if this gypboard is part of a "CONSTRUCTION" object. The construction object will refer to the gypboard by name. If we change the name of the gypboard, we should change it in the construction object.
#
# But there may be many construction objects using the gypboard. Now we will have to change it in all those construction objects. Sounds painful.
#
# Let us try this with an example:
# <codecell>
interiorwall = idf.newidfobject("CONSTRUCTION", Name="Interior Wall",
Outside_Layer="G01a 19mm gypsum board",
Layer_2="Shiny new material object",
Layer_3="G01a 19mm gypsum board")
print(interiorwall)
# <markdowncell>
# to rename gypboard and have that name change in all the places we call modeleditor.rename(idf, key, oldname, newname)
# <codecell>
modeleditor.rename(idf, "MATERIAL", "G01a 19mm gypsum board", "peanut butter")
# <codecell>
print(interiorwall)
# <markdowncell>
# Now we have "peanut butter" everywhere. At least where we need it. Let us look at the entire idf file, just to be sure
# <codecell>
idf.printidf()
# <headingcell level=2>
# Zone area and volume
# <markdowncell>
# The idf file has zones with surfaces and windows. It is easy to get the attributes of the surfaces and windows as we have seen in the tutorial. Let us review this once more:
# <codecell>
from eppy import modeleditor
from eppy.modeleditor import IDF
iddfile = "../eppy/resources/iddfiles/Energy+V7_2_0.idd"
fname1 = "../eppy/resources/idffiles/V_7_2/box.idf"
# IDF.setiddname(iddfile)
# <codecell>
idf = IDF(fname1)
# <codecell>
surfaces = idf.idfobjects["BuildingSurface:Detailed".upper()]
surface = surfaces[0]
print("area = %s" % (surface.area, ))
print("tilt = %s" % (surface.tilt, ))
print("azimuth = %s" % (surface.azimuth, ))
# <markdowncell>
# Can we do the same for zones ?
#
# Not yet .. not yet. Not in this version on eppy
#
# But we can still get the area and volume of the zone
# <codecell>
zones = idf.idfobjects["ZONE"]
zone = zones[0]
area = modeleditor.zonearea(idf, zone.Name)
volume = modeleditor.zonevolume(idf, zone.Name)
print("zone area = %s" % (area, ))
print("zone volume = %s" % (volume, ))
# <markdowncell>
# Not as slick, but still pretty easy
# <markdowncell>
# Some notes on the zone area calculation:
#
# - area is calculated by summing up all the areas of the floor surfaces
# - if there are no floors, then the sum of ceilings and roof is taken as zone area
# - if there are no floors, ceilings or roof, we are out of luck. The function returns 0
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import os
import time
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.libvirt import config
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('num_iscsi_scan_tries', 'nova.volume.driver')
class LibvirtVolumeDriver(object):
    """Base class for volume drivers: exposes a local block device."""

    def __init__(self, connection):
        self.connection = connection

    def connect_volume(self, connection_info, mount_device):
        """Connect the volume. Returns xml for libvirt."""
        conf = config.LibvirtConfigGuestDisk()
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=True)
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = mount_device
        conf.target_bus = "virtio"
        return conf

    def disconnect_volume(self, connection_info, mount_device):
        """Disconnect the volume (no-op for plain block devices)."""
        pass
class LibvirtFakeVolumeDriver(LibvirtVolumeDriver):
    """Fake volume driver: fabricates a 'fake' network disk config.

    (The previous docstring was copy-pasted from the network driver.)
    """

    def connect_volume(self, connection_info, mount_device):
        conf = config.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_host = "fake"
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = mount_device
        conf.target_bus = "virtio"
        return conf
class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
    """Driver to attach network-backed volumes to libvirt."""

    def connect_volume(self, connection_info, mount_device):
        props = connection_info['data']
        conf = config.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=False)
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        # the volume type doubles as the network protocol name
        conf.source_protocol = connection_info['driver_volume_type']
        conf.source_host = props['name']
        conf.target_dev = mount_device
        conf.target_bus = "virtio"
        if props.get('auth_enabled'):
            conf.auth_username = props['auth_username']
            conf.auth_secret_type = props['secret_type']
            conf.auth_secret_uuid = props['secret_uuid']
        return conf
class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
    """Driver to attach iSCSI volumes to libvirt.

    Logs into the iSCSI target with iscsiadm, waits for the kernel to
    create the /dev/disk/by-path node, then delegates to the base class
    to build the block-device disk config.
    """
    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        """Run `iscsiadm -m node -T <iqn> -p <portal> <iscsi_command...>`.

        check_exit_code (kwarg) may be an int or a list of acceptable
        exit codes; defaults to 0.
        """
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                  (iscsi_command, out, err))
        return (out, err)
    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        """Set a single node property via `iscsiadm ... --op update`."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, mount_device):
        """Attach the volume to instance_name"""
        iscsi_properties = connection_info['data']
        # NOTE(vish): If we are on the same host as nova volume, the
        #             discovery makes the target so we don't need to
        #             run --op new. Therefore, we check to see if the
        #             target exists, and if we get 255 (Not Found), then
        #             we run --op new. This will also happen if another
        #             volume is using the same target.
        try:
            self._run_iscsiadm(iscsi_properties, ())
        except exception.ProcessExecutionError as exc:
            # iscsiadm returns 21 for "No records found" after version 2.0-871
            if exc.exit_code in [21, 255]:
                self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
            else:
                raise
        # Configure CHAP credentials before logging in, if the target uses them.
        if iscsi_properties.get('auth_method'):
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.authmethod",
                                  iscsi_properties['auth_method'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.username",
                                  iscsi_properties['auth_username'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.password",
                                  iscsi_properties['auth_password'])
        # NOTE(vish): If we have another lun on the same target, we may
        #             have a duplicate login
        self._run_iscsiadm(iscsi_properties, ("--login",),
                           check_exit_code=[0, 255])
        self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
        # lun defaults to 0 when the backend does not report one
        host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
                       (iscsi_properties['target_portal'],
                        iscsi_properties['target_iqn'],
                        iscsi_properties.get('target_lun', 0)))
        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        while not os.path.exists(host_device):
            if tries >= FLAGS.num_iscsi_scan_tries:
                raise exception.NovaException(_("iSCSI device not found at %s")
                                              % (host_device))
            LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
                       "Will rescan & retry.  Try number: %(tries)s") %
                     locals())
            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
            tries = tries + 1
            # quadratic backoff between rescans
            if not os.path.exists(host_device):
                time.sleep(tries ** 2)
        if tries != 0:
            LOG.debug(_("Found iSCSI node %(mount_device)s "
                        "(after %(tries)s rescans)") %
                      locals())
        # Base class builds the disk config from the device path we found.
        connection_info['data']['device_path'] = host_device
        sup = super(LibvirtISCSIVolumeDriver, self)
        return sup.connect_volume(connection_info, mount_device)
    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, mount_device):
        """Detach the volume from instance_name"""
        sup = super(LibvirtISCSIVolumeDriver, self)
        sup.disconnect_volume(connection_info, mount_device)
        iscsi_properties = connection_info['data']
        # NOTE(vish): Only disconnect from the target if no luns from the
        #             target are in use.
        device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
                         (iscsi_properties['target_portal'],
                          iscsi_properties['target_iqn']))
        devices = self.connection.get_all_block_devices()
        devices = [dev for dev in devices if dev.startswith(device_prefix)]
        if not devices:
            # no luns remain: disable auto-login, log out and delete the record
            self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
                                  check_exit_code=[0, 255])
            self._run_iscsiadm(iscsi_properties, ("--logout",),
                               check_exit_code=[0, 255])
            self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
                               check_exit_code=[0, 255])
| |
import numpy as np
import multiprocessing
import itertools as it
import collections as coll
from functools import partial
import logging
import h5py
import scipy.ndimage as nd
from scipy.sparse import coo_matrix
from scipy.ndimage.measurements import label
from scipy.spatial.distance import pdist, cdist, squareform
from scipy.misc import comb as nchoosek
from sklearn.metrics import precision_recall_curve
def bin_values(a, bins=255):
if len(unique(a)) < 2*bins:
return a.copy()
b = np.zeros_like(a)
m, M = a.min(), a.max()
r = M - m
step = r / bins
lows = np.arange(m, M, step)
highs = np.arange(m+step, M+step, step)
for low, high in zip(lows, highs):
locations = np.flatnonzero((low <= a) * (a < high))
if len(locations) > 0:
values = a.ravel()[locations]
b.ravel()[locations] = values.mean()
return b
def pixel_wise_boundary_precision_recall(aseg, gt):
    """Return (precision, recall) of boundary pixels, with *gt* as truth."""
    true_pos = float((gt * aseg).sum())
    false_pos = (aseg * (1 - gt)).sum()
    false_neg = (gt * (1 - aseg)).sum()
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    return precision, recall
def wiggle_room_precision_recall(pred, boundary, margin=2, connectivity=1):
    """Boundary precision-recall with *margin* pixels of spatial slack.

    pred: predicted boundary score map.
    boundary: binary ground-truth boundary map of the same shape.
    margin: number of binary-dilation iterations of tolerance.
    connectivity: structuring-element connectivity for the dilation.

    Returns (threshold, precision, recall) triples.
    NOTE(review): on Python 3 `zip` returns an iterator, not a list;
    this module appears to target Python 2 -- confirm before porting.
    """
    struct = nd.generate_binary_structure(boundary.ndim, connectivity)
    # tolerant ground truth: boundary dilated by `margin` steps
    gtd = nd.binary_dilation(boundary, struct, margin)
    struct_m = nd.iterate_structure(struct, margin)
    # tolerant prediction: grey dilation spreads each score over the margin
    pred_dil = nd.grey_dilation(pred, footprint=struct_m)
    # re-insert any score values wiped out by the dilation so every
    # threshold present in `pred` still occurs in `pred_dil`
    missing = np.setdiff1d(np.unique(pred), np.unique(pred_dil))
    for m in missing:
        pred_dil.ravel()[np.flatnonzero(pred==m)[0]] = m
    # precision against the dilated truth, recall with the dilated prediction
    prec, _, ts = precision_recall_curve(gtd.ravel(), pred.ravel())
    _, rec, _ = precision_recall_curve(boundary.ravel(), pred_dil.ravel())
    return zip(ts, prec, rec)
def get_stratified_sample(a, n):
    """Return roughly *n* values sampled evenly from the unique values of *a*.

    If *a* has at most 2*n unique values, all of them are returned.
    """
    u = np.unique(a)
    if len(u) <= 2 * n:
        return u
    # floor division keeps the slice step an int under Python 3 as well
    return u[0:len(u):len(u) // n]
def edit_distance(aseg, gt, ws=None):
    """Edit distance of *aseg* to *gt*; via best-possible seg when *ws* given."""
    if ws is None:
        return edit_distance_to_bps(aseg, gt)
    import agglo
    best = agglo.best_possible_segmentation(ws, gt)
    return edit_distance_to_bps(aseg, best)
def edit_distance_to_bps(aseg, bps):
    """Return (false merges, false splits) of *aseg* against *bps*.

    Both inputs are relabelled to 1..n first; label 0 is ignored.
    """
    aseg = relabel_from_one(aseg)[0]
    bps = relabel_from_one(bps)[0]
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # long-standing equivalent dtype spelling.
    r = contingency_table(aseg, bps).astype(bool)
    if (bps == 0).any():
        r[:, 0] = 0
    if (aseg == 0).any():
        r[0, :] = 0
    false_splits = (r.sum(axis=0) - 1)[1:].sum()
    false_merges = (r.sum(axis=1) - 1)[1:].sum()
    return (false_merges, false_splits)
def relabel_from_one(a):
    """Relabel the positive labels of *a* densely to 1..n, keeping 0 fixed.

    Returns (relabelled array, forward map, inverse map).  When *a* is
    already densely labelled, *a* itself is returned with identity maps.
    """
    labels = np.unique(a)
    nonzero_labels = labels[labels != 0]
    top = labels.max()
    if top == len(nonzero_labels):
        # nothing to do, already 1...n labels
        return a, labels, labels
    forward_map = np.zeros(top + 1, int)
    forward_map[nonzero_labels] = np.arange(1, len(nonzero_labels) + 1)
    if not (labels == 0).any():
        labels = np.concatenate(([0], labels))
    return forward_map[a], forward_map, labels
def contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0], norm=True):
"""Return the contingency table for all regions in matched segmentations."""
gtr = gt.ravel()
segr = seg.ravel()
ij = np.zeros((2,len(gtr)))
ij[0,:] = segr
ij[1,:] = gtr
cont = coo_matrix((np.ones((len(gtr))), ij)).toarray()
cont[:, ignore_gt] = 0
cont[ignore_seg,:] = 0
if norm:
cont /= float(cont.sum())
return cont
def xlogx(x, out=None):
    """Compute x * log_2(x) with 0 log(0) defined to be 0.

    If *out* is given, the result is written into it in place.
    """
    y = x.copy() if out is None else out
    nonzero = x.nonzero()
    y[nonzero] *= np.log2(y[nonzero])
    return y
def special_points_evaluate(eval_fct, coords, flatten=True, coord_format=True):
    """Restrict an evaluation function to a fixed set of coordinates.

    eval_fct: function of two (flattened) label arrays, plus extras.
    coords: point coordinates; if coord_format, an (npoints, ndim) array
        that gets split into per-axis index arrays.
    flatten: if True, convert the coordinates into raveled indices of the
        first argument; negative coordinates are wrapped by the axis length.
    Returns a wrapper evaluating eval_fct only at those points.
    """
    if coord_format:
        coords = [coords[:,i] for i in range(coords.shape[1])]
    def special_eval_fct(x, y, *args, **kwargs):
        if flatten:
            # NOTE(review): this mutates the captured `coords` in place, so
            # the negative-index wrap is applied at most once across calls,
            # and later calls implicitly assume the same x.shape -- confirm
            # that this statefulness is intended.
            for i in range(len(coords)):
                if coords[i][0] < 0:
                    coords[i] += x.shape[i]
            coords2 = np.ravel_multi_index(coords, x.shape)
        else:
            coords2 = coords
        sx = x.ravel()[coords2]
        sy = y.ravel()[coords2]
        return eval_fct(sx, sy, *args, **kwargs)
    return special_eval_fct
def make_synaptic_vi(fn):
    """Return split_vi restricted to the synapse points annotated in *fn*."""
    return make_synaptic_functions(fn, split_vi)
def make_synaptic_functions(fn, fncts):
    """Restrict metric function(s) to the synapse coordinates in file *fn*.

    fn: Raveler synapse-annotation file, read via syngeo.
    fncts: a single metric function, or an iterable of them.
    Returns the restricted function (or a map over all of them).
    """
    from syngeo import io as synio
    synapse_coords = \
        synio.raveler_synapse_annotations_to_coords(fn, 'arrays')
    synapse_coords = np.array(list(it.chain(*synapse_coords)))
    make_function = partial(special_points_evaluate, coords=synapse_coords)
    # NOTE(review): `coll.Iterable` was removed in Python 3.10 (use
    # collections.abc.Iterable) and `map` returns an iterator on Python 3;
    # this module appears to target Python 2 -- confirm before porting.
    if not isinstance(fncts, coll.Iterable):
        return make_function(fncts)
    else:
        return map(make_function, fncts)
def vi(x, y=None, weights=None, ignore_x=[0], ignore_y=[0]):
    """Return the variation of information metric.

    weights: length-2 weights for the two conditional entropies returned by
        split_vi; defaults to equal weights.  (Previously the default was a
        shared `np.ones(2)` array created once at definition time, which a
        caller could mutate for all future calls.)
    """
    if weights is None:
        weights = np.ones(2)
    return np.dot(weights, split_vi(x, y, ignore_x, ignore_y))
def split_vi(x, y=None, ignore_x=[0], ignore_y=[0]):
    """Return the symmetric conditional entropies associated with the VI.

    The variation of information is VI(X,Y) = H(X|Y) + H(Y|X).  When Y is
    the ground truth, H(Y|X) measures under-segmentation and H(X|Y)
    over-segmentation: a perfect over-segmentation has H(Y|X)=0 and a
    perfect under-segmentation has H(X|Y)=0.

    If y is None, x is assumed to be a contingency table.
    """
    tables = vi_tables(x, y, ignore_x, ignore_y)
    hxgy, hygx = tables[3], tables[4]
    # [false merges, false splits]
    return np.array([hygx.sum(), hxgy.sum()])
def vi_pairwise_matrix(segs, split=False):
    """Compute the pairwise VI distances within a set of segmentations.

    With split=True, one conditional-entropy direction fills the upper
    triangle and the other the lower triangle of the returned matrix.
    0-labeled pixels are ignored.
    """
    flat = np.array([seg.ravel() for seg in segs])
    if not split:
        return squareform(pdist(flat, vi))
    merges = squareform(pdist(flat, lambda x, y: split_vi(x, y)[0]))
    splits = squareform(pdist(flat, lambda x, y: split_vi(x, y)[1]))
    result = merges
    lower = np.tril(np.ones(splits.shape), -1).astype(bool)
    result[lower] = splits[lower]
    return result
def split_vi_threshold(tup):
    """Compute split VI at one threshold (tuple input for multiprocessing).

    Tuple layout: (candidate UCM, gold-standard segmentation, labels
    ignored in the segmentation, labels ignored in the gold standard,
    threshold applied to the UCM).

    Returns
    -------
    length-2 array: undersegmentation and oversegmentation VI parts.
    """
    ucm, gt, ignore_seg, ignore_gt, t = tup
    candidate = label(ucm < t)[0]
    return split_vi(candidate, gt, ignore_seg, ignore_gt)
def vi_by_threshold(ucm, gt, ignore_seg=[], ignore_gt=[], npoints=None,
        nprocessors=None):
    """Compute the split VI of a thresholded UCM at each threshold.

    Parameters
    ----------
    ucm : array
        Ultrametric contour map; unique nonzero values are the thresholds.
    gt : array
        Gold-standard segmentation.
    ignore_seg, ignore_gt : list, optional
        Labels ignored in each volume.
    npoints : int, optional
        Approximate number of thresholds to sample (all by default).
    nprocessors : int, optional
        Pool size; 1 runs serially (avoids pickling overhead).

    Returns
    -------
    (3, nthresholds) array: thresholds, then the two split-VI components.
    """
    ts = np.unique(ucm)[1:]
    if npoints is None:
        npoints = len(ts)
    if len(ts) > 2 * npoints:
        # floor division keeps the arange step (and thus the fancy
        # indices) integral; true division would yield float indices
        # and raise IndexError on Python 3
        ts = ts[np.arange(1, len(ts), len(ts) // npoints)]
    if nprocessors == 1:  # this should avoid pickling overhead
        result = [split_vi_threshold((ucm, gt, ignore_seg, ignore_gt, t))
                  for t in ts]
    else:
        p = multiprocessing.Pool(nprocessors)
        result = p.map(split_vi_threshold,
                       ((ucm, gt, ignore_seg, ignore_gt, t) for t in ts))
    return np.concatenate((ts[np.newaxis, :], np.array(result).T), axis=0)
def rand_by_threshold(ucm, gt, npoints=None):
    """Compute Rand and adjusted Rand indices of a thresholded UCM vs `gt`.

    Parameters mirror `vi_by_threshold` (serial only).

    Returns
    -------
    (3, nthresholds) array: thresholds, Rand index, adjusted Rand index.
    """
    ts = np.unique(ucm)[1:]
    if npoints is None:
        npoints = len(ts)
    if len(ts) > 2 * npoints:
        # floor division keeps the sampled indices integral; true division
        # would produce float indices and fail on Python 3
        ts = ts[np.arange(1, len(ts), len(ts) // npoints)]
    result = np.zeros((2, len(ts)))
    for i, t in enumerate(ts):
        seg = label(ucm < t)[0]
        result[0, i] = rand_index(seg, gt)
        result[1, i] = adj_rand_index(seg, gt)
    return np.concatenate((ts[np.newaxis, :], result), axis=0)
def vi_tables(x, y=None, ignore_x=[0], ignore_y=[0]):
    """Return probability tables used for calculating VI.

    If y is None, x is assumed to be a contingency table.

    Returns
    -------
    pxy : joint probability table (ignored labels zeroed out)
    px, py : row and column marginals
    hxgy, hygx : per-label contributions to H(X|Y) and H(Y|X)
    lpygx, lpxgy : per-label sums of p*log(p) conditionals
    """
    if y is not None:
        pxy = contingency_table(x, y, ignore_x, ignore_y)
    else:
        # Work on a copy: zeroing the ignored rows/columns in place would
        # silently clobber the caller's contingency table.
        cont = x.copy()
        cont[:, ignore_y] = 0
        cont[ignore_x, :] = 0
        pxy = cont / float(cont.sum())
    # Calculate marginal probabilities
    px = pxy.sum(axis=1)
    py = pxy.sum(axis=0)
    # Remove zero rows/cols so the conditional divisions are well-defined
    nzx = px.nonzero()[0]
    nzy = py.nonzero()[0]
    nzpx = px[nzx]
    nzpy = py[nzy]
    nzpxy = pxy[nzx, :][:, nzy]
    # Calculate log conditional probabilities and entropies
    ax = np.newaxis
    lpygx = np.zeros(np.shape(px))
    lpygx[nzx] = xlogx(nzpxy / nzpx[:, ax]).sum(axis=1)
    # \sum_x{p_{y|x} \log{p_{y|x}}}
    hygx = -(px * lpygx)  # \sum_x{p_x H(Y|X=x)} = H(Y|X)
    lpxgy = np.zeros(np.shape(py))
    lpxgy[nzy] = xlogx(nzpxy / nzpy[ax, :]).sum(axis=0)
    hxgy = -(py * lpxgy)
    return pxy, px, py, hxgy, hygx, lpygx, lpxgy
def sorted_vi_components(s1, s2, ignore1=[0], ignore2=[0], compress=True):
    """Return lists of the most entropic segments in s1|s2 and s2|s1.

    With ``compress=True``, labels are remapped to a dense range before
    the VI computation, giving massive memory savings when the volume
    uses only a few of many possible labels (e.g. labels 1 and 1,000,000
    would otherwise produce a million-wide contingency table).
    """
    if compress:
        s1, forw1, back1 = relabel_from_one(s1)
        s2, forw2, back2 = relabel_from_one(s2)
    tables = vi_tables(s1, s2, ignore1, ignore2)
    h1g2, h2g1 = tables[3], tables[4]
    # sort by decreasing conditional-entropy contribution
    order1 = np.argsort(-h2g1)
    order2 = np.argsort(-h1g2)
    if compress:
        labels1, labels2 = back1[order1], back2[order2]
    else:
        labels1, labels2 = order1, order2
    return labels1, h2g1[order1], labels2, h1g2[order2]
def split_components(idx, contingency, num_elems=4, axis=0):
    """Return the indices of the bodies most overlapping with body idx.

    Parameters
    ----------
    idx : int
        The body id being examined.
    contingency : array
        The normalized contingency table.
    num_elems : int, optional
        The number of overlapping bodies desired.
    axis : int, optional
        The axis along which to perform the calculations.

    Returns
    -------
    zip of (body_idx, overlap_int, overlap_ext) tuples.
    """
    if axis == 1:
        contingency = contingency.T
    # normalize along rows (internal overlap) and columns (external overlap)
    row_norm = contingency / contingency.sum(axis=1)[:, np.newaxis]
    col_norm = contingency / contingency.sum(axis=0)[np.newaxis, :]
    top = np.argsort(-row_norm[idx])[:num_elems]
    internal = row_norm[idx][top]
    external = col_norm[idx][top]
    return zip(top, internal, external)
def rand_values(cont_table):
    """Calculate the pair-counting quantities behind the Rand indices.

    Given a (non-normalized) contingency table, returns:
    a -- pairs grouped together in both partitions,
    b -- pairs together in rows only,
    c -- pairs together in columns only,
    d -- pairs separate in both.

    All four are computed with float division (the original mixed `/2.0`
    and `/2`, which floor-divides b, c and d to ints under Python 2).
    """
    n = cont_table.sum()
    sum1 = (cont_table * cont_table).sum()
    sum2 = (cont_table.sum(axis=1) ** 2).sum()
    sum3 = (cont_table.sum(axis=0) ** 2).sum()
    a = (sum1 - n) / 2.0
    b = (sum2 - sum1) / 2.0
    c = (sum3 - sum1) / 2.0
    d = (sum1 + n ** 2 - sum2 - sum3) / 2.0
    return a, b, c, d
def rand_index(x, y=None):
    """Return the unadjusted Rand index between two segmentations.

    If y is None, x is taken to be a pre-computed contingency table.
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    total = a + b + c + d
    return (a + d) / total
def adj_rand_index(x, y=None):
    """Return the adjusted (chance-corrected) Rand index.

    If y is None, x is taken to be a pre-computed contingency table.
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    nk = a + b + c + d
    cross = (a + b) * (a + c) + (c + d) * (b + d)
    return (nk * (a + d) - cross) / (nk ** 2 - cross)
def fm_index(x, y=None):
    """Return the Fowlkes-Mallows index.

    If y is None, x is taken to be a pre-computed contingency table.
    """
    if y is None:
        cont = x
    else:
        cont = contingency_table(x, y, norm=False)
    a, b, c, d = rand_values(cont)
    return a / np.sqrt((a + b) * (a + c))
def reduce_vi(fn='testing/%i/flat-single-channel-tr%i-%i-%.2f.lzf.h5',
    iterable=[(ts, tr, ts) for ts, tr in it.permutations(range(8), 2)],
    thresholds=np.arange(0, 1.01, 0.01)):
    """Sum pre-computed split-VI curves stored in a grid of HDF5 files.

    Parameters
    ----------
    fn : str
        Filename template, filled with each tuple from `iterable` plus
        the current threshold.
    iterable : sequence of tuples
        Parameter tuples substituted into `fn`.  NOTE(review): the default
        is evaluated once at import time (mutable default argument).
    thresholds : array of float
        Thresholds at which the stored VI was evaluated.

    Returns
    -------
    vi : (3, len(thresholds), len(iterable)) float array
        Accumulated VI values read from each file's 'vi' dataset.
    """
    iterable = list(iterable)
    vi = np.zeros((3, len(thresholds), len(iterable)), np.double)
    current_vi = np.zeros(3)
    for i, t in enumerate(thresholds):
        for j, v in enumerate(iterable):
            current_fn = fn % (tuple(v) + (t,))
            try:
                f = h5py.File(current_fn, 'r')
            except IOError:
                logging.warning('IOError: could not open file %s' % current_fn)
            else:
                try:
                    current_vi = np.array(f['vi'])[:, 0]
                except IOError:
                    logging.warning('IOError: could not open file %s'
                        % current_fn)
                except KeyError:
                    logging.warning('KeyError: could not find vi in file %s'
                        % current_fn)
                finally:
                    f.close()
            # On failure, the previous file's values are accumulated again;
            # this best-effort fallback appears intentional -- TODO confirm.
            vi[:, i, j] += current_vi
    return vi
def sem(a, axis=None):
    """Return the standard error of the mean of `a` along `axis`.

    With ``axis=None`` the input is flattened first.
    """
    if axis is None:
        a = np.ravel(a)
        axis = 0
    n = a.shape[axis]
    return np.std(a, axis=axis) / np.sqrt(n)
def vi_statistics(vi_table):
    """Return mean, standard error, and median of VI along the last axis."""
    mean = np.mean(vi_table, axis=-1)
    error = sem(vi_table, axis=-1)
    median = np.median(vi_table, axis=-1)
    return mean, error, median
| |
from __future__ import division, print_function
import os
import re
import struct
import sys
import textwrap
sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
# Identity-element sentinels: these strings are emitted verbatim into the
# generated C as the `identity` argument of PyUFunc_FromFuncAndData.
Zero = "PyUFunc_Zero"
One = "PyUFunc_One"
None_ = "PyUFunc_None"
ReorderableNone = "PyUFunc_ReorderableNone"
# Sentinel value to specify using the full type description in the
# function name
class FullTypeDescr(object):
    """Sentinel: the generated loop name embeds the full in/out signature."""
    pass
class FuncNameSuffix(object):
    """Stores the suffix to append when generating functions names.

    Used for loops named TYPE_name_suffix (e.g. the 'long' ldexp/frexp
    variants below).
    """
    def __init__(self, suffix):
        self.suffix = suffix
class TypeDescription(object):
    """Type signature for a single ufunc inner loop.

    Attributes
    ----------
    type : str
        Character representing the nominal type.
    func_data : str or None or FullTypeDescr or FuncNameSuffix, optional
        The string representing the expression to insert into the data
        array, if any.
    in_ : str or None, optional
        The typecode(s) of the inputs; 'P' is replaced by `type`.
    out : str or None, optional
        The typecode(s) of the outputs; 'P' is replaced by `type`.
    astype : dict or None, optional
        If astype['x'] is 'y', uses PyUFunc_x_x_As_y_y/PyUFunc_xx_x_As_yy_y
        instead of PyUFunc_x_x/PyUFunc_xx_x.
    """
    def __init__(self, type, f=None, in_=None, out=None, astype=None):
        self.type = type
        self.func_data = f
        self.astype_dict = {} if astype is None else astype
        self.in_ = in_.replace('P', type) if in_ is not None else None
        self.out = out.replace('P', type) if out is not None else None

    def finish_signature(self, nin, nout):
        # Default any missing signature half to the nominal type repeated,
        # then validate the arity against the owning ufunc.
        if self.in_ is None:
            self.in_ = self.type * nin
        assert len(self.in_) == nin
        if self.out is None:
            self.out = self.type * nout
        assert len(self.out) == nout
        self.astype = self.astype_dict.get(self.type, None)
# Map type char -> C function-name template for the math library call
# (npy_* for real types, nc_* for complex; suffix f/l selects precision).
_fdata_map = dict(e='npy_%sf', f='npy_%sf', d='npy_%s', g='npy_%sl',
                  F='nc_%sf', D='nc_%s', G='nc_%sl')
def build_func_data(types, f):
    """Expand base name `f` into per-type C math-function names."""
    return [_fdata_map.get(c, '%s') % (f,) for c in types]
def TD(types, f=None, astype=None, in_=None, out=None):
    """Build one TypeDescription per character code in `types`.

    `f` may be a base function name (expanded per type), an explicit
    per-type sequence, or None; `in_`/`out` may be a single signature
    string applied to every type, a per-type sequence, or None.
    """
    n = len(types)
    if f is None:
        func_data = (None,) * n
    elif isinstance(f, str):
        func_data = build_func_data(types, f)
    else:
        assert len(f) == n
        func_data = f
    if isinstance(in_, str):
        in_ = (in_,) * n
    elif in_ is None:
        in_ = (None,) * n
    if isinstance(out, str):
        out = (out,) * n
    elif out is None:
        out = (None,) * n
    return [TypeDescription(t, f=fd, in_=i, out=o, astype=astype)
            for t, fd, i, o in zip(types, func_data, in_, out)]
class Ufunc(object):
    """Description of a ufunc.

    Attributes
    ----------
    nin : number of input arguments
    nout : number of output arguments
    identity : identity element for a two-argument function
    docstring : docstring for the ufunc
    type_descriptions : list of TypeDescription objects
    """
    def __init__(self, nin, nout, identity, docstring, typereso,
                 *type_descriptions):
        self.nin = nin
        self.nout = nout
        # a missing identity maps to the PyUFunc_None sentinel
        self.identity = None_ if identity is None else identity
        self.docstring = docstring
        self.typereso = typereso
        # flatten the groups of TypeDescription lists into a single list
        self.type_descriptions = []
        for group in type_descriptions:
            self.type_descriptions.extend(group)
        # complete defaulted signatures now that nin/nout are known
        for td in self.type_descriptions:
            td.finish_signature(self.nin, self.nout)
# String-handling utilities to avoid locale-dependence.
import string
if sys.version_info[0] < 3:
    # Python 2: str.translate takes a 256-char table from string.maketrans.
    UPPER_TABLE = string.maketrans(string.ascii_lowercase,
                                   string.ascii_uppercase)
else:
    # Python 3: build the mapping via bytes.maketrans over ASCII only.
    UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
                                  bytes(string.ascii_uppercase, "ascii"))
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.lib.utils import english_upper
    >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_upper(s)
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    # single pass over the precomputed ASCII-only translation table
    uppered = s.translate(UPPER_TABLE)
    return uppered
#each entry in defdict is a Ufunc object.
#name: [string of chars for which it is defined,
# string of characters using func interface,
# tuple of strings giving funcs for data,
# (in, out), or (instr, outstr) giving the signature as character codes,
# identity,
# docstring,
# output specification (optional)
# ]
# Map type char -> type name used to build generated C identifiers
# (e.g. 'd' -> 'double' -> DOUBLE_add).
chartoname = {'?': 'bool',
              'b': 'byte',
              'B': 'ubyte',
              'h': 'short',
              'H': 'ushort',
              'i': 'int',
              'I': 'uint',
              'l': 'long',
              'L': 'ulong',
              'q': 'longlong',
              'Q': 'ulonglong',
              'e': 'half',
              'f': 'float',
              'd': 'double',
              'g': 'longdouble',
              'F': 'cfloat',
              'D': 'cdouble',
              'G': 'clongdouble',
              'M': 'datetime',
              'm': 'timedelta',
              'O': 'OBJECT',
              # '.' is like 'O', but calls a method of the object instead
              # of a function
              'P': 'OBJECT',
              }
# Named groups of type chars used to spell each ufunc's supported types.
all = '?bBhHiIlLqQefdgFDGOMm'  # NOTE: shadows the builtin all() in this module
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
times = 'Mm'
timedeltaonly = 'm'
intsO = ints + O
bints = '?' + ints
bintsO = bints + O
flts = 'efdg'
fltsO = flts + O
fltsP = flts + P
cmplx = 'FDG'
cmplxO = cmplx + O
cmplxP = cmplx + P
inexact = flts + cmplx
inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
nobool = all[1:]
# slices drop 'O', 'M' and/or 'm' from the tail of `all` ('...OMm')
noobj = all[:-3]+all[-2:]
nobool_or_obj = all[1:-3]+all[-2:]
nobool_or_datetime = all[1:-2]+all[-1:]
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
nocmplxO = nocmplx+O
nocmplxP = nocmplx+P
notimes_or_obj = bints + inexact
nodatetime_or_obj = bints + inexact
# Find which code corresponds to int64.
int64 = ''
uint64 = ''
for code in 'bhilq':
    if struct.calcsize(code) == 8:
        int64 = code
        uint64 = english_upper(code)
        break
# This dictionary describes all the ufunc implementations, generating
# all the function names and their corresponding ufunc signatures.  TD is
# an object which expands a list of character codes into an array of
# TypeDescriptions.
# Each value is a Ufunc(nin, nout, identity, docstring, type_resolver,
# *groups-of-TypeDescriptions); see Ufunc.__init__ above.
defdict = {
'add':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.add'),
'PyUFunc_AdditionTypeResolver',
TD(notimes_or_obj),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'mM', 'M'),
],
TD(O, f='PyNumber_Add'),
),
'subtract':
Ufunc(2, 1, None, # Zero is only a unit to the right, not the left
docstrings.get('numpy.core.umath.subtract'),
'PyUFunc_SubtractionTypeResolver',
TD(notimes_or_obj),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'MM', 'm'),
],
TD(O, f='PyNumber_Subtract'),
),
'multiply':
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.multiply'),
'PyUFunc_MultiplicationTypeResolver',
TD(notimes_or_obj),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'qm', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'dm', 'm'),
],
TD(O, f='PyNumber_Multiply'),
),
'divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.divide'),
'PyUFunc_DivisionTypeResolver',
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_Divide'),
),
'floor_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.floor_divide'),
'PyUFunc_DivisionTypeResolver',
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
#TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_FloorDivide'),
),
'true_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.true_divide'),
'PyUFunc_DivisionTypeResolver',
TD('bBhH', out='d'),
TD('iIlLqQ', out='d'),
TD(flts+cmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
TypeDescription('m', FullTypeDescr, 'mm', 'd'),
],
TD(O, f='PyNumber_TrueDivide'),
),
'conjugate':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.conjugate'),
None,
TD(ints+flts+cmplx),
TD(P, f='conjugate'),
),
'fmod':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.fmod'),
None,
TD(ints),
TD(flts, f='fmod', astype={'e':'f'}),
TD(P, f='fmod'),
),
'square':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
None,
TD(ints+inexact),
TD(O, f='Py_square'),
),
'reciprocal':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.reciprocal'),
None,
TD(ints+inexact),
TD(O, f='Py_reciprocal'),
),
# This is no longer used as numpy.ones_like, however it is
# still used by some internal calls.
'_ones_like':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath._ones_like'),
'PyUFunc_OnesLikeTypeResolver',
TD(noobj),
TD(O, f='Py_get_one'),
),
'power':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.power'),
None,
TD(ints),
TD(inexact, f='pow', astype={'e':'f'}),
TD(O, f='npy_ObjectPower'),
),
'absolute':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.absolute'),
'PyUFunc_AbsoluteTypeResolver',
TD(bints+flts+timedeltaonly),
TD(cmplx, out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
'_arg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath._arg'),
None,
TD(cmplx, out=('f', 'd', 'g')),
),
'negative':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.negative'),
'PyUFunc_NegativeTypeResolver',
TD(bints+flts+timedeltaonly),
TD(cmplx, f='neg'),
TD(O, f='PyNumber_Negative'),
),
'sign':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sign'),
'PyUFunc_SimpleUnaryOperationTypeResolver',
TD(nobool_or_datetime),
),
'greater':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'greater_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.greater_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'less':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'less_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.less_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'not_equal':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.not_equal'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?'),
),
'logical_and':
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.logical_and'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(O, f='npy_ObjectLogicalAnd'),
),
'logical_not':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.logical_not'),
None,
TD(nodatetime_or_obj, out='?'),
TD(O, f='npy_ObjectLogicalNot'),
),
'logical_or':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.logical_or'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(O, f='npy_ObjectLogicalOr'),
),
'logical_xor':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
TD(P, f='logical_xor'),
),
'maximum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.maximum'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'minimum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.minimum'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'fmax':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmax'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'fmin':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmin'),
'PyUFunc_SimpleBinaryOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'logaddexp':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logaddexp'),
None,
TD(flts, f="logaddexp", astype={'e':'f'})
),
'logaddexp2':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.logaddexp2'),
None,
TD(flts, f="logaddexp2", astype={'e':'f'})
),
'bitwise_and':
Ufunc(2, 1, One,
docstrings.get('numpy.core.umath.bitwise_and'),
None,
TD(bints),
TD(O, f='PyNumber_And'),
),
'bitwise_or':
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.bitwise_or'),
None,
TD(bints),
TD(O, f='PyNumber_Or'),
),
'bitwise_xor':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.bitwise_xor'),
None,
TD(bints),
TD(O, f='PyNumber_Xor'),
),
'invert':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.invert'),
None,
TD(bints),
TD(O, f='PyNumber_Invert'),
),
'left_shift':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.left_shift'),
None,
TD(ints),
TD(O, f='PyNumber_Lshift'),
),
'right_shift':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.right_shift'),
None,
TD(ints),
TD(O, f='PyNumber_Rshift'),
),
'degrees':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.degrees'),
None,
TD(fltsP, f='degrees', astype={'e':'f'}),
),
'rad2deg':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rad2deg'),
None,
TD(fltsP, f='rad2deg', astype={'e':'f'}),
),
'radians':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.radians'),
None,
TD(fltsP, f='radians', astype={'e':'f'}),
),
'deg2rad':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.deg2rad'),
None,
TD(fltsP, f='deg2rad', astype={'e':'f'}),
),
'arccos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccos'),
None,
TD(inexact, f='acos', astype={'e':'f'}),
TD(P, f='arccos'),
),
'arccosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arccosh'),
None,
TD(inexact, f='acosh', astype={'e':'f'}),
TD(P, f='arccosh'),
),
'arcsin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsin'),
None,
TD(inexact, f='asin', astype={'e':'f'}),
TD(P, f='arcsin'),
),
'arcsinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arcsinh'),
None,
TD(inexact, f='asinh', astype={'e':'f'}),
TD(P, f='arcsinh'),
),
'arctan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctan'),
None,
TD(inexact, f='atan', astype={'e':'f'}),
TD(P, f='arctan'),
),
'arctanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.arctanh'),
None,
TD(inexact, f='atanh', astype={'e':'f'}),
TD(P, f='arctanh'),
),
'cos':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
TD(inexact, f='cos', astype={'e':'f'}),
TD(P, f='cos'),
),
'sin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
TD(inexact, f='sin', astype={'e':'f'}),
TD(P, f='sin'),
),
'tan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tan'),
None,
TD(inexact, f='tan', astype={'e':'f'}),
TD(P, f='tan'),
),
'cosh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cosh'),
None,
TD(inexact, f='cosh', astype={'e':'f'}),
TD(P, f='cosh'),
),
'sinh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sinh'),
None,
TD(inexact, f='sinh', astype={'e':'f'}),
TD(P, f='sinh'),
),
'tanh':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.tanh'),
None,
TD(inexact, f='tanh', astype={'e':'f'}),
TD(P, f='tanh'),
),
'exp':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
None,
TD(inexact, f='exp', astype={'e':'f'}),
TD(P, f='exp'),
),
'exp2':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp2'),
None,
TD(inexact, f='exp2', astype={'e':'f'}),
TD(P, f='exp2'),
),
'expm1':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.expm1'),
None,
TD(inexact, f='expm1', astype={'e':'f'}),
TD(P, f='expm1'),
),
'log':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
None,
TD(inexact, f='log', astype={'e':'f'}),
TD(P, f='log'),
),
'log2':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log2'),
None,
TD(inexact, f='log2', astype={'e':'f'}),
TD(P, f='log2'),
),
'log10':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log10'),
None,
TD(inexact, f='log10', astype={'e':'f'}),
TD(P, f='log10'),
),
'log1p':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log1p'),
None,
TD(inexact, f='log1p', astype={'e':'f'}),
TD(P, f='log1p'),
),
'sqrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sqrt'),
None,
TD(inexactvec),
TD(inexact, f='sqrt', astype={'e':'f'}),
TD(P, f='sqrt'),
),
'cbrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cbrt'),
None,
TD(flts, f='cbrt', astype={'e':'f'}),
TD(P, f='cbrt'),
),
'ceil':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
None,
TD(flts, f='ceil', astype={'e':'f'}),
TD(P, f='ceil'),
),
'trunc':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
None,
TD(flts, f='trunc', astype={'e':'f'}),
TD(P, f='trunc'),
),
'fabs':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.fabs'),
None,
TD(flts, f='fabs', astype={'e':'f'}),
TD(P, f='fabs'),
),
'floor':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
None,
TD(flts, f='floor', astype={'e':'f'}),
TD(P, f='floor'),
),
'rint':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
None,
TD(inexact, f='rint', astype={'e':'f'}),
TD(P, f='rint'),
),
'arctan2':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.arctan2'),
None,
TD(flts, f='atan2', astype={'e':'f'}),
TD(P, f='arctan2'),
),
'remainder':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.remainder'),
None,
TD(intflt),
TD(O, f='PyNumber_Remainder'),
),
'hypot':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.hypot'),
None,
TD(flts, f='hypot', astype={'e':'f'}),
TD(P, f='hypot'),
),
'isnan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
None,
TD(inexact, out='?'),
),
'isinf':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
None,
TD(inexact, out='?'),
),
'isfinite':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isfinite'),
None,
TD(inexact, out='?'),
),
'signbit':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.signbit'),
None,
TD(flts, out='?'),
),
'copysign':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.copysign'),
None,
TD(flts),
),
'nextafter':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.nextafter'),
None,
TD(flts),
),
'spacing':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.spacing'),
None,
TD(flts),
),
'modf':
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.modf'),
None,
TD(flts),
),
'ldexp' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.ldexp'),
None,
[TypeDescription('e', None, 'ei', 'e'),
TypeDescription('f', None, 'fi', 'f'),
TypeDescription('e', FuncNameSuffix('long'), 'el', 'e'),
TypeDescription('f', FuncNameSuffix('long'), 'fl', 'f'),
TypeDescription('d', None, 'di', 'd'),
TypeDescription('d', FuncNameSuffix('long'), 'dl', 'd'),
TypeDescription('g', None, 'gi', 'g'),
TypeDescription('g', FuncNameSuffix('long'), 'gl', 'g'),
],
),
'frexp' :
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.frexp'),
None,
[TypeDescription('e', None, 'e', 'ei'),
TypeDescription('f', None, 'f', 'fi'),
TypeDescription('d', None, 'd', 'di'),
TypeDescription('g', None, 'g', 'gi'),
],
)
}
if sys.version_info[0] >= 3:
    # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators
    # ('/' is always true division on Python 3, so no separate 'divide').
    del defdict['divide']
def indent(st, spaces):
    """Indent every line of `st` by `spaces` spaces.

    Trailing spaces at the very end of the result are trimmed (note: the
    regex is not multiline, matching the original behavior -- interior
    blank lines keep the indentation padding).
    """
    pad = ' ' * spaces
    body = pad + ('\n' + pad).join(st.split('\n'))
    return re.sub(r' +$', r'', body)
# Map type char -> generic PyUFunc_* loop suffix for unary ufuncs.
chartotype1 = {'e': 'e_e',
               'f': 'f_f',
               'd': 'd_d',
               'g': 'g_g',
               'F': 'F_F',
               'D': 'D_D',
               'G': 'G_G',
               'O': 'O_O',
               'P': 'O_O_method'}
# Map type char -> generic PyUFunc_* loop suffix for binary ufuncs.
chartotype2 = {'e': 'ee_e',
               'f': 'ff_f',
               'd': 'dd_d',
               'g': 'gg_g',
               'F': 'FF_F',
               'D': 'DD_D',
               'G': 'GG_G',
               'O': 'OO_O',
               'P': 'OO_O_method'}
#for each name
# 1) create functions, data, and signature
# 2) fill in functions and data in InitOperators
# 3) add function.
def make_arrays(funcdict):
    """Generate the static C arrays backing every ufunc.

    For each ufunc in `funcdict` this emits:
    - code1: static declarations of `<name>_functions`, `<name>_data`
      and `<name>_signatures`;
    - code2: assignments (run in InitOperators) that patch the generic
      PyUFunc_* loops into the slots declared as NULL.

    Returns
    -------
    (code1, code2) : tuple of str
    """
    # functions array contains an entry for every type implemented; NULL
    # is placed where a PyUfunc_-style function will be filled in later
    code1list = []
    code2list = []
    names = sorted(funcdict.keys())
    for name in names:
        uf = funcdict[name]
        funclist = []
        datalist = []
        siglist = []
        k = 0    # slot index within this ufunc's arrays
        sub = 0  # number of generic loops patched at init time
        if uf.nin > 1:
            assert uf.nin == 2
            thedict = chartotype2  # two inputs and one output
        else:
            thedict = chartotype1  # one input and one output
        for t in uf.type_descriptions:
            if (t.func_data not in (None, FullTypeDescr) and
                    not isinstance(t.func_data, FuncNameSuffix)):
                # Generic loop (PyUFunc_x_x etc.): declare NULL here and
                # assign the real loop pointer in code2.
                funclist.append('NULL')
                astype = ''
                if t.astype is not None:
                    astype = '_As_%s' % thedict[t.astype]
                astr = ('%s_functions[%d] = PyUFunc_%s%s;' %
                        (name, k, thedict[t.type], astype))
                code2list.append(astr)
                if t.type == 'O':
                    astr = ('%s_data[%d] = (void *) %s;' %
                            (name, k, t.func_data))
                    code2list.append(astr)
                    datalist.append('(void *)NULL')
                elif t.type == 'P':
                    # method name is baked into the data array as a string
                    datalist.append('(void *)"%s"' % t.func_data)
                else:
                    astr = ('%s_data[%d] = (void *) %s;' %
                            (name, k, t.func_data))
                    code2list.append(astr)
                    datalist.append('(void *)NULL')
                    #datalist.append('(void *)%s' % t.func_data)
                sub += 1
            elif t.func_data is FullTypeDescr:
                # dedicated loop named TYPE_in_out_name
                tname = english_upper(chartoname[t.type])
                datalist.append('(void *)NULL')
                funclist.append(
                    '%s_%s_%s_%s' % (tname, t.in_, t.out, name))
            elif isinstance(t.func_data, FuncNameSuffix):
                # dedicated loop named TYPE_name_suffix
                datalist.append('(void *)NULL')
                tname = english_upper(chartoname[t.type])
                funclist.append(
                    '%s_%s_%s' % (tname, name, t.func_data.suffix))
            else:
                # dedicated loop named TYPE_name
                datalist.append('(void *)NULL')
                tname = english_upper(chartoname[t.type])
                funclist.append('%s_%s' % (tname, name))
            for x in t.in_ + t.out:
                siglist.append('NPY_%s' % (english_upper(chartoname[x]),))
            k += 1
        funcnames = ', '.join(funclist)
        signames = ', '.join(siglist)
        datanames = ', '.join(datalist)
        code1list.append("static PyUFuncGenericFunction %s_functions[] = {%s};"
                         % (name, funcnames))
        code1list.append("static void * %s_data[] = {%s};"
                         % (name, datanames))
        code1list.append("static char %s_signatures[] = {%s};"
                         % (name, signames))
    return "\n".join(code1list), "\n".join(code2list)
def make_ufuncs(funcdict):
    """Generate the C statements that create and register each ufunc.

    For every ufunc, emits a PyUFunc_FromFuncAndData call (with the
    escaped, line-split docstring), an optional type_resolver assignment,
    and the dictionary insertion.

    Returns
    -------
    str : newline-joined C statements.
    """
    code3list = []
    names = sorted(funcdict.keys())
    for name in names:
        uf = funcdict[name]
        mlist = []
        docstring = textwrap.dedent(uf.docstring).strip()
        # escape the docstring so it survives as a C string literal
        if sys.version_info[0] < 3:
            docstring = docstring.encode('string-escape')
            docstring = docstring.replace(r'"', r'\"')
        else:
            docstring = docstring.encode('unicode-escape').decode('ascii')
            docstring = docstring.replace(r'"', r'\"')
            # XXX: I don't understand why the following replace is not
            # necessary in the python 2 case.
            docstring = docstring.replace(r"'", r"\'")
        # Split the docstring because some compilers (like MS) do not like big
        # string literal in C code. We split at endlines because textwrap.wrap
        # do not play well with \n
        docstring = '\\n\"\"'.join(docstring.split(r"\n"))
        mlist.append(\
        r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
                                %d, %d, %s, "%s",
                                "%s", 0);""" % (name, name, name,
                                len(uf.type_descriptions),
                                uf.nin, uf.nout,
                                uf.identity,
                                name, docstring))
        if uf.typereso is not None:
            mlist.append(
                r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
        mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name)
        mlist.append(r"""Py_DECREF(f);""")
        code3list.append('\n'.join(mlist))
    return '\n'.join(code3list)
def make_code(funcdict, filename):
    """Assemble the complete generated C source for __umath_generated.c.

    code1: static array declarations; code2: init-time loop patch-ups;
    code3: ufunc creation/registration -- the latter two are indented to
    sit inside InitOperators.
    """
    code1, code2 = make_arrays(funcdict)
    code3 = make_ufuncs(funcdict)
    code2 = indent(code2, 4)
    code3 = indent(code3, 4)
    code = r"""
/** Warning this file is autogenerated!!!
    Please make changes to the code generator program (%s)
**/
%s
static void
InitOperators(PyObject *dictionary) {
    PyObject *f;
%s
%s
}
""" % (filename, code1, code2, code3)
    return code
if __name__ == "__main__":
    # Regenerate __umath_generated.c from the tables above; record this
    # generator's filename in the output header.
    filename = __file__
    code = make_code(defdict, filename)
    # context manager guarantees the file is closed even if write fails
    # (the original left the handle open on an exception)
    with open('__umath_generated.c', 'w') as fid:
        fid.write(code)
| |
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
Shaper instances help store data in formatted tables:
if the shape of a rowed variable depends on another flexible int attribute
in the environment, the Shaper builds or sets another table with the right
format size for storing it again.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Modelers.Findoer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import copy
import collections
import numpy
from ShareYourSystem.Standards.Modelers import Modeler,Tabularer,Tabler
from ShareYourSystem.Standards.Itemizers import Getter,Setter
#</ImportSpecificModules>
#<DefineLocals>
# Separator strings used when joining/tupling shaped key strings
# (presumably consumed by shaping methods later in this module -- TODO confirm).
ShapeJoiningStr='__'
ShapeTuplingStr='_'
#</DefineLocals>
#<DefineClass>
@DecorationClass(
**{
'ClassingSwitchMethodStrsList':[
'shape',
'model',
'tabular'
]
}
)
class ShaperClass(BaseClass):
	def default_init(self,
				_ShapingDimensionTuplesList=None,
				_ShapingDescriptionVariable=None,
				_ShapedDescriptionGetKeyStrsList=None,
				_ShapedDescriptionDimensionGetKeyStrsListsList=None,
				_ShapedDescriptionDimensionIntsListsList=None,
				_ShapedIndexIntsList=None,
				_ShapedDimensionGetKeyStrsList=None,
				_ShapedDimensionIntsList=None,
				_ShapedStr="",
				**_KwargVariablesDict
			):
		"""Declare the Shaper's default attributes.

		NOTE(review): the underscore-prefixed keyword arguments appear to be
		turned into managed instance attributes by the Classer decoration --
		confirm against the DecorationClass machinery.
		"""
		#Call the parent init method
		BaseClass.__init__(self,**_KwargVariablesDict)
    def do_shape(self):
        """Resolve the shaping dimension specs into concrete dimensions.

        Reads ShapingDimensionTuplesList (pairs of a description key and
        a tuple of dimension-source keys), queries the controller for the
        actual dimension ints, and binds the Shaped* result attributes,
        including ShapedStr — a joined suffix encoding the shape, used to
        name per-shape models/tables.

        NOTE(review): relies on Python 2 ``map``/``filter`` returning
        lists (results are indexed and len()-ed later) — confirm before
        any Python 3 migration.
        """
        #/################/#
        # Shaper is going to modify modeling description so keep an old version of this before
        #

        #keep a memory
        if self.ShapingDescriptionVariable==None:
            self.ShapingDescriptionVariable=copy.copy(
                self.ModelingDescriptionTuplesList
            )
        else:
            # restore the pristine description before re-shaping
            self.ModelingDescriptionTuplesList=copy.copy(self.ShapingDescriptionVariable)

        #/################/#
        # Pick the shape ints and their get key strs
        #

        #debug
        self.debug(
            [
                'We shape here',
                ("self.",self,['ShapingDimensionTuplesList'])
            ]
        )

        #Check
        if len(self.ShapingDimensionTuplesList)>0:

            #set
            [
                self.ShapedDescriptionGetKeyStrsList,
                ShapedDescriptionDimensionGetTuplesList
            ]=SYS.unzip(self.ShapingDimensionTuplesList,[0,1])

            #list
            self.ShapedDescriptionGetKeyStrsList=list(self.ShapedDescriptionGetKeyStrsList)

            #debug
            self.debug(
                [
                    'ShapedDescriptionDimensionGetTuplesList is ',
                    str(ShapedDescriptionDimensionGetTuplesList)
                ]
            )

            #unzip
            self.ShapedDescriptionDimensionGetKeyStrsListsList=SYS.unzip(
                list(ShapedDescriptionDimensionGetTuplesList),[1]
            )

            #debug
            self.debug(
                [
                    ('self.',self,['ShapedDescriptionDimensionGetKeyStrsListsList'])
                ]
            )

            #get the corresponding real dimensions from the controller
            self.ShapedDescriptionDimensionIntsListsList=map(
                lambda __ShapedDescriptionDimensionGetKeyStrsList:
                self.ModelDeriveControllerVariable[Getter.GetMapStr](
                    *__ShapedDescriptionDimensionGetKeyStrsList
                ).ItemizedMapValueVariablesList,
                self.ShapedDescriptionDimensionGetKeyStrsListsList
            )

            #debug
            self.debug(
                [
                    ('self.',self,['ShapedDescriptionDimensionIntsListsList'])
                ]
            )

        else:

            #Default
            self.ShapedDescriptionGetKeyStrsList=[]
            self.ShapedDimensionGetKeyStrsList=[]
            self.ShapedDescriptionDimensionGetKeyStrsListsList=[]

        #debug
        self.debug(
            [
                ("self.",self,[
                    'ShapedDescriptionGetKeyStrsList',
                    'ShapedDescriptionDimensionGetKeyStrsListsList',
                    'ShapedDescriptionDimensionIntsListsList'
                ])
            ]
        )

        #/################/#
        # Find where in the description tuples list it has to be modified
        #

        #Definition
        ModeledGetKeyStrsList=SYS.unzip(self.ModelingDescriptionTuplesList,[0])

        #set: index of each shaped key inside the description tuples list
        self.ShapedIndexIntsList=map(
            lambda __ShapedDescriptionGetKeyStr:
            ModeledGetKeyStrsList.index(__ShapedDescriptionGetKeyStr),
            self.ShapedDescriptionGetKeyStrsList
        )

        #debug
        '''
        self.debug(
            [
                'Check if we know already the modeler',
                'self.ModelDeriveControllerVariable!=None is '+str(
                    self.ModelDeriveControllerVariable!=None
                )
            ]
        )
        '''

        #/################/#
        # set flat all the get key str for the shaping int
        #

        #Check
        if self.ModelDeriveControllerVariable!=None:

            #Flat and set (deduplicated union of all dimension-source keys)
            self.ShapedDimensionGetKeyStrsList=list(
                set(
                    SYS.flat(
                        self.ShapedDescriptionDimensionGetKeyStrsListsList
                    )
                )
            )

            #Pick the current int value of each dimension-source key
            self.ShapedDimensionIntsList=self.ModelDeriveControllerVariable[
                Getter.GetMapStr
            ](
                *self.ShapedDimensionGetKeyStrsList
            ).ItemizedMapValueVariablesList

        else:

            #Default
            self.ShapedDimensionIntsList=[]

        #/################/#
        # map a join str with this
        #

        #debug
        '''
        self.debug(("self.",self,['ShapedDimensionIntsList']))
        '''

        #Bind with ModeledShapedStr setting, e.g. "__Key_3__OtherKey_5"
        self.ShapedStr=ShapeJoiningStr.join(
            map(
                lambda __ShapedDescriptionGetKeyStr,__ShapedDimensionVariable:
                ShapeJoiningStr+str(
                    __ShapedDescriptionGetKeyStr
                )+ShapeTuplingStr+str(
                    __ShapedDimensionVariable),
                self.ShapedDimensionGetKeyStrsList,
                self.ShapedDimensionIntsList
            )
        )

        #debug
        '''
        self.debug(
            [
                ('self.',self,['ShapedStr'])
            ]
        )
        '''
    def mimic_model(self):
        """Shape (in the hdf case), rename the model with the shape
        suffix, rewrite the shaped description columns with their concrete
        dimensions, then delegate to the base model method."""

        #/#################/#
        # Check if we have to shape before
        #

        #debug
        self.debug(
            [
                'Do we have to shape before model',
                ('self.',self,['ModelingHdfBool'])
            ]
        )

        #Check
        if self.ModelingHdfBool:

            #shape
            self.shape()

        #/#################/#
        # Adapt the name of the description model given the shape
        #

        #debug
        self.debug(
            [
                'Ok we have shaped',
                ('self.',self,['ShapedStr'])
            ]
        )

        #Get the new ModeledKeyStr
        if self.ShapedStr!="":

            #debug
            '''
            self.debug(
                [
                    'We set the new ModeledDescriptionKeyStr',
                    ('self.',self,['ShapedStr','ModelTagStr'])
                ]
            )
            '''

            #set
            self.ModeledDescriptionKeyStr=self.ShapedStr+ShapeJoiningStr+self.ModelTagStr

        else:

            self.ModeledDescriptionKeyStr=self.ModeledSuffixStr

        #debug
        self.debug(
            [
                'We set the new ModeledDescriptionKeyStr',
                ('self.',self,['ShapedStr','ModeledDescriptionKeyStr'])
            ]
        )

        #/#################/#
        # Set the good format for the Description tuples list
        #

        #Unzip
        # NOTE(review): ModeledGetKeyStrsList is not used below — confirm
        # whether this line is dead code.
        ModeledGetKeyStrsList=SYS.unzip(self.ModelingDescriptionTuplesList,[0])

        #debug
        self.debug(
            [
                ('Now change the shape of the shaping cols'),
                ('self.',self,[
                    'ModelingDescriptionTuplesList',
                    'ShapedIndexIntsList'
                ])
            ]
        )

        #map: pick the description tuples that carry a shape
        ShapedModelingDescriptionTuplesList=map(
            self.ModelingDescriptionTuplesList.__getitem__,
            self.ShapedIndexIntsList
        )

        #debug
        self.debug(
            [
                'ShapedModelingDescriptionTuplesList is '+str(
                    ShapedModelingDescriptionTuplesList
                ),
                ('self.',self,['ShapedDescriptionDimensionIntsListsList'])
            ]
        )

        #map: rebuild each tuple, instantiating its col factory with the
        #concrete shape ints
        ModeledShapeDescriptionTuplesList=map(
            lambda __ShapedModelingDescriptionTuple,__ShapedDescriptionDimensionIntsList:
            (
                __ShapedModelingDescriptionTuple[0],
                __ShapedModelingDescriptionTuple[1],
                __ShapedModelingDescriptionTuple[2][0](
                    shape=__ShapedDescriptionDimensionIntsList
                )
            ),
            ShapedModelingDescriptionTuplesList,
            self.ShapedDescriptionDimensionIntsListsList
        )

        #debug
        self.debug(
            [
                'ModeledShapeDescriptionTuplesList is '+str(
                    ModeledShapeDescriptionTuplesList)
            ]
        )

        #set the shaping cols back in place
        map(
            lambda __ShapedIndexInt,__ShapedModelingDescriptionTuple:
            self.ModelingDescriptionTuplesList.__setitem__(
                __ShapedIndexInt,
                __ShapedModelingDescriptionTuple
            ),
            self.ShapedIndexIntsList,
            ModeledShapeDescriptionTuplesList
        )

        #debug
        self.debug(
            [
                "After the shape",
                "Now self.ModelingDescriptionTuplesList is "+SYS._str(
                    self.ModelingDescriptionTuplesList)
            ]
        )

        #/#################/#
        # base method
        #

        #debug
        self.debug(
            'Now we call the base model method'
        )

        #model then
        BaseClass.model(self)
    def mimic_tabular(self):
        """Tabular with the Tabularer base, then (hdf case) prefix the
        table suffix with ShapedStr so each shape gets its own table.

        NOTE(review): indentation reconstructed — confirm that the final
        table() call is intended to run only in the hdf branch.
        """

        #/#################/#
        # first tabular
        #

        #debug
        self.debug(
            'First we tabular with the base'
        )

        #tabular Tabularer method first
        Tabularer.TabularerClass.tabular(self)

        #Check
        if self.ModelingHdfBool:

            #/#################/#
            # Now adapt also the name of the tables
            #

            #debug
            self.debug(
                [
                    'We add the ShapedStr to the TabularedSuffix Str ?',
                    ('self.',self,[
                        'ShapedStr',
                        'TabularedHdfSuffixStr'
                    ])
                ]
            )

            #Add the ShapedStr
            if self.ShapedStr!="":

                #debug
                '''
                self.debug(
                    [
                        ' ShapeJoiningStr not in self.TabularedSuffixStr is '+str(
                            ShapeJoiningStr not in self.TabularedSuffixStr))
                    ]
                )
                '''

                #Check: only prefix once
                if ShapeJoiningStr not in self.TabularedHdfSuffixStr:

                    #debug
                    '''
                    self.debug('Yes we add')
                    '''

                    #Add
                    self.TabularedHdfSuffixStr=self.ShapedStr+ShapeJoiningStr+self.TabularedHdfSuffixStr

            #debug
            self.debug(
                [
                    ('self.',self,['TabularedHdfSuffixStr'])
                ]
            )

            #/##################/#
            # Rehook with the table process
            #

            #then table
            BaseClass.table(self)
    def mimic_insert(self):
        """Try the base insert; on a shape mismatch (ValueError), record
        the new dimensions on the controller, reset the switch/done state
        of the modeling methods, remodel, and insert again."""

        #/###################/#
        # try to insert
        #

        #debug
        '''
        self.debug(
            [
                ('self.',self,[
                    'We check the good dimensions of the shaping variables'
                    'TabledKeyStr',
                    'TabledTable'
                ])
            ]
        )
        '''

        try:

            #insert first
            BaseClass.insert(self)

        except ValueError:

            #/###################/#
            # Then find where the shape was not good
            #

            #Definition the InsertedOldDimensionIntsListsList
            # (dimension ints currently declared on the controller)
            InsertedOldDimensionIntsList=map(
                lambda __ShapedDescriptionDimensionGetKeyStrsList:
                self.ModelDeriveControllerVariable[Getter.GetMapStr](
                    __ShapedDescriptionDimensionGetKeyStrsList
                ).ItemizedMapValueVariablesList,
                self.ShapedDescriptionDimensionGetKeyStrsListsList
            )

            #Definition the InsertedNewDimensionIntsListsList
            # (actual numpy shapes of the values being inserted)
            InsertedNewDimensionIntsListsList=map(
                lambda __ShapedDescriptionGetKeyStr:
                list(
                    numpy.shape(
                        self.ModelDeriveControllerVariable[__ShapedDescriptionGetKeyStr]
                    )
                ),
                self.ShapedDescriptionGetKeyStrsList
            )

            #debug
            '''
            self.debug(('vars ',vars(),[
                'InsertedOldDimensionIntsList',
                'InsertedNewDimensionIntsListsList'
            ]))
            '''

            #set the shaping attributes to their new values
            # (flag ShapedErrorBool and push the new dims to the controller
            # for each spec whose shape changed)
            map(
                lambda __ShapedDescriptionDimensionGetKeyStrsList,__InsertedOldDimensionList,__InsertedNewDimensionList:
                self.__setitem__(
                    'ShapedErrorBool',
                    True
                ).ModelDeriveControllerVariable[Setter.SetMapStr](
                    zip(
                        __ShapedDescriptionDimensionGetKeyStrsList,
                        __InsertedNewDimensionList
                    )
                ) if __InsertedNewDimensionList!=__InsertedOldDimensionList
                else None,
                self.ShapedDescriptionDimensionGetKeyStrsListsList,
                InsertedOldDimensionIntsList,
                InsertedNewDimensionIntsListsList
            )

            #/###################/#
            # Reset the configurating methods
            #

            #debug
            self.debug(
                [
                    'We reset some methods',
                    #('self.',self,['SwitchMethodDict'])
                ]
            )

            #switch we want all the classes for each method
            map(
                lambda __MethodStr:
                self.setSwitch(__MethodStr),
                [
                    'model',
                    'shape',
                    'tabular',
                    'table'
                ]
            )

            #setDone
            self.setDone(
                [
                    Modeler.ModelerClass,
                    Tabularer.TabularerClass,
                    Tabler.TablerClass,
                    SYS.ShaperClass
                ]
            )

            #debug
            self.debug(
                [
                    'Now we remodel...',
                    ('self.',self,[
                        'WatchBeforeModelWithModelerBool',
                        'WatchBeforeModelWithShaperBool'
                    ])
                ]
            )

            #model to relaunch everything
            self.model()

            #/###################/#
            # insert again
            #

            #debug
            self.debug(
                [
                    'Ok table again is done, so now we insert'
                ]
            )

            #insert first
            BaseClass.insert(self)
    def propertize_setModelingDescriptionTuplesList(self,_SettingValueVariable):
        """Property setter hook: forward to the base setter, then extract
        the (GetKeyStr, DimensionVariable) pairs — description tuples whose
        third item is a list or tuple — into ShapingDimensionTuplesList."""

        #set
        BaseClass.propertize_setModelingDescriptionTuplesList(self,_SettingValueVariable)

        #filter: keep only the tuples that declare a shape spec
        self.ShapingDimensionTuplesList=map(
            lambda __DescriptionTuple:
            (__DescriptionTuple[0], __DescriptionTuple[2]),
            SYS._filter(
                lambda __DescriptionTuple:
                type(__DescriptionTuple[2]) in [list,tuple],
                _SettingValueVariable
            )
        )

        #debug
        '''
        self.debug(
            [
                'We have setted the ShapingDimensionTuplesList',
                ('self.',self,['ShapingDimensionTuplesList'])
            ]
        )
        '''
#</DefineClass>
#<DefinePrint>
# Skip these verbose shaping attributes when printing a ShaperClass
# instance. (The open/close markers were previously swapped.)
ShaperClass.PrintingClassSkipKeyStrsList.extend(
    [
        'ShapingDimensionTuplesList',
        'ShapingDescriptionVariable',
        'ShapedDescriptionGetKeyStrsList',
        'ShapedDescriptionDimensionIntsListsList',
        'ShapedIndexIntsList',
        'ShapedDimensionGetKeyStrsList',
        'ShapedDimensionIntsList',
        'ShapedStr'
    ]
)
#</DefinePrint>
| |
import os
import re
from io import BytesIO
from urllib.parse import urlparse
import arrow
import sqlalchemy as sa
from sqlalchemy import bindparam
from libweasyl.cache import region
from libweasyl.models.media import MediaItem
from libweasyl.models.tables import google_doc_embeds
from libweasyl import (
html,
images,
images_new,
ratings,
staff,
text,
)
from weasyl import api
from weasyl import blocktag
from weasyl import collection
from weasyl import comment
from weasyl import define as d
from weasyl import embed
from weasyl import favorite
from weasyl import files
from weasyl import folder
from weasyl import frienduser
from weasyl import ignoreuser
from weasyl import image
from weasyl import macro as m
from weasyl import media
from weasyl import orm
from weasyl import profile
from weasyl import report
from weasyl import searchtag
from weasyl import welcome
from weasyl.error import WeasylError
COUNT_LIMIT = 250
_MEGABYTE = 1048576
_LIMITS = {
".jpg": 50 * _MEGABYTE,
".png": 50 * _MEGABYTE,
".gif": 50 * _MEGABYTE,
".txt": 2 * _MEGABYTE,
".pdf": 10 * _MEGABYTE,
".htm": 10 * _MEGABYTE,
".mp3": 15 * _MEGABYTE,
".swf": 15 * _MEGABYTE,
}
def _limit(size, extension):
"""
Return True if the file size exceeds the limit designated to the specified
file type, else False.
"""
limit = _LIMITS[extension]
return size > limit
def _create_notifications(userid, submitid, rating, *, friends_only):
    """
    Creates notifications to watchers.

    :param rating: a ratings object; its ``code`` is what gets stored.
    :param friends_only: keyword-only flag forwarded to the welcome insert.
    """
    welcome.submission_insert(userid, submitid, rating=rating.code, friends_only=friends_only)
def check_for_duplicate_media(userid, mediaid):
    """
    Raise WeasylError('duplicateSubmission') if *userid* already has a
    visible (non-hidden) submission whose primary media item is *mediaid*.
    """
    db = d.connect()
    q = (
        db.query(orm.Submission)
        .filter_by(userid=userid, hidden=False)
        .join(orm.SubmissionMediaLink)
        # link_type 'submission' restricts the match to primary media links
        .filter_by(mediaid=mediaid, link_type='submission'))
    if q.first():
        raise WeasylError('duplicateSubmission')
def _create_submission(expected_type):
    """
    Decorator factory wrapping the create_* functions.

    The wrapper validates the common submission fields (title, rating,
    tag count, folder ownership), coerces an out-of-range subtype to the
    generic ``expected_type * 1000 + 999`` subcategory, and bumps the
    submitter's latest_submission_time on success.
    """
    # Subcategory ids encode their type as id // 1000.
    valid_types = {id for (id, name) in m.MACRO_SUBCAT_LIST if id // 1000 == expected_type}

    def wrapper(create_specific):
        def create_generic(userid, submission, **kwargs):
            tags = kwargs['tags']

            if submission.subtype not in valid_types:
                submission.subtype = expected_type * 1000 + 999

            if not submission.title:
                raise WeasylError("titleInvalid")
            elif not submission.rating:
                raise WeasylError("ratingInvalid")
            elif len(tags) < 2:
                raise WeasylError("notEnoughTags")
            elif not folder.check(userid, submission.folderid):
                raise WeasylError("Unexpected")

            profile.check_user_rating_allowed(userid, submission.rating)

            # newid may be a plain submitid or a (submitid, bool) tuple
            # depending on the wrapped creator; both are truthy on success.
            newid = create_specific(
                userid=userid,
                submission=submission,
                **kwargs)
            if newid:
                p = d.meta.tables['profile']
                d.connect().execute(p.update().where(p.c.userid == userid).values(latest_submission_time=arrow.utcnow()))
            return newid

        return create_generic

    return wrapper
_ALLOWED_CROSSPOST_HOSTS = frozenset([
# DeviantArt
"wixmp.com",
# Fur Affinity
"furaffinity.net",
"facdn.net",
# Imgur
"imgur.com",
# Inkbunny
"ib.metapix.net",
# SoFurry
"sofurryfiles.com",
])
_ALLOWED_CROSSPOST_HOST = re.compile(
r"(?:\A|\.)"
+ "(?:" + "|".join(map(re.escape, _ALLOWED_CROSSPOST_HOSTS)) + ")"
+ r"\Z"
)
def _http_get_if_crosspostable(url):
    """
    Fetch *url* and return the HTTP response, but only when the URL uses
    http(s) and its host is one of the allowed crosspost sources;
    otherwise raise WeasylError("crosspostInvalid").
    """
    parts = urlparse(url)

    scheme_ok = parts.scheme in ("http", "https")
    host_ok = _ALLOWED_CROSSPOST_HOST.search(parts.netloc) is not None
    if not (scheme_ok and host_ok):
        raise WeasylError("crosspostInvalid")

    return d.http_get(url, timeout=5)
@_create_submission(expected_type=1)
def create_visual(userid, submission,
                  friends_only, tags, imageURL, thumbfile,
                  submitfile, critique, create_notifications):
    """
    Create a visual (image) submission and return its submitid.

    When *imageURL* is given, the image is fetched from an allowed
    crosspost host and replaces *submitfile*. Stores the submission row,
    its media links (submission / cover / generated thumbnails / optional
    custom thumbnail), tags, and watcher notifications.
    """
    if imageURL:
        resp = _http_get_if_crosspostable(imageURL)
        submitfile = resp.content

    # Determine filesizes
    thumbsize = len(thumbfile)
    submitsize = len(submitfile)

    if not submitsize:
        raise WeasylError("submitSizeZero")
    elif thumbsize > 10 * _MEGABYTE:
        raise WeasylError("thumbSizeExceedsLimit")

    # Sniff the real file type from the image data, not the filename.
    im = image.from_string(submitfile)
    submitextension = images.image_extension(im)
    if submitextension not in [".jpg", ".png", ".gif"]:
        raise WeasylError("submitType")
    if _limit(submitsize, submitextension):
        raise WeasylError("submitSizeExceedsLimit")

    submit_file_type = submitextension.lstrip('.')
    submit_media_item = orm.MediaItem.fetch_or_create(
        submitfile, file_type=submit_file_type, im=im)
    check_for_duplicate_media(userid, submit_media_item.mediaid)
    cover_media_item = submit_media_item.ensure_cover_image(im)

    # Thumbnail stuff.
    # Always create a 'generated' thumbnail from the source image.
    with BytesIO(submitfile) as buf:
        thumbnail_formats = images_new.get_thumbnail(buf)

    thumb_generated, thumb_generated_file_type, thumb_generated_attributes = thumbnail_formats.compatible
    thumb_generated_media_item = orm.MediaItem.fetch_or_create(
        thumb_generated,
        file_type=thumb_generated_file_type,
        attributes=thumb_generated_attributes,
    )

    if thumbnail_formats.webp is None:
        thumb_generated_media_item_webp = None
    else:
        thumb_generated, thumb_generated_file_type, thumb_generated_attributes = thumbnail_formats.webp
        thumb_generated_media_item_webp = orm.MediaItem.fetch_or_create(
            thumb_generated,
            file_type=thumb_generated_file_type,
            attributes=thumb_generated_attributes,
        )

    # If requested, also create a 'custom' thumbnail.
    thumb_media_item = media.make_cover_media_item(thumbfile)
    if thumb_media_item:
        thumb_custom = images.make_thumbnail(image.from_string(thumbfile))
        thumb_custom_media_item = orm.MediaItem.fetch_or_create(
            thumb_custom.to_buffer(format=submit_file_type), file_type=submit_file_type,
            im=thumb_custom)

    # TODO(kailys): maintain ORM object
    db = d.connect()
    now = arrow.get()
    q = (
        d.meta.tables['submission'].insert().values([{
            "folderid": submission.folderid,
            "userid": userid,
            "unixtime": now,
            "title": submission.title,
            "content": submission.content,
            "subtype": submission.subtype,
            "rating": submission.rating.code,
            "friends_only": friends_only,
            "critique": critique,
            "favorites": 0,
            "submitter_ip_address": submission.submitter_ip_address,
            "submitter_user_agent_id": submission.submitter_user_agent_id,
        }]).returning(d.meta.tables['submission'].c.submitid))
    submitid = db.scalar(q)

    orm.SubmissionMediaLink.make_or_replace_link(
        submitid, 'submission', submit_media_item)
    orm.SubmissionMediaLink.make_or_replace_link(
        submitid, 'cover', cover_media_item)
    orm.SubmissionMediaLink.make_or_replace_link(
        submitid, 'thumbnail-generated', thumb_generated_media_item)
    if thumb_generated_media_item_webp is not None:
        orm.SubmissionMediaLink.make_or_replace_link(
            submitid, 'thumbnail-generated-webp', thumb_generated_media_item_webp)
    if thumb_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'thumbnail-source', thumb_media_item)
        orm.SubmissionMediaLink.make_or_replace_link(
            submitid, 'thumbnail-custom', thumb_custom_media_item)

    # Assign search tags
    searchtag.associate(userid, tags, submitid=submitid)

    # Create notifications
    if create_notifications:
        _create_notifications(userid, submitid, submission.rating, friends_only=friends_only)

    d.metric('increment', 'submissions')
    d.metric('increment', 'visualsubmissions')

    return submitid
_GOOGLE_DOCS_EMBED = re.compile(
r"\bdocs\.google\.com/document/d/e/([0-9a-z_\-]+)/pub\b",
re.IGNORECASE,
)
def _normalize_google_docs_embed(embedlink):
match = _GOOGLE_DOCS_EMBED.search(embedlink.strip())
if match is None:
raise WeasylError('googleDocsEmbedLinkInvalid', level='info')
return f"https://docs.google.com/document/d/e/{match.group(1)}/pub?embedded=true"
@_create_submission(expected_type=2)
def create_literary(userid, submission, embedlink=None, friends_only=False, tags=None,
                    coverfile=None, thumbfile=None, submitfile=None, critique=False,
                    create_notifications=True):
    """
    Create a literary submission.

    Either *submitfile* (a text document) or *embedlink* (a published
    Google Docs link) must be provided. Returns a tuple of
    (submitid, has_thumbnail_source).

    NOTE(review): len() is called on coverfile/thumbfile/submitfile
    directly, so despite the None defaults callers appear to pass bytes
    (possibly empty) — confirm against the call sites.
    """
    if embedlink:
        embedlink = _normalize_google_docs_embed(embedlink)

    # Determine filesizes
    coversize = len(coverfile)
    thumbsize = len(thumbfile)
    submitsize = len(submitfile)

    if not submitsize and not embedlink:
        raise WeasylError("submitSizeZero")
    elif coversize > 10 * _MEGABYTE:
        raise WeasylError("coverSizeExceedsLimit")
    elif thumbsize > 10 * _MEGABYTE:
        raise WeasylError("thumbSizeExceedsLimit")

    if submitsize:
        submitextension = files.get_extension_for_category(submitfile, m.TEXT_SUBMISSION_CATEGORY)
        if submitextension is None:
            raise WeasylError("submitType")
        if _limit(submitsize, submitextension):
            raise WeasylError("submitSizeExceedsLimit")
        submit_media_item = orm.MediaItem.fetch_or_create(
            submitfile, file_type=submitextension.lstrip('.'))
        check_for_duplicate_media(userid, submit_media_item.mediaid)
    else:
        submit_media_item = None

    thumb_media_item = media.make_cover_media_item(thumbfile)
    cover_media_item = media.make_cover_media_item(coverfile)
    # fall back to the cover as thumbnail source
    if cover_media_item and not thumb_media_item:
        thumb_media_item = cover_media_item

    # Create submission
    # TODO(kailys): use ORM object
    db = d.connect()
    now = arrow.get()
    q = (
        d.meta.tables['submission'].insert().values([{
            "folderid": submission.folderid,
            "userid": userid,
            "unixtime": now,
            "title": submission.title,
            "content": submission.content,
            "subtype": submission.subtype,
            "rating": submission.rating.code,
            "friends_only": friends_only,
            "critique": critique,
            "embed_type": 'google-drive' if embedlink else None,
            "favorites": 0,
            "submitter_ip_address": submission.submitter_ip_address,
            "submitter_user_agent_id": submission.submitter_user_agent_id,
        }])
        .returning(d.meta.tables['submission'].c.submitid))
    submitid = db.scalar(q)
    if embedlink:
        q = (d.meta.tables['google_doc_embeds'].insert()
             .values(submitid=submitid, embed_url=embedlink))
        db.execute(q)

    # Assign search tags
    searchtag.associate(userid, tags, submitid=submitid)

    if submit_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'submission', submit_media_item)
    if cover_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'cover', cover_media_item)
    if thumb_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'thumbnail-source', thumb_media_item)

    # Create notifications
    if create_notifications:
        _create_notifications(userid, submitid, submission.rating, friends_only=friends_only)

    d.metric('increment', 'submissions')
    d.metric('increment', 'literarysubmissions')

    return submitid, bool(thumb_media_item)
@_create_submission(expected_type=3)
def create_multimedia(userid, submission, embedlink=None, friends_only=None,
                      tags=None, coverfile=None, thumbfile=None, submitfile=None,
                      critique=False, create_notifications=True, auto_thumb=False):
    """
    Create a multimedia (audio / flash / embedded) submission.

    Either *submitfile* (.mp3/.swf) or *embedlink* must be provided.
    Returns a tuple of (submitid, has_thumbnail_source), matching
    create_literary.
    """
    # BUG FIX: embedlink defaults to None, and calling .strip()
    # unconditionally raised AttributeError before any validation ran.
    embedlink = embedlink.strip() if embedlink else None

    # Determine filesizes
    coversize = len(coverfile)
    thumbsize = len(thumbfile)
    submitsize = len(submitfile)

    if not submitsize and not embedlink:
        raise WeasylError("submitSizeZero")
    elif embedlink and not embed.check_valid(embedlink):
        raise WeasylError("embedlinkInvalid")
    elif coversize > 10 * _MEGABYTE:
        raise WeasylError("coverSizeExceedsLimit")
    elif thumbsize > 10 * _MEGABYTE:
        raise WeasylError("thumbSizeExceedsLimit")

    if submitsize:
        submitextension = files.get_extension_for_category(submitfile, m.MULTIMEDIA_SUBMISSION_CATEGORY)
        if submitextension is None:
            raise WeasylError("submitType")
        elif submitextension not in [".mp3", ".swf"] and not embedlink:
            raise WeasylError("submitType")
        elif _limit(submitsize, submitextension):
            raise WeasylError("submitSizeExceedsLimit")
        submit_media_item = orm.MediaItem.fetch_or_create(
            submitfile, file_type=submitextension.lstrip('.'))
        check_for_duplicate_media(userid, submit_media_item.mediaid)
    else:
        submit_media_item = None

    thumb_media_item = media.make_cover_media_item(thumbfile)
    cover_media_item = media.make_cover_media_item(coverfile)
    if cover_media_item and not thumb_media_item:
        thumb_media_item = cover_media_item

    tempthumb_media_item = None
    im = None
    if auto_thumb:
        if thumbsize == 0 and coversize == 0:
            # Fetch default thumbnail from source if available
            thumb_url = embed.thumbnail(embedlink)
            if thumb_url:
                resp = d.http_get(thumb_url, timeout=5)
                im = image.from_string(resp.content)
        # Otherwise derive the thumbnail from the uploaded thumb/cover.
        if not im and (thumbsize or coversize):
            im = image.from_string(thumbfile or coverfile)
        if im:
            tempthumb = images.make_thumbnail(im)
            tempthumb_type = images.image_file_type(tempthumb)
            tempthumb_media_item = orm.MediaItem.fetch_or_create(
                tempthumb.to_buffer(format=tempthumb_type),
                file_type=tempthumb_type,
                im=tempthumb)

    # Inject embedlink
    if embedlink:
        submission.content = "".join([embedlink, "\n", submission.content])

    # Create submission
    db = d.connect()
    now = arrow.get()
    q = (
        d.meta.tables['submission'].insert().values([{
            "folderid": submission.folderid,
            "userid": userid,
            "unixtime": now,
            "title": submission.title,
            "content": submission.content,
            "subtype": submission.subtype,
            # BUG FIX: store the integer rating code, as create_visual and
            # create_literary do — the column holds the code, not the
            # rating object.
            "rating": submission.rating.code,
            "friends_only": friends_only,
            "critique": critique,
            "embed_type": 'other' if embedlink else None,
            "favorites": 0,
            "submitter_ip_address": submission.submitter_ip_address,
            "submitter_user_agent_id": submission.submitter_user_agent_id,
        }])
        .returning(d.meta.tables['submission'].c.submitid))
    submitid = db.scalar(q)

    # Assign search tags
    searchtag.associate(userid, tags, submitid=submitid)

    if submit_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'submission', submit_media_item)
    if cover_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'cover', cover_media_item)
    if thumb_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'thumbnail-source', thumb_media_item)
    if tempthumb_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'thumbnail-custom',
                                                     tempthumb_media_item)

    # Create notifications
    if create_notifications:
        _create_notifications(userid, submitid, submission.rating, friends_only=friends_only)

    d.metric('increment', 'submissions')
    d.metric('increment', 'multimediasubmissions')

    return submitid, bool(thumb_media_item)
def reupload(userid, submitid, submitfile):
    """
    Replace the primary media file of an existing submission.

    Only the owner may reupload; hidden submissions and embed-based
    submissions cannot be reuploaded. For visual art, the cover image and
    generated thumbnails are regenerated from the new file as well.
    """
    submitsize = len(submitfile)

    # Select submission data
    query = d.engine.execute(
        "SELECT userid, subtype, embed_type FROM submission WHERE submitid = %(id)s AND NOT hidden",
        id=submitid,
    ).first()

    if not query:
        raise WeasylError("Unexpected")
    elif userid != query[0]:
        # not the owner
        raise WeasylError("Unexpected")
    elif query[2] is not None:
        # embed-based submissions have no file to replace
        raise WeasylError("Unexpected")

    subcat = query[1] // 1000 * 1000
    if subcat not in m.ALL_SUBMISSION_CATEGORIES:
        raise WeasylError("Unexpected")

    # Check invalid file data
    if not submitsize:
        raise WeasylError("submitSizeZero")

    # Write temporary submission file
    submitextension = files.get_extension_for_category(submitfile, subcat)
    if submitextension is None:
        raise WeasylError("submitType")
    elif subcat == m.ART_SUBMISSION_CATEGORY and submitextension not in [".jpg", ".png", ".gif"]:
        raise WeasylError("submitType")
    elif subcat == m.MULTIMEDIA_SUBMISSION_CATEGORY and submitextension not in [".mp3", ".swf"]:
        raise WeasylError("submitType")
    elif _limit(submitsize, submitextension):
        raise WeasylError("submitSizeExceedsLimit")

    submit_file_type = submitextension.lstrip('.')
    im = None
    if submit_file_type in {'jpg', 'png', 'gif'}:
        im = image.from_string(submitfile)
    submit_media_item = orm.MediaItem.fetch_or_create(
        submitfile, file_type=submit_file_type, im=im)
    check_for_duplicate_media(userid, submit_media_item.mediaid)
    orm.SubmissionMediaLink.make_or_replace_link(submitid, 'submission', submit_media_item)

    if subcat == m.ART_SUBMISSION_CATEGORY:
        cover_media_item = submit_media_item.ensure_cover_image(im)
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'cover', cover_media_item)

        # Always create a 'generated' thumbnail from the source image.
        with BytesIO(submitfile) as buf:
            thumbnail_formats = images_new.get_thumbnail(buf)

        thumb_generated, thumb_generated_file_type, thumb_generated_attributes = thumbnail_formats.compatible
        thumb_generated_media_item = orm.MediaItem.fetch_or_create(
            thumb_generated,
            file_type=thumb_generated_file_type,
            attributes=thumb_generated_attributes,
        )

        if thumbnail_formats.webp is None:
            thumb_generated_media_item_webp = None
        else:
            thumb_generated, thumb_generated_file_type, thumb_generated_attributes = thumbnail_formats.webp
            thumb_generated_media_item_webp = orm.MediaItem.fetch_or_create(
                thumb_generated,
                file_type=thumb_generated_file_type,
                attributes=thumb_generated_attributes,
            )

        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'thumbnail-generated', thumb_generated_media_item)

        if thumbnail_formats.webp is not None:
            orm.SubmissionMediaLink.make_or_replace_link(submitid, 'thumbnail-generated-webp', thumb_generated_media_item_webp)
# Prepared query: look up the stored Google Docs embed URL for a submission.
_GOOGLE_DOCS_EMBED_URL_QUERY = (
    sa.select([google_doc_embeds.c.embed_url])
    .where(google_doc_embeds.c.submitid == bindparam('submitid'))
)


def get_google_docs_embed_url(submitid):
    """
    Return the Google Docs embed URL recorded for *submitid*.

    Raises WeasylError("Unexpected") when no row exists — not expected
    for submissions whose embed_type is 'google-drive'.
    """
    embed_url = d.engine.scalar(
        _GOOGLE_DOCS_EMBED_URL_QUERY,
        {"submitid": submitid},
    )

    if embed_url is None:
        raise WeasylError("Unexpected")  # pragma: no cover

    return embed_url
def select_view(userid, submitid, rating, ignore=True, anyway=None):
    """
    Return the full view-page context dict for one submission, enforcing
    visibility (hidden, rating, friends-only, ignored users, blocked tags).

    Mods with anyway == "true" bypass the visibility checks; passing
    anyway == "true" also suppresses the page-view increment.
    """
    # TODO(hyena): This `query[n]` stuff is monstrous. Use named fields.
    # Also some of these don't appear to be used? e.g. pr.config
    query = d.engine.execute("""
        SELECT
            su.userid, pr.username, su.folderid, su.unixtime, su.title, su.content, su.subtype, su.rating,
            su.hidden, su.friends_only, su.critique, su.embed_type,
            su.page_views, fd.title, su.favorites
        FROM submission su
            INNER JOIN profile pr USING (userid)
            LEFT JOIN folder fd USING (folderid)
        WHERE su.submitid = %(id)s
    """, id=submitid).first()

    # Sanity check
    if query and userid in staff.MODS and anyway == "true":
        pass
    elif not query or query[8]:
        raise WeasylError("submissionRecordMissing")
    elif query[7] > rating and ((userid != query[0] and userid not in staff.MODS) or d.is_sfw_mode()):
        raise WeasylError("RatingExceeded")
    elif query[9] and not frienduser.check(userid, query[0]):
        raise WeasylError("FriendsOnly")
    elif ignore and ignoreuser.check(userid, query[0]):
        raise WeasylError("UserIgnored")
    elif ignore and blocktag.check(userid, submitid=submitid):
        raise WeasylError("TagBlocked")

    # Get submission filename
    submitfile = media.get_submission_media(submitid).get('submission', [None])[0]

    # Get submission text (inlined for text/html documents)
    if submitfile and submitfile['file_type'] in ['txt', 'htm']:
        submittext = files.read(os.path.join(MediaItem._base_file_path, submitfile['file_url'][1:]))
    else:
        submittext = None

    # 'other' embeds store the embed link as the first line of content.
    embedlink = d.text_first_line(query[5]) if query[11] == 'other' else None

    google_doc_embed = None
    if query[11] == 'google-drive':
        google_doc_embed = get_google_docs_embed_url(submitid)

    tags, artist_tags = searchtag.select_with_artist_tags(submitid)

    settings = d.get_profile_settings(query[0])

    sub_media = media.get_submission_media(submitid)

    return {
        "submitid": submitid,
        "userid": query[0],
        "username": query[1],
        "folderid": query[2],
        "unixtime": query[3],
        "title": query[4],
        "content": (d.text_first_line(query[5], strip=True) if 'other' == query[11] else query[5]),
        "subtype": query[6],
        "rating": query[7],
        "hidden": query[8],
        "friends_only": query[9],
        "critique": query[10],
        "embed_type": query[11],
        "page_views": (
            query[12] + 1 if d.common_view_content(userid, 0 if anyway == "true" else submitid, "submit") else query[12]),
        "fave_count": query[14],
        "mine": userid == query[0],
        "reported": report.check(submitid=submitid),
        "favorited": favorite.check(userid, submitid=submitid),
        "collected": collection.owns(userid, submitid),
        "no_request": not settings.allow_collection_requests,
        "text": submittext,
        "sub_media": sub_media,
        "user_media": media.get_user_media(query[0]),
        "submit": submitfile,
        "embedlink": embedlink,
        "embed": embed.html(embedlink) if embedlink is not None else None,
        "google_doc_embed": google_doc_embed,
        "tags": tags,
        "artist_tags": artist_tags,
        "removable_tags": searchtag.removable_tags(userid, query[0], tags, artist_tags),
        "can_remove_tags": searchtag.can_remove_tags(userid, query[0]),
        "folder_more": select_near(userid, rating, 1, query[0], query[2], submitid),
        "folder_title": query[13] if query[13] else "Root",
        "comments": comment.select(userid, submitid=submitid),
    }
def select_view_api(userid, submitid, anyway=False, increment_views=False):
    """
    Return the API-shaped dict for one submission, enforcing visibility.

    Unlike select_view, *anyway* here is a boolean that only bypasses the
    ignore/blocked-tag checks (not the rating or friends-only checks).
    """
    rating = d.get_rating(userid)
    db = d.connect()
    sub = db.query(orm.Submission).get(submitid)
    if sub is None or sub.hidden:
        raise WeasylError("submissionRecordMissing")
    sub_rating = sub.rating.code
    if sub.friends_only and not frienduser.check(userid, sub.userid):
        # deliberately indistinguishable from a missing submission
        raise WeasylError("submissionRecordMissing")
    elif sub_rating > rating and userid != sub.userid:
        raise WeasylError("RatingExceeded")
    elif not anyway and ignoreuser.check(userid, sub.userid):
        raise WeasylError("UserIgnored")
    elif not anyway and blocktag.check(userid, submitid=submitid):
        raise WeasylError("TagBlocked")

    description = sub.content
    embedlink = None
    if sub.embed_type == 'other':
        # the embed link is stored as the first line of the content
        embedlink, _, description = description.partition('\n')
    elif sub.embed_type == 'google-drive':
        embedlink = get_google_docs_embed_url(submitid)

    views = sub.page_views
    if increment_views and d.common_view_content(userid, submitid, 'submit'):
        views += 1

    return {
        'submitid': submitid,
        'title': sub.title,
        'owner': sub.owner.profile.username,
        'owner_login': sub.owner.login_name,
        'owner_media': api.tidy_all_media(media.get_user_media(sub.userid)),
        'media': api.tidy_all_media(media.get_submission_media(submitid)),
        'description': text.markdown(description),
        'embedlink': embedlink,
        'folderid': sub.folderid,
        'folder_name': sub.folder.title if sub.folderid else None,
        'posted_at': d.iso8601(sub.unixtime),
        'tags': searchtag.select(submitid=submitid),
        'link': d.absolutify_url("/submission/%d/%s" % (submitid, text.slug_for(sub.title))),
        'type': 'submission',
        'subtype': m.CATEGORY_PARSABLE_MAP[sub.subtype // 1000 * 1000],
        'rating': sub.rating.name,
        'views': views,
        'favorites': favorite.count(submitid),
        'comments': comment.count(submitid),
        'favorited': favorite.check(userid, submitid=submitid),
        'friends_only': sub.friends_only,
    }
def twitter_card(request, submitid):
    """
    Build the Twitter-card metadata dict for a submission page.

    Adult-rated content gets a text-only summary card; visual art gets a
    photo card; everything else a summary card with a thumbnail when one
    exists. Raises for hidden or friends-only submissions.
    """
    query = d.engine.execute("""
        SELECT
            su.title, su.hidden, su.friends_only, su.embed_type, su.content, su.subtype, su.userid,
            pr.username, pr.full_name, pr.config, ul.link_value, su.rating
        FROM submission su
            INNER JOIN profile pr USING (userid)
            LEFT JOIN user_links ul ON su.userid = ul.userid AND ul.link_type = 'twitter'
        WHERE submitid = %(id)s
        LIMIT 1
    """, id=submitid).first()

    if not query:
        raise WeasylError("submissionRecordMissing")
    (title, hidden, friends_only, embed_type, content, subtype, userid,
     username, full_name, config, twitter, rating) = query
    if hidden:
        raise WeasylError("submissionRecordMissing")
    elif friends_only:
        raise WeasylError("FriendsOnly")

    if 'other' == embed_type:
        # drop the embed link stored as the first line of content
        content = d.text_first_line(content, strip=True)
    content = d.summarize(html.strip_html(content))
    if not content:
        content = "[This submission has no description.]"

    ret = {
        'url': d.absolutify_url(
            request.route_path(
                'submission_detail_profile',
                name=d.get_sysname(username),
                submitid=submitid,
                slug=text.slug_for(title),
            )
        ),
    }

    if twitter:
        ret['creator'] = '@%s' % (twitter.lstrip('@'),)
        ret['title'] = title
    else:
        ret['title'] = '%s by %s' % (title, full_name)

    if ratings.CODE_MAP[rating].minimum_age >= 18:
        ret['card'] = 'summary'
        ret['description'] = 'This image is rated 18+ and only viewable on weasyl.com'
        return ret

    ret['description'] = content

    subcat = subtype // 1000 * 1000
    media_items = media.get_submission_media(submitid)
    if subcat == m.ART_SUBMISSION_CATEGORY and media_items.get('submission'):
        ret['card'] = 'photo'
        ret['image:src'] = d.absolutify_url(media_items['submission'][0]['display_url'])
    else:
        ret['card'] = 'summary'
        thumb = media_items.get('thumbnail-custom') or media_items.get('thumbnail-generated')
        if thumb:
            ret['image:src'] = d.absolutify_url(thumb[0]['display_url'])

    return ret
def _select_query(
    *,
    userid,
    rating,
    otherid,
    folderid,
    backid,
    nextid,
    subcat,
    profile_page_filter,
    index_page_filter,
    featured_filter,
    critique_only
):
    """Build the shared FROM/WHERE clause list for submission queries.

    Returns a list of SQL fragments (joined by the caller) selecting
    non-hidden submissions matching the given filters. All interpolated
    values use %i, so only integers ever reach the SQL text.
    """
    statement = [
        "FROM submission su "
        "INNER JOIN profile pr ON su.userid = pr.userid "
        "LEFT JOIN folder f USING (folderid) "
        "WHERE NOT hidden"]
    # Folder settings flags: 'u', 'm', 'f' -- NOTE(review): inferred from the
    # three filters below to mean hide-from-profile, hide-from-index, and
    # featured respectively; confirm against the folder module.
    if profile_page_filter:
        statement.append(" AND COALESCE(f.settings !~ 'u', true)")
    if index_page_filter:
        statement.append(" AND COALESCE(f.settings !~ 'm', true)")
    if featured_filter:
        statement.append(" AND COALESCE(f.settings ~ 'f', false)")
    # Logged in users will see their own submissions regardless of rating
    # EXCEPT if they are in SFW mode
    if userid and not d.is_sfw_mode():
        statement.append(" AND (su.rating <= %i OR su.userid = %i)" % (rating, userid))
    else:
        statement.append(" AND su.rating <= %i" % (rating,))
    if otherid:
        statement.append(" AND su.userid = %i" % (otherid,))
    if folderid:
        statement.append(" AND su.folderid = %i" % (folderid,))
    if subcat:
        # subcat is the base of a 1000-wide subtype range.
        statement.append(" AND su.subtype >= %i AND su.subtype < %i" % (subcat, subcat + 1000))
    if critique_only:
        statement.append(" AND su.critique")
    # Pagination anchors: backid pages toward newer IDs, nextid toward older.
    if backid:
        statement.append(" AND su.submitid > %i" % (backid,))
    elif nextid:
        statement.append(" AND su.submitid < %i" % (nextid,))
    if userid:
        # Friends-only visibility, ignored users, and blocked tags.
        statement.append(m.MACRO_FRIENDUSER_SUBMIT % (userid, userid, userid))
        if not otherid:
            statement.append(m.MACRO_IGNOREUSER % (userid, "su"))
        statement.append(m.MACRO_BLOCKTAG_SUBMIT % (userid, userid))
    else:
        statement.append(" AND NOT su.friends_only")
    return statement
def select_count(
    userid,
    rating,
    *,
    otherid=None,
    folderid=None,
    backid=None,
    nextid=None,
    subcat=None,
    profile_page_filter=False,
    index_page_filter=False,
    featured_filter=False,
    critique_only=False,
):
    """Count submissions matching the given filters, capped at COUNT_LIMIT."""
    clauses = _select_query(
        userid=userid,
        rating=rating,
        otherid=otherid,
        folderid=folderid,
        backid=backid,
        nextid=nextid,
        subcat=subcat,
        profile_page_filter=profile_page_filter,
        index_page_filter=index_page_filter,
        featured_filter=featured_filter,
        critique_only=critique_only,
    )
    # Wrap the shared clause list in a bounded subquery so the count never
    # scans more than COUNT_LIMIT rows.
    sql_parts = ["SELECT count(*) FROM (SELECT "]
    sql_parts.extend(clauses)
    sql_parts.append(" LIMIT %i) t" % (COUNT_LIMIT,))
    return d.engine.scalar("".join(sql_parts))
def select_list(
    userid,
    rating,
    *,
    limit,
    otherid=None,
    folderid=None,
    backid=None,
    nextid=None,
    subcat=None,
    profile_page_filter=False,
    index_page_filter=False,
    featured_filter=False,
    critique_only=False,
):
    """
    Selects a list from the submissions table.

    Args:
        userid: The current user
        rating: The maximum rating level to show
        limit: The number of submissions to get
        otherid: The user whose submissions to get
        folderid: Select submissions from this folder
        backid: Select the IDs that are less than this value
        nextid: Select the IDs that are greater than this value
        subcat: Select submissions whose subcategory is within this range
            (this value + 1000)
        profile_page_filter: Do not select from folders that should not appear
            on the profile page.
        index_page_filter: Do not select from folders that should not appear on
            the front page.
        featured_filter: Select from folders marked as featured submissions and
            randomize the order of results.
        critique_only: Select only submissions for which critique is requested.

    Returns:
        An array with the following keys: "contype", "submitid", "title",
        "rating", "unixtime", "userid", "username", "subtype", "sub_media"
    """
    statement = "".join((
        "SELECT su.submitid, su.title, su.rating, su.unixtime, su.userid, pr.username, su.subtype ",
        *_select_query(
            userid=userid,
            rating=rating,
            otherid=otherid,
            folderid=folderid,
            backid=backid,
            nextid=nextid,
            subcat=subcat,
            profile_page_filter=profile_page_filter,
            index_page_filter=index_page_filter,
            featured_filter=featured_filter,
            critique_only=critique_only,
        ),
        # Featured results are shuffled; otherwise order by ID, ascending when
        # paging backward (backid) and descending otherwise.
        " ORDER BY %s%s LIMIT %i" % ("RANDOM()" if featured_filter else "su.submitid", "" if backid else " DESC", limit),
    ))
    submissions = [{**row, "contype": 10} for row in d.engine.execute(statement)]
    media.populate_with_submission_media(submissions)
    # Backward pages were selected ascending; reverse to newest-first.
    return submissions[::-1] if backid else submissions
def select_featured(userid, otherid, rating):
    """Return one random featured submission by `otherid`, or None."""
    results = select_list(
        userid=userid,
        rating=rating,
        limit=1,
        otherid=otherid,
        featured_filter=True,
    )
    if not results:
        return None
    return results[0]
def select_near(userid, rating, limit, otherid, folderid, submitid):
    """Fetch the submissions adjacent to `submitid` among `otherid`'s work.

    Returns {'older': [...], 'newer': [...]}, each containing at most one
    populated submission dict.  NOTE(review): the `limit` parameter is
    accepted but unused; each side is hard-coded to LIMIT 1 -- confirm
    whether callers expect more.
    """
    statement = ["""
        SELECT su.submitid, su.title, su.rating, su.unixtime, su.subtype
        FROM submission su
        WHERE su.userid = %(owner)s
        AND NOT su.hidden
    """]

    if userid:
        if d.is_sfw_mode():
            statement.append(" AND su.rating <= %(rating)s")
        else:
            # Outside of SFW mode, users always see their own content.
            statement.append(" AND (su.rating <= %%(rating)s OR su.userid = %i)" % (userid,))
        statement.append(m.MACRO_IGNOREUSER % (userid, "su"))
        statement.append(m.MACRO_FRIENDUSER_SUBMIT % (userid, userid, userid))
        statement.append(m.MACRO_BLOCKTAG_SUBMIT % (userid, userid))
    else:
        statement.append(" AND su.rating <= %(rating)s AND NOT su.friends_only")

    if folderid:
        statement.append(" AND su.folderid = %i" % folderid)

    statement = "".join(statement)
    # One row on each side of the target submission, fetched in one query.
    statement = (
        f"SELECT * FROM ({statement} AND su.submitid < %(submitid)s ORDER BY su.submitid DESC LIMIT 1) AS older"
        f" UNION ALL SELECT * FROM ({statement} AND su.submitid > %(submitid)s ORDER BY su.submitid LIMIT 1) AS newer"
    )

    username = d.get_display_name(otherid)
    query = [{
        "contype": 10,
        "userid": otherid,
        "username": username,
        "submitid": i[0],
        "title": i[1],
        "rating": i[2],
        "unixtime": i[3],
        "subtype": i[4],
    } for i in d.engine.execute(statement, {
        "owner": otherid,
        "submitid": submitid,
        "rating": rating,
    })]

    media.populate_with_submission_media(query)
    query.sort(key=lambda i: i['submitid'])
    # Split the (at most two) rows around the target ID.
    older = [i for i in query if i["submitid"] < submitid]
    newer = [i for i in query if i["submitid"] > submitid]

    return {
        "older": older,
        "newer": newer,
    }
def edit(userid, submission, embedlink=None, friends_only=False, critique=False):
    """Update an existing submission's metadata and content.

    `submission` carries submitid, title, rating, folderid, subtype, and
    content attributes. Moderators may edit other users' submissions; a
    moderation note is recorded when they do.

    Raises:
        WeasylError: 'Unexpected', 'InsufficientPermissions', 'titleInvalid',
        or 'embedlinkInvalid' on validation failure.
    """
    query = d.engine.execute(
        "SELECT userid, subtype, hidden, embed_type FROM submission WHERE submitid = %(id)s",
        id=submission.submitid).first()

    # Validation: the submission must exist and not be hidden; the editor
    # must be the owner or a mod; title/rating/folder must be valid; and the
    # subtype may not change category (the thousands digit).
    if not query or query[2]:
        raise WeasylError("Unexpected")
    elif userid != query[0] and userid not in staff.MODS:
        raise WeasylError("InsufficientPermissions")
    elif not submission.title:
        raise WeasylError("titleInvalid")
    elif not submission.rating:
        raise WeasylError("Unexpected")
    elif not folder.check(query[0], submission.folderid):
        raise WeasylError("Unexpected")
    elif submission.subtype // 1000 != query[1] // 1000:
        raise WeasylError("Unexpected")
    elif 'other' == query[3] and not embed.check_valid(embedlink):
        raise WeasylError("embedlinkInvalid")
    elif 'google-drive' == query[3]:
        embedlink = _normalize_google_docs_embed(embedlink)

    profile.check_user_rating_allowed(userid, submission.rating)

    if 'other' == query[3]:
        # External embeds store the link as the first line of the content.
        submission.content = "%s\n%s" % (embedlink, submission.content)
    if friends_only:
        welcome.submission_became_friends_only(submission.submitid, userid)

    # TODO(kailys): maintain ORM object
    db = d.connect()
    su = d.meta.tables['submission']
    q = (
        su.update()
        .values(
            folderid=submission.folderid,
            title=submission.title,
            content=submission.content,
            subtype=submission.subtype,
            rating=submission.rating,
            friends_only=friends_only,
            critique=critique,
        )
        .where(su.c.submitid == submission.submitid))
    db.execute(q)

    if 'google-drive' == query[3]:
        db = d.connect()
        gde = d.meta.tables['google_doc_embeds']
        q = (gde.update()
             .values(embed_url=embedlink)
             .where(gde.c.submitid == submission.submitid))
        db.execute(q)

    if userid != query[0]:
        # A moderator edited someone else's submission; leave a mod note.
        from weasyl import moderation
        moderation.note_about(
            userid, query[0], 'The following submission was edited:',
            '- ' + text.markdown_link(submission.title, '/submission/%s?anyway=true' % (submission.submitid,)))
def remove(userid, submitid):
    """Hide a submission; only its owner or a moderator may do so."""
    ownerid = d.get_ownerid(submitid=submitid)

    if not (userid == ownerid or userid in staff.MODS):
        raise WeasylError("InsufficientPermissions")

    # RETURNING tells us whether the row actually transitioned to hidden.
    newly_hidden = d.execute("UPDATE submission SET hidden = TRUE"
                             " WHERE submitid = %i AND NOT hidden RETURNING submitid", [submitid])
    if newly_hidden:
        welcome.submission_remove(submitid)

    return ownerid
def reupload_cover(userid, submitid, coverfile):
    """Replace or clear the cover image of a literary/multimedia submission."""
    row = d.engine.execute(
        "SELECT userid, subtype FROM submission WHERE submitid = %(id)s",
        id=submitid).first()

    # Guard clauses: submission must exist, the caller must be its owner or
    # a mod, and only non-visual subtypes (>= 2000) carry covers.
    if not row:
        raise WeasylError("Unexpected")
    if userid not in staff.MODS and userid != row[0]:
        raise WeasylError("Unexpected")
    if row[1] < 2000:
        raise WeasylError("Unexpected")

    cover_media_item = media.make_cover_media_item(coverfile)
    if cover_media_item:
        orm.SubmissionMediaLink.make_or_replace_link(submitid, 'cover', cover_media_item)
    else:
        orm.SubmissionMediaLink.clear_link(submitid, 'cover')
@region.cache_on_arguments(expiration_time=600)
@d.record_timing
def select_recently_popular():
    """
    Get a list of recent, popular submissions.

    To calculate scores, this method performs the following evaluation:

    item_score = log(item_fave_count + 1) + log(item_view_count + 1) / 2 + submission_time / 180000

    180000 is roughly two days. So intuitively an item two days old needs an order of
    magnitude more favorites/views compared to a fresh one. Also the favorites are
    quadratically more influential than views. The result is that this algorithm favors
    recent, heavily favorited items.

    :return: A list of submission dictionaries, in score-rank order.
    """
    query = d.engine.execute("""
        SELECT
            submission.submitid,
            submission.title,
            submission.rating,
            submission.subtype,
            submission_tags.tags,
            submission.userid,
            profile.username
        FROM submission
        INNER JOIN submission_tags ON submission.submitid = submission_tags.submitid
        INNER JOIN profile ON submission.userid = profile.userid
        WHERE
            NOT submission.hidden
            AND NOT submission.friends_only
        ORDER BY
            log(submission.favorites + 1) +
            log(submission.page_views + 1) / 2 +
            submission.unixtime / 180000.0
            DESC
        LIMIT 128
    """)

    # contype 10 is attached to every submission row in this module.
    submissions = [{**row, "contype": 10} for row in query]
    media.populate_with_submission_media(submissions)
    media.strip_non_thumbnail_media(submissions)
    return submissions
@region.cache_on_arguments(expiration_time=600)
def select_critique():
    """Return up to 128 of the newest public submissions requesting critique.

    Results are cached for ten minutes.
    """
    rows = d.engine.execute("""
        SELECT
            submission.submitid,
            submission.title,
            submission.rating,
            submission.subtype,
            submission_tags.tags,
            submission.userid,
            profile.username
        FROM submission
        INNER JOIN submission_tags ON submission.submitid = submission_tags.submitid
        INNER JOIN profile ON submission.userid = profile.userid
        WHERE
            NOT submission.hidden
            AND NOT submission.friends_only
            AND submission.critique
        ORDER BY submission.submitid DESC
        LIMIT 128
    """)

    results = []
    for row in rows:
        entry = dict(row)
        entry["contype"] = 10
        results.append(entry)

    media.populate_with_submission_media(results)
    media.strip_non_thumbnail_media(results)
    return results
| |
# -*- coding:utf-8 mode:Python -*-
from werkzeug import exceptions
from xml.sax.saxutils import escape,unescape
import os
import re
from sets import Set
import rfc822, datetime
from mizwiki import misc,lang,views,page,urlmap
from mizwiki import config, htmlwriter, models, svnrep, requestinfo
from mizwiki.local import local
# Response body used for spam / blocked requests.
text_access_denied = 'Access Denied'

# Maps file extensions to Content-Type header values; extended with the
# site-configured attachment MIME types.
CONTENT_TYPE ={'.html':'text/html;charset=utf-8',
               '.atom':'application/atom+xml' }
CONTENT_TYPE.update(config.MIME_MAP)
class IterWriter(object):
    """File-like write sink that replays its written text as UTF-8 bytes."""

    def __init__(self):
        self._chunks = []

    def write(self, text):
        # Collect fragments as-is; encoding is deferred until iteration.
        self._chunks.append(text)

    def __iter__(self):
        return (chunk.encode('utf-8') for chunk in self._chunks)
class FileWrapper(object):
    """WSGI application that streams a file-like object.

    The Content-Type is looked up from the file extension; unknown
    extensions are rejected with 403 Forbidden.  Uses the server-provided
    wsgi.file_wrapper when available, otherwise falls back to chunked reads.
    """

    def __init__(self, filelike, ext, headers=None):
        # Fixed: `headers` previously defaulted to a shared mutable list ([]);
        # use the None-sentinel idiom instead.
        if ext not in CONTENT_TYPE:
            raise exceptions.Forbidden()
        if headers is None:
            headers = []
        self.headers = [('Content-Type', CONTENT_TYPE[ext])] + headers
        self.f = filelike

    def __call__(self, environ, start_response):
        start_response('200 OK', self.headers)
        if 'wsgi.file_wrapper' in environ:
            # Let the server stream the file efficiently (e.g. sendfile).
            return environ['wsgi.file_wrapper'](self.f, config.BLOCK_SIZE)
        else:
            # Read fixed-size blocks until EOF (empty string sentinel).
            return iter(lambda: self.f.read(config.BLOCK_SIZE), '')
class RendererWrapper(object):
    """WSGI application that renders a view callable through a WikiWriter."""

    def __init__(self, renderer, controller, content_type, headers=[]):
        self.headers = [('Content-Type', content_type)] + headers
        self.r = renderer
        self.h = controller

    def __call__(self, environ, start_response):
        start_response('200 OK', self.headers)
        # Render into an IterWriter so the body can be streamed lazily.
        sink = IterWriter()
        writer = htmlwriter.WikiWriter(sink)
        self.r(writer, self.h)
        return sink
class TextWrapper(object):
    """WSGI application that returns a fixed plain-text body."""

    def __init__(self, text, headers=[]):
        self.headers = headers
        self.text = text

    def __call__(self, environ, start_response):
        response_headers = [('Content-Type', CONTENT_TYPE['.txt'])]
        response_headers += self.headers
        start_response('200 OK', response_headers)
        return [self.text]
class NotModified(exceptions.HTTPException):
    """HTTP 304 response: the client's cached copy is still current."""

    def __init__(self):
        # Intentionally skip HTTPException.__init__; a 304 carries no payload.
        pass

    def __call__(self, environ, start_response):
        headers = []
        start_response('304 NOT MODIFIED', headers)
        return []
class Linker(object):
    """Builds absolute and page-relative URLs for wiki routes."""

    def __init__(self, url_for, url_scheme, hostname, script_name, path_info):
        self.url_for = url_for
        self.url_scheme = url_scheme
        self.hostname = hostname
        self.script_name = script_name.strip('/')
        self.path_info = path_info.strip('/')

    @property
    def full_url_root(self):
        # e.g. 'http://example.org/'
        return '%s://%s/' % (self.url_scheme, self.hostname)

    @property
    def full_tex_url(self):
        return self.full_url_root + 'cgi/mimetex.cgi'

    def full_link(self, name, **variables):
        """Absolute URL for the named route."""
        prefix = (self.script_name + '/').lstrip('/')
        return self.full_url_root + prefix + self.url_for(name, **variables)

    def link(self, name, **variables):
        """URL for the named route, relative to the current request path."""
        return misc.relpath(self.url_for(name, **variables), self.path_info)
# Matches command-handler method names of the form cmd_<name>; group 1 is
# the command name used as the ?cmd= dispatch key.
re_cmd = re.compile('^cmd_(\w+)$')
class Controller(object):
    """Base WSGI controller.

    Routes a request either to `view` (no ?cmd= argument) or to a
    `cmd_<name>` method discovered by introspection at construction time.
    Subclasses implement `view`, `lastmodified_date`, and any commands.
    """

    def __init__(self, path_info):
        self.sidebar_ = self.revision(self.head.revno).get_file('sidebar.wiki')
        self.linker = Linker(mapper.url_for, config.URL_SCHEME, config.HOSTNAME, config.SCRIPT_NAME, path_info)
        # Build the command dispatch table from cmd_* methods.
        self.commands = {}
        for f in dir(self):
            m = re_cmd.match(f)
            if m:
                self.commands[m.group(1)] = getattr(self, f)

    sidebar = property(lambda x: x.sidebar_)

    def __call__(self, environ, start_response):
        self._headers = []
        ri = requestinfo.RequestInfo(environ)
        if 'cmd' not in ri.args:
            response = self.view(ri)
        else:
            try:
                response = self.commands[ri.args.get('cmd')](ri)
            except KeyError:
                # Unknown command name.
                raise exceptions.Forbidden()
        return response(environ, start_response)

    def view(self, ri):
        """Default view; subclasses override. Forbidden by default."""
        raise exceptions.Forbidden()

    def file_wrapper(self, filelike, ext):
        return FileWrapper(filelike, ext, self._headers)

    def renderer_wrapper(self, renderer, content_type=CONTENT_TYPE['.html']):
        return RendererWrapper(renderer, self, content_type, self._headers)

    def text_wrapper(self, text):
        return TextWrapper(text, self._headers)

    @property
    def lastmodified_date(self):
        # Fixed: was `raise NotImplemented`, which raises a TypeError because
        # the NotImplemented singleton is not an exception class.
        raise NotImplementedError

    @property
    def _lastmodified_date(self):
        # Page date, clamped by an optional site-wide minimum.
        lmd = self.lastmodified_date
        if config.GLOBAL_LASTMODIFIED_DATE:
            lmd = max(lmd, config.GLOBAL_LASTMODIFIED_DATE)
        return lmd

    def _lastmodified_headers(self, expire):
        r = self._lastmodified_date.ctime()
        h = []
        h.append(('Last-Modified', r))
        if expire:
            h.append(('Expires', r))
        return h

    def escape_if_clientcache(self, headers, expire=False):
        '''
        Compare the client cache date against the page's last-modified date.
        Always emits Last-Modified (and optionally Expires); raises
        NotModified (304) when the client's cached copy is still current.
        '''
        self._headers += self._lastmodified_headers(expire)
        ims = headers.get('If-Modified-Since')
        if not ims:
            return
        try:
            ccd = datetime.datetime(*rfc822.parsedate(ims)[:6])
        except (TypeError, ValueError):
            # parsedate returns None (TypeError on slice) or yields values
            # datetime rejects; an unparseable header is simply ignored.
            return
        if self._lastmodified_date <= ccd:
            raise NotModified()

    @property
    def title(self):
        return None

    @property
    def menu_items(self):
        #Set(['Head','History','Attach','Edit','View','Diff'])
        return Set()

    @property
    def repository(self):
        return local.repository

    def revision(self, revno):
        "need cache?"
        return local.repository.get_revision(revno)

    @property
    def head(self):
        return local.head
class ControllerWikiBase(Controller):
    """Common base for controllers that operate on one wiki page.

    Loads the page (and sidebar) at the requested revision; rev=None means
    the repository head.
    """

    def __init__(self, path_info, path='FrontPage.wiki', rev=None):
        super(ControllerWikiBase,self).__init__(path_info)
        self.path = path
        self.revno = int(rev) if rev else self.head.revno
        self.basepath = os.path.basename(path)
        self.wikifile_ = self.revision(self.revno).get_file(self.path)
        # Re-read the sidebar at this page's revision (the Controller base
        # loaded it at head).
        self.sidebar_ = self.revision(self.revno).get_file('sidebar.wiki')

    # Read-only accessor for the page's file object.
    wikifile = property(lambda x:x.wikifile_)

    @property
    def title(self):
        return self.path

    def cmd_history(self, ri):
        """Render the page's revision history (paged by ?offset=N)."""
        if not self.wikifile.exist:
            raise exceptions.NotFound()
        self.escape_if_clientcache(ri.headers, True)
        offset = ri.args.get('offset',0,type=int)
        return self.renderer_wrapper(views.history_body(offset))

    def cmd_diff(self, ri):
        """Render the diff between this revision of the page and the previous one."""
        if not self.wikifile.exist:
            raise exceptions.NotFound()
        self.escape_if_clientcache(ri.headers, True)
        head_rev = self.head.revno
        target_rev = self.wikifile.revno
        f0 = self.wikifile.switch_rev(target_rev-1)
        f1 = self.wikifile.switch_rev(target_rev)
        if not f0.exist:
            # Page was created in target_rev; diff against nothing.
            f0lines = []
            f1lines = f1.text.splitlines()
            title = 'diff: none <==> Revision %s %s' % (f1.revno,self.title)
        else:
            # previous.path == lastmodified.path
            f0lines = f0.text.splitlines()
            f1lines = f1.text.splitlines()
            title = 'diff: Revision %s <==> Revision %s: %s' % (f0.revno,f1.revno,self.title)
        ld = misc.linediff(f0lines,f1lines)
        return self.renderer_wrapper(views.diff_body(title,f0,f1,ld))

    @property
    def lastmodified_date(self):
        # Page freshness includes the sidebar, since it renders on every page.
        r = self.wikifile.lastmodified.date
        if self.sidebar.exist:
            r = max(r, self.sidebar.lastmodified.date)
        return r
class ControllerAttachFile(ControllerWikiBase):
    """Serves a non-wiki attachment file from the repository."""

    @property
    def lastmodified_date(self):
        return self.wikifile.lastmodified.date

    def view(self, ri):
        if not self.wikifile.exist:
            raise exceptions.NotFound()
        # Only whitelisted attachment types may be served.
        # (Idiom fix: `in` instead of the deprecated dict.has_key.)
        if self.wikifile.ext not in config.MIME_MAP:
            raise exceptions.Forbidden()
        self.escape_if_clientcache(ri.headers, False)
        return self.file_wrapper(self.wikifile.open(), self.wikifile.ext)
class ControllerWikiHead(ControllerWikiBase):
    """Controller for a wiki page at the head (latest) revision.

    Adds editing, commenting, and attachment-upload commands on top of
    ControllerWikiBase's read-only behavior.
    """

    @property
    def menu_items(self):
        # Pages that do not exist yet can only be edited.
        if self.wikifile.exist:
            return Set(['Head','History','Attach','Edit','View','Diff'])
        else:
            return Set(['Edit'])

    def cmd_wordpress(self, ri):
        """Export the page in WordPress format as plain text."""
        # NOTE(review): falls through (returns None) when the page does not
        # exist -- presumably unreachable via routing; confirm.
        if self.wikifile.exist:
            self.escape_if_clientcache(ri.headers, True)
            return self.text_wrapper(self.wikifile.wordpress.encode('utf-8'))

    def view(self, ri):
        """Render the page, or open the edit form if it does not exist."""
        if self.wikifile.exist:
            self.escape_if_clientcache(ri.headers, True)
            return self.renderer_wrapper(views.view_head_body())
        else:
            return self.cmd_edit(ri)

    def _get_paraedit(self, dic):
        # Return the (from, to) paragraph range being edited, or None for a
        # whole-page edit.
        if dic.has_key('paraedit_from') and dic.has_key('paraedit_to'):
            return (dic.get('paraedit_from',type=int), dic.get('paraedit_to',type=int))
        else:
            return None

    def cmd_edit(self, ri):
        """Show the edit form, optionally restricted to a paragraph range."""
        if not config.EDITABLE or self.wikifile.path in page.locked_pages:
            return self.renderer_wrapper(views.locked_body())
        paraedit = self._get_paraedit(ri.args)
        wikif = ''
        if self.wikifile.exist:
            if paraedit:
                wikif = self.wikifile.get_paraedit_section(paraedit[0],paraedit[1])
            else:
                wikif = self.wikifile.text
        #if wikif:
        #    wikif = wiki2html.pre_convert_wiki(wikif)
        return self.renderer_wrapper(views.edit_body('',wikif,'','',paraedit,not ri.is_valid_host))

    def cmd_commit(self, ri):
        """Commit edited page text, merging with any concurrent changes."""
        form = ri.form
        if ri.is_spam:
            return self.text_wrapper(text_access_denied)
        if not config.EDITABLE or self.wikifile.path in page.locked_pages:
            return self.renderer_wrapper(views.locked_body())
        base_rev = form.get('base_rev',type=int)
        # NOTE(review): this rejects only when base_rev > head AND
        # base_rev > 0; it likely intends 0 < base_rev <= head -- confirm.
        if not (base_rev<=self.head.revno) and base_rev > 0:
            raise exceptions.BadRequest()
        paraedit = self._get_paraedit(ri.form)
        wiki_text = unescape(form.get('text','')).replace('\r\n','\n')
        commitmsg_text = unescape(form.get('commitmsg',''))
        ispreview = form.has_key('preview')
        wiki_src_full = wiki_text
        if paraedit:
            # Splice the edited paragraph range back into the full page text.
            wiki_src_full = self.wikifile.get_paraedit_applied_data(paraedit[0],paraedit[1],wiki_text)
        #wiki_src_full = wiki2html.pre_convert_wiki(wiki_src_full)
        base_page = self.wikifile.switch_rev(base_rev)
        # Three-way merge of the submitted text against the latest revision.
        full_merged,merged,message = models.merge_with_latest(self.wikifile,
                                                              base_page,
                                                              wiki_src_full)
        if merged or ispreview or (not ri.is_valid_host):
            # Merge conflict, explicit preview request, or untrusted host:
            # redisplay the edit form instead of committing.
            if paraedit and not merged:
                edit_src = wiki_text
            else:
                edit_src = full_merged
                paraedit = None
            preview_text = self.wikifile.get_preview_xhtml(edit_src)
            return self.renderer_wrapper(views.edit_body(preview_text,edit_src,commitmsg_text,
                                                         message,
                                                         paraedit,
                                                         not ri.is_valid_host))
        else:
            r = self.wikifile.write_text(full_merged, ri.user, commitmsg_text)
            return self.renderer_wrapper(views.commited_body(not not r,
                                                             base_rev=self.head.revno,
                                                             commited_rev=r))

    def cmd_comment(self, ri):
        """Append a comment to the page."""
        form = ri.form
        if not self.wikifile.exist:
            raise exceptions.NotFound()
        if ri.is_spam or not config.EDITABLE:
            return self.text_wrapper(text_access_denied)
        author = unescape(form.get('author','AnonymousCorward')).strip()
        message = unescape(form.get('message','')).strip()
        comment_no = form.get('comment_no',type=int)
        if (not ri.is_valid_host) or (not message):
            # Empty message or untrusted host: redisplay the comment form.
            return self.renderer_wrapper(views.edit_comment_body(comment_no,author,message,'',
                                                                 not ri.is_valid_host))
        r = self.wikifile.insert_comment(self.head.revno, ri.user,
                                         'comment by %s'% (author), comment_no, author, message)
        success = not not r
        return self.renderer_wrapper(views.commited_body(success,base_rev=self.head.revno,commited_rev=r))

    def page_attach(self,ri):
        """Render the attachment-upload form with size/type limits."""
        ms = config.MAX_ATTACH_SIZE / 1024
        exts = ' '.join(list(config.MIME_MAP.keys()))
        message = lang.upload(ms,exts)
        return self.renderer_wrapper(views.attach_body(message,not ri.is_valid_host))

    def cmd_attach(self, ri):
        """Show the attachment-upload form for an existing page."""
        if not self.wikifile.exist:
            raise exceptions.NotFound()
        return self.page_attach(ri)

    def cmd_upload(self, ri):
        """Store an uploaded attachment next to the current page."""
        if not self.wikifile.exist:
            raise exceptions.NotFound()
        if ri.is_spam or not config.EDITABLE:
            return self.text_wrapper(text_access_denied)
        if not ri.is_valid_host:
            return self.page_attach(ri)
        filename = 'unkown'
        message = 'unkown error'
        success = False
        if not ri.files:
            message = 'no file.'
        else:
            item = ri.files['file']
            # Normalize a possibly Windows-style client path to a bare name.
            filename = os.path.basename(misc.normpath(item.filename.replace('\\','/'))).lower()
            ext = os.path.splitext(filename)[1]
            path = misc.join(self.wikifile.path,"../"+filename)
            wa = self.head.get_file(path)
            if not config.MIME_MAP.has_key(ext):
                message = '%s: file type not supported'%filename
            else:
                temp = misc.read_fs_file(item.stream, config.MAX_ATTACH_SIZE)
                if not temp:
                    message = '%s: too big file. maximum attach size = %s'%(filename,config.MAX_ATTACH_SIZE)
                else:
                    temp.seek(0)
                    # The final flag marks .txt uploads as text content.
                    success = not not wa.write(temp.read(), ri.user,
                                               'attach file uploaded', ext=='.txt')
                    if not success:
                        message = 'commit error.'
        if not success:
            ri.log('cmd_upload: file=%s message=%s'%(filename, message))
        return self.renderer_wrapper(views.uploaded_body(success,message))
class ControllerWikiRev(ControllerWikiBase):
    """Read-only view of a wiki page pinned to a specific (old) revision."""

    @property
    def menu_items(self):
        if not self.wikifile.exist:
            return Set(['Head'])
        return Set(['Head','View','Diff'])

    @property
    def title(self):
        return 'Revision %s, %s' % (self.revno, self.path)

    def view(self, ri):
        if not self.wikifile.exist:
            return self.renderer_wrapper(views.not_found_body())
        self.escape_if_clientcache(ri.headers, False)
        return self.renderer_wrapper(views.view_old_body())
class ControllerSitemap(Controller):
    """Site-map page listing every path at the last path-changing revision."""

    @property
    def lastmodified_date(self):
        return self.head.last_paths_changed.date

    def view(self, ri):
        self.escape_if_clientcache(ri.headers, True)
        return self.renderer_wrapper(views.sitemap_body())

    def sitemap(self):
        # Generator over every node known at the last path-changing revision.
        revno = self.head.last_paths_changed.revno
        for node in self.revision(revno).ls_all:
            yield node
class ControllerSitemapText(ControllerSitemap):
    """Plain-text rendering of the site map."""

    def view(self, ri):
        self.escape_if_clientcache(ri.headers, True)
        body = views.sitemaptxt()
        return self.renderer_wrapper(body, CONTENT_TYPE['.txt'])
class ControllerRecentChanges(Controller):
    """RecentChanges page: lists what changed in recent revisions."""

    @property
    def lastmodified_date(self):
        return self.head.date

    def view(self, ri):
        self.escape_if_clientcache(ri.headers, True)
        offset = ri.args.get('offset',0,type=int)
        return self.renderer_wrapper(views.recent_body(offset))

    def rev_date(self, revno):
        return self.revision(revno).date

    def changesets(self, revno):
        # Yield (path, change-kind) pairs for the given revision.
        for path, change in self.revision(revno).paths_changed:
            kind = svnrep.path_change[change.change_kind]
            yield path, kind
class ControllerAtom(ControllerRecentChanges):
    """Atom feed of recent changes."""

    def view(self, ri):
        self.escape_if_clientcache(ri.headers, True)
        feed = views.atom()
        return self.renderer_wrapper(feed, CONTENT_TYPE['.atom'])
# Absolute directory containing this module; static files are resolved
# relative to it.
PWD = os.path.abspath(os.path.dirname(__file__))
class ControllerFile(Controller):
    """Serves a static file shipped alongside the module (theme assets etc.)."""

    def __init__(self, path_info, relative_path):
        super(ControllerFile,self).__init__(path_info)
        self.rpath = relative_path
        self.filepath = os.path.join(PWD,self.rpath)

    def view(self, ri):
        self.escape_if_clientcache(ri.headers)
        ext = os.path.splitext(self.rpath)[1]
        if not ext:
            # Fixed: raise an instance, not the bare exception class.
            raise exceptions.Forbidden()
        # Fixed: open in binary mode -- static assets may be images/icons,
        # and text mode would corrupt them on platforms that translate
        # line endings.
        f = open(self.filepath, 'rb')
        return self.file_wrapper(f, ext)

    @property
    def lastmodified_date(self):
        return datetime.datetime.fromtimestamp(os.path.getmtime(self.filepath))
class ControllerTheme(ControllerFile):
    """Static file under the theme/ directory."""

    def __init__(self, path_info, path):
        rpath = misc.join('theme', path)
        super(ControllerTheme, self).__init__(path_info, rpath)
class ControllerFavicon(ControllerFile):
    """The site favicon, served from the module directory."""

    def __init__(self, path_info):
        super(ControllerFavicon, self).__init__(path_info, 'favicon.ico')
# URL routing table: maps URL regexes to controllers.  The trailing list
# apparently names the regex groups passed to the controller constructor
# (e.g. ["rev", "path"]) -- see urlmap.UrlMap.add_rule.
mapper = urlmap.UrlMap()
mapper.add_rule('favicon.ico', ControllerFavicon, '^favicon.ico$', "%s")
mapper.add_rule('theme', ControllerTheme, '^theme/(.+)$', "theme/%s", ["path"])
mapper.add_rule('recentchanges', ControllerRecentChanges, 'RecentChanges$')
mapper.add_rule('atom', ControllerAtom, 'RecentChanges/atom.xml$')
mapper.add_rule('sitemap', ControllerSitemap, 'sitemap$')
mapper.add_rule('sitemap.txt', ControllerSitemapText, 'sitemap.txt$')
# An empty path serves the front page at head revision.
mapper.add_rule('frontpage', ControllerWikiHead, r'$', 'FrontPage.wiki')
mapper.add_rule('wiki_rev', ControllerWikiRev,
                r'r(\d+)/([\w_/+\-]+\.wiki)$', "r%d/%s",["rev","path"])
mapper.add_rule('wiki_head', ControllerWikiHead,
                r'([\w_/+\-]+\.wiki)$', "%s", ["path"])
# Attachments: any non-.wiki file, optionally pinned to a revision.
mapper.add_rule('attach', ControllerAttachFile,
                r'r(\d+)/(/[\w_./+\-]*)$', "r%d/%s", ["rev","path"])
mapper.add_rule('attach', ControllerAttachFile,
                r'([\w_/+\-]+\.[\w_]+)$', "%s",["path"])
| |
import os
import sys
from collections import defaultdict
from operator import itemgetter
import FittingUtilities
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits as pyfits
from astropy import units
import SpectralTypeRelations
import StarData
import PlotBlackbodies
"""
Program to analyze the output of SensitivityAnalysis, and make some pretty plots!
Command line arguments:
-combine: will combine several output files (e.g. as generated by xgrid). NOT YET IMPLEMENTED
-xaxis: specifies the variable to use as the x axis. Choices are as follows
SecondarySpectralType
SecondaryMass
MassRatio
DetectionRate
AverageSignificance
MagnitudeDifference
-yaxis: specifies the variable to use for the y axis. Choices are the same as for -xaxis
-infile: specifies the input filename (default is Sensitivity/summary.dat).
If combine is True, the input filename should be a list of comma-separated
filenames
"""
def MakeSummaryFile(directory, prefix, outfilename="Sensitivity/logfile.dat", tolerance=10.0):
    """Summarize cross-correlation sensitivity results into a log file.

    Scans `directory` for correlation files starting with `prefix`, finds the
    correlation peak in each, and writes one tab-separated line per file to
    `outfilename`.  A companion counts as detected when the peak velocity is
    within `tolerance` (km/s) of the injected velocity encoded in the name.

    Fixes over the previous version: removed the unused `detections`
    defaultdict, closed the output file via a context manager, and merged
    the duplicated yes/no write branches (output bytes are unchanged).
    """
    # Read in all the correlation files
    allfiles = [f for f in os.listdir(directory) if f.startswith(prefix)]

    # Figure out the primary star's mass and temperature from its FITS header
    # and catalogued spectral type.
    MS = SpectralTypeRelations.MainSequence()
    header = pyfits.getheader(prefix + ".fits")
    starname = header["OBJECT"].split()[0].replace("_", " ")
    stardata = StarData.GetData(starname)
    primary_mass = MS.Interpolate(MS.Mass, stardata.spectype[:2])
    primary_temp = MS.Interpolate(MS.Temperature, stardata.spectype[:2])

    with open(outfilename, "w") as outfile:
        outfile.write("Sensitivity Analysis:\n*****************************\n\n")
        outfile.write(
            "Filename\t\t\tPrimary Temperature\tSecondary Temperature\tMass (Msun)\tMass Ratio\tVelocity\tPeak Correct?\tSignificance\n")

        for fname in allfiles:
            # Read in temperature and expected velocity from filename
            T = float(fname.split(prefix)[-1].split("t")[-1].split("_")[0])
            v = float(fname.split("v")[-1])

            # Figure out the secondary mass from the temperature
            spt = MS.GetSpectralType(MS.Temperature, T)
            secondary_mass = MS.Interpolate(MS.Mass, spt)
            q = secondary_mass / primary_mass

            # Find the maximum in the cross-correlation function
            vel, corr = np.loadtxt(directory + fname, unpack=True)
            idx = np.argmax(corr)
            vmax = vel[idx]
            # Subtract a low-order continuum fit so the peak significance is
            # measured against the local noise level.
            fit = FittingUtilities.Continuum(vel, corr, fitorder=2, lowreject=3, highreject=3)
            corr -= fit
            mean = corr.mean()
            std = corr.std()
            significance = (corr[idx] - mean) / std

            # Detection: the peak lies within `tolerance` of the injected velocity.
            detected = "yes" if np.abs(vmax - v) <= tolerance else "no"
            outfile.write("%s\t%i\t\t\t%i\t\t\t\t%.2f\t\t%.4f\t\t%i\t\t%s\t\t%.2f\n" % (
                prefix, primary_temp, T, secondary_mass, q, v, detected, significance))
def MakePlot(infilename):
# Set up thing to cycle through matplotlib linestyles
from itertools import cycle
lines = ["-", "--", "-.", ":"]
linecycler = cycle(lines)
# Defaults
combine = False
xaxis = "SecondarySpectralType"
yaxis = "DetectionRate"
#Command-line overrides
for arg in sys.argv:
if "combine" in arg:
combine = True
elif "xaxis" in arg:
xaxis = arg.split("=")[-1]
elif "yaxis" in arg:
yaxis = arg.split("=")[-1]
elif "infile" in arg:
infilename = arg.split("=")[-1]
if combine and "," in infilename:
infiles = infilename.split(",")
else:
infiles = [infilename, ]
#Set up dictionaries/lists
p_spt = defaultdict(list) #Primary spectral type
s_spt = defaultdict(list) #Secondary spectral type
s_temp = defaultdict(list) #Secondary Temperature
p_mass = defaultdict(list) #Primary mass
s_mass = defaultdict(list) #Secondary mass
q = defaultdict(list) #Mass ratio
det_rate = defaultdict(list) #Detection rate
sig = defaultdict(list) #Average detection significance
magdiff = defaultdict(list) #Magnitude difference
namedict = {"SecondarySpectralType": s_spt,
"SecondaryTemperature": s_temp,
"SecondaryMass": s_mass,
"MassRatio": q,
"DetectionRate": det_rate,
"AverageSignificance": sig,
"MagnitudeDifference": magdiff}
labeldict = {"SecondarySpectralType": "Secondary Spectral Type",
"SecondaryTemperature": "Secondary Temperature (K)",
"SecondaryMass": "SecondaryMass (Solar Masses)",
"MassRatio": "Mass Ratio",
"DetectionRate": "Detection Rate",
"AverageSignificance": "Average Significance",
"MagnitudeDifference": "Magnitude Difference"}
if xaxis not in namedict.keys() or yaxis not in namedict:
print "Error! axis keywords must be one of the following:"
for key in namedict.keys():
print key
print "You chose %s for the x axis and %s for the y axis" % (xaxis, yaxis)
sys.exit()
MS = SpectralTypeRelations.MainSequence()
vband = np.arange(500, 600, 1) * units.nm.to(units.cm)
#Read in file/files WARNING! ASSUMES A CERTAIN FORMAT. MUST CHANGE THIS IF THE FORMAT CHANGES!
for infilename in infiles:
infile = open(infilename)
lines = infile.readlines()
infile.close()
print "Reading file %s" % infilename
current_temp = float(lines[4].split()[2])
fname = lines[4].split()[0]
starname = pyfits.getheader(fname)['object']
detections = 0.0
numsamples = 0.0
significance = []
starname_dict = {fname: starname}
spt_dict = {}
current_fname = fname
for iternum, line in enumerate(lines[4:]):
segments = line.split()
fname = segments[0]
T2 = float(segments[2])
if fname in starname_dict and T2 == current_temp and current_fname == fname:
# Do the time-consuming SpectralType calls
T1 = float(segments[1])
if T1 in spt_dict:
p_spectype = spt_dict[T1][0]
R1 = spt_dict[T1][1]
else:
p_spectype = MS.GetSpectralType(MS.Temperature, T1)
R1 = MS.Interpolate(MS.Radius, p_spectype)
spt_dict[T1] = (p_spectype, R1)
if T2 in spt_dict:
s_spectype = spt_dict[T2][0]
R2 = spt_dict[T2][1]
else:
s_spectype = MS.GetSpectralType(MS.Temperature, T2)
R2 = MS.Interpolate(MS.Radius, s_spectype)
spt_dict[T2] = (s_spectype, R2)
fluxratio = (PlotBlackbodies.Planck(vband, T1) / PlotBlackbodies.Planck(vband, T2)).mean() \
* (R1 / R2) ** 2
sec_mass = float(segments[3])
massratio = float(segments[4])
starname = starname_dict[fname]
if "y" in segments[6]:
detections += 1.
significance.append(float(segments[7]))
numsamples += 1.
else:
s_spt[starname].append(s_spectype)
s_temp[starname].append(current_temp)
p_spt[starname].append(p_spectype)
p_mass[starname].append(sec_mass / massratio)
s_mass[starname].append(sec_mass)
q[starname].append(massratio)
det_rate[starname].append(detections / numsamples)
sig[starname].append(np.mean(significance))
magdiff[starname].append(2.5 * np.log10(fluxratio))
# Reset things
current_temp = T2
current_fname = fname
fname = segments[0]
if fname in starname_dict:
starname = starname_dict[fname]
else:
starname = pyfits.getheader(fname)['object']
starname_dict[fname] = starname
numsamples = 0.0
detections = 0.0
significance = []
#Process this line for the next star
T1 = float(segments[1])
if T1 in spt_dict:
p_spectype = spt_dict[T1][0]
R1 = spt_dict[T1][1]
else:
p_spectype = MS.GetSpectralType(MS.Temperature, T1)
R1 = MS.Interpolate(MS.Radius, p_spectype)
spt_dict[T1] = (p_spectype, R1)
if T2 in spt_dict:
s_spectype = spt_dict[T2][0]
R2 = spt_dict[T2][1]
else:
s_spectype = MS.GetSpectralType(MS.Temperature, T2)
R2 = MS.Interpolate(MS.Radius, s_spectype)
spt_dict[T2] = (s_spectype, R2)
fluxratio = (PlotBlackbodies.Planck(vband, T1) / PlotBlackbodies.Planck(vband, T2)).mean() \
* (R1 / R2) ** 2
sec_mass = float(segments[3])
massratio = float(segments[4])
if "y" in segments[6]:
detections += 1.
significance.append(float(segments[7]))
numsamples += 1.
#plot
print "Plotting now"
spt_sorter = {"O": 1, "B": 2, "A": 3, "F": 4, "G": 5, "K": 6, "M": 7}
fcn = lambda s: (spt_sorter[itemgetter(0)(s)], itemgetter(1)(s))
#print sorted(s_spt.keys(), key=fcn)
#for starname in sorted(s_spt.keys(), key=fcn):
print sorted(s_spt.keys())
for starname in sorted(s_spt.keys()):
p_spectype = p_spt[starname]
x = namedict[xaxis][starname]
y = namedict[yaxis][starname]
if "SpectralType" in xaxis:
plt.plot(range(len(x)), y[::-1], linestyle=next(linecycler), linewidth=2,
label="%s (%s)" % (starname, p_spectype[0]))
plt.xticks(range(len(x)), x[::-1], size="small")
elif "SpectralType" in yaxis:
plt.plot(x[::-1], range(len(y)), linestyle=next(linecycler), linewidth=2,
label="%s (%s)" % (starname, p_spectype[0]))
plt.yticks(range(len(y)), y[::-1], size="small")
else:
plt.plot(x, y, linestyle=next(linecycler), linewidth=2, label="%s (%s)" % (starname, p_spectype[0]))
if "Magnitude" in xaxis:
ax = plt.gca()
ax.set_xlim(ax.get_xlim()[::-1])
elif "Magnitude" in yaxis:
ax = plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
plt.legend(loc='best')
plt.xlabel(labeldict[xaxis], fontsize=15)
plt.ylabel(labeldict[yaxis], fontsize=15)
if "DetectionRate" in yaxis:
ax = plt.gca()
ax.set_ylim([-0.05, 1.05])
plt.title("Sensitivity Analysis", fontsize=20)
plt.show()
if __name__ == "__main__":
if any(["new" in f for f in sys.argv[1:]]):
directory = "Sensitivity/"
allfiles = [f for f in os.listdir(directory) if (f.startswith("HIP") or f.startswith("HR"))]
prefixes = []
for fname in allfiles:
prefix = fname.split("_v")[0][:-6]
if prefix not in prefixes:
print "New prefix: %s" % prefix
prefixes.append(prefix)
for i, prefix in enumerate(prefixes):
MakeSummaryFile(directory, prefix, outfilename="%slogfile%i.txt" % (directory, i + 1))
MakePlot("%slogfile%i.txt" % (directory, i + 1))
else:
MakePlot("Sensitivity/logfile.dat")
| |
import os
import time
import threading
import curses
from curses import textpad, ascii
from contextlib import contextmanager
from . import config
from .docs import HELP
from .helpers import strip_textpad, clean
from .exceptions import EscapeInterrupt
__all__ = ['ESCAPE', 'get_gold', 'show_notification', 'show_help',
'LoadScreen', 'Color', 'text_input', 'curses_session',
'prompt_input', 'add_line', 'get_arrow']
# Curses does define constants for symbols (e.g. curses.ACS_BULLET)
# However, they rely on using the curses.addch() function, which has been
# found to be buggy and a PITA to work with. By defining them as unicode
# points they can be added via the more reliable curses.addstr().
# http://bugs.python.org/issue21088
ESCAPE = 27
def get_gold():
    """Return the (symbol, attribute) pair used to mark gilded content."""
    if config.unicode:
        glyph = u'\u272A'
    else:
        glyph = '*'
    return glyph, (curses.A_BOLD | Color.YELLOW)
def get_arrow(likes):
    """
    Return the (symbol, attribute) pair for a vote state.

    `likes` is True for an upvote, False for a downvote, and None when no
    vote has been cast.
    """
    if likes is None:
        return (u'\u2022' if config.unicode else 'o'), curses.A_BOLD
    if likes:
        return (u'\u25b2' if config.unicode else '^'), curses.A_BOLD | Color.GREEN
    return (u'\u25bc' if config.unicode else 'v'), curses.A_BOLD | Color.RED
def add_line(window, text, row=None, col=None, attr=None):
    """
    Unicode aware replacement for curses's built-in addnstr method.

    Draws `text` at (row, col), defaulting to the window's current cursor
    position, truncating it so it never runs past the right edge of the
    window. Supports the same argument combinations as addnstr:
        (window, text)
        (window, text, attr)
        (window, text, row, col)
        (window, text, row, col, attr)
    """
    cur_y, cur_x = window.getyx()
    if row is None:
        row = cur_y
    if col is None:
        col = cur_x

    _, max_cols = window.getmaxyx()
    available = max_cols - col - 1
    if available <= 0:
        # The start column is already outside of the drawable area
        return

    truncated = clean(text, available)
    if attr is None:
        window.addstr(row, col, truncated)
    else:
        window.addstr(row, col, truncated, attr)
def show_notification(stdscr, message):
    """
    Overlay a message box on the center of the screen and wait for a key.

    Params:
        message (list): List of strings, one per line.

    Returns:
        The keycode returned by getch() once the user presses a key.
    """
    rows, cols = stdscr.getmaxyx()

    # Size the box to fit the text plus a one-character border, clamp it to
    # the screen, and drop any message lines that no longer fit.
    width = min(max(map(len, message)) + 2, cols)
    height = min(len(message) + 2, rows)
    message = message[:height - 2]

    top = (rows - height) // 2
    left = (cols - width) // 2
    box = stdscr.derwin(height, width, top, left)
    box.erase()
    box.border()
    for offset, line in enumerate(message, start=1):
        add_line(box, line, offset, 1)
    box.refresh()

    ch = stdscr.getch()

    # Destroy the subwindow and repaint whatever it was covering
    box.clear()
    box = None
    stdscr.refresh()
    return ch
def show_help(stdscr):
    """Display the help text in a centered message box."""
    show_notification(stdscr, HELP.splitlines())
class LoadScreen(object):
    """
    Display a loading dialog while waiting for a blocking action to complete.

    This class spins off a separate daemon thread to animate the loading
    screen in the background.

    Usage:
        #>>> loader = LoadScreen(stdscr)
        #>>> with loader(...):
        #>>>     blocking_request(...)
    """

    def __init__(self, stdscr):
        self._stdscr = stdscr
        self._args = None        # (delay, interval, message, trail) for animate()
        self._animator = None    # background thread driving the animation
        self._is_running = None  # polled by animate() to know when to stop

    def __call__(
            self,
            delay=0.5,
            interval=0.4,
            message='Downloading',
            trail='...'):
        """
        Configure the animation and return self for use as a context manager.

        Params:
            delay (float): Length of time that the loader will wait before
                printing on the screen. Used to prevent flicker on pages that
                load very fast.
            interval (float): Length of time between each animation frame.
            message (str): Message to display
            trail (str): Trail of characters that will be animated by the
                loading screen.
        """
        self._args = (delay, interval, message, trail)
        return self

    def __enter__(self):
        self._animator = threading.Thread(target=self.animate, args=self._args)
        self._animator.daemon = True
        self._is_running = True
        self._animator.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._is_running = False
        self._animator.join()

    def animate(self, delay, interval, message, trail):
        # Wait out the initial delay, bailing early if the blocking action
        # already finished. Sleep briefly each pass; the previous version
        # busy-waited here and pinned a CPU core for the whole delay.
        start = time.time()
        while (time.time() - start) < delay:
            if not self._is_running:
                return
            time.sleep(0.01)

        message_len = len(message) + len(trail)
        n_rows, n_cols = self._stdscr.getmaxyx()
        s_row = (n_rows - 3) // 2
        s_col = (n_cols - message_len - 1) // 2
        window = self._stdscr.derwin(3, message_len + 2, s_row, s_col)

        # Animate the trailing dots until __exit__ clears the running flag.
        while True:
            for i in range(len(trail) + 1):
                if not self._is_running:
                    window.clear()
                    window = None
                    self._stdscr.refresh()
                    return
                window.erase()
                window.border()
                window.addstr(1, 1, message + trail[:i])
                window.refresh()
                time.sleep(interval)
class Color(object):
    """
    Color attributes for curses.
    """

    # Foreground color codes, each paired with -1 (the terminal's default
    # background). init() turns these into curses color-pair attributes and
    # attaches them to the class under the same names.
    _colors = {
        'RED': (curses.COLOR_RED, -1),
        'GREEN': (curses.COLOR_GREEN, -1),
        'YELLOW': (curses.COLOR_YELLOW, -1),
        'BLUE': (curses.COLOR_BLUE, -1),
        'MAGENTA': (curses.COLOR_MAGENTA, -1),
        'CYAN': (curses.COLOR_CYAN, -1),
        'WHITE': (curses.COLOR_WHITE, -1),
    }

    @classmethod
    def init(cls):
        """
        Initialize color pairs inside of curses using the default background.

        This should be called once during the curses initial setup. Afterwards,
        curses color pairs can be accessed directly through class attributes.
        """
        # Assign the terminal's default (background) color to code -1
        curses.use_default_colors()
        index = 1
        for attr, (fg, bg) in cls._colors.items():
            curses.init_pair(index, fg, bg)
            setattr(cls, attr, curses.color_pair(index))
            index += 1

    @classmethod
    def get_level(cls, level):
        """Cycle through a fixed palette based on an integer depth."""
        palette = [cls.MAGENTA, cls.CYAN, cls.GREEN, cls.YELLOW]
        return palette[level % len(palette)]
def text_input(window, allow_resize=True):
    """
    Transform a window into a text box that collects user input.

    Returns the entered text as a string when the user presses enter, or
    None when the user backs out with escape.
    """
    window.clear()

    # Cursor mode 1 (visible) -- mode 2 doesn't display on some terminals
    curses.curs_set(1)

    # insert_mode must stay off to avoid the recursion error described here
    # http://bugs.python.org/issue13051
    box = textpad.Textbox(window, insert_mode=False)
    box.stripspaces = 0

    def filter_key(ch):
        "Translate special key sequences before the textbox sees them"
        if ch == ESCAPE:
            raise EscapeInterrupt
        if ch == curses.KEY_RESIZE and not allow_resize:
            raise EscapeInterrupt
        # iterm sends DEL where curses expects KEY_BACKSPACE
        return curses.KEY_BACKSPACE if ch == ascii.DEL else ch

    # EscapeInterrupt distinguishes the user backing out of the input from
    # a normal return.
    try:
        text = box.edit(validate=filter_key)
    except EscapeInterrupt:
        text = None

    curses.curs_set(0)
    return strip_textpad(text)
def prompt_input(window, prompt, hide=False):
    """
    Display a prompt at the bottom of the screen and collect user input.

    Set hide to True to make the typed text invisible.
    """
    attr = curses.A_BOLD | Color.CYAN
    n_rows, n_cols = window.getmaxyx()
    if hide:
        # Pad the prompt so its attribute fills the whole bottom row
        padded = prompt + ' ' * (n_cols - len(prompt) - 1)
        window.addstr(n_rows - 1, 0, padded, attr)
        out = window.getstr(n_rows - 1, 1)
    else:
        window.addstr(n_rows - 1, 0, prompt, attr)
        window.refresh()
        # Collect the input in a one-line subwindow to the right of the prompt
        subwin = window.derwin(1, n_cols - len(prompt),
                               n_rows - 1, len(prompt))
        subwin.attrset(attr)
        out = text_input(subwin)
    return out
@contextmanager
def curses_session():
    """
    Setup terminal and initialize curses.

    Yields the top-level curses window and always restores the terminal
    state on exit, even when initialization fails partway through.
    """
    # Pre-bind so the finally block can safely test it; previously an
    # exception raised before initscr() assigned stdscr caused a NameError
    # in the finally clause, masking the original error.
    stdscr = None
    try:
        # Curses must wait for some time after the Escape key is pressed to
        # check if it is the beginning of an escape sequence indicating a
        # special key. The default wait time is 1 second, which means that
        # getch() will not return the escape key (27) until a full second
        # after it has been pressed.
        # Turn this down to 25 ms, which is close to what VIM uses.
        # http://stackoverflow.com/questions/27372068
        os.environ['ESCDELAY'] = '25'

        # Initialize curses
        stdscr = curses.initscr()

        # Turn off echoing of keys, and enter cbreak mode,
        # where no buffering is performed on keyboard input
        curses.noecho()
        curses.cbreak()

        # In keypad mode, escape sequences for special keys
        # (like the cursor keys) will be interpreted and
        # a special value like curses.KEY_LEFT will be returned
        stdscr.keypad(1)

        # Start color, too. Harmless if the terminal doesn't have
        # color; user can test with has_color() later on. The error
        # return from C start_color() is ignorable (curses.error is the
        # only exception it raises, so catch exactly that).
        try:
            curses.start_color()
        except curses.error:
            pass
        Color.init()

        # Hide blinking cursor
        curses.curs_set(0)

        yield stdscr
    finally:
        # stdscr is None only if initscr() itself never ran/succeeded; in
        # that case there is no terminal state to restore.
        if stdscr is not None:
            stdscr.keypad(0)
            curses.echo()
            curses.nocbreak()
            curses.endwin()
| |
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Models and helper functions for access to app's datastore metadata.
These entities cannot be created by users, but are created as results of
__namespace__, __kind__, __property__ and __entity_group__ metadata queries
or gets.
A simplified API is also offered:
ndb.metadata.get_namespaces(): A list of namespace names.
ndb.metadata.get_kinds(): A list of kind names.
ndb.metadata.get_properties_of_kind(kind):
A list of property names for the given kind name.
ndb.metadata.get_representations_of_kind(kind):
A dict mapping property names to lists of representation ids.
ndb.metadata.get_entity_group_version(key):
The version of the entity group containing key (HRD only).
get_kinds(), get_properties_of_kind(), get_representations_of_kind()
implicitly apply to the current namespace.
get_namespaces(), get_kinds(), get_properties_of_kind(),
get_representations_of_kind() have optional start and end arguments to limit the
query to a range of names, such that start <= name < end.
"""
from google.appengine.ext.ndb import model
__all__ = ['Namespace', 'Kind', 'Property', 'EntityGroup',
'get_namespaces', 'get_kinds',
'get_properties_of_kind', 'get_representations_of_kind',
'get_entity_group_version',
]
class _BaseMetadata(model.Model):
    """Base class for all metadata models."""

    # Metadata entities are read-only views computed by the datastore;
    # caching them locally or in memcache would serve stale results.
    _use_cache = False
    _use_memcache = False

    # Subclasses override this with their reserved '__kind__'-style name.
    KIND_NAME = ''

    @classmethod
    def _get_kind(cls):
        """Kind name override."""
        return cls.KIND_NAME
class Namespace(_BaseMetadata):
    """Model for __namespace__ metadata query results."""

    KIND_NAME = '__namespace__'
    EMPTY_NAMESPACE_ID = 1

    @property
    def namespace_name(self):
        """Return the namespace name specified by this entity's key."""
        return self.key_to_namespace(self.key)

    @classmethod
    def key_for_namespace(cls, namespace):
        """Return the Key for a namespace.

        Args:
            namespace: A string giving the namespace whose key is requested.

        Returns:
            The Key for the namespace.
        """
        # The empty namespace is represented by a fixed numeric id, since
        # an empty string is not a legal key name.
        name_or_id = namespace if namespace else cls.EMPTY_NAMESPACE_ID
        return model.Key(cls.KIND_NAME, name_or_id)

    @classmethod
    def key_to_namespace(cls, key):
        """Return the namespace specified by a given __namespace__ key.

        Args:
            key: key whose name is requested.

        Returns:
            The namespace specified by key.
        """
        # A numeric id (no string id) denotes the empty namespace.
        return key.string_id() or ''
class Kind(_BaseMetadata):
    """Model for __kind__ metadata query results."""

    # Reserved kind name used by the datastore for kind metadata.
    KIND_NAME = '__kind__'

    @property
    def kind_name(self):
        """Return the kind name specified by this entity's key."""
        return self.key_to_kind(self.key)

    @classmethod
    def key_for_kind(cls, kind):
        """Return the __kind__ key for kind.

        Args:
            kind: kind whose key is requested.

        Returns:
            The key for kind.
        """
        return model.Key(cls.KIND_NAME, kind)

    @classmethod
    def key_to_kind(cls, key):
        """Return the kind specified by a given __kind__ key.

        Args:
            key: key whose name is requested.

        Returns:
            The kind specified by key.
        """
        return key.id()
class Property(_BaseMetadata):
    """Model for __property__ metadata query results."""

    KIND_NAME = '__property__'

    @property
    def property_name(self):
        """Return the property name specified by this entity's key."""
        return self.key_to_property(self.key)

    @property
    def kind_name(self):
        """Return the kind name specified by this entity's key."""
        return self.key_to_kind(self.key)

    # Representation ids recorded by the datastore for this property.
    property_representation = model.StringProperty(repeated=True)

    @classmethod
    def key_for_kind(cls, kind):
        """Return the __property__ parent key for kind.

        Args:
            kind: kind whose key is requested.

        Returns:
            The parent key for __property__ keys of kind.
        """
        return model.Key(Kind.KIND_NAME, kind)

    @classmethod
    def key_for_property(cls, kind, property):
        """Return the __property__ key for property of kind.

        Args:
            kind: kind whose key is requested.
            property: property whose key is requested.

        Returns:
            The key for property of kind.
        """
        return model.Key(Kind.KIND_NAME, kind, Property.KIND_NAME, property)

    @classmethod
    def key_to_kind(cls, key):
        """Return the kind specified by a given __property__ key.

        Args:
            key: key whose kind name is requested.

        Returns:
            The kind specified by key.
        """
        # A kind-level key names the kind directly; a property-level key
        # carries the kind in its parent.
        kind_level = key.kind() == Kind.KIND_NAME
        return key.id() if kind_level else key.parent().id()

    @classmethod
    def key_to_property(cls, key):
        """Return the property specified by a given __property__ key.

        Args:
            key: key whose property name is requested.

        Returns:
            property specified by key, or None if the key specified only
            a kind.
        """
        return None if key.kind() == Kind.KIND_NAME else key.id()
class EntityGroup(_BaseMetadata):
    """Model for __entity_group__ metadata (available in HR datastore only).

    This metadata contains a numeric __version__ property that is guaranteed
    to increase on every change to the entity group. The version may increase
    even in the absence of user-visible changes to the entity group. The
    __entity_group__ entity may not exist if the entity group was never
    written to.
    """

    KIND_NAME = '__entity_group__'
    # Every entity group uses this same fixed id; the parent key is what
    # distinguishes one group's metadata entity from another's.
    ID = 1

    # Mapped onto the datastore's reserved '__version__' property name.
    version = model.IntegerProperty(name='__version__')

    @classmethod
    def key_for_entity_group(cls, key):
        """Return the key for the entity group containing key.

        Args:
            key: a key for an entity group whose __entity_group__ key you want.

        Returns:
            The __entity_group__ key for the entity group containing key.
        """
        # key.root() is the top-level ancestor, which identifies the group.
        return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())
def get_namespaces(start=None, end=None):
    """Return all namespaces in the specified range.

    Args:
        start: only return namespaces >= start if start is not None.
        end: only return namespaces < end if end is not None.

    Returns:
        A list of namespace names between the (optional) start and end values.
    """
    query = Namespace.query()
    if start is not None:
        query = query.filter(Namespace.key >= Namespace.key_for_namespace(start))
    if end is not None:
        query = query.filter(Namespace.key < Namespace.key_for_namespace(end))
    return [ns.namespace_name for ns in query]
def get_kinds(start=None, end=None):
    """Return all kinds in the specified range, for the current namespace.

    Args:
        start: only return kinds >= start if start is not None.
        end: only return kinds < end if end is not None.

    Returns:
        A list of kind names between the (optional) start and end values.
    """
    # An empty end string means "everything below the smallest name", i.e.
    # nothing; short-circuit before building the query.
    if end == '':
        return []
    query = Kind.query()
    if start is not None and start != '':
        query = query.filter(Kind.key >= Kind.key_for_kind(start))
    if end is not None:
        query = query.filter(Kind.key < Kind.key_for_kind(end))
    return [entity.kind_name for entity in query]
def get_properties_of_kind(kind, start=None, end=None):
    """Return all properties of kind in the specified range.

    NOTE: This function does not return unindexed properties.

    Args:
        kind: name of kind whose properties you want.
        start: only return properties >= start if start is not None.
        end: only return properties < end if end is not None.

    Returns:
        A list of property names of kind between the (optional) start and end
        values.
    """
    # An empty end string bounds the range below every name: nothing matches.
    if end == '':
        return []
    query = Property.query(ancestor=Property.key_for_kind(kind))
    if start is not None and start != '':
        query = query.filter(Property.key >= Property.key_for_property(kind, start))
    if end is not None:
        query = query.filter(Property.key < Property.key_for_property(kind, end))
    # Keys-only query: the property name is encoded in the key itself.
    return [Property.key_to_property(prop_key)
            for prop_key in query.iter(keys_only=True)]
def get_representations_of_kind(kind, start=None, end=None):
    """Return all representations of properties of kind in the specified range.

    NOTE: This function does not return unindexed properties.

    Args:
        kind: name of kind whose properties you want.
        start: only return properties >= start if start is not None.
        end: only return properties < end if end is not None.

    Returns:
        A dictionary mapping property names to its list of representations.
    """
    # An empty end string bounds the range below every name: nothing matches.
    if end == '':
        return {}
    query = Property.query(ancestor=Property.key_for_kind(kind))
    if start is not None and start != '':
        query = query.filter(Property.key >= Property.key_for_property(kind, start))
    if end is not None:
        query = query.filter(Property.key < Property.key_for_property(kind, end))
    # `prop` rather than `property` to avoid shadowing the builtin.
    return {prop.property_name: prop.property_representation for prop in query}
def get_entity_group_version(key):
    """Return the version of the entity group containing key.

    Args:
        key: a key for an entity group whose __entity_group__ key you want.

    Returns:
        The version of the entity group containing key, or None if the
        entity group was never written to. This version is guaranteed to
        increase on every change to the entity group and may increase even
        in the absence of user-visible changes.
    """
    entity_group = EntityGroup.key_for_entity_group(key).get()
    return entity_group.version if entity_group else None
| |
""" Handlers related to data production.
"""
from collections import OrderedDict
import cStringIO
from datetime import datetime
import json
from dateutil import parser
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import tornado.web
from status.util import dthandler, SafeHandler
from dateutil import parser
class ProductionHandler(SafeHandler):
    """ Serves a page with statistics and plots about the amount of
    sequencing / data produced over time.
    """
    def get(self):
        # Render the (deprecated) production statistics page for the
        # currently logged-in user.
        t = self.application.loader.load("production.html")
        self.write(t.generate(gs_globals=self.application.gs_globals, user=self.get_current_user_name(), deprecated=True))
class ProductionCronjobsHandler(SafeHandler):
    """ Returns a JSON document with the Cronjobs database information
    """
    def get(self):
        cronjobs = {}
        # One view row per server alias; the row value is the document id of
        # that server's cronjob record in the cronjobs database.
        servers = self.application.cronjobs_db.view('server/alias')
        for server in servers.rows:
            doc = self.application.cronjobs_db.get(server.value)
            cronjobs[server.key] = {"last_updated": datetime.strftime(parser.parse(doc['Last updated']), '%Y-%m-%d %H:%M'),
                                    'users': doc['users'], 'server': server.key}
        template = self.application.loader.load("cronjobs.html")
        self.write(template.generate(gs_globals=self.application.gs_globals,
                                     cronjobs=cronjobs))
class DeliveredMonthlyDataHandler(SafeHandler):
    """ Gives the data for monthly delivered amount of basepairs.

    Loaded through /api/v1/delivered_monthly url
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(self.delivered(start_date, end_date), default=dthandler))
    def delivered(self, start_date=None, end_date=None):
        # Parse ISO date strings; default the end of the range to "now".
        if start_date:
            start_date = parser.parse(start_date)
        if end_date:
            end_date = parser.parse(end_date)
        else:
            end_date = datetime.now()
        # group_level=3 aggregates the CouchDB view rows per month.
        view = self.application.projects_db.view("date/m_bp_delivered",
                                                 group_level=3)
        delivered = OrderedDict()
        # View keys are [year, quarter, month, day] lists; build the slice
        # boundary keys in the same shape.
        start = [start_date.year,
                 (start_date.month - 1) // 3 + 1,
                 start_date.month,
                 start_date.day]
        end = [end_date.year,
               (end_date.month - 1) // 3 + 1,
               end_date.month,
               end_date.day]
        for row in view[start:end]:
            y = row.key[0]
            m = row.key[2]
            # NOTE(review): the * 1e6 implies row.value is in megabasepairs
            # and is converted to basepairs here -- confirm against the view.
            delivered[dthandler(datetime(y, m, 1))] = int(row.value * 1e6)
        return delivered
class DeliveredMonthlyPlotHandler(DeliveredMonthlyDataHandler):
    """ Gives a bar plot for monthly delivered amount of basepairs.

    Loaded through /api/v1/delivered_monthly.png url
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        # Reuse the parent handler's aggregation, then render it as a chart.
        delivered = self.delivered(start_date, end_date)
        fig = plt.figure(figsize=[10, 8])
        ax = fig.add_subplot(111)
        # Keys are ISO date strings produced by dthandler; parse for the axis.
        dates = [parser.parse(d) for d in delivered.keys()]
        values = delivered.values()
        ax.bar(dates, values, width=10)
        ax.set_xticks(dates)
        ax.set_xticklabels([d.strftime("%Y\n%B") for d in dates])
        ax.set_title("Basepairs delivered per month")
        # Render the figure into an in-memory PNG and serve the raw bytes.
        FigureCanvasAgg(fig)
        buf = cStringIO.StringIO()
        fig.savefig(buf, format="png")
        delivered = buf.getvalue()
        self.set_header("Content-Type", "image/png")
        self.set_header("Content-Length", len(delivered))
        self.write(delivered)
class DeliveredQuarterlyDataHandler(SafeHandler):
    """ Gives the data for quarterly delivered amount of basepairs.

    Loaded through /api/v1/delivered_quarterly url
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(self.delivered(start_date, end_date), default=dthandler))
    def delivered(self, start_date=None, end_date=None):
        # Parse ISO date strings; default the end of the range to "now".
        if start_date:
            start_date = parser.parse(start_date)
        if end_date:
            end_date = parser.parse(end_date)
        else:
            end_date = datetime.now()
        # group_level=2 aggregates the CouchDB view rows per quarter.
        view = self.application.projects_db.view("date/m_bp_delivered",
                                                 group_level=2)
        delivered = OrderedDict()
        # View keys are [year, quarter, month, day] lists; build the slice
        # boundary keys in the same shape.
        start = [start_date.year,
                 (start_date.month - 1) // 3 + 1,
                 start_date.month,
                 start_date.day]
        end = [end_date.year,
               (end_date.month - 1) // 3 + 1,
               end_date.month,
               end_date.day]
        for row in view[start:end]:
            y = row.key[0]
            q = row.key[1]
            # Label each quarter by its first month.
            # NOTE(review): the * 1e6 implies row.value is in megabasepairs
            # -- confirm against the view definition.
            delivered[dthandler(datetime(y, (q - 1) * 3 + 1, 1))] = int(row.value * 1e6)
        return delivered
class DeliveredQuarterlyPlotHandler(DeliveredQuarterlyDataHandler):
    """ Gives a bar plot for quarterly delivered amount of basepairs.

    Loaded through /api/v1/delivered_quarterly.png
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        # Reuse the parent handler's aggregation, then render it as a chart.
        delivered = self.delivered(start_date, end_date)
        fig = plt.figure(figsize=[10, 8])
        ax = fig.add_subplot(111)
        dates = [parser.parse(d) for d in delivered.keys()]
        values = delivered.values()
        ax.bar(dates, values)
        ax.set_xticks(dates)
        # Label each bar as "<year>\nQ<quarter>".
        labels = []
        for d in dates:
            labels.append("{}\nQ{}".format(d.year, (d.month - 1) // 3 + 1))
        ax.set_xticklabels(labels)
        ax.set_title("Basepairs delivered per quarter")
        # Render the figure into an in-memory PNG and serve the raw bytes.
        FigureCanvasAgg(fig)
        buf = cStringIO.StringIO()
        fig.savefig(buf, format="png")
        delivered = buf.getvalue()
        self.set_header("Content-Type", "image/png")
        self.set_header("Content-Length", len(delivered))
        self.write(delivered)
class ProducedMonthlyDataHandler(SafeHandler):
    """ Serves the amount of data produced per month.

    Loaded through /api/v1/produced_monthly
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(self.bpcounts(start_date, end_date), default=dthandler))
    def bpcounts(self, start_date=None, end_date=None):
        # Parse ISO date strings; default the end of the range to "now".
        if start_date:
            start_date = parser.parse(start_date)
        if end_date:
            end_date = parser.parse(end_date)
        else:
            end_date = datetime.now()
        # group_level=3 aggregates the CouchDB view rows per month.
        view = self.application.samples_db.view("barcodes/date_read_counts",
                                                group_level=3)
        produced = OrderedDict()
        # This view keys on two-digit years ([yy, quarter, month, day]),
        # hence the "- 2000" when building the slice boundary keys.
        start = [start_date.year - 2000,
                 (start_date.month - 1) // 3 + 1,
                 start_date.month,
                 start_date.day]
        end = [end_date.year - 2000,
               (end_date.month - 1) // 3 + 1,
               end_date.month,
               end_date.day]
        for row in view[start:end]:
            # Restore the four-digit year from the two-digit view key.
            y = int("20" + str(row.key[0]))
            m = row.key[2]
            produced[dthandler(datetime(y, m, 1))] = row.value
        return produced
class ProducedMonthlyPlotHandler(ProducedMonthlyDataHandler):
    """ Serves a plot of amount of data produced per month.

    Loaded through /api/v1/produced_monthly.png
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        # Reuse the parent handler's aggregation, then render it as a chart.
        produced = self.bpcounts(start_date, end_date)
        fig = plt.figure(figsize=[10, 8])
        ax = fig.add_subplot(111)
        dates = [parser.parse(d) for d in produced.keys()]
        values = produced.values()
        ax.bar(dates, values, width=10)
        ax.set_xticks(dates)
        ax.set_xticklabels([d.strftime("%b-%Y") for d in dates], rotation=30)
        ax.set_title("Basepairs produced per month")
        # Render the figure into an in-memory PNG and serve the raw bytes.
        FigureCanvasAgg(fig)
        buf = cStringIO.StringIO()
        fig.savefig(buf, format="png")
        produced = buf.getvalue()
        self.set_header("Content-Type", "image/png")
        self.set_header("Content-Length", len(produced))
        self.write(produced)
class ProducedQuarterlyDataHandler(SafeHandler):
    """ Gives the data for quarterly produced amount of basepairs.

    Loaded through /api/v1/produced_quarterly
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(self.produced(start_date, end_date), default=dthandler))
    def produced(self, start_date=None, end_date=None):
        # Parse ISO date strings; default the end of the range to "now".
        if start_date:
            start_date = parser.parse(start_date)
        if end_date:
            end_date = parser.parse(end_date)
        else:
            end_date = datetime.now()
        # group_level=2 aggregates the CouchDB view rows per quarter.
        view = self.application.samples_db.view("barcodes/date_read_counts",
                                                group_level=2)
        produced = OrderedDict()
        # This view keys on two-digit years ([yy, quarter, month, day]),
        # hence the "- 2000" when building the slice boundary keys.
        start = [start_date.year - 2000,
                 (start_date.month - 1) // 3 + 1,
                 start_date.month,
                 start_date.day]
        end = [end_date.year - 2000,
               (end_date.month - 1) // 3 + 1,
               end_date.month,
               end_date.day]
        for row in view[start:end]:
            # Restore the four-digit year; label each quarter by its first month.
            y = int("20" + str(row.key[0]))
            q = row.key[1]
            produced[dthandler(datetime(y, (q - 1) * 3 + 1, 1))] = int(row.value)
        return produced
class ProducedQuarterlyPlotHandler(ProducedQuarterlyDataHandler):
    """ Gives a bar plot for quarterly produced amount of basepairs.

    Loaded through /api/v1/produced_quarterly.png
    """
    def get(self):
        start_date = self.get_argument('start', '2012-01-01T00:00:00')
        end_date = self.get_argument('end', None)
        # Reuse the parent handler's aggregation, then render it as a chart.
        produced = self.produced(start_date, end_date)
        fig = plt.figure(figsize=[10, 8])
        ax = fig.add_subplot(111)
        dates = [parser.parse(d) for d in produced.keys()]
        values = produced.values()
        # Label each bar as "<year>\nQ<quarter>".
        quarters = [(d.month - 1) // 3 + 1 for d in dates]
        years = [d.year for d in dates]
        ax.bar(dates, values, width=10)
        ax.set_xticks(dates)
        ax.set_xticklabels(["{}\nQ{}".format(*t) for t in zip(years, quarters)])
        ax.set_title("Basepairs produced per quarter")
        # Render the figure into an in-memory PNG and serve the raw bytes.
        FigureCanvasAgg(fig)
        buf = cStringIO.StringIO()
        fig.savefig(buf, format="png")
        produced = buf.getvalue()
        self.set_header("Content-Type", "image/png")
        self.set_header("Content-Length", len(produced))
        self.write(produced)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Computes a header file to be used with SELECTIVE_REGISTRATION.
See the executable wrapper, print_selective_registration_header.py, for more
information.
"""
import json
import os
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import _pywrap_kernel_registry
# Usually, we use each graph node to induce registration of an op and
# corresponding kernel; nodes without a corresponding kernel (perhaps due to
# attr types) generate a warning but are otherwise ignored. Ops in this set are
# registered even if there's no corresponding kernel.
OPS_WITHOUT_KERNEL_ALLOWLIST = frozenset([
# AccumulateNV2 is rewritten away by AccumulateNV2RemovePass; see
# core/common_runtime/accumulate_n_optimizer.cc.
'AccumulateNV2'
])
FLEX_PREFIX = b'Flex'
FLEX_PREFIX_LENGTH = len(FLEX_PREFIX)
def _get_ops_from_ops_list(input_file):
    """Gets the ops and kernels needed from the ops list file.

    Args:
        input_file: Path to a JSON file holding a list of [op, kernel] pairs.

    Returns:
        A set of (op_name, kernel_class_name or None) tuples.

    Raises:
        ValueError: If the input file is empty.
    """
    ops = set()
    # Close the file deterministically instead of leaking the handle.
    with gfile.GFile(input_file, 'r') as f:
        ops_list_str = f.read()
    if not ops_list_str:
        # ValueError is a subclass of Exception, so callers catching the
        # previous generic Exception still work.
        raise ValueError('Input file should not be empty')
    ops_list = json.loads(ops_list_str)
    for op, kernel in ops_list:
        # Normalize empty/falsy kernel names to None.
        ops.add((op, kernel if kernel else None))
    return ops
def _get_ops_from_graphdef(graph_def):
    """Gets the ops and kernels needed from the tensorflow model.

    Args:
        graph_def: A GraphDef proto whose nodes induce op/kernel registration.

    Returns:
        A set of (op_name, kernel_class_name or None) tuples.
    """
    ops = set()
    for node_def in graph_def.node:
        if not node_def.device:
            # Kernel lookup needs a device; assume CPU when unspecified.
            node_def.device = '/cpu:0'
        kernel_class = _pywrap_kernel_registry.TryFindKernelClass(
            node_def.SerializeToString())
        op = str(node_def.op)
        if kernel_class or op in OPS_WITHOUT_KERNEL_ALLOWLIST:
            # The native lookup returns bytes; decode to a str kernel name.
            op_and_kernel = (op, str(kernel_class.decode('utf-8'))
                             if kernel_class else None)
            ops.add(op_and_kernel)
        else:
            # Nodes without a matching kernel are skipped with a warning
            # (unless allowlisted above).
            print('Warning: no kernel found for op %s' % node_def.op, file=sys.stderr)
    return ops
def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):
  """Gets the ops and kernels needed from the model files.

  Args:
    proto_fileformat: one of 'ops_list', 'rawproto' or 'textproto'.
    proto_files: iterable of file paths to load.
    default_ops_str: comma-separated 'op:kernel' pairs always added to the
      result; 'all' (or empty) adds nothing here.

  Returns:
    sorted list of (op_name, kernel_class_name_or_None) tuples.
  """
  ops = set()
  for proto_file in proto_files:
    tf_logging.info('Loading proto file %s', proto_file)
    # Load ops list file.
    if proto_fileformat == 'ops_list':
      ops = ops.union(_get_ops_from_ops_list(proto_file))
      continue
    # Load GraphDef; close the file deterministically.
    with gfile.GFile(proto_file, 'rb') as f:
      file_data = f.read()
    if proto_fileformat == 'rawproto':
      graph_def = graph_pb2.GraphDef.FromString(file_data)
    else:
      assert proto_fileformat == 'textproto'
      graph_def = text_format.Parse(file_data, graph_pb2.GraphDef())
    ops = ops.union(_get_ops_from_graphdef(graph_def))
  # Add default ops.
  if default_ops_str and default_ops_str != 'all':
    for s in default_ops_str.split(','):
      op, kernel = s.split(':')
      # set.add is idempotent; the previous membership test was redundant.
      ops.add((op, kernel))
  # sorted() already returns a list; no list() wrapper needed.
  return sorted(ops)
def get_header_from_ops_and_kernels(ops_and_kernels,
                                    include_all_ops_and_kernels):
  """Returns a header for use with tensorflow SELECTIVE_REGISTRATION.

  Args:
    ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.
    include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op
      kernels are included.

  Returns:
    the string of the header that should be written as ops_to_register.h.
  """
  ops = set(op for op, _ in ops_and_kernels)
  result_list = []
  def append(s):
    result_list.append(s)
  _, script_name = os.path.split(sys.argv[0])
  append('// This file was autogenerated by %s' % script_name)
  append('#ifndef OPS_TO_REGISTER')
  append('#define OPS_TO_REGISTER')
  if include_all_ops_and_kernels:
    append('#define SHOULD_REGISTER_OP(op) true')
    append('#define SHOULD_REGISTER_OP_KERNEL(clz) true')
    append('#define SHOULD_REGISTER_OP_GRADIENT true')
  else:
    line = """
    namespace {
    constexpr const char* skip(const char* x) {
      return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;
    }
    constexpr bool isequal(const char* x, const char* y) {
      return (*skip(x) && *skip(y))
                 ? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))
                 : (!*skip(x) && !*skip(y));
    }
    template<int N>
    struct find_in {
      static constexpr bool f(const char* x, const char* const y[N]) {
        return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);
      }
    };
    template<>
    struct find_in<0> {
      static constexpr bool f(const char* x, const char* const y[]) {
        return false;
      }
    };
    }  // end namespace
    """
    line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\n'
    for _, kernel_class in ops_and_kernels:
      if kernel_class is None:
        continue
      line += '"%s",\n' % kernel_class
    line += '};'
    append(line)
    append('#define SHOULD_REGISTER_OP_KERNEL(clz) '
           '(find_in<sizeof(kNecessaryOpKernelClasses) '
           '/ sizeof(*kNecessaryOpKernelClasses)>::f(clz, '
           'kNecessaryOpKernelClasses))')
    append('')
    append('constexpr inline bool ShouldRegisterOp(const char op[]) {')
    append('  return false')
    for op in sorted(ops):
      append('  || isequal(op, "%s")' % op)
    append('  ;')
    append('}')
    append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)')
    append('')
    # BUGFIX: emit the computed gradient define only in this selective branch.
    # It was previously appended unconditionally, so the include-all branch
    # defined SHOULD_REGISTER_OP_GRADIENT twice — with a conflicting value
    # (true then false) whenever 'SymbolicGradient' was absent, which is a
    # macro-redefinition error in the generated C++.
    append('#define SHOULD_REGISTER_OP_GRADIENT ' +
           ('true' if 'SymbolicGradient' in ops else 'false'))
  append('#endif')
  return '\n'.join(result_list)
def get_header(graphs,
               proto_fileformat='rawproto',
               default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'):
  """Computes a header for use with tensorflow SELECTIVE_REGISTRATION.

  Args:
    graphs: a list of paths to GraphDef files to include.
    proto_fileformat: optional format of proto file, either 'textproto',
      'rawproto' (default) or ops_list. The ops_list is the file contain the
      list of ops in JSON format, Ex: "[["Transpose", "TransposeCpuOp"]]".
    default_ops: optional comma-separated string of operator:kernel pairs to
      always include implementation for. Pass 'all' to have all operators and
      kernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'.

  Returns:
    the string of the header that should be written as ops_to_register.h,
    or the integer 1 when no ops could be read from the inputs.
  """
  ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops)
  if not ops_and_kernels:
    print('Error reading graph!')
    return 1
  include_everything = default_ops == 'all'
  return get_header_from_ops_and_kernels(ops_and_kernels, include_everything)
| |
import copy
import logging
from random import shuffle
import socket
import struct
from threading import local
import six
from kafka.common import ConnectionError
log = logging.getLogger("kafka")
# Socket timeout (seconds) applied to broker connections by default.
DEFAULT_SOCKET_TIMEOUT_SECONDS = 120
# Port assumed when a host spec passed to collect_hosts omits one.
DEFAULT_KAFKA_PORT = 9092
def collect_hosts(hosts, randomize=True):
    """
    Normalize broker specs into a list of (host, port) tuples.

    ``hosts`` may be a comma-separated "host:port" string or an iterable of
    such strings; missing ports fall back to DEFAULT_KAFKA_PORT. The list is
    shuffled in place unless ``randomize`` is False.
    """
    if isinstance(hosts, six.string_types):
        hosts = hosts.strip().split(',')
    parsed = []
    for entry in hosts:
        pieces = entry.split(':')
        port = int(pieces[1]) if len(pieces) > 1 else DEFAULT_KAFKA_PORT
        parsed.append((pieces[0].strip(), port))
    if randomize:
        shuffle(parsed)
    return parsed
class KafkaConnection(local):
    """
    A socket connection to a single Kafka broker.

    Subclasses threading.local, so each thread that touches an instance gets
    its own independent connection state.

    This class is _not_ thread safe. Each call to `send` must be followed
    by a call to `recv` in order to get the correct response. Eventually,
    we can do something in here to facilitate multiplexed requests/responses
    since the Kafka API includes a correlation id.

    Arguments:
        host: the host name or IP address of a kafka broker
        port: the port number the kafka broker is listening on
        timeout: default 120. The socket timeout for sending and receiving data
            in seconds. None means no timeout, so a request can block forever.
    """
    def __init__(self, host, port, timeout=DEFAULT_SOCKET_TIMEOUT_SECONDS):
        super(KafkaConnection, self).__init__()
        self.host = host
        self.port = port
        self.timeout = timeout
        self._sock = None
        # Connect eagerly so construction fails fast when the broker is down.
        self.reinit()
    def __getnewargs__(self):
        # Lets pickle/copy rebuild the object through __new__ with the same
        # constructor arguments (the socket itself is not copied).
        return (self.host, self.port, self.timeout)
    def __repr__(self):
        return "<KafkaConnection host=%s port=%d>" % (self.host, self.port)
    ###################
    #   Private API   #
    ###################
    def _raise_connection_error(self):
        # Cleanup socket if we have one
        if self._sock:
            self.close()
        # And then raise
        raise ConnectionError("Kafka @ {0}:{1} went away".format(self.host, self.port))
    def _read_bytes(self, num_bytes):
        """Read exactly num_bytes from the socket, raising ConnectionError
        (via _raise_connection_error) on any socket failure or EOF."""
        bytes_left = num_bytes
        responses = []
        log.debug("About to read %d bytes from Kafka", num_bytes)
        # Make sure we have a connection
        if not self._sock:
            self.reinit()
        # recv may return fewer bytes than requested, so loop until done.
        while bytes_left:
            try:
                data = self._sock.recv(min(bytes_left, 4096))
                # Receiving empty string from recv signals
                # that the socket is in error.  we will never get
                # more data from this socket
                if data == b'':
                    raise socket.error("Not enough data to read message -- did server kill socket?")
            except socket.error:
                log.exception('Unable to receive data from Kafka')
                self._raise_connection_error()
            bytes_left -= len(data)
            log.debug("Read %d/%d bytes from Kafka", num_bytes - bytes_left, num_bytes)
            responses.append(data)
        # Join the chunks once at the end instead of concatenating per-read.
        return b''.join(responses)
    ##################
    #   Public API   #
    ##################
    # TODO multiplex socket communication to allow for multi-threaded clients
    def send(self, request_id, payload):
        """
        Send a request to Kafka

        Arguments::
            request_id (int): can be any int (used only for debug logging...)
            payload: an encoded kafka packet (see KafkaProtocol)
        """
        log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id))
        # Make sure we have a connection
        if not self._sock:
            self.reinit()
        try:
            self._sock.sendall(payload)
        except socket.error:
            log.exception('Unable to send payload to Kafka')
            self._raise_connection_error()
    def recv(self, request_id):
        """
        Get a response packet from Kafka

        Arguments:
            request_id: can be any int (only used for debug logging...)

        Returns:
            str: Encoded kafka packet response from server
        """
        log.debug("Reading response %d from Kafka" % request_id)
        # Read the size off of the header: a 4-byte big-endian signed int.
        resp = self._read_bytes(4)
        (size,) = struct.unpack('>i', resp)
        # Read the remainder of the response
        resp = self._read_bytes(size)
        return resp
    def copy(self):
        """
        Create an inactive copy of the connection object

        A reinit() has to be done on the copy before it can be used again
        return a new KafkaConnection object
        """
        c = copy.deepcopy(self)
        # Python 3 doesn't copy custom attributes of the threadlocal subclass
        c.host = copy.copy(self.host)
        c.port = copy.copy(self.port)
        c.timeout = copy.copy(self.timeout)
        # The copy must not share (or own) the live socket.
        c._sock = None
        return c
    def close(self):
        """
        Shutdown and close the connection socket
        """
        log.debug("Closing socket connection for %s:%d" % (self.host, self.port))
        if self._sock:
            # Call shutdown to be a good TCP client
            # But expect an error if the socket has already been
            # closed by the server
            try:
                self._sock.shutdown(socket.SHUT_RDWR)
            except socket.error:
                pass
            # Closing the socket should always succeed
            self._sock.close()
            self._sock = None
        else:
            log.debug("No socket found to close!")
    def reinit(self):
        """
        Re-initialize the socket connection
        close current socket (if open)
        and start a fresh connection
        raise ConnectionError on error
        """
        log.debug("Reinitializing socket connection for %s:%d" % (self.host, self.port))
        if self._sock:
            self.close()
        try:
            self._sock = socket.create_connection((self.host, self.port), self.timeout)
        except socket.error:
            log.exception('Unable to connect to kafka broker at %s:%d' % (self.host, self.port))
            self._raise_connection_error()
| |
"""Analytics helper class for the analytics integration."""
import asyncio
import aiohttp
import async_timeout
from homeassistant.components import hassio
from homeassistant.components.api import ATTR_INSTALLATION_TYPE
from homeassistant.components.automation.const import DOMAIN as AUTOMATION_DOMAIN
from homeassistant.const import __version__ as HA_VERSION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.storage import Store
from homeassistant.helpers.system_info import async_get_system_info
from homeassistant.loader import IntegrationNotFound, async_get_integration
from homeassistant.setup import async_get_loaded_integrations
from .const import (
ANALYTICS_ENDPOINT_URL,
ATTR_ADDON_COUNT,
ATTR_ADDONS,
ATTR_AUTO_UPDATE,
ATTR_AUTOMATION_COUNT,
ATTR_BASE,
ATTR_DIAGNOSTICS,
ATTR_HEALTHY,
ATTR_HUUID,
ATTR_INTEGRATION_COUNT,
ATTR_INTEGRATIONS,
ATTR_ONBOARDED,
ATTR_PREFERENCES,
ATTR_PROTECTED,
ATTR_SLUG,
ATTR_STATE_COUNT,
ATTR_STATISTICS,
ATTR_SUPERVISOR,
ATTR_SUPPORTED,
ATTR_USAGE,
ATTR_USER_COUNT,
ATTR_VERSION,
LOGGER,
PREFERENCE_SCHEMA,
STORAGE_KEY,
STORAGE_VERSION,
)
class Analytics:
    """Analytics helper class for the analytics integration."""
    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize the Analytics class."""
        self.hass: HomeAssistant = hass
        self.session = async_get_clientsession(hass)
        # In-memory state mirroring what is persisted via self._store.
        self._data = {ATTR_PREFERENCES: {}, ATTR_ONBOARDED: False}
        # NOTE(review): constructed through hass.helpers.storage rather than
        # the imported Store; presumably the same class — confirm.
        self._store: Store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    @property
    def preferences(self) -> dict:
        """Return the current active preferences."""
        preferences = self._data[ATTR_PREFERENCES]
        # Missing keys default to False so partially-populated stores are safe.
        return {
            ATTR_BASE: preferences.get(ATTR_BASE, False),
            ATTR_DIAGNOSTICS: preferences.get(ATTR_DIAGNOSTICS, False),
            ATTR_USAGE: preferences.get(ATTR_USAGE, False),
            ATTR_STATISTICS: preferences.get(ATTR_STATISTICS, False),
        }
    @property
    def onboarded(self) -> bool:
        """Return bool if the user has made a choice."""
        return self._data[ATTR_ONBOARDED]
    @property
    def supervisor(self) -> bool:
        """Return bool if a supervisor is present."""
        return hassio.is_hassio(self.hass)
    async def load(self) -> None:
        """Load preferences from storage and sync the diagnostics preference
        with the supervisor when the user has not onboarded yet."""
        stored = await self._store.async_load()
        if stored:
            self._data = stored
        if self.supervisor:
            supervisor_info = hassio.get_supervisor_info(self.hass)
            if not self.onboarded:
                # User have not configured analytics, get this setting from the supervisor
                if supervisor_info[ATTR_DIAGNOSTICS] and not self.preferences.get(
                    ATTR_DIAGNOSTICS, False
                ):
                    self._data[ATTR_PREFERENCES][ATTR_DIAGNOSTICS] = True
                elif not supervisor_info[ATTR_DIAGNOSTICS] and self.preferences.get(
                    ATTR_DIAGNOSTICS, False
                ):
                    self._data[ATTR_PREFERENCES][ATTR_DIAGNOSTICS] = False
    async def save_preferences(self, preferences: dict) -> None:
        """Validate, merge and persist preferences; marks onboarding done and
        propagates the diagnostics choice to the supervisor when present."""
        preferences = PREFERENCE_SCHEMA(preferences)
        self._data[ATTR_PREFERENCES].update(preferences)
        self._data[ATTR_ONBOARDED] = True
        await self._store.async_save(self._data)
        if self.supervisor:
            await hassio.async_update_diagnostics(
                self.hass, self.preferences.get(ATTR_DIAGNOSTICS, False)
            )
    async def send_analytics(self, _=None) -> None:
        """Build and submit the analytics payload.

        The `_` parameter absorbs the event/time argument supplied when this
        is used as a scheduled callback. No-op until the user has onboarded
        and enabled at least the base preference.
        """
        supervisor_info = None
        if not self.onboarded or not self.preferences.get(ATTR_BASE, False):
            LOGGER.debug("Nothing to submit")
            return
        huuid = await self.hass.helpers.instance_id.async_get()
        if self.supervisor:
            supervisor_info = hassio.get_supervisor_info(self.hass)
        system_info = await async_get_system_info(self.hass)
        integrations = []
        addons = []
        # Base payload: always sent once enabled.
        payload: dict = {
            ATTR_HUUID: huuid,
            ATTR_VERSION: HA_VERSION,
            ATTR_INSTALLATION_TYPE: system_info[ATTR_INSTALLATION_TYPE],
        }
        if supervisor_info is not None:
            payload[ATTR_SUPERVISOR] = {
                ATTR_HEALTHY: supervisor_info[ATTR_HEALTHY],
                ATTR_SUPPORTED: supervisor_info[ATTR_SUPPORTED],
            }
        # Usage and statistics both need the integration/addon inventories.
        if self.preferences.get(ATTR_USAGE, False) or self.preferences.get(
            ATTR_STATISTICS, False
        ):
            configured_integrations = await asyncio.gather(
                *[
                    async_get_integration(self.hass, domain)
                    for domain in async_get_loaded_integrations(self.hass)
                ],
                return_exceptions=True,
            )
            for integration in configured_integrations:
                # Unknown integrations are skipped; any other failure is fatal.
                if isinstance(integration, IntegrationNotFound):
                    continue
                if isinstance(integration, BaseException):
                    raise integration
                # Only report enabled, built-in integrations.
                if integration.disabled or not integration.is_built_in:
                    continue
                integrations.append(integration.domain)
            if supervisor_info is not None:
                installed_addons = await asyncio.gather(
                    *[
                        hassio.async_get_addon_info(self.hass, addon[ATTR_SLUG])
                        for addon in supervisor_info[ATTR_ADDONS]
                    ]
                )
                for addon in installed_addons:
                    addons.append(
                        {
                            ATTR_SLUG: addon[ATTR_SLUG],
                            ATTR_PROTECTED: addon[ATTR_PROTECTED],
                            ATTR_VERSION: addon[ATTR_VERSION],
                            ATTR_AUTO_UPDATE: addon[ATTR_AUTO_UPDATE],
                        }
                    )
        if self.preferences.get(ATTR_USAGE, False):
            payload[ATTR_INTEGRATIONS] = integrations
            if supervisor_info is not None:
                payload[ATTR_ADDONS] = addons
        if self.preferences.get(ATTR_STATISTICS, False):
            payload[ATTR_STATE_COUNT] = len(self.hass.states.async_all())
            payload[ATTR_AUTOMATION_COUNT] = len(
                self.hass.states.async_all(AUTOMATION_DOMAIN)
            )
            payload[ATTR_INTEGRATION_COUNT] = len(integrations)
            if supervisor_info is not None:
                payload[ATTR_ADDON_COUNT] = len(addons)
            payload[ATTR_USER_COUNT] = len(
                [
                    user
                    for user in await self.hass.auth.async_get_users()
                    if not user.system_generated
                ]
            )
        try:
            # NOTE(review): sync `with` on async_timeout works only on older
            # async_timeout releases; newer ones require `async with` —
            # confirm the pinned dependency version.
            with async_timeout.timeout(30):
                response = await self.session.post(ANALYTICS_ENDPOINT_URL, json=payload)
                if response.status == 200:
                    LOGGER.info(
                        (
                            "Submitted analytics to Home Assistant servers. "
                            "Information submitted includes %s"
                        ),
                        payload,
                    )
                else:
                    LOGGER.warning(
                        "Sending analytics failed with statuscode %s", response.status
                    )
        except asyncio.TimeoutError:
            LOGGER.error("Timeout sending analytics to %s", ANALYTICS_ENDPOINT_URL)
        except aiohttp.ClientError as err:
            LOGGER.error(
                "Error sending analytics to %s: %r", ANALYTICS_ENDPOINT_URL, err
            )
| |
#!/usr/bin/env python
"""
Pull together the NCES data and the Segregation calculator to produce
general reports on segregation in the USA.
"""
import sys
import argparse
import operator
from segcalc import SegCalc
from nces_parser import NCESParser
from fips import fips_to_st
from xlwt import Workbook
# ==============================================================================
# Constants
# ==============================================================================
# Maximum number of category rows written to each output worksheet.
MAX_RECORD = 1000
# ==============================================================================
# Functions
# ==============================================================================
def calc_idxes(segcalc):
"""
Call down to get all the various measures calculated
"""
print "Calculating Dissimilarity Index"
dis_idx = segcalc.calc_dis_idx()
print "Calculating Exposure Index"
exp_idx = segcalc.calc_exp_idx()
print "Calculating Isolation Index"
iso_idx = segcalc.calc_iso_idx()
print "Calculating Total Minority Students"
min_idx = segcalc.calc_totals('MINORITY')
print "Calculating Total Student Count"
tot_idx = segcalc.calc_totals()
print "Calculating Proportion of Students in the Minority"
mper_idx = segcalc.calc_proportion(idx='MINORITY')
print "Calculating Proportion of Students in a Magnet"
mag_idx = segcalc.calc_dependant_totals(sum_idx='MEMBER', dep_idx='MAGNET')
pmag_idx = segcalc.calc_prop(mag_idx, tot_idx)
print "Calculating Proportion of Students in a Charter"
chr_idx = segcalc.calc_dependant_totals(sum_idx='MEMBER', dep_idx='CHARTR')
pchr_idx = segcalc.calc_prop(chr_idx, tot_idx)
print "Calculating Proportion of Students in a Magnet or Charter"
chc_idx = segcalc.calc_dependant_totals(sum_idx='MEMBER', dep_idx='CHARTR', sec_dep_idx='MAGNET')
pchc_idx = segcalc.calc_prop(chc_idx, tot_idx)
print "Done with Calculations"
return (dis_idx, exp_idx, iso_idx, min_idx, tot_idx, mper_idx, pmag_idx, pchr_idx, pchc_idx)
# -------------------------------------
def save_report(year_range, idxes, count, category_list, category_txt, category_txt2, filename):
"""
Write out a bunch of report data to a spreadsheet report.
Report will be a 2D matrix:
- X-Axis = school year
- Y-Axis = 'Category' (FIPS Code, District, etc...)
Notes:
- idxes contains the data
- worksheets is a list of XLS worksheets, one per report in idxes
"""
wb = Workbook()
dws = wb.add_sheet('Dissimilarity Index')
ews = wb.add_sheet('Exposure Index')
iws = wb.add_sheet('Isolation Index')
min = wb.add_sheet('Minority Student Count')
tot = wb.add_sheet('Student Count')
mper = wb.add_sheet('Minority Proportion')
pmag = wb.add_sheet('Magnet Proportion')
pchr = wb.add_sheet('Charter Proportion')
pchc = wb.add_sheet('Choice Proportion')
worksheets = [dws, ews, iws, min, tot, mper, pmag, pchr, pchc]
# Create the headers/labels row/col
for ws in worksheets:
ws.write(0, 0, "Agency Name")
for j, st in enumerate(category_list):
if j < count:
if len(category_txt[st]) == 2: # Don't change caps for State abbr.
ws.write(j+1, 0, category_txt[st])
else:
ws.write(j+1, 0, category_txt[st].title())
offset = 1
if category_txt2:
for ws in worksheets:
ws.write(0, 1, "State")
for j, st in enumerate(category_list):
if j < count:
ws.write(j+1, 1, fips_to_st[category_txt2[st]][0])
offset = 2
# Print out the data
for i, year in enumerate(year_range):
print "Write Report for: %d" % year
for ws in worksheets:
ws.write(0, i+offset, year)
for j, st in enumerate(category_list):
if j < count:
for k, idx in enumerate(idxes):
try:
if k <= 5 and idx[i][st] < 0.001:
worksheets[k].write(j+1, i+offset, "")
else:
worksheets[k].write(j+1, i+offset, idx[i][st])
except KeyError:
worksheets[k].write(j+1, i+offset, "")
wb.save(filename)
# -------------------------------------
# Parse the command line options
# -------------------------------------
def main(argv):
parser = argparse.ArgumentParser(description='Segregation Report Generator')
parser.add_argument('--outfile', action='store', dest='outfile', required=True,
help='Report Filename')
parser.add_argument('--category', action='store', dest='category', required=False,
help='Which Category do we sort the results by?')
parser.add_argument('--match_idx', action='store', dest='match_idx', required=False,
help='Only use data points that match some criterion')
parser.add_argument('--match_val', action='store', dest='match_val', required=False,
help='Value to match when using --match_idx')
parser.add_argument('--minority', action='store', dest='minority', required=False,
help='Override the default list of Minority Groups')
parser.add_argument('--sec_minority', action='store', dest='sec_minority', required=False,
help='Override the default list of Secondary Minority Groups')
parser.add_argument('--majority', action='store', dest='majority', required=False,
help='Override the default list of Majority Groups')
parser.add_argument('--year', action='store', dest='year', required=False, type=int,
help='Override the default list of years to report on')
parser.add_argument('--max_record', action='store', dest='max_record', required=False,
help='Override the default number of items to report')
parser.add_argument('-debug', action='store_true', dest='debug', required=False,
help='Debug Mode')
args = parser.parse_args()
if args.category:
category = args.category
else:
category = 'LEAID'
# Lets calculate all the data first
if args.debug:
year_range = range(2009,2012)
minorities = ['BLACK']
sec_minorities = [None]
majorities = ['WHITE']
filenames = ['blacks_white']
else:
year_range = range(1987, 2012)
minorities = ['BLACK', 'HISP', 'BLACK', 'HISP', 'FRELCH', 'FRELCH']
sec_minorities = [None, None, 'HISP', None, None, 'REDLCH']
majorities = ['WHITE', 'WHITE', 'WHITE', 'BLACK', None, None]
filenames = ['blacks_white', 'hisp_white', 'minorities_white', 'hisp_black', 'free_lunch', 'free_red_lunch']
# Override the default years/groups per command line requests
if args.year:
year_range = [args.year]
if args.minority:
minorities = [args.minority]
filenames = [""]
if args.sec_minority:
sec_minorities = [args.sec_minorities]
if args.majority:
majorities = [args.majority]
# Print out more or fewer records than the default
if args.max_record:
report_count = int(args.max_record)
else:
report_count = MAX_RECORD
# Default search query
idx = {
'MINORITY': 'BLACK',
'MAJORITY': 'WHITE',
'TOTAL': 'MEMBER',
'CATEGORY': category,
'SUB_CAT': 'LEAID',
}
if args.match_idx:
idx['MATCH_IDX'] = args.match_idx
idx['MATCH_VAL'] = args.match_val
for i, group in enumerate(minorities):
idx['MINORITY'] = minorities[i]
idx['SEC_MINORITY'] = sec_minorities[i]
idx['MAJORITY'] = majorities[i]
print "*" * 80
print "Running all calculations with the following parameters"
print "*" * 80
print idx
print "*" * 80
DATASETS = 9
datasets = [[] for _ in range(DATASETS)]
for year in year_range:
print "Loading NCES Data from: %d" % year
nces = NCESParser(year=year)
schools = nces.parse(make_dict=True)
print "Finished Loading NCES Data from: %d" % year
if args.debug:
# print schools
pass
# Get our data query ready
segcalc = SegCalc(schools, idx)
if category == 'LEAID':
category_lut = segcalc.get_idxed_val('LEAID', 'LEANM')
category_lut2 = segcalc.get_idxed_val('LEAID', 'FIPS')
elif category == 'FIPS':
category_lut = dict(zip(fips_to_st.keys(), [fips_to_st[key][0] for key in fips_to_st.keys()]))
category_lut2 = None
print "Performing Calculations on Data from: %d" % year
dataset = calc_idxes(segcalc)
print "Finished Performing Calculations on Data from: %d" % year
print "Appending Yearly Data"
for j in range(DATASETS):
datasets[j].append(dataset[j])
print "Sorting By Size of the last year"
category_by_size = sorted(dataset[4].iteritems(), key=operator.itemgetter(1), reverse=True)
category_list = []
for category, total in category_by_size:
category_list.append(category)
if args.debug:
print "dist_dict = {"
for cat in category_list:
print " '%s': '%s'," % (cat, category_lut[cat].title())
print "}"
print "Generating Report"
save_report(
year_range,
datasets,
report_count,
category_list,
category_lut,
category_lut2,
filenames[i] + '_' + args.outfile
)
# -------------------------------------
# Drop the script name from the args
# and call our command line parser
# -------------------------------------
if __name__ == "__main__":
    main(sys.argv[1:])  # drop the script name so main() sees only the options
| |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
from urllib import quote
from swift.common import swob
from swift.common.swob import Request
from swift.common.utils import json
from swift3.test.unit import Swift3TestCase
from swift3.etree import fromstring, tostring
from swift3.subresource import Owner, Grant, User, ACL, encode_acl, \
decode_acl, ACLPublicRead
from swift3.test.unit.test_s3_acl import s3acl
from swift3.cfg import CONF
from swift3.utils import sysmeta_header
from swift3.request import MAX_32BIT_INT
# Canned CompleteMultipartUpload request body. Note part 2's ETag is quoted
# while part 1's is bare — both forms must be accepted.
xml = '<CompleteMultipartUpload>' \
    '<Part>' \
    '<PartNumber>1</PartNumber>' \
    '<ETag>HASH</ETag>' \
    '</Part>' \
    '<Part>' \
    '<PartNumber>2</PartNumber>' \
    '<ETag>"HASH"</ETag>' \
    '</Part>' \
    '</CompleteMultipartUpload>'
# (name, last_modified, hash, bytes) rows for the fake segment-container
# listing: the two uploaded parts of upload 'X'.
objects_template = \
    (('object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 100),
     ('object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 200))
# Listing rows covering three in-progress uploads (X, Y, Z) with two parts
# each, plus a 'subdir/' upload used for prefix/delimiter tests.
multiparts_template = \
    (('object/X', '2014-05-07T19:47:50.592270', 'HASH', 1),
     ('object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 11),
     ('object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 21),
     ('object/Y', '2014-05-07T19:47:53.592270', 'HASH', 2),
     ('object/Y/1', '2014-05-07T19:47:54.592270', 'HASH', 12),
     ('object/Y/2', '2014-05-07T19:47:55.592270', 'HASH', 22),
     ('object/Z', '2014-05-07T19:47:56.592270', 'HASH', 3),
     ('object/Z/1', '2014-05-07T19:47:57.592270', 'HASH', 13),
     ('object/Z/2', '2014-05-07T19:47:58.592270', 'HASH', 23),
     ('subdir/object/Z', '2014-05-07T19:47:58.592270', 'HASH', 4),
     ('subdir/object/Z/1', '2014-05-07T19:47:58.592270', 'HASH', 41),
     ('subdir/object/Z/2', '2014-05-07T19:47:58.592270', 'HASH', 41))
class TestSwift3MultiUpload(Swift3TestCase):
    def setUp(self):
        """Register canned Swift backend responses used by the multipart
        tests: the segment container listing plus per-object PUT/HEAD/DELETE
        endpoints for each upload and its parts."""
        super(TestSwift3MultiUpload, self).setUp()
        segment_bucket = '/v1/AUTH_test/bucket+segments'
        self.etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
        self.last_modified = 'Fri, 01 Apr 2014 12:00:00 GMT'
        put_headers = {'etag': self.etag, 'last-modified': self.last_modified}
        # Segment-container listing in Swift's JSON form.
        objects = map(lambda item: {'name': item[0], 'last_modified': item[1],
                                    'hash': item[2], 'bytes': item[3]},
                      objects_template)
        object_list = json.dumps(objects)
        self.swift.register('PUT',
                            '/v1/AUTH_test/bucket+segments',
                            swob.HTTPAccepted, {}, None)
        self.swift.register('GET', segment_bucket, swob.HTTPOk, {},
                            object_list)
        # Upload marker 'object/X' and its parts.
        self.swift.register('HEAD', segment_bucket + '/object/X',
                            swob.HTTPOk, {'x-object-meta-foo': 'bar',
                                          'content-type': 'baz/quux'}, None)
        self.swift.register('PUT', segment_bucket + '/object/X',
                            swob.HTTPCreated, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object/X',
                            swob.HTTPNoContent, {}, None)
        self.swift.register('GET', segment_bucket + '/object/invalid',
                            swob.HTTPNotFound, {}, None)
        self.swift.register('PUT', segment_bucket + '/object/X/1',
                            swob.HTTPCreated, put_headers, None)
        self.swift.register('DELETE', segment_bucket + '/object/X/1',
                            swob.HTTPNoContent, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object/X/2',
                            swob.HTTPNoContent, {}, None)
        # Upload marker 'object/Y' and its parts.
        self.swift.register('HEAD', segment_bucket + '/object/Y',
                            swob.HTTPOk, {}, None)
        self.swift.register('PUT', segment_bucket + '/object/Y',
                            swob.HTTPCreated, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object/Y',
                            swob.HTTPNoContent, {}, None)
        self.swift.register('PUT', segment_bucket + '/object/Y/1',
                            swob.HTTPCreated, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object/Y/1',
                            swob.HTTPNoContent, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object/Y/2',
                            swob.HTTPNoContent, {}, None)
        # NOTE(review): these register 'object2/Z' while multiparts_template
        # lists 'object/Z' — confirm the name mismatch is intentional.
        self.swift.register('HEAD', segment_bucket + '/object2/Z',
                            swob.HTTPOk, {}, None)
        self.swift.register('PUT', segment_bucket + '/object2/Z',
                            swob.HTTPCreated, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object2/Z',
                            swob.HTTPNoContent, {}, None)
        self.swift.register('PUT', segment_bucket + '/object2/Z/1',
                            swob.HTTPCreated, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object2/Z/1',
                            swob.HTTPNoContent, {}, None)
        self.swift.register('DELETE', segment_bucket + '/object2/Z/2',
                            swob.HTTPNoContent, {}, None)
@s3acl
def test_bucket_upload_part(self):
req = Request.blank('/bucket?partNumber=1&uploadId=x',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidRequest')
@s3acl
def test_object_multipart_uploads_list(self):
req = Request.blank('/bucket/object?uploads',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidRequest')
@s3acl
def test_bucket_multipart_uploads_initiate(self):
req = Request.blank('/bucket?uploads',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidRequest')
@s3acl
def test_bucket_list_parts(self):
req = Request.blank('/bucket?uploadId=x',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidRequest')
@s3acl
def test_bucket_multipart_uploads_abort(self):
req = Request.blank('/bucket?uploadId=x',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidRequest')
@s3acl
def test_bucket_multipart_uploads_complete(self):
req = Request.blank('/bucket?uploadId=x',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidRequest')
def _test_bucket_multipart_uploads_GET(self, query=None,
multiparts=None):
segment_bucket = '/v1/AUTH_test/bucket+segments'
objects = multiparts or multiparts_template
objects = map(lambda item: {'name': item[0], 'last_modified': item[1],
'hash': item[2], 'bytes': item[3]},
objects)
object_list = json.dumps(objects)
self.swift.register('GET', segment_bucket, swob.HTTPOk, {},
object_list)
query = '?uploads&' + query if query else '?uploads'
req = Request.blank('/bucket/%s' % query,
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
return self.call_swift3(req)
    @s3acl
    def test_bucket_multipart_uploads_GET(self):
        """A plain ?uploads listing reports every in-progress upload."""
        status, headers, body = self._test_bucket_multipart_uploads_GET()
        elem = fromstring(body, 'ListMultipartUploadsResult')
        self.assertEquals(elem.find('Bucket').text, 'bucket')
        self.assertEquals(elem.find('KeyMarker').text, None)
        self.assertEquals(elem.find('UploadIdMarker').text, None)
        self.assertEquals(elem.find('NextUploadIdMarker').text, 'Z')
        self.assertEquals(elem.find('MaxUploads').text, '1000')
        self.assertEquals(elem.find('IsTruncated').text, 'false')
        # Only the four upload markers count — not the per-part rows.
        self.assertEquals(len(elem.findall('Upload')), 4)
        # Expected Initiated values: template timestamps with the last three
        # microsecond digits dropped and a 'Z' (UTC) suffix appended.
        objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts_template]
        for u in elem.findall('Upload'):
            name = u.find('Key').text + '/' + u.find('UploadId').text
            initiated = u.find('Initiated').text
            self.assertTrue((name, initiated) in objects)
            self.assertEquals(u.find('Initiator/ID').text, 'test:tester')
            self.assertEquals(u.find('Initiator/DisplayName').text,
                              'test:tester')
            self.assertEquals(u.find('Owner/ID').text, 'test:tester')
            self.assertEquals(u.find('Owner/DisplayName').text, 'test:tester')
            self.assertEquals(u.find('StorageClass').text, 'STANDARD')
        self.assertEquals(status.split()[0], '200')
@s3acl
def test_bucket_multipart_uploads_GET_without_segment_bucket(self):
segment_bucket = '/v1/AUTH_test/bucket+segments'
self.swift.register('GET', segment_bucket, swob.HTTPNotFound, {}, '')
req = Request.blank('/bucket?uploads',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, haeaders, body = self.call_swift3(req)
self.assertEquals(status.split()[0], '200')
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(elem.find('Bucket').text, 'bucket')
self.assertEquals(elem.find('KeyMarker').text, None)
self.assertEquals(elem.find('UploadIdMarker').text, None)
self.assertEquals(elem.find('NextUploadIdMarker').text, None)
self.assertEquals(elem.find('MaxUploads').text, '1000')
self.assertEquals(elem.find('IsTruncated').text, 'false')
self.assertEquals(len(elem.findall('Upload')), 0)
@s3acl
@patch('swift3.request.get_container_info', lambda x, y: {'status': 404})
def test_bucket_multipart_uploads_GET_without_bucket(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNotFound, {}, '')
req = Request.blank('/bucket?uploads',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, haeaders, body = self.call_swift3(req)
self.assertEquals(status.split()[0], '404')
self.assertEquals(self._get_error_code(body), 'NoSuchBucket')
@s3acl
def test_bucket_multipart_uploads_GET_encoding_type_error(self):
query = 'encoding-type=xml'
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
@s3acl
def test_bucket_multipart_uploads_GET_maxuploads(self):
query = 'max-uploads=2'
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(len(elem.findall('Upload/UploadId')), 2)
self.assertEquals(elem.find('NextKeyMarker').text, 'object')
self.assertEquals(elem.find('NextUploadIdMarker').text, 'Y')
self.assertEquals(elem.find('MaxUploads').text, '2')
self.assertEquals(elem.find('IsTruncated').text, 'true')
self.assertEquals(status.split()[0], '200')
@s3acl
def test_bucket_multipart_uploads_GET_str_maxuploads(self):
query = 'max-uploads=invalid'
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
@s3acl
def test_bucket_multipart_uploads_GET_negative_maxuploads(self):
query = 'max-uploads=-1'
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
@s3acl
def test_bucket_multipart_uploads_GET_maxuploads_over_default(self):
query = 'max-uploads=1001'
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(len(elem.findall('Upload/UploadId')), 4)
self.assertEquals(elem.find('NextKeyMarker').text, 'subdir/object')
self.assertEquals(elem.find('NextUploadIdMarker').text, 'Z')
self.assertEquals(elem.find('MaxUploads').text, '1000')
self.assertEquals(elem.find('IsTruncated').text, 'false')
self.assertEquals(status.split()[0], '200')
@s3acl
def test_bucket_multipart_uploads_GET_maxuploads_over_max_32bit_int(self):
query = 'max-uploads=%s' % (MAX_32BIT_INT + 1)
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
@s3acl
def test_bucket_multipart_uploads_GET_with_id_and_key_marker(self):
query = 'upload-id-marker=Y&key-marker=object'
multiparts = \
(('object/Y', '2014-05-07T19:47:53.592270', 'HASH', 2),
('object/Y/1', '2014-05-07T19:47:54.592270', 'HASH', 12),
('object/Y/2', '2014-05-07T19:47:55.592270', 'HASH', 22))
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query, multiparts)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(elem.find('KeyMarker').text, 'object')
self.assertEquals(elem.find('UploadIdMarker').text, 'Y')
self.assertEquals(len(elem.findall('Upload')), 1)
objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts]
for u in elem.findall('Upload'):
name = u.find('Key').text + '/' + u.find('UploadId').text
initiated = u.find('Initiated').text
self.assertTrue((name, initiated) in objects)
self.assertEquals(status.split()[0], '200')
_, path, _ = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEquals(query['format'], 'json')
self.assertEquals(query['limit'], '1001')
self.assertEquals(query['marker'], 'object/Y')
@s3acl
def test_bucket_multipart_uploads_GET_with_key_marker(self):
query = 'key-marker=object'
multiparts = \
(('object/X', '2014-05-07T19:47:50.592270', 'HASH', 1),
('object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 11),
('object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 21),
('object/Y', '2014-05-07T19:47:53.592270', 'HASH', 2),
('object/Y/1', '2014-05-07T19:47:54.592270', 'HASH', 12),
('object/Y/2', '2014-05-07T19:47:55.592270', 'HASH', 22))
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query, multiparts)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(elem.find('KeyMarker').text, 'object')
self.assertEquals(elem.find('NextKeyMarker').text, 'object')
self.assertEquals(elem.find('NextUploadIdMarker').text, 'Y')
self.assertEquals(len(elem.findall('Upload')), 2)
objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts]
for u in elem.findall('Upload'):
name = u.find('Key').text + '/' + u.find('UploadId').text
initiated = u.find('Initiated').text
self.assertTrue((name, initiated) in objects)
self.assertEquals(status.split()[0], '200')
_, path, _ = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEquals(query['format'], 'json')
self.assertEquals(query['limit'], '1001')
self.assertEquals(query['marker'], quote('object/~'))
@s3acl
def test_bucket_multipart_uploads_GET_with_prefix(self):
query = 'prefix=X'
multiparts = \
(('object/X', '2014-05-07T19:47:50.592270', 'HASH', 1),
('object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 11),
('object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 21))
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query, multiparts)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(len(elem.findall('Upload')), 1)
objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts]
for u in elem.findall('Upload'):
name = u.find('Key').text + '/' + u.find('UploadId').text
initiated = u.find('Initiated').text
self.assertTrue((name, initiated) in objects)
self.assertEquals(status.split()[0], '200')
_, path, _ = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEquals(query['format'], 'json')
self.assertEquals(query['limit'], '1001')
self.assertEquals(query['prefix'], 'X')
@s3acl
def test_bucket_multipart_uploads_GET_with_delimiter(self):
query = 'delimiter=/'
multiparts = \
(('object/X', '2014-05-07T19:47:50.592270', 'HASH', 1),
('object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 11),
('object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 21),
('object/Y', '2014-05-07T19:47:50.592270', 'HASH', 2),
('object/Y/1', '2014-05-07T19:47:51.592270', 'HASH', 21),
('object/Y/2', '2014-05-07T19:47:52.592270', 'HASH', 22),
('object/Z', '2014-05-07T19:47:50.592270', 'HASH', 3),
('object/Z/1', '2014-05-07T19:47:51.592270', 'HASH', 31),
('object/Z/2', '2014-05-07T19:47:52.592270', 'HASH', 32),
('subdir/object/X', '2014-05-07T19:47:50.592270', 'HASH', 4),
('subdir/object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 41),
('subdir/object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 42),
('subdir/object/Y', '2014-05-07T19:47:50.592270', 'HASH', 5),
('subdir/object/Y/1', '2014-05-07T19:47:51.592270', 'HASH', 51),
('subdir/object/Y/2', '2014-05-07T19:47:52.592270', 'HASH', 52),
('subdir2/object/Z', '2014-05-07T19:47:50.592270', 'HASH', 6),
('subdir2/object/Z/1', '2014-05-07T19:47:51.592270', 'HASH', 61),
('subdir2/object/Z/2', '2014-05-07T19:47:52.592270', 'HASH', 62))
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query, multiparts)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(len(elem.findall('Upload')), 3)
self.assertEquals(len(elem.findall('CommonPrefixes')), 2)
objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts
if o[0].startswith('o')]
prefixes = set([o[0].split('/')[0] + '/' for o in multiparts
if o[0].startswith('s')])
for u in elem.findall('Upload'):
name = u.find('Key').text + '/' + u.find('UploadId').text
initiated = u.find('Initiated').text
self.assertTrue((name, initiated) in objects)
for p in elem.findall('CommonPrefixes'):
prefix = p.find('Prefix').text
self.assertTrue(prefix in prefixes)
self.assertEquals(status.split()[0], '200')
_, path, _ = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEquals(query['format'], 'json')
self.assertEquals(query['limit'], '1001')
self.assertTrue(query.get('delimiter') is None)
@s3acl
def test_bucket_multipart_uploads_GET_with_multi_chars_delimiter(self):
query = 'delimiter=subdir'
multiparts = \
(('object/X', '2014-05-07T19:47:50.592270', 'HASH', 1),
('object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 11),
('object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 21),
('dir/subdir/object/X', '2014-05-07T19:47:50.592270',
'HASH', 3),
('dir/subdir/object/X/1', '2014-05-07T19:47:51.592270',
'HASH', 31),
('dir/subdir/object/X/2', '2014-05-07T19:47:52.592270',
'HASH', 32),
('subdir/object/X', '2014-05-07T19:47:50.592270', 'HASH', 4),
('subdir/object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 41),
('subdir/object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 42),
('subdir/object/Y', '2014-05-07T19:47:50.592270', 'HASH', 5),
('subdir/object/Y/1', '2014-05-07T19:47:51.592270', 'HASH', 51),
('subdir/object/Y/2', '2014-05-07T19:47:52.592270', 'HASH', 52),
('subdir2/object/Z', '2014-05-07T19:47:50.592270', 'HASH', 6),
('subdir2/object/Z/1', '2014-05-07T19:47:51.592270', 'HASH', 61),
('subdir2/object/Z/2', '2014-05-07T19:47:52.592270', 'HASH', 62))
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query, multiparts)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(len(elem.findall('Upload')), 1)
self.assertEquals(len(elem.findall('CommonPrefixes')), 2)
objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts
if o[0].startswith('object')]
prefixes = ('dir/subdir', 'subdir')
for u in elem.findall('Upload'):
name = u.find('Key').text + '/' + u.find('UploadId').text
initiated = u.find('Initiated').text
self.assertTrue((name, initiated) in objects)
for p in elem.findall('CommonPrefixes'):
prefix = p.find('Prefix').text
self.assertTrue(prefix in prefixes)
self.assertEquals(status.split()[0], '200')
_, path, _ = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEquals(query['format'], 'json')
self.assertEquals(query['limit'], '1001')
self.assertTrue(query.get('delimiter') is None)
@s3acl
def test_bucket_multipart_uploads_GET_with_prefix_and_delimiter(self):
query = 'prefix=dir/&delimiter=/'
multiparts = \
(('dir/subdir/object/X', '2014-05-07T19:47:50.592270',
'HASH', 4),
('dir/subdir/object/X/1', '2014-05-07T19:47:51.592270',
'HASH', 41),
('dir/subdir/object/X/2', '2014-05-07T19:47:52.592270',
'HASH', 42),
('dir/object/X', '2014-05-07T19:47:50.592270', 'HASH', 5),
('dir/object/X/1', '2014-05-07T19:47:51.592270', 'HASH', 51),
('dir/object/X/2', '2014-05-07T19:47:52.592270', 'HASH', 52))
status, headers, body = \
self._test_bucket_multipart_uploads_GET(query, multiparts)
elem = fromstring(body, 'ListMultipartUploadsResult')
self.assertEquals(len(elem.findall('Upload')), 1)
self.assertEquals(len(elem.findall('CommonPrefixes')), 1)
objects = [(o[0], o[1][:-3] + 'Z') for o in multiparts
if o[0].startswith('dir/o')]
prefixes = ['dir/subdir/']
for u in elem.findall('Upload'):
name = u.find('Key').text + '/' + u.find('UploadId').text
initiated = u.find('Initiated').text
self.assertTrue((name, initiated) in objects)
for p in elem.findall('CommonPrefixes'):
prefix = p.find('Prefix').text
self.assertTrue(prefix in prefixes)
self.assertEquals(status.split()[0], '200')
_, path, _ = self.swift.calls_with_headers[-1]
path, query_string = path.split('?', 1)
query = {}
for q in query_string.split('&'):
key, arg = q.split('=')
query[key] = arg
self.assertEquals(query['format'], 'json')
self.assertEquals(query['limit'], '1001')
self.assertEquals(query['prefix'], 'dir/')
self.assertTrue(query.get('delimiter') is None)
@patch('swift3.controllers.multi_upload.unique_id', lambda: 'X')
def test_object_multipart_upload_initiate(self):
req = Request.blank('/bucket/object?uploads',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization':
'AWS test:tester:hmac',
'x-amz-meta-foo': 'bar'})
status, headers, body = self.call_swift3(req)
fromstring(body, 'InitiateMultipartUploadResult')
self.assertEquals(status.split()[0], '200')
_, _, req_headers = self.swift.calls_with_headers[-1]
self.assertEquals(req_headers.get('X-Object-Meta-Foo'), 'bar')
@s3acl(s3acl_only=True)
@patch('swift3.controllers.multi_upload.unique_id', lambda: 'X')
def test_object_multipart_upload_initiate_s3acl(self):
req = Request.blank('/bucket/object?uploads',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization':
'AWS test:tester:hmac',
'x-amz-acl': 'public-read',
'x-amz-meta-foo': 'bar'})
status, headers, body = self.call_swift3(req)
fromstring(body, 'InitiateMultipartUploadResult')
self.assertEquals(status.split()[0], '200')
_, _, req_headers = self.swift.calls_with_headers[-1]
self.assertEquals(req_headers.get('X-Object-Meta-Foo'), 'bar')
tmpacl_header = req_headers.get(sysmeta_header('object', 'tmpacl'))
self.assertTrue(tmpacl_header)
acl_header = encode_acl('object',
ACLPublicRead(Owner('test:tester',
'test:tester')))
self.assertEquals(acl_header.get(sysmeta_header('object', 'acl')),
tmpacl_header)
@patch('swift3.controllers.multi_upload.unique_id', lambda: 'X')
def test_object_multipart_upload_initiate_without_bucket(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNotFound, {}, None)
req = Request.blank('/bucket/object?uploads',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization':
'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(status.split()[0], '404')
self.assertEquals(self._get_error_code(body), 'NoSuchBucket')
@s3acl
def test_object_multipart_upload_complete_error(self):
malformed_xml = 'malformed_XML'
req = Request.blank('/bucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'},
body=malformed_xml)
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'MalformedXML')
# without target bucket
req = Request.blank('/nobucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'},
body=xml)
with patch('swift3.request.get_container_info',
lambda x, y: {'status': 404}):
self.swift.register('HEAD', '/v1/AUTH_test/nobucket',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'NoSuchBucket')
def test_object_multipart_upload_complete(self):
req = Request.blank('/bucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'},
body=xml)
status, headers, body = self.call_swift3(req)
fromstring(body, 'CompleteMultipartUploadResult')
self.assertEquals(status.split()[0], '200')
_, _, headers = self.swift.calls_with_headers[-2]
self.assertEquals(headers.get('X-Object-Meta-Foo'), 'bar')
self.assertEquals(headers.get('Content-Type'), 'baz/quux')
@s3acl(s3acl_only=True)
def test_object_multipart_upload_complete_s3acl(self):
acl_headers = encode_acl('object', ACLPublicRead(Owner('test:tester',
'test:tester')))
headers = {}
headers[sysmeta_header('object', 'tmpacl')] = \
acl_headers.get(sysmeta_header('object', 'acl'))
headers['X-Object-Meta-Foo'] = 'bar'
headers['Content-Type'] = 'baz/quux'
self.swift.register('HEAD', '/v1/AUTH_test/bucket+segments/object/X',
swob.HTTPOk, headers, None)
req = Request.blank('/bucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac'},
body=xml)
status, headers, body = self.call_swift3(req)
fromstring(body, 'CompleteMultipartUploadResult')
self.assertEquals(status.split()[0], '200')
_, _, headers = self.swift.calls_with_headers[-2]
self.assertEquals(headers.get('X-Object-Meta-Foo'), 'bar')
self.assertEquals(headers.get('Content-Type'), 'baz/quux')
self.assertEquals(tostring(ACLPublicRead(Owner('test:tester',
'test:tester')).elem()),
tostring(decode_acl('object', headers).elem()))
@s3acl
def test_object_multipart_upload_abort_error(self):
req = Request.blank('/bucket/object?uploadId=invalid',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'NoSuchUpload')
# without target bucket
req = Request.blank('/nobucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac'})
with patch('swift3.request.get_container_info',
lambda x, y: {'status': 404}):
self.swift.register('HEAD', '/v1/AUTH_test/nobucket',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'NoSuchBucket')
@s3acl
def test_object_multipart_upload_abort(self):
req = Request.blank('/bucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(status.split()[0], '204')
    @s3acl
    @patch('swift3.request.get_container_info', lambda x, y: {'status': 204})
    def test_object_upload_part_error(self):
        """Each invalid part-upload request maps to the expected S3 error."""
        # the uploadId query parameter is mandatory for a part PUT
        req = Request.blank('/bucket/object?partNumber=1',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac'},
                            body='part object')
        status, headers, body = self.call_swift3(req)
        self.assertEquals(self._get_error_code(body), 'InvalidArgument')
        # partNumber must be an integer
        req = Request.blank('/bucket/object?partNumber=invalid&uploadId=X',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac'},
                            body='part object')
        status, headers, body = self.call_swift3(req)
        self.assertEquals(self._get_error_code(body), 'InvalidArgument')
        # partNumber must be at least 1 (0 is rejected)
        req = Request.blank('/bucket/object?partNumber=0&uploadId=X',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac'},
                            body='part object')
        status, headers, body = self.call_swift3(req)
        self.assertEquals(self._get_error_code(body), 'InvalidArgument')
        # partNumber must not exceed the 1000-part maximum (1001 is rejected)
        req = Request.blank('/bucket/object?partNumber=1001&uploadId=X',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac'},
                            body='part object')
        status, headers, body = self.call_swift3(req)
        self.assertEquals(self._get_error_code(body), 'InvalidArgument')
        # the target bucket must exist
        req = Request.blank('/nobucket/object?partNumber=1&uploadId=X',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Authorization': 'AWS test:tester:hmac'},
                            body='part object')
        with patch('swift3.request.get_container_info',
                   lambda x, y: {'status': 404}):
            self.swift.register('HEAD', '/v1/AUTH_test/nobucket',
                                swob.HTTPNotFound, {}, None)
            status, headers, body = self.call_swift3(req)
        self.assertEquals(self._get_error_code(body), 'NoSuchBucket')
@s3acl
def test_object_upload_part(self):
req = Request.blank('/bucket/object?partNumber=1&uploadId=X',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac'},
body='part object')
status, headers, body = self.call_swift3(req)
self.assertEquals(status.split()[0], '200')
@s3acl
def test_object_list_parts_error(self):
req = Request.blank('/bucket/object?uploadId=invalid',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'NoSuchUpload')
# without target bucket
req = Request.blank('/nobucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
with patch('swift3.request.get_container_info',
lambda x, y: {'status': 404}):
self.swift.register('HEAD', '/v1/AUTH_test/nobucket',
swob.HTTPNotFound, {}, None)
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'NoSuchBucket')
@s3acl
def test_object_list_parts(self):
req = Request.blank('/bucket/object?uploadId=X',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(elem.find('Bucket').text, 'bucket')
self.assertEquals(elem.find('Key').text, 'object')
self.assertEquals(elem.find('UploadId').text, 'X')
self.assertEquals(elem.find('Initiator/ID').text, 'test:tester')
self.assertEquals(elem.find('Initiator/ID').text, 'test:tester')
self.assertEquals(elem.find('Owner/ID').text, 'test:tester')
self.assertEquals(elem.find('Owner/ID').text, 'test:tester')
self.assertEquals(elem.find('StorageClass').text, 'STANDARD')
self.assertEquals(elem.find('PartNumberMarker').text, '0')
self.assertEquals(elem.find('NextPartNumberMarker').text, '2')
self.assertEquals(elem.find('MaxParts').text, '1000')
self.assertEquals(elem.find('IsTruncated').text, 'false')
self.assertEquals(len(elem.findall('Part')), 2)
for p in elem.findall('Part'):
partnum = int(p.find('PartNumber').text)
self.assertEquals(p.find('LastModified').text,
objects_template[partnum - 1][1][:-3]
+ 'Z')
self.assertEquals(p.find('ETag').text.strip(),
'"%s"' % objects_template[partnum - 1][2])
self.assertEquals(p.find('Size').text,
str(objects_template[partnum - 1][3]))
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_encoding_type(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket+segments/object@@/X',
swob.HTTPOk, {}, None)
req = Request.blank('/bucket/object@@?uploadId=X&encoding-type=url',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(elem.find('Key').text, quote('object@@'))
self.assertEquals(elem.find('EncodingType').text, 'url')
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_without_encoding_type(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket+segments/object@@/X',
swob.HTTPOk, {}, None)
req = Request.blank('/bucket/object@@?uploadId=X',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(elem.find('Key').text, 'object@@')
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_encoding_type_error(self):
req = Request.blank('/bucket/object?uploadId=X&encoding-type=xml',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_max_parts(self):
req = Request.blank('/bucket/object?uploadId=X&max-parts=1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(elem.find('IsTruncated').text, 'true')
self.assertEquals(len(elem.findall('Part')), 1)
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_str_max_parts(self):
req = Request.blank('/bucket/object?uploadId=X&max-parts=invalid',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_negative_max_parts(self):
req = Request.blank('/bucket/object?uploadId=X&max-parts=-1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_over_max_parts(self):
req = Request.blank('/bucket/object?uploadId=X&max-parts=%d' %
(CONF.max_parts_listing + 1),
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(elem.find('Bucket').text, 'bucket')
self.assertEquals(elem.find('Key').text, 'object')
self.assertEquals(elem.find('UploadId').text, 'X')
self.assertEquals(elem.find('Initiator/ID').text, 'test:tester')
self.assertEquals(elem.find('Owner/ID').text, 'test:tester')
self.assertEquals(elem.find('StorageClass').text, 'STANDARD')
self.assertEquals(elem.find('PartNumberMarker').text, '0')
self.assertEquals(elem.find('NextPartNumberMarker').text, '2')
self.assertEquals(elem.find('MaxParts').text, '1000')
self.assertEquals(elem.find('IsTruncated').text, 'false')
self.assertEquals(len(elem.findall('Part')), 2)
for p in elem.findall('Part'):
partnum = int(p.find('PartNumber').text)
self.assertEquals(p.find('LastModified').text,
objects_template[partnum - 1][1][:-3]
+ 'Z')
self.assertEquals(p.find('ETag').text,
'"%s"' % objects_template[partnum - 1][2])
self.assertEquals(p.find('Size').text,
str(objects_template[partnum - 1][3]))
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_over_max_32bit_int(self):
req = Request.blank('/bucket/object?uploadId=X&max-parts=%d' %
(MAX_32BIT_INT + 1),
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_with_part_number_marker(self):
req = Request.blank('/bucket/object?uploadId=X&'
'part-number-marker=1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(len(elem.findall('Part')), 1)
self.assertEquals(elem.find('Part/PartNumber').text, '2')
self.assertEquals(elem.find('PartNumberMarker').text, '1')
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_str_part_number_marker(self):
req = Request.blank('/bucket/object?uploadId=X&part-number-marker='
'invalid',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_negative_part_number_marker(self):
req = Request.blank('/bucket/object?uploadId=X&part-number-marker='
'-1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_over_part_number_marker(self):
part_number_marker = str(CONF.max_upload_part_num + 1)
req = Request.blank('/bucket/object?uploadId=X&'
'part-number-marker=%s' % part_number_marker,
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(len(elem.findall('Part')), 0)
self.assertEquals(elem.find('PartNumberMarker').text,
part_number_marker)
self.assertEquals(status.split()[0], '200')
def test_object_list_parts_over_max_32bit_int_part_number_marker(self):
req = Request.blank('/bucket/object?uploadId=X&part-number-marker='
'%s' % ((MAX_32BIT_INT + 1)),
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_list_parts_same_max_marts_as_objects_num(self):
req = Request.blank('/bucket/object?uploadId=X&max-parts=2',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac'})
status, headers, body = self.call_swift3(req)
elem = fromstring(body, 'ListPartsResult')
self.assertEquals(len(elem.findall('Part')), 2)
self.assertEquals(status.split()[0], '200')
def _test_for_s3acl(self, method, query, account, hasObj=True, body=None):
path = '/bucket%s' % ('/object' + query if hasObj else query)
req = Request.blank(path,
environ={'REQUEST_METHOD': method},
headers={'Authorization': 'AWS %s:hmac' % account},
body=body)
return self.call_swift3(req)
@s3acl(s3acl_only=True)
def test_upload_part_acl_without_permission(self):
status, headers, body = \
self._test_for_s3acl('PUT', '?partNumber=1&uploadId=X',
'test:other')
self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_upload_part_acl_with_write_permission(self):
status, headers, body = \
self._test_for_s3acl('PUT', '?partNumber=1&uploadId=X',
'test:write')
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_upload_part_acl_with_fullcontrol_permission(self):
status, headers, body = \
self._test_for_s3acl('PUT', '?partNumber=1&uploadId=X',
'test:full_control')
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_list_multipart_uploads_acl_without_permission(self):
status, headers, body = \
self._test_for_s3acl('GET', '?uploads', 'test:other',
hasObj=False)
self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_list_multipart_uploads_acl_with_read_permission(self):
status, headers, body = \
self._test_for_s3acl('GET', '?uploads', 'test:read',
hasObj=False)
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_list_multipart_uploads_acl_with_fullcontrol_permission(self):
status, headers, body = \
self._test_for_s3acl('GET', '?uploads', 'test:full_control',
hasObj=False)
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
@patch('swift3.controllers.multi_upload.unique_id', lambda: 'X')
def test_initiate_multipart_upload_acl_without_permission(self):
status, headers, body = \
self._test_for_s3acl('POST', '?uploads', 'test:other')
self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
@patch('swift3.controllers.multi_upload.unique_id', lambda: 'X')
def test_initiate_multipart_upload_acl_with_write_permission(self):
status, headers, body = \
self._test_for_s3acl('POST', '?uploads', 'test:write')
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
@patch('swift3.controllers.multi_upload.unique_id', lambda: 'X')
def test_initiate_multipart_upload_acl_with_fullcontrol_permission(self):
status, headers, body = \
self._test_for_s3acl('POST', '?uploads', 'test:full_control')
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_list_parts_acl_without_permission(self):
status, headers, body = \
self._test_for_s3acl('GET', '?uploadId=X', 'test:other')
self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_list_parts_acl_with_read_permission(self):
status, headers, body = \
self._test_for_s3acl('GET', '?uploadId=X', 'test:read')
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_list_parts_acl_with_fullcontrol_permission(self):
status, headers, body = \
self._test_for_s3acl('GET', '?uploadId=X', 'test:full_control')
self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_abort_multipart_upload_acl_without_permission(self):
status, headers, body = \
self._test_for_s3acl('DELETE', '?uploadId=X', 'test:other')
self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_abort_multipart_upload_acl_with_write_permission(self):
status, headers, body = \
self._test_for_s3acl('DELETE', '?uploadId=X', 'test:write')
self.assertEquals(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_abort_multipart_upload_acl_with_fullcontrol_permission(self):
status, headers, body = \
self._test_for_s3acl('DELETE', '?uploadId=X', 'test:full_control')
self.assertEquals(status.split()[0], '204')
@s3acl(s3acl_only=True)
def test_complete_multipart_upload_acl_without_permission(self):
status, headers, body = \
self._test_for_s3acl('POST', '?uploadId=X', 'test:other',
body=xml)
self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_complete_multipart_upload_acl_with_write_permission(self):
    # POST ?uploadId=X succeeds (200) for an account granted WRITE.
    status, headers, body = \
        self._test_for_s3acl('POST', '?uploadId=X', 'test:write',
                             body=xml)
    self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_complete_multipart_upload_acl_with_fullcontrol_permission(self):
    # POST ?uploadId=X succeeds (200) for an account granted FULL_CONTROL.
    status, headers, body = \
        self._test_for_s3acl('POST', '?uploadId=X', 'test:full_control',
                             body=xml)
    self.assertEquals(status.split()[0], '200')
def _test_copy_for_s3acl(self, account, src_permission=None,
                         src_path='/src_bucket/src_obj',
                         head_resp=swob.HTTPOk, put_header=None):
    """Issue an upload-part-copy PUT and return (status, headers, body).

    account        -- account used in the Authorization header
    src_permission -- grant given to *account* on the copy source; when
                      None the source is simply owned by test:tester
    src_path       -- value of the X-Amz-Copy-Source header
    head_resp      -- response class registered for the source HEAD
    put_header     -- extra headers merged into the PUT request
    """
    # The default used to be a shared mutable dict ({}); use a None
    # sentinel so one call can never leak header state into the next.
    put_header = put_header if put_header is not None else {}
    owner = 'test:tester'
    grants = [Grant(User(account), src_permission)] \
        if src_permission else [Grant(User(owner), 'FULL_CONTROL')]
    src_o_headers = encode_acl('object', ACL(Owner(owner, owner), grants))
    src_o_headers.update({'last-modified': self.last_modified})
    self.swift.register('HEAD', '/v1/AUTH_test/src_bucket/src_obj',
                        head_resp, src_o_headers, None)
    put_headers = {'Authorization': 'AWS %s:hmac' % account,
                   'X-Amz-Copy-Source': src_path}
    put_headers.update(put_header)
    req = Request.blank(
        '/bucket/object?partNumber=1&uploadId=X',
        environ={'REQUEST_METHOD': 'PUT'},
        headers=put_headers)
    return self.call_swift3(req)
@s3acl
def test_upload_part_copy(self):
    # Upload-part-copy returns a CopyPartResult XML document; the S3
    # response carries no etag header, and the backend PUT is made with
    # X-Copy-From and an empty body.
    last_modified = '2014-04-01T12:00:00'
    status, headers, body = \
        self._test_copy_for_s3acl('test:tester')
    self.assertEquals(status.split()[0], '200')
    self.assertEquals(headers['Content-Type'], 'application/xml')
    self.assertTrue(headers.get('etag') is None)
    elem = fromstring(body, 'CopyPartResult')
    self.assertEquals(elem.find('LastModified').text, last_modified)
    self.assertEquals(elem.find('ETag').text, '"%s"' % self.etag)
    _, _, headers = self.swift.calls_with_headers[-1]
    self.assertEquals(headers['X-Copy-From'], '/src_bucket/src_obj')
    self.assertEquals(headers['Content-Length'], '0')
@s3acl(s3acl_only=True)
def test_upload_part_copy_acl_with_owner_permission(self):
    # The source owner may always copy its own object (200).
    status, headers, body = \
        self._test_copy_for_s3acl('test:tester')
    self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_upload_part_copy_acl_without_permission(self):
    # READ on the source alone is not enough without destination access.
    status, headers, body = \
        self._test_copy_for_s3acl('test:other', 'READ')
    self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_upload_part_copy_acl_with_write_permission(self):
    # Destination WRITE plus source READ allows the copy (200).
    status, headers, body = \
        self._test_copy_for_s3acl('test:write', 'READ')
    self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_upload_part_copy_acl_with_fullcontrol_permission(self):
    # FULL_CONTROL plus source READ allows the copy (200).
    status, headers, body = \
        self._test_copy_for_s3acl('test:full_control', 'READ')
    self.assertEquals(status.split()[0], '200')
@s3acl(s3acl_only=True)
def test_upload_part_copy_acl_without_src_permission(self):
    # WRITE on the source does not imply READ: copy is rejected (403).
    status, headers, body = \
        self._test_copy_for_s3acl('test:write', 'WRITE')
    self.assertEquals(status.split()[0], '403')
@s3acl(s3acl_only=True)
def test_upload_part_copy_acl_invalid_source(self):
    # A copy source that names no object ('', '/', '/bucket',
    # '/bucket/') must fail with 400 Bad Request.
    status, headers, body = \
        self._test_copy_for_s3acl('test:write', 'WRITE', '')
    self.assertEquals(status.split()[0], '400')
    status, headers, body = \
        self._test_copy_for_s3acl('test:write', 'WRITE', '/')
    self.assertEquals(status.split()[0], '400')
    status, headers, body = \
        self._test_copy_for_s3acl('test:write', 'WRITE', '/bucket')
    self.assertEquals(status.split()[0], '400')
    status, headers, body = \
        self._test_copy_for_s3acl('test:write', 'WRITE', '/bucket/')
    self.assertEquals(status.split()[0], '400')
@s3acl
def test_upload_part_copy_headers_error(self):
    # Each X-Amz-Copy-Source-If-* header whose precondition fails on the
    # source HEAD must surface as an S3 PreconditionFailed error.
    # (The local name 'header' is reused both for the request dict and
    # for the response-headers element of the returned tuple.)
    account = 'test:tester'
    etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
    last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'

    header = {'X-Amz-Copy-Source-If-Match': etag}
    status, header, body = \
        self._test_copy_for_s3acl(account,
                                  head_resp=swob.HTTPPreconditionFailed,
                                  put_header=header)
    self.assertEquals(self._get_error_code(body), 'PreconditionFailed')

    header = {'X-Amz-Copy-Source-If-None-Match': etag}
    status, header, body = \
        self._test_copy_for_s3acl(account,
                                  head_resp=swob.HTTPNotModified,
                                  put_header=header)
    self.assertEquals(self._get_error_code(body), 'PreconditionFailed')

    header = {'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
    status, header, body = \
        self._test_copy_for_s3acl(account,
                                  head_resp=swob.HTTPNotModified,
                                  put_header=header)
    self.assertEquals(self._get_error_code(body), 'PreconditionFailed')

    header = \
        {'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
    status, header, body = \
        self._test_copy_for_s3acl(account,
                                  head_resp=swob.HTTPPreconditionFailed,
                                  put_header=header)
    self.assertEquals(self._get_error_code(body), 'PreconditionFailed')
def test_upload_part_copy_headers_with_match(self):
    # If-Match/If-Modified-Since must be forwarded on exactly one backend
    # call (index -2) and stripped from all the others.
    account = 'test:tester'
    etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
    last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'

    header = {'X-Amz-Copy-Source-If-Match': etag,
              'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
    status, header, body = \
        self._test_copy_for_s3acl(account, put_header=header)

    self.assertEquals(status.split()[0], '200')
    self.assertEquals(len(self.swift.calls_with_headers), 4)
    _, _, headers = self.swift.calls_with_headers[-2]
    self.assertEquals(headers['If-Match'], etag)
    self.assertEquals(headers['If-Modified-Since'], last_modified_since)
    _, _, headers = self.swift.calls_with_headers[-1]
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[0]
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
@s3acl(s3acl_only=True)
def test_upload_part_copy_headers_with_match_and_s3acl(self):
    # Same as the non-s3acl variant, but with an extra ACL-related
    # backend call in front that must not carry the conditional headers.
    account = 'test:tester'
    etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
    last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'

    header = {'X-Amz-Copy-Source-If-Match': etag,
              'X-Amz-Copy-Source-If-Modified-Since': last_modified_since}
    status, header, body = \
        self._test_copy_for_s3acl(account, put_header=header)

    self.assertEquals(status.split()[0], '200')
    self.assertEquals(len(self.swift.calls_with_headers), 4)
    # Before checking the copy source when s3acl is enabled, Swift3
    # checks the destination bucket's write permission and the
    # destination object's existence.
    _, _, headers = self.swift.calls_with_headers[-3]
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[-2]
    self.assertEquals(headers['If-Match'], etag)
    self.assertEquals(headers['If-Modified-Since'], last_modified_since)
    _, _, headers = self.swift.calls_with_headers[-1]
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[0]
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
def test_upload_part_copy_headers_with_not_match(self):
    # If-None-Match/If-Unmodified-Since must appear on exactly one
    # backend call (index -2) and on none of the others.
    account = 'test:tester'
    etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
    last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'

    header = {'X-Amz-Copy-Source-If-None-Match': etag,
              'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
    status, header, body = \
        self._test_copy_for_s3acl(account, put_header=header)

    self.assertEquals(status.split()[0], '200')
    self.assertEquals(len(self.swift.calls_with_headers), 4)
    _, _, headers = self.swift.calls_with_headers[-2]
    self.assertEquals(headers['If-None-Match'], etag)
    self.assertEquals(headers['If-Unmodified-Since'], last_modified_since)
    _, _, headers = self.swift.calls_with_headers[-1]
    self.assertTrue(headers.get('If-None-Match') is None)
    self.assertTrue(headers.get('If-Unmodified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[0]
    self.assertTrue(headers.get('If-None-Match') is None)
    self.assertTrue(headers.get('If-Unmodified-Since') is None)
@s3acl(s3acl_only=True)
def test_upload_part_copy_headers_with_not_match_and_s3acl(self):
    # s3acl variant of the If-None-Match/If-Unmodified-Since test.
    account = 'test:tester'
    etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
    last_modified_since = 'Fri, 01 Apr 2014 12:00:00 GMT'

    header = {'X-Amz-Copy-Source-If-None-Match': etag,
              'X-Amz-Copy-Source-If-Unmodified-Since': last_modified_since}
    status, header, body = \
        self._test_copy_for_s3acl(account, put_header=header)

    self.assertEquals(status.split()[0], '200')
    self.assertEquals(len(self.swift.calls_with_headers), 4)
    # Before checking the copy source when s3acl is enabled, Swift3
    # checks the destination bucket's write permission and the
    # destination object's existence.
    _, _, headers = self.swift.calls_with_headers[-3]
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[-2]
    self.assertEquals(headers['If-None-Match'], etag)
    self.assertEquals(headers['If-Unmodified-Since'], last_modified_since)
    self.assertTrue(headers.get('If-Match') is None)
    self.assertTrue(headers.get('If-Modified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[-1]
    self.assertTrue(headers.get('If-None-Match') is None)
    self.assertTrue(headers.get('If-Unmodified-Since') is None)
    _, _, headers = self.swift.calls_with_headers[0]
    # NOTE(review): the assertions for call [0] appear to have been lost
    # (the line above assigns 'headers' but nothing checks it). The
    # matching non-s3acl test asserts both conditional headers are
    # absent here -- confirm against the original file.
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python
#
# Copyright (C) 2015 Joseph W. Metcalf
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby
# granted, provided that the above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys
import defs
import argparse
import string
import logging
import datetime
import time
import subprocess
def alert_start(JJJHHMM, format='%j%H%M'):
    """Convert an EAS date string (Julian day + HHMM, UTC) to a local datetime.

    The SAME header carries no year, so the current UTC year is assumed;
    the naive UTC time is then converted to local time via the epoch.

    Fix: the docstring used to sit *after* ``import calendar``, making it a
    dead string statement rather than a real docstring.
    """
    import calendar
    utc_dt = datetime.datetime.strptime(JJJHHMM, format).replace(
        datetime.datetime.utcnow().year)
    timestamp = calendar.timegm(utc_dt.timetuple())
    return datetime.datetime.fromtimestamp(timestamp)
def fn_dt(dt, format='%I:%M %p'):
    """Render *dt* via strftime; the default gives 12-hour clock time."""
    return dt.strftime(format)
# ZCZC-ORG-EEE-PSSCCC-PSSCCC+TTTT-JJJHHMM-LLLLLLLL-
def format_error(info=''):
    """Log a malformed-SAME-header warning, optionally with detail text."""
    logging.warning(' '.join(['INVALID FORMAT', info]))
def time_str(x, type='hour'):
    """Return '<x> <unit>' (pluralized for x >= 2); None when x is 0."""
    if x >= 2:
        return str(x) + ' ' + type + 's'
    if x == 1:
        return str(x) + ' ' + type
def get_length(TTTT):
    """Spell out a SAME purge time 'HHMM', e.g. '0130' -> '1 hour 30 minutes'."""
    hours, minutes = int(TTTT[:2]), int(TTTT[2:])
    parts = (time_str(hours), time_str(minutes, type='minute'))
    return ' '.join(part for part in parts if part)
def county_decode(input, COUNTRY):
    """Convert a SAME location code PSSCCC to [place text, state/area text]."""
    P, SS, CCC, SSCCC = input[:1], input[1:3], input[3:], input[1:]
    if COUNTRY == 'US':
        # Some codes live in the 'B' table and use its qualifier names.
        if SSCCC in defs.SAME_CTYB:
            loc_table = defs.SAME_LOCB
        else:
            loc_table = defs.SAME_LOCA
        county = 'ALL' if CCC == '000' else defs.US_SAME_CODE[SSCCC]
        place = ' '.join(filter(None, (loc_table[P], county)))
        return [place, defs.US_SAME_AREA[SS]]
    else:
        county = 'ALL' if CCC == '000' else defs.CA_SAME_CODE[SSCCC]
        return [county, defs.CA_SAME_AREA[SS]]
def get_division(input, COUNTRY='US'):
    """Return the geographic division word for a US state FIPS code.

    Falls back to 'counties' for unknown US codes and 'areas' elsewhere.
    """
    if COUNTRY != 'US':
        return 'areas'
    try:
        # FIPS_DIVN maps state FIPS -> division word; falsy entry == 'areas'.
        DIVISION = defs.FIPS_DIVN[input]
        if not DIVISION:
            DIVISION = 'areas'
    except KeyError:
        # Narrowed from a bare except: only a missing table entry should
        # fall back to 'counties'; other errors now propagate.
        DIVISION = 'counties'
    return DIVISION
def get_event(input):
    """Translate a SAME event code (EEE) to its description, or None.

    Unknown codes fall back to the generic text keyed by the final letter
    (Warning / wAtch / Emergency / Statement / Test / Message).
    """
    try:
        return defs.SAME__EEE[input]
    except KeyError:
        # Narrowed from a bare except: only an unknown code triggers the
        # generic-suffix fallback; other errors now propagate.
        if input[2:] in 'WAESTM':
            return defs.SAME_UEEE[input[2:]]
        return None
def printf(output=''):
    """Write *output* to stdout with runs of whitespace collapsed, plus \\n."""
    collapsed = ' '.join(output.lstrip(' ').split())
    sys.stdout.write(collapsed + '\n')
def alert_end(JJJHHMM, TTTT):
    """Return the local datetime when the alert expires (start + purge time)."""
    hours, minutes = int(TTTT[:2]), int(TTTT[2:])
    return alert_start(JJJHHMM) + datetime.timedelta(hours=hours,
                                                     minutes=minutes)
def get_location(STATION=None, TYPE=None):
    """Return a title-cased location for an NWS station id, else ''."""
    if TYPE == 'NWS':
        try:
            return defs.ICAO_LIST[STATION].title()
        except KeyError:
            # Narrowed from a bare except: an unknown/missing ICAO id
            # simply yields no location text.
            pass
    return ''
def check_watch(watch_list, PSSCCC_list, event_list, EEE):
    """Return True when the alert matches the configured SAME/event filters.

    An empty/None filter defaults to "match everything in this alert".
    """
    if not watch_list:
        watch_list = PSSCCC_list
    if not event_list:
        event_list = [EEE]
    # Compare on the SSCCC part only (drop the leading P qualifier digit).
    watched = {code[1:] for code in watch_list}
    received = {code[1:] for code in PSSCCC_list}
    return bool(watched & received) and EEE in event_list
def format_message(command, ORG='WXR', EEE='RWT',PSSCCC=[],TTTT='0030',JJJHHMM='0010000', STATION=None, TYPE=None, LLLLLLLL=None, COUNTRY='US', LANG='EN', MESSAGE=None,**kwargs):
    """Fill a user command template with raw SAME fields plus derived values
    (event text, start/end times, organization name, location, length).

    NOTE(review): PSSCCC=[] is a shared mutable default; it is only read
    here ('-'.join), but a None sentinel would be safer if ever mutated.
    """
    return command.format(ORG=ORG, EEE=EEE, TTTT=TTTT, JJJHHMM=JJJHHMM, STATION=STATION, TYPE=TYPE, LLLLLLLL=LLLLLLLL, COUNTRY=COUNTRY, LANG=LANG, event=get_event(EEE), end=fn_dt(alert_end(JJJHHMM,TTTT)), start=fn_dt(alert_start(JJJHHMM)), organization=defs.SAME__ORG[ORG]['NAME'][COUNTRY], PSSCCC='-'.join(PSSCCC), location=get_location(STATION, TYPE), date=fn_dt(datetime.datetime.now(),'%c'), length=get_length(TTTT), MESSAGE=MESSAGE, **kwargs)
def readable_message(ORG='WXR',EEE='RWT',PSSCCC=[],TTTT='0030',JJJHHMM='0010000',STATION=None, TYPE=None, LLLLLLLL=None, COUNTRY='US', LANG='EN'):
    """Print the human-readable alert text (wrapped to 78 cols) and return it."""
    import textwrap
    printf()
    location = get_location(STATION, TYPE)
    # Opening sentence built from the MSG1 template (organization, event,
    # times, optional location preposition).
    MSG = [format_message(defs.MSG__TEXT[LANG]['MSG1'], ORG=ORG, EEE=EEE, TTTT=TTTT, JJJHHMM=JJJHHMM, STATION=STATION, TYPE=TYPE, COUNTRY=COUNTRY, LANG=LANG, article=defs.MSG__TEXT[LANG][defs.SAME__ORG[ORG]['ARTICLE'][COUNTRY]].title(), has=defs.MSG__TEXT[LANG]['HAS'] if not defs.SAME__ORG[ORG]['PLURAL'] else defs.MSG__TEXT[LANG]['HAVE'], preposition=defs.MSG__TEXT[LANG]['IN'] if location !='' else '')]
    current_state = None
    # Affected counties, grouped under a state/division heading whenever
    # the state changes between consecutive codes.
    for idx, item in enumerate(PSSCCC):
        county, state = county_decode(item, COUNTRY)
        if current_state != state:
            DIVISION = get_division(PSSCCC[idx][1:3], COUNTRY)
            output = defs.MSG__TEXT[LANG]['MSG2'].format(conjunction='' if idx == 0 else defs.MSG__TEXT[LANG]['AND'], state=state, division=DIVISION)
            MSG += [''.join(output)]
            current_state = state
        MSG += [defs.MSG__TEXT[LANG]['MSG3'].format(county=county if county != state else defs.MSG__TEXT[LANG]['ALL'].upper(),punc=',' if idx !=len(PSSCCC)-1 else '.')]
    MSG += [defs.MSG__TEXT[LANG]['MSG4']]
    MSG += [''.join(['(',LLLLLLLL,')'])]
    output = textwrap.wrap(''.join(MSG), 78)
    for item in output:
        printf(item)
    printf()
    return ''.join(MSG)
def clean_msg(same):
    """Normalize a raw SAME string: uppercase, strip noise, pad the tail.

    Drops anything before 'ZCZC', removes whitespace and invalid
    characters, and restores a missing trailing dash (padding the final
    location field to 8 characters with '?').
    """
    valid_chars = string.ascii_uppercase + string.digits + '+-/*'
    same = same.upper()
    start = same.find('ZCZC')
    if start != -1:
        same = same[start:]                           # drop leading garbage
    same = ''.join(same.split())                      # strip all whitespace
    same = ''.join(c for c in same if c in valid_chars)
    last = len(same) - 1
    if same[last] != '-':
        gap = last - same.rfind('-')
        if gap <= 8:
            # Pad the truncated location field and restore the dash.
            same = same.ljust(last + (8 - gap) + 1, '?') + '-'
    return same
def same_decode(same, lang, same_watch=None, event_watch=None, text=True, call=None, command=None):
    """Decode one SAME message string; print text and/or run commands.

    same        -- raw SAME header string
    lang        -- language key into defs.MSG__TEXT
    same_watch  -- optional list of SAME location codes to filter on
    event_watch -- optional list of event codes to filter on
    text        -- print the human-readable message when True
    call        -- external program to invoke with formatted arguments
    command     -- format template(s) for the output / external call
    """
    try:
        same = clean_msg(same)
    except:
        # Unusable input (e.g. empty string indexes past the end); drop it.
        return

    msgidx = same.find('ZCZC')
    if msgidx != -1:
        logging.debug('-' * 30)
        logging.debug(' '.join([' Identifer found >', 'ZCZC']))
        S1, S2 = None, None
        # Header layout: ZCZC-ORG-EEE-PSSCCC-...+TTTT-JJJHHMM-LLLLLLLL-
        try:
            S1, S2 = same[msgidx:].split('+')
        except:
            format_error()
            return
        try:
            ZCZC, ORG, EEE, PSSCCC = S1.split('-', 3)
        except:
            format_error()
            return
        logging.debug(' '.join([' Originator found >', ORG]))
        logging.debug(' '.join([' Event Code found >', EEE]))
        try:
            PSSCCC_list = PSSCCC.split('-')
        except:
            format_error()
        try:
            TTTT, JJJHHMM, LLLLLLLL, tail = S2.split('-')
        except:
            format_error()
            return
        logging.debug(' '.join([' Purge Time found >', TTTT]))
        logging.debug(' '.join([' Date Code found >', JJJHHMM]))
        logging.debug(' '.join(['Location Code found >', LLLLLLLL]))
        try:
            STATION, TYPE = LLLLLLLL.split('/', 1)
        except:
            STATION, TYPE = None, None
            format_error()
        logging.debug(' '.join([' SAME Codes found >', str(len(PSSCCC_list))]))
        # Guess the country: whichever code table rejects fewer codes wins.
        US_bad_list = []
        CA_bad_list = []
        for code in PSSCCC_list:
            try:
                county = defs.US_SAME_CODE[code[1:]]
            except KeyError:
                US_bad_list.append(code)
            try:
                county = defs.CA_SAME_CODE[code[1:]]
            except KeyError:
                CA_bad_list.append(code)
        if len(US_bad_list) < len(CA_bad_list):
            COUNTRY = 'US'
        if len(US_bad_list) > len(CA_bad_list):
            COUNTRY = 'CA'
        if len(US_bad_list) == len(CA_bad_list):
            # NOTE(review): 'type' here is the builtin, which never equals
            # 'CA', so this tie-break always falls through to 'US' -- looks
            # like a lost variable; confirm against upstream.
            if type == 'CA':
                COUNTRY = 'CA'
            else:
                COUNTRY = 'US'
        if COUNTRY == 'CA':
            bad_list = CA_bad_list
        else:
            bad_list = US_bad_list
        logging.debug(' '.join(['Invalid Codes found >', str(len(bad_list))]))
        logging.debug(' '.join([' Country >', COUNTRY]))
        logging.debug('-' * 30)
        # Drop codes the chosen country's table rejected, then sort.
        for code in bad_list:
            PSSCCC_list.remove(code)
        PSSCCC_list.sort()
        if check_watch(same_watch, PSSCCC_list, event_watch, EEE):
            if text:
                MESSAGE = readable_message(ORG, EEE, PSSCCC_list, TTTT, JJJHHMM, STATION, TYPE, LLLLLLLL, COUNTRY, lang)
            # NOTE(review): MESSAGE is unbound below when text=False and a
            # command is given -- format_message would raise; confirm.
            if command:
                if call:
                    # Run the external program once with all formatted args.
                    l_cmd = []
                    for cmd in command:
                        l_cmd.append(format_message(cmd, ORG, EEE, PSSCCC_list, TTTT, JJJHHMM, STATION, TYPE, LLLLLLLL, COUNTRY, lang, MESSAGE))
                    try:
                        subprocess.call([call] + l_cmd)
                    except Exception as detail:
                        logging.error(detail)
                        return
                    pass
                else:
                    # No external program: print the formatted command line.
                    f_cmd = format_message(' '.join(command), ORG, EEE, PSSCCC_list, TTTT, JJJHHMM, STATION, TYPE, LLLLLLLL, COUNTRY, lang, MESSAGE)
                    printf(f_cmd)
    else:
        msgidx = same.find('NNNN')
        if msgidx == -1:
            logging.warning('Valid identifer not found.')
        else:
            logging.debug(' '.join(['End of Message found >', 'NNNN', str(msgidx)]))
def parse_arguments():
    """Assemble the command-line interface and return the known arguments."""
    p = argparse.ArgumentParser(description=defs.DESCRIPTION,
                                prog=defs.PROGRAM,
                                fromfile_prefix_chars='@')
    p.add_argument('--msg', help='message to decode')
    p.add_argument('--same', nargs='*', help='filter by SAME code')
    p.add_argument('--event', nargs='*', help='filter by event code')
    p.add_argument('--lang', default='EN', help='set language')
    p.add_argument('--loglevel', default=40, type=int,
                   choices=[10, 20, 30, 40, 50], help='set log level')
    p.add_argument('--text', dest='text', action='store_true',
                   help='output readable message')
    p.add_argument('--no-text', dest='text', action='store_false',
                   help='disable readable message')
    p.add_argument('--version', action='version',
                   version=' '.join([defs.PROGRAM, defs.VERSION]),
                   help='show version infomation and exit')
    p.add_argument('--call', help='call external command')
    p.add_argument('--command', nargs='*', help='command message')
    p.add_argument('--source', help='source program')
    p.set_defaults(text=True)
    known, _unknown = p.parse_known_args()
    return known
def main():
    """Entry point: decode --msg, lines from a --source process, or stdin."""
    args = parse_arguments()
    logging.basicConfig(level=args.loglevel, format='%(levelname)s: %(message)s')
    if args.msg:
        # One-shot decode of the message given on the command line.
        same_decode(args.msg, args.lang, same_watch=args.same, event_watch=args.event, text=args.text, call=args.call, command=args.command)
    elif args.source:
        # Stream lines from an external producer process.
        try:
            source_process = subprocess.Popen(args.source, stdout=subprocess.PIPE)
        except Exception as detail:
            logging.error(detail)
            return
        while True:
            line = source_process.stdout.readline()
            if line:
                logging.debug(line)
                same_decode(line, args.lang, same_watch=args.same, event_watch=args.event, text=args.text, call=args.call, command=args.command)
            # NOTE(review): no break/sleep on EOF, so this loop busy-spins
            # once the source process exits -- confirm intended.
    else:
        # Read from stdin line by line.
        while True:
            for line in sys.stdin:
                logging.debug(line)
                same_decode(line, args.lang, same_watch=args.same, event_watch=args.event, text=args.text, call=args.call, command=args.command)
            # NOTE(review): the outer 'while True' re-enters an exhausted
            # stdin iterator forever (busy loop at EOF) -- confirm intended.
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.parsers
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for parser generators.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, \
include, bygroups, using
from pygments.token import Punctuation, Other, Text, Comment, Operator, \
Keyword, Name, String, Number, Whitespace
from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
ObjectiveCLexer, DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
from pygments.lexers.web import ActionScriptLexer
__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
#'AntlrCLexer',
'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
'AntlrJavaLexer', "AntlrActionScriptLexer"]
class RagelLexer(RegexLexer):
    """
    A pure `Ragel <http://www.complang.org/ragel/>`_ lexer. Use this for
    fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
    (or one of the language-specific subclasses).

    *New in Pygments 1.1.*
    """

    name = 'Ragel'
    aliases = ['ragel']
    filenames = []

    tokens = {
        'whitespace': [
            (r'\s+', Whitespace)
        ],
        'comments': [
            (r'\#.*$', Comment),
        ],
        'keywords': [
            (r'(access|action|alphtype)\b', Keyword),
            (r'(getkey|write|machine|include)\b', Keyword),
            (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
            (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
        ],
        'numbers': [
            (r'0x[0-9A-Fa-f]+', Number.Hex),
            (r'[+-]?[0-9]+', Number.Integer),
        ],
        'literals': [
            (r'"(\\\\|\\"|[^"])*"', String),  # double quote string
            (r"'(\\\\|\\'|[^'])*'", String),  # single quote string
            (r'\[(\\\\|\\\]|[^\]])*\]', String),  # square bracket literals
            (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex),  # regular expressions
        ],
        'identifiers': [
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable),
        ],
        'operators': [
            (r',', Operator),  # Join
            (r'\||&|-|--', Operator),  # Union, Intersection and Subtraction
            (r'\.|<:|:>|:>>', Operator),  # Concatenation
            (r':', Operator),  # Label
            (r'->', Operator),  # Epsilon Transition
            (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator),  # EOF Actions
            (r'(>|\$|%|<|@|<>)(!|err\b)', Operator),  # Global Error Actions
            (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator),  # Local Error Actions
            (r'(>|\$|%|<|@|<>)(~|to\b)', Operator),  # To-State Actions
            (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator),  # From-State Actions
            (r'>|@|\$|%', Operator),  # Transition Actions and Priorities
            (r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator),  # Repetition
            (r'!|\^', Operator),  # Negation
            (r'\(|\)', Operator),  # Grouping
        ],
        'root': [
            include('literals'),
            include('whitespace'),
            include('comments'),
            include('keywords'),
            include('numbers'),
            include('identifiers'),
            include('operators'),
            # '{' enters the host-language state until its matching '}'.
            (r'{', Punctuation, 'host'),
            (r'=', Operator),
            (r';', Punctuation),
        ],
        'host': [
            (r'(' + r'|'.join((  # keep host code in largest possible chunks
                r'[^{}\'"/#]+',  # exclude unsafe characters
                r'[^\\][\\][{}]',  # allow escaped { or }

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"',  # double quote string
                r"'(\\\\|\\'|[^'])*'",  # single quote string
                r'//.*$\n?',  # single line comment
                r'/\*(.|\n)*?\*/',  # multi-line javadoc-style comment
                r'\#.*$\n?',  # ruby comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # / is safe now that we've handled regex and javadoc comments
                r'/',
            )) + r')+', Other),

            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
    }
class RagelEmbeddedLexer(RegexLexer):
    """
    A lexer for `Ragel`_ embedded in a host language file.

    This will only highlight Ragel statements. If you want host language
    highlighting then call the language-specific Ragel lexer.

    *New in Pygments 1.1.*
    """

    name = 'Embedded Ragel'
    aliases = ['ragel-em']
    filenames = ['*.rl']

    tokens = {
        'root': [
            (r'(' + r'|'.join((  # keep host code in largest possible chunks
                r'[^%\'"/#]+',  # exclude unsafe characters
                r'%(?=[^%]|$)',  # a single % sign is okay, just not 2 of them

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"',  # double quote string
                r"'(\\\\|\\'|[^'])*'",  # single quote string
                r'/\*(.|\n)*?\*/',  # multi-line javadoc-style comment
                r'//.*$\n?',  # single line comment
                r'\#.*$\n?',  # ruby/ragel comment
                r'/(?!\*)(\\\\|\\/|[^/])*/',  # regular expression

                # / is safe now that we've handled regex and javadoc comments
                r'/',
            )) + r')+', Other),

            # Single Line FSM.
            # Please don't put a quoted newline in a single line FSM.
            # That's just mean. It will break this.
            (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
                                                     using(RagelLexer),
                                                     Punctuation, Text)),

            # Multi Line FSM.
            (r'(%%%%|%%){', Punctuation, 'multi-line-fsm'),
        ],
        'multi-line-fsm': [
            (r'(' + r'|'.join((  # keep ragel code in largest possible chunks.
                r'(' + r'|'.join((
                    r'[^}\'"\[/#]',  # exclude unsafe characters
                    r'}(?=[^%]|$)',  # } is okay as long as it's not followed by %
                    r'}%(?=[^%]|$)',  # ...well, one %'s okay, just not two...
                    r'[^\\][\\][{}]',  # ...and } is okay if it's escaped

                    # allow / if it's preceded with one of these symbols
                    # (ragel EOF actions)
                    r'(>|\$|%|<|@|<>)/',

                    # specifically allow regex followed immediately by *
                    # so it doesn't get mistaken for a comment
                    r'/(?!\*)(\\\\|\\/|[^/])*/\*',

                    # allow / as long as it's not followed by another / or by a *
                    r'/(?=[^/\*]|$)',

                    # We want to match as many of these as we can in one block.
                    # Not sure if we need the + sign here,
                    # does it help performance?
                )) + r')+',

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"',  # double quote string
                r"'(\\\\|\\'|[^'])*'",  # single quote string
                r"\[(\\\\|\\\]|[^\]])*\]",  # square bracket literal
                r'/\*(.|\n)*?\*/',  # multi-line javadoc-style comment
                r'//.*$\n?',  # single line comment
                r'\#.*$\n?',  # ruby/ragel comment
            )) + r')+', using(RagelLexer)),
            (r'}%%', Punctuation, '#pop'),
        ]
    }

    # Scoring hook: an explicit '@LANG: indep' tag is a certain match;
    # otherwise return a weak 0.1 so more specific lexers win.
    def analyse_text(text):
        return '@LANG: indep' in text or 0.1
class RagelRubyLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a Ruby host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Ruby Host'
    aliases = ['ragel-ruby', 'ragel-rb']
    filenames = ['*.rl']

    def __init__(self, **options):
        # Host language Ruby; Ragel sections via RagelEmbeddedLexer.
        super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
                                             **options)

    def analyse_text(text):
        return '@LANG: ruby' in text
class RagelCLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a C host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in C Host'
    aliases = ['ragel-c']
    filenames = ['*.rl']

    def __init__(self, **options):
        # Host language C; Ragel sections via RagelEmbeddedLexer.
        super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer,
                                          **options)

    def analyse_text(text):
        return '@LANG: c' in text
class RagelDLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a D host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in D Host'
    aliases = ['ragel-d']
    filenames = ['*.rl']

    def __init__(self, **options):
        # Host language D; Ragel sections via RagelEmbeddedLexer.
        super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options)

    def analyse_text(text):
        return '@LANG: d' in text
class RagelCppLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a CPP host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in CPP Host'
    aliases = ['ragel-cpp']
    filenames = ['*.rl']

    def __init__(self, **options):
        # Host language C++; Ragel sections via RagelEmbeddedLexer.
        super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options)

    def analyse_text(text):
        return '@LANG: c++' in text
class RagelObjectiveCLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in an Objective C host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Objective C Host'
    aliases = ['ragel-objc']
    filenames = ['*.rl']

    def __init__(self, **options):
        # Host language Objective-C; Ragel sections via RagelEmbeddedLexer.
        super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer,
                                                   RagelEmbeddedLexer,
                                                   **options)

    def analyse_text(text):
        return '@LANG: objc' in text
class RagelJavaLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a Java host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Java Host'
    aliases = ['ragel-java']
    filenames = ['*.rl']

    def __init__(self, **options):
        # Host language Java; Ragel sections via RagelEmbeddedLexer.
        super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
                                             **options)

    def analyse_text(text):
        return '@LANG: java' in text
class AntlrLexer(RegexLexer):
"""
Generic `ANTLR`_ Lexer.
Should not be called directly, instead
use DelegatingLexer for your target language.
*New in Pygments 1.1.*
.. _ANTLR: http://www.antlr.org/
"""
name = 'ANTLR'
aliases = ['antlr']
filenames = []
_id = r'[A-Za-z][A-Za-z_0-9]*'
_TOKEN_REF = r'[A-Z][A-Za-z_0-9]*'
_RULE_REF = r'[a-z][A-Za-z_0-9]*'
_STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
_INT = r'[0-9]+'
tokens = {
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*$', Comment),
(r'/\*(.|\n)*?\*/', Comment),
],
'root': [
include('whitespace'),
include('comments'),
(r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
Punctuation)),
# optionsSpec
(r'options\b', Keyword, 'options'),
# tokensSpec
(r'tokens\b', Keyword, 'tokens'),
# attrScope
(r'(scope)(\s*)(' + _id + ')(\s*)({)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation), 'action'),
# exception
(r'(catch|finally)\b', Keyword, 'exception'),
# action
(r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
Name.Label, Whitespace, Punctuation), 'action'),
# rule
(r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', \
bygroups(Keyword, Whitespace, Name.Label, Punctuation),
('rule-alts', 'rule-prelims')),
],
'exception': [
(r'\n', Whitespace, '#pop'),
(r'\s', Whitespace),
include('comments'),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
],
'rule-prelims': [
include('whitespace'),
include('comments'),
(r'returns\b', Keyword),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
# throwsSpec
(r'(throws)(\s+)(' + _id + ')',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(?:(,)(\s*)(' + _id + '))+',
bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
# optionsSpec
(r'options\b', Keyword, 'options'),
# ruleScopeSpec - scope followed by target language code or name of action
# TODO finish implementing other possibilities for scope
# L173 ANTLRv3.g from ANTLR book
(r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation),
'action'),
(r'(scope)(\s+)(' + _id + ')(\s*)(;)',
bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
# ruleAction
(r'(@' + _id + ')(\s*)({)',
bygroups(Name.Label, Whitespace, Punctuation), 'action'),
# finished prelims, go to rule alts!
(r':', Punctuation, '#pop')
],
'rule-alts': [
include('whitespace'),
include('comments'),
# These might need to go in a separate 'block' state triggered by (
(r'options\b', Keyword, 'options'),
(r':', Punctuation),
# literals
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'<<([^>]|>[^>])>>', String),
# identifiers
# Tokens start with capital letter.
(r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant),
# Rules start with small letter.
(r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable),
# operators
(r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
(r',', Punctuation),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
(r';', Punctuation, '#pop')
],
'tokens': [
include('whitespace'),
include('comments'),
(r'{', Punctuation),
(r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+ ')?(\s*)(;)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
String, Whitespace, Punctuation)),
(r'}', Punctuation, '#pop'),
],
'options': [
include('whitespace'),
include('comments'),
(r'{', Punctuation),
(r'(' + _id + r')(\s*)(=)(\s*)(' +
'|'.join((_id, _STRING_LITERAL, _INT, '\*'))+ ')(\s*)(;)',
bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
Text, Whitespace, Punctuation)),
(r'}', Punctuation, '#pop'),
],
'action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^\${}\'"/\\]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# backslashes are okay, as long as we are not backslashing a %
r'\\(?!%)',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'(\\)(%)', bygroups(Punctuation, Other)),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'{', Punctuation, '#push'),
(r'}', Punctuation, '#pop'),
],
'nested-arg-action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks.
r'[^\$\[\]\'"/]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
]
}
def analyse_text(text):
    """Score the input as an ANTLR grammar: look for a `grammar <name>;` line."""
    grammar_decl = r'^\s*grammar\s+[a-zA-Z0-9]+\s*;'
    return re.search(grammar_decl, text, re.M)
# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
# TH: I'm not aware of any language features of C++ that will cause
# incorrect lexing of C files. Antlr doesn't appear to make a distinction,
# so just assume they're C++. No idea how to make Objective C work in the
# future.
#class AntlrCLexer(DelegatingLexer):
# """
# ANTLR with C Target
#
# *New in Pygments 1.1*
# """
#
# name = 'ANTLR With C Target'
# aliases = ['antlr-c']
# filenames = ['*.G', '*.g']
#
# def __init__(self, **options):
# super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options)
#
# def analyse_text(text):
# return re.match(r'^\s*language\s*=\s*C\s*;', text)
class AntlrCppLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as C++.

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With CPP Target'
    aliases = ['antlr-cpp']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to CppLexer, grammar syntax to AntlrLexer.
        super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options)

    def analyse_text(text):
        # Must look like an ANTLR grammar *and* declare the C target.
        return (AntlrLexer.analyse_text(text) and
                re.search(r'^\s*language\s*=\s*C\s*;', text, re.M))
class AntlrObjectiveCLexer(DelegatingLexer):
    """
    `ANTLR`_ with Objective-C Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With ObjectiveC Target'
    aliases = ['antlr-objc']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to ObjectiveCLexer, grammar syntax to AntlrLexer.
        super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer,
                                                   AntlrLexer, **options)

    def analyse_text(text):
        # Bug fix: every sibling lexer passes re.M so that '^' matches at each
        # line start; without it the 'language = ObjC;' option (which is never
        # the first line of a grammar) could not be detected.
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*ObjC\s*;', text, re.M)
class AntlrCSharpLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as C#.

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With C# Target'
    aliases = ['antlr-csharp', 'antlr-c#']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to CSharpLexer, grammar syntax to AntlrLexer.
        super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer,
                                               **options)

    def analyse_text(text):
        # Must look like an ANTLR grammar *and* declare the CSharp2 target.
        return (AntlrLexer.analyse_text(text) and
                re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M))
class AntlrPythonLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as Python.

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Python Target'
    aliases = ['antlr-python']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to PythonLexer, grammar syntax to AntlrLexer.
        super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer,
                                               **options)

    def analyse_text(text):
        # Must look like an ANTLR grammar *and* declare the Python target.
        return (AntlrLexer.analyse_text(text) and
                re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M))
class AntlrJavaLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as Java.

    *New in Pygments 1.1*
    """

    name = 'ANTLR With Java Target'
    aliases = ['antlr-java']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to JavaLexer, grammar syntax to AntlrLexer.
        super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        # ANTLR grammars target Java when no explicit target is declared,
        # so any plausible grammar scores 0.9 here.
        return AntlrLexer.analyse_text(text) and 0.9
class AntlrRubyLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as Ruby.

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Ruby Target'
    aliases = ['antlr-ruby', 'antlr-rb']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to RubyLexer, grammar syntax to AntlrLexer.
        super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        # Must look like an ANTLR grammar *and* declare the Ruby target.
        return (AntlrLexer.analyse_text(text) and
                re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M))
class AntlrPerlLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as Perl.

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Perl Target'
    aliases = ['antlr-perl']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to PerlLexer, grammar syntax to AntlrLexer.
        super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        # Must look like an ANTLR grammar *and* declare the Perl5 target.
        return (AntlrLexer.analyse_text(text) and
                re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M))
class AntlrActionScriptLexer(DelegatingLexer):
    """
    `ANTLR`_ grammar lexer that highlights embedded actions as ActionScript.

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With ActionScript Target'
    aliases = ['antlr-as', 'antlr-actionscript']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        # Host-language chunks go to ActionScriptLexer, grammar syntax to AntlrLexer.
        super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
                                                     AntlrLexer, **options)

    def analyse_text(text):
        # Must look like an ANTLR grammar *and* declare the ActionScript target.
        return (AntlrLexer.analyse_text(text) and
                re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M))
| |
# -*- coding: utf-8
# Collection of functions to create data for testing
import numpy as np
import numpy.matlib
from numpy.polynomial import Polynomial as P
from datetime import datetime
import itertools
from skimage.transform import rotate
from random import uniform
import copy
import nibabel as nib
from spinalcordtoolbox.image import Image, concat_data
from spinalcordtoolbox.resampling import resample_nib
from spinalcordtoolbox.centerline.curve_fitting import bspline, polyfit_1d
# TODO: retrieve os.environ['SCT_DEBUG']
DEBUG = False # Save img_sub
def dummy_blob(size_arr=(9, 9, 9), pixdim=(1, 1, 1), coordvox=None):
    """
    Create an image with non-null voxels at coordinates specified by coordvox.

    :param size_arr: tuple: (nx, ny, nz) shape of the output volume
    :param pixdim: tuple: (px, py, pz) voxel size in mm
    :param coordvox: If None: will create a single voxel in the middle of the FOV.
                     If tuple: (x,y,z): Create single voxel at specified coordinate.
                     If list of tuples: [(x1,y1,z1), (x2,y2,z2)]: Create multiple voxels.
    :return: Image object
    :raises ValueError: if coordvox is neither None, a tuple nor a list.
    """
    data = np.zeros(size_arr)
    # if not specified, voxel coordinate is set at the middle of the volume.
    # Bug fix: the None case previously fell through without marking any voxel;
    # normalizing to a tuple first lets it share the single-voxel branch below.
    if coordvox is None:
        coordvox = tuple([round(i / 2) for i in size_arr])
    if isinstance(coordvox, list):
        for icoord in coordvox:
            data[icoord] = 1
    elif isinstance(coordvox, tuple):
        data[coordvox] = 1
    else:
        # Bug fix: the exception was instantiated but never raised.
        raise ValueError("Wrong type for coordvox")
    # Create image with default orientation LPI
    affine = np.eye(4)
    affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
    nii = nib.nifti1.Nifti1Image(data, affine)
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    return img
def dummy_centerline(size_arr=(9, 9, 9), pixdim=(1, 1, 1), subsampling=1, dilate_ctl=0, hasnan=False, zeroslice=None,
                     outlier=None, orientation='RPI', debug=False):
    """
    Create a dummy Image centerline of small size. Return the full and sub-sampled version along z. Voxel resolution
    on fully-sampled data is 1x1x1 mm (so, 2x undersampled data along z would have resolution of 1x1x2 mm).

    :param size_arr: tuple: (nx, ny, nz)
    :param pixdim: tuple: (px, py, pz)
    :param subsampling: int >=1. Subsampling factor along z. 1: no subsampling. 2: centerline defined every other z.
    :param dilate_ctl: Dilation of centerline. E.g., if dilate_ctl=1, result will be a square of 3x3 per slice.
                       if dilate_ctl=0, result will be a single pixel per slice.
    :param hasnan: Bool: Image has non-numerical values: nan, inf. In this case, do not subsample.
    :param zeroslice: list int: zero all slices listed in this param
    :param outlier: list int: replace the current point with an outlier at the corner of the image for the slices listed
    :param orientation: Output orientation (e.g. 'RPI')
    :param debug: Bool: Write temp files
    :return: (img, img_sub, arr_ctl)
    """
    # Bug fix: mutable default arguments ([]) replaced with None sentinels.
    zeroslice = [] if zeroslice is None else zeroslice
    outlier = [] if outlier is None else outlier
    nx, ny, nz = size_arr
    # create regularized curve, within X-Z plane, located at y=ny/4, passing through the following points:
    x = np.array([round(nx/4.), round(nx/2.), round(3*nx/4.)])
    z = np.array([0, round(nz/2.), nz-1])
    # we use bspline (instead of poly) in order to avoid bad extrapolation at edges
    # see: https://github.com/spinalcordtoolbox/spinalcordtoolbox/pull/2754
    xfit, _ = bspline(z, x, range(nz), 10)
    data = np.zeros((nx, ny, nz))
    # Bug fix: np.int is deprecated (removed in numpy>=1.24); builtin int is equivalent here.
    arr_ctl = np.array([xfit.astype(int),
                        [round(ny / 4.)] * len(range(nz)),
                        range(nz)], dtype=np.uint16)
    # Loop across dilation of centerline. E.g., if dilate_ctl=1, result will be a square of 3x3 per slice.
    for ixiy_ctl in itertools.product(range(-dilate_ctl, dilate_ctl+1, 1), range(-dilate_ctl, dilate_ctl+1, 1)):
        data[(arr_ctl[0] + ixiy_ctl[0]).tolist(),
             (arr_ctl[1] + ixiy_ctl[1]).tolist(),
             arr_ctl[2].tolist()] = 1
    # Zero specified slices. Bug fix: "zeroslice is not []" compares object
    # identity and was always True; test for a non-empty list instead.
    if zeroslice:
        data[:, :, zeroslice] = 0
    # Add outlier (same identity-comparison bug fixed here).
    if outlier:
        # First, zero all the slice
        data[:, :, outlier] = 0
        # Then, add point in the corner
        data[0, 0, outlier] = 1
    # Create image with default orientation LPI
    affine = np.eye(4)
    affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
    nii = nib.nifti1.Nifti1Image(data, affine)
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    # subsample data
    img_sub = img.copy()
    img_sub.data = np.zeros((nx, ny, nz))
    for iz in range(0, nz, subsampling):
        img_sub.data[..., iz] = data[..., iz]
    # Add non-numerical values at the top corner of the image
    if hasnan:
        img.data[0, 0, 0] = np.nan
        img.data[1, 0, 0] = np.inf
    # Update orientation
    img.change_orientation(orientation)
    img_sub.change_orientation(orientation)
    if debug:
        img_sub.save('tmp_dummy_seg_'+datetime.now().strftime("%Y%m%d%H%M%S%f")+'.nii.gz')
    return img, img_sub, arr_ctl
def dummy_segmentation(size_arr=(256, 256, 256), pixdim=(1, 1, 1), dtype=np.float64, orientation='LPI',
                       shape='rectangle', angle_RL=0, angle_AP=0, angle_IS=0, radius_RL=5.0, radius_AP=3.0,
                       degree=2, interleaved=False, zeroslice=None, debug=False):
    """Create a dummy Image with an ellipse or ones running from top to bottom in the 3rd dimension, and rotate the
    image to make sure that compute_csa and compute_shape properly estimate the centerline angle.

    :param size_arr: tuple: (nx, ny, nz)
    :param pixdim: tuple: (px, py, pz)
    :param dtype: Numpy dtype. NOTE(review): currently unused -- the data is always written as float32;
                  confirm whether callers rely on it before applying it.
    :param orientation: Orientation of the image. Default: LPI
    :param shape: {'rectangle', 'ellipse'}
    :param angle_RL: int: angle around RL axis (in deg)
    :param angle_AP: int: angle around AP axis (in deg)
    :param angle_IS: int: angle around IS axis (in deg)
    :param radius_RL: float: 1st radius. With a, b = 50.0, 30.0 (in mm), theoretical CSA of ellipse is 4712.4
    :param radius_AP: float: 2nd radius
    :param degree: int: degree of polynomial fit
    :param interleaved: bool: create a dummy segmentation simulating interleaved acquisition
    :param zeroslice: list int: zero all slices listed in this param
    :param debug: Write temp files for debug
    :return: img: Image object
    """
    # Bug fix: mutable default argument ([]) replaced with a None sentinel.
    zeroslice = [] if zeroslice is None else zeroslice
    # Initialization
    padding = 15  # Padding size (isotropic) to avoid edge effect during rotation
    # Create a 3d array, with dimensions corresponding to x: RL, y: AP, z: IS
    nx, ny, nz = [int(size_arr[i] * pixdim[i]) for i in range(3)]
    data = np.zeros((nx, ny, nz))
    xx, yy = np.mgrid[:nx, :ny]
    # Create a dummy segmentation using polynomial function
    # create regularized curve, within Y-Z plane (A-P), located at x=nx/2:
    x = [round(nx / 2.)] * len(range(nz))
    # and passing through the following points:
    y = [round(ny / 2.), round(ny / 2.), round(ny / 2.)]  # straight curve (same location of AP across SI)
    z = np.array([0, round(nz / 2.), nz - 1])
    # we use poly (instead of bspline) in order to allow change of scalar for each term of polynomial function
    p = np.polynomial.Polynomial.fit(z, y, deg=degree)
    # create two polynomial fits, by choosing random scalar for each term of both polynomial functions and then
    # interleave these two fits (one for odd slices, second one for even slices)
    if interleaved:
        p_even = copy.copy(p)
        p_odd = copy.copy(p)
        # choose random scalar for each term of polynomial function
        # even slices
        p_even.coef = [element * uniform(0.5, 1) for element in p_even.coef]
        # odd slices
        p_odd.coef = [element * uniform(0.5, 1) for element in p_odd.coef]
        # performs two polynomial fits - one will serve for even slices, second one for odd slices
        yfit_even = np.round(p_even(range(nz)))
        yfit_odd = np.round(p_odd(range(nz)))
        # combine even and odd polynomial fits
        yfit = np.zeros(nz)
        yfit[0:nz:2] = yfit_even[0:nz:2]
        yfit[1:nz:2] = yfit_odd[1:nz:2]
    # IF INTERLEAVED=FALSE, perform only one polynomial fit without modification of term's scalars
    else:
        yfit = np.round(p(range(nz)))  # has to be rounded for correct float -> int conversion in next step
    # Bug fix: np.int is deprecated (removed in numpy>=1.24); builtin int is equivalent here.
    yfit = yfit.astype(int)
    # loop across slices and add object
    for iz in range(nz):
        if shape == 'rectangle':  # theoretical CSA: (a*2+1)(b*2+1)
            data[:, :, iz] = ((abs(xx - x[iz]) <= radius_RL) & (abs(yy - yfit[iz]) <= radius_AP)) * 1
        if shape == 'ellipse':
            data[:, :, iz] = (((xx - x[iz]) / radius_RL) ** 2 + ((yy - yfit[iz]) / radius_AP) ** 2 <= 1) * 1
    # Pad to avoid edge effect during rotation
    data = np.pad(data, padding, 'reflect')
    # ROTATION ABOUT IS AXIS
    # rotate (in deg), and re-grid using linear interpolation
    data_rotIS = rotate(data, angle_IS, resize=False, center=None, order=1, mode='constant', cval=0, clip=False,
                        preserve_range=False)
    # ROTATION ABOUT RL AXIS
    # Swap x-z axes (to make a rotation within y-z plane, because rotate will apply rotation on the first 2 dims)
    data_rotIS_swap = data_rotIS.swapaxes(0, 2)
    # rotate (in deg), and re-grid using linear interpolation
    data_rotIS_swap_rotRL = rotate(data_rotIS_swap, angle_RL, resize=False, center=None, order=1, mode='constant',
                                   cval=0, clip=False, preserve_range=False)
    # swap back
    data_rotIS_rotRL = data_rotIS_swap_rotRL.swapaxes(0, 2)
    # ROTATION ABOUT AP AXIS
    # Swap y-z axes (to make a rotation within x-z plane)
    data_rotIS_rotRL_swap = data_rotIS_rotRL.swapaxes(1, 2)
    # rotate (in deg), and re-grid using linear interpolation
    data_rotIS_rotRL_swap_rotAP = rotate(data_rotIS_rotRL_swap, angle_AP, resize=False, center=None, order=1,
                                         mode='constant', cval=0, clip=False, preserve_range=False)
    # swap back
    data_rot = data_rotIS_rotRL_swap_rotAP.swapaxes(1, 2)
    # Crop image (to remove padding)
    data_rot_crop = data_rot[padding:nx+padding, padding:ny+padding, padding:nz+padding]
    # Zero specified slices. Bug fix: "zeroslice is not []" compares object
    # identity and was always True; test for a non-empty list instead.
    if zeroslice:
        data_rot_crop[:, :, zeroslice] = 0
    # Create nibabel object
    xform = np.eye(4)
    for i in range(3):
        xform[i][i] = 1  # in [mm]
    nii = nib.nifti1.Nifti1Image(data_rot_crop.astype('float32'), xform)
    # resample to desired resolution
    nii_r = resample_nib(nii, new_size=pixdim, new_size_type='mm', interpolation='linear')
    # Create Image object. Default orientation is LPI.
    # NOTE(review): nibabel's get_data() is deprecated in favour of
    # np.asanyarray(nii_r.dataobj); kept as-is here to avoid a behavior change.
    img = Image(nii_r.get_data(), hdr=nii_r.header, dim=nii_r.header.get_data_shape())
    # Update orientation
    img.change_orientation(orientation)
    if debug:
        img.save('tmp_dummy_seg_'+datetime.now().strftime("%Y%m%d%H%M%S%f")+'.nii.gz')
    return img
def dummy_segmentation_4d(vol_num=10, create_bvecs=False, size_arr=(256, 256, 256), pixdim=(1, 1, 1), dtype=np.float64,
                          orientation='LPI', shape='rectangle', angle_RL=0, angle_AP=0, angle_IS=0, radius_RL=5.0,
                          radius_AP=3.0, degree=2, interleaved=False, zeroslice=None, debug=False):
    """
    Create a dummy 4D segmentation (dMRI/fMRI) and dummy bvecs file (optional).

    :param vol_num: int: number of volumes in 4D data
    :param create_bvecs: bool: create dummy bvecs file (necessary e.g. for sct_dmri_moco)
    Other parameters are the same as in the dummy_segmentation function.
    :return: Image object (4D)
    """
    # Bug fix: mutable default argument ([]) replaced with a None sentinel.
    zeroslice = [] if zeroslice is None else zeroslice
    img_list = []
    # Loop across individual volumes of 4D data
    for volume in range(0, vol_num):
        # set debug=True in line below for saving individual volumes into individual nii files
        img_list.append(dummy_segmentation(size_arr=size_arr, pixdim=pixdim, dtype=dtype, orientation=orientation,
                                           shape=shape, angle_RL=angle_RL, angle_AP=angle_AP, angle_IS=angle_IS,
                                           radius_RL=radius_RL, radius_AP=radius_AP, degree=degree, zeroslice=zeroslice,
                                           interleaved=interleaved, debug=False))
    # Concatenate individual 3D images into 4D data
    img_4d = concat_data(img_list, 3)
    if debug:
        out_name = datetime.now().strftime("%Y%m%d%H%M%S%f")
        file_4d_data = 'tmp_dummy_4d_' + out_name + '.nii.gz'
        img_4d.save(file_4d_data, verbose=0)
    # Create a dummy bvecs file (necessary e.g. for sct_dmri_moco)
    if create_bvecs:
        n_b0 = 1  # number of b0
        n_dwi = vol_num - n_b0  # number of dwi
        bvecs_dummy = ['', '', '']
        bvec_b0 = np.array([[0.0, 0.0, 0.0]] * n_b0)
        bvec_dwi = np.array([[uniform(0, 1), uniform(0, 1), uniform(0, 1)]] * n_dwi)
        bvec = np.concatenate((bvec_b0, bvec_dwi), axis=0)
        # Concatenate bvecs
        for i in (0, 1, 2):
            bvecs_dummy[i] += ' '.join(str(v) for v in map(lambda n: '%.16f' % n, bvec[:, i]))
            bvecs_dummy[i] += ' '
        bvecs_concat = '\n'.join(str(v) for v in bvecs_dummy)  # transform list into lines of strings
        if debug:
            # Bug fix: use a context manager so the handle is closed even on error.
            with open('tmp_dummy_4d_' + out_name + '.bvec', 'w') as new_f:
                new_f.write(bvecs_concat)
    return img_4d
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import sys
from oslotest import base as test_base
from oslo_utils import reflection
# Expected MRO class names for RuntimeError (base-most last); used by the
# GetAllClassNamesTest cases below.
RUNTIME_ERROR_CLASSES = [
    'RuntimeError', 'Exception', 'BaseException', 'object',
]
def dummy_decorator(f):
    """No-op decorator used to verify reflection sees through functools.wraps."""
    @functools.wraps(f)
    def passthrough(*args, **kwargs):
        return f(*args, **kwargs)

    return passthrough
def mere_function(a, b):
    """Fixture: plain function with two required positional arguments."""
    pass
def function_with_defs(a, b, optional=None):
    """Fixture: two required arguments plus one defaulted argument."""
    pass
def function_with_kwargs(a, b, **kwargs):
    """Fixture: accepts arbitrary keyword arguments (for accepts_kwargs tests)."""
    pass
class TestObject(object):
    """Fixture with one hidden (leading underscore) and one public method."""

    def _hello(self):
        # Hidden member: should be skipped when exclude_hidden=True.
        pass

    def hi(self):
        # Public member: always reported by get_members/get_member_names.
        pass
class Class(object):
    """Fixture exposing an instance method, a static method and a class method."""

    def method(self, c, d):
        pass

    @staticmethod
    def static_method(e, f):
        pass

    @classmethod
    def class_method(cls, g, h):
        pass
class BadClass(object):
    """Fixture whose instances are falsy.

    Guards against reflection helpers that use truthiness (``bool(obj)``)
    where an ``is not None`` check is intended.
    """

    def do_something(self):
        pass

    def __nonzero__(self):
        # Python 2 spelling of the truth-value hook.
        return False

    # Bug fix: Python 3 uses __bool__; without this alias instances were
    # truthy on Python 3 and the fixture no longer exercised anything.
    __bool__ = __nonzero__
class CallableClass(object):
    """Fixture: instances are callable with two required arguments."""

    def __call__(self, i, j):
        pass
class ClassWithInit(object):
    """Fixture: constructor takes two required arguments."""

    def __init__(self, k, lll):
        pass
class MemberGetTest(test_base.BaseTestCase):
    """Tests for reflection.get_members() / get_member_names()."""

    def test_get_members_exclude_hidden(self):
        subject = TestObject()
        found = list(reflection.get_members(subject, exclude_hidden=True))
        self.assertEqual(1, len(found))

    def test_get_members_no_exclude_hidden(self):
        subject = TestObject()
        found = list(reflection.get_members(subject, exclude_hidden=False))
        self.assertGreater(len(found), 1)

    def test_get_members_names_exclude_hidden(self):
        subject = TestObject()
        found = list(reflection.get_member_names(subject, exclude_hidden=True))
        self.assertEqual(["hi"], found)

    def test_get_members_names_no_exclude_hidden(self):
        subject = TestObject()
        all_names = list(reflection.get_member_names(subject,
                                                     exclude_hidden=False))
        # Drop dunders; only single-underscore and public names are asserted.
        visible = [n for n in all_names if not n.startswith("__")]
        self.assertEqual(["_hello", "hi"], sorted(visible))
class CallbackEqualityTest(test_base.BaseTestCase):
    """Tests for reflection.is_same_callback()."""

    def test_different_simple_callbacks(self):
        # Two distinct function objects must not compare as the same callback.
        def a():
            pass

        def b():
            pass

        self.assertFalse(reflection.is_same_callback(a, b))

    def test_static_instance_callbacks(self):
        # Static methods resolve to the same underlying function regardless
        # of which instance they are looked up on.
        class A(object):
            @staticmethod
            def b(a, b, c):
                pass

        a = A()
        b = A()
        self.assertTrue(reflection.is_same_callback(a.b, b.b))

    def test_different_instance_callbacks(self):
        # Instances compare equal (__eq__ always True), yet strict comparison
        # must still tell their bound methods apart by identity.
        class A(object):
            def b(self):
                pass

            def __eq__(self, other):
                return True

            def __ne__(self, other):
                return not self.__eq__(other)

        b = A()
        c = A()
        self.assertFalse(reflection.is_same_callback(b.b, c.b))
        # NOTE(gmann): python3.8 onwards, comparison of bound methods is
        # changed and 'strict' arg has no meaning.
        # Ref bug: https://bugs.launchpad.net/oslo.utils/+bug/1841072
        if sys.version_info < (3, 8):
            self.assertTrue(reflection.is_same_callback(b.b, c.b,
                                                        strict=False))
        else:
            self.assertFalse(reflection.is_same_callback(b.b, c.b,
                                                         strict=False))
        self.assertTrue(reflection.is_same_callback(b.b, b.b))
class BoundMethodTest(test_base.BaseTestCase):
    """Tests for reflection.is_bound_method()."""

    def test_baddy(self):
        # BadClass instances are falsy; a bound method must still be detected.
        instance = BadClass()
        self.assertTrue(reflection.is_bound_method(instance.do_something))

    def test_static_method(self):
        self.assertFalse(reflection.is_bound_method(Class.static_method))
class GetCallableNameTest(test_base.BaseTestCase):
    """Tests for reflection.get_callable_name()."""

    def test_mere_function(self):
        result = reflection.get_callable_name(mere_function)
        self.assertEqual(__name__ + '.mere_function', result)

    def test_method(self):
        result = reflection.get_callable_name(Class.method)
        self.assertEqual(__name__ + '.Class.method', result)

    def test_instance_method(self):
        result = reflection.get_callable_name(Class().method)
        self.assertEqual(__name__ + '.Class.method', result)

    def test_static_method(self):
        result = reflection.get_callable_name(Class.static_method)
        self.assertEqual(__name__ + '.Class.static_method', result)

    def test_class_method(self):
        result = reflection.get_callable_name(Class.class_method)
        self.assertEqual(__name__ + '.Class.class_method', result)

    def test_constructor(self):
        result = reflection.get_callable_name(Class)
        self.assertEqual(__name__ + '.Class', result)

    def test_callable_class(self):
        result = reflection.get_callable_name(CallableClass())
        self.assertEqual(__name__ + '.CallableClass', result)

    def test_callable_class_call(self):
        result = reflection.get_callable_name(CallableClass().__call__)
        self.assertEqual(__name__ + '.CallableClass.__call__', result)
class GetCallableNameTestExtended(test_base.BaseTestCase):
    """Qualified-name tests (see PEP 3155 for __qualname__ semantics)."""

    class InnerCallableClass(object):
        def __call__(self):
            pass

    def test_inner_callable_class(self):
        instance = self.InnerCallableClass()
        result = reflection.get_callable_name(instance.__call__)
        expected = '.'.join([__name__, 'GetCallableNameTestExtended',
                             'InnerCallableClass', '__call__'])
        self.assertEqual(expected, result)

    def test_inner_callable_function(self):
        def a():
            def b():
                pass
            return b

        result = reflection.get_callable_name(a())
        expected = '.'.join([__name__, 'GetCallableNameTestExtended',
                             'test_inner_callable_function', '<locals>',
                             'a', '<locals>', 'b'])
        self.assertEqual(expected, result)

    def test_inner_class(self):
        instance = self.InnerCallableClass()
        result = reflection.get_callable_name(instance)
        expected = '.'.join([__name__, 'GetCallableNameTestExtended',
                             'InnerCallableClass'])
        self.assertEqual(expected, result)
class GetCallableArgsTest(test_base.BaseTestCase):
    """Tests for reflection.get_callable_args()."""

    def test_mere_function(self):
        args = reflection.get_callable_args(mere_function)
        self.assertEqual(['a', 'b'], args)

    def test_function_with_defaults(self):
        args = reflection.get_callable_args(function_with_defs)
        self.assertEqual(['a', 'b', 'optional'], args)

    def test_required_only(self):
        # Defaulted parameters are dropped when required_only=True.
        args = reflection.get_callable_args(function_with_defs,
                                            required_only=True)
        self.assertEqual(['a', 'b'], args)

    def test_method(self):
        args = reflection.get_callable_args(Class.method)
        self.assertEqual(['self', 'c', 'd'], args)

    def test_instance_method(self):
        args = reflection.get_callable_args(Class().method)
        self.assertEqual(['c', 'd'], args)

    def test_class_method(self):
        args = reflection.get_callable_args(Class.class_method)
        self.assertEqual(['g', 'h'], args)

    def test_class_constructor(self):
        args = reflection.get_callable_args(ClassWithInit)
        self.assertEqual(['k', 'lll'], args)

    def test_class_with_call(self):
        args = reflection.get_callable_args(CallableClass())
        self.assertEqual(['i', 'j'], args)

    def test_decorators_work(self):
        @dummy_decorator
        def special_fun(x, y):
            pass

        args = reflection.get_callable_args(special_fun)
        self.assertEqual(['x', 'y'], args)
class AcceptsKwargsTest(test_base.BaseTestCase):
    """Tests for reflection.accepts_kwargs()."""

    def test_no_kwargs(self):
        # Idiom: assertFalse/assertTrue instead of assertEqual(False/True, ...).
        self.assertFalse(reflection.accepts_kwargs(mere_function))

    def test_with_kwargs(self):
        self.assertTrue(reflection.accepts_kwargs(function_with_kwargs))
class GetClassNameTest(test_base.BaseTestCase):
    """Tests for reflection.get_class_name()."""

    def test_std_exception(self):
        observed = reflection.get_class_name(RuntimeError)
        self.assertEqual('RuntimeError', observed)

    def test_class(self):
        observed = reflection.get_class_name(Class)
        self.assertEqual(__name__ + '.Class', observed)

    def test_qualified_class(self):
        class QualifiedClass(object):
            pass

        observed = reflection.get_class_name(QualifiedClass)
        self.assertEqual(__name__ + '.QualifiedClass', observed)

    def test_instance(self):
        observed = reflection.get_class_name(Class())
        self.assertEqual(__name__ + '.Class', observed)

    def test_int(self):
        observed = reflection.get_class_name(42)
        self.assertEqual('int', observed)

    def test_class_method(self):
        observed = reflection.get_class_name(Class.class_method)
        self.assertEqual(__name__ + '.Class', observed)
        # test with fully_qualified=False
        observed = reflection.get_class_name(Class.class_method,
                                             fully_qualified=False)
        self.assertEqual('Class', observed)

    def test_static_method(self):
        self.assertRaises(TypeError, reflection.get_class_name,
                          Class.static_method)

    def test_unbound_method(self):
        self.assertRaises(TypeError, reflection.get_class_name,
                          mere_function)

    def test_bound_method(self):
        instance = Class()
        observed = reflection.get_class_name(instance.method)
        self.assertEqual(__name__ + '.Class', observed)
        # test with fully_qualified=False
        observed = reflection.get_class_name(instance.method,
                                             fully_qualified=False)
        self.assertEqual('Class', observed)
class GetAllClassNamesTest(test_base.BaseTestCase):
    """Tests for reflection.get_all_class_names()."""

    def test_std_class(self):
        found = list(reflection.get_all_class_names(RuntimeError))
        self.assertEqual(RUNTIME_ERROR_CLASSES, found)

    def test_std_class_up_to(self):
        # Stopping at Exception drops the last two MRO entries.
        found = list(reflection.get_all_class_names(RuntimeError,
                                                    up_to=Exception))
        self.assertEqual(RUNTIME_ERROR_CLASSES[:-2], found)
| |
#!/usr/bin/env python
import json
import logging
import os
import psutil
import psycopg2
import shlex
import shutil
import subprocess
import sys
import time
import yaml
from collections import defaultdict
from threading import Thread
from multiprocessing.pool import ThreadPool
logger = logging.getLogger(__name__)
# NOTE(review): the rsync daemon listens on 5432, which is the default
# PostgreSQL port — presumably reused because that port is already reachable
# between cluster members; TODO confirm against the deployment network policy.
RSYNC_PORT = 5432
def patch_wale_prefix(value, new_version):
    """Rewrite the trailing PG-version component of a WAL-E/WAL-G prefix.

    Returns *value* unchanged unless it looks like a path produced by
    configure_spilo.py whose last component is a different valid PG version.
    """
    from spilo_commons import is_valid_pg_version

    # path crafted in the configure_spilo.py?
    if '/spilo/' in value and '/wal/' in value:
        prefix, suffix = os.path.split(value.rstrip('/'))
        if is_valid_pg_version(suffix) and suffix != new_version:
            return os.path.join(prefix, new_version)
    return value
def update_configs(new_version):
    """Rewrite the Patroni config and WAL-E/WAL-G envdir files for *new_version*.

    Returns the envdir path when its files were scanned without error,
    otherwise None.
    """
    from spilo_commons import append_extensions, get_bin_dir, get_patroni_config, write_file, write_patroni_config

    config = get_patroni_config()
    # Point Patroni at the binaries of the new PostgreSQL major version.
    config['postgresql']['bin_dir'] = get_bin_dir(new_version)

    version = float(new_version)
    # Extend shared_preload_libraries / extwlist.extensions with the
    # version-specific extension lists.
    shared_preload_libraries = config['postgresql'].get('parameters', {}).get('shared_preload_libraries')
    if shared_preload_libraries is not None:
        config['postgresql']['parameters']['shared_preload_libraries'] =\
                append_extensions(shared_preload_libraries, version)

    extwlist_extensions = config['postgresql'].get('parameters', {}).get('extwlist.extensions')
    if extwlist_extensions is not None:
        config['postgresql']['parameters']['extwlist.extensions'] =\
                append_extensions(extwlist_extensions, version, True)

    write_patroni_config(config, True)

    # update wal-e/wal-g envdir files
    restore_command = shlex.split(config['postgresql'].get('recovery_conf', {}).get('restore_command', ''))
    if len(restore_command) > 4 and restore_command[0] == 'envdir':
        envdir = restore_command[1]

        try:
            for name in os.listdir(envdir):
                # len('WALE__PREFIX') = 12
                if len(name) > 12 and name.endswith('_PREFIX') and name[:5] in ('WALE_', 'WALG_'):
                    name = os.path.join(envdir, name)
                    try:
                        with open(name) as f:
                            value = f.read().strip()
                        new_value = patch_wale_prefix(value, new_version)
                        if new_value != value:
                            write_file(new_value, name, True)
                    except Exception as e:
                        logger.error('Failed to process %s: %r', name, e)
        except Exception:
            # best effort: a missing/unreadable envdir is not fatal
            pass
        else:
            # only report the envdir when the scan completed without error
            return envdir
def kill_patroni():
    """Kill the first running 'patroni' process, so its supervisor restarts it."""
    logger.info('Restarting patroni')
    for proc in psutil.process_iter(['name']):
        if proc.info['name'] == 'patroni':
            proc.kill()
            break
class InplaceUpgrade(object):
def __init__(self, config):
    """Prepare an in-place major-version upgrade driven by the Patroni *config*."""
    from patroni.dcs import get_dcs
    from patroni.request import PatroniRequest
    from pg_upgrade import PostgresqlUpgrade

    self.config = config
    self.postgresql = PostgresqlUpgrade(config)

    # Major version currently on disk vs. the version the installed binaries provide.
    self.cluster_version = self.postgresql.get_cluster_version()
    self.desired_version = self.get_desired_version()

    self.upgrade_required = float(self.cluster_version) < float(self.desired_version)

    # Bookkeeping flags used later for cleanup/rollback decisions.
    self.paused = False
    self.new_data_created = False
    self.upgrade_complete = False
    self.rsyncd_configs_created = False
    self.rsyncd_started = False

    if self.upgrade_required:
        # we want to reduce tcp timeouts and keepalives and therefore tune loop_wait, retry_timeout, and ttl
        self.dcs = get_dcs({**config.copy(), 'loop_wait': 0, 'ttl': 10, 'retry_timeout': 10, 'patronictl': True})
        self.request = PatroniRequest(config, True)
@staticmethod
def get_desired_version():
    """Return the PG major version of the binaries Spilo is configured to use.

    Prefers bin_dir from the SPILO_CONFIGURATION env var; falls back to
    PGVERSION; get_binary_version handles a None bin_dir.
    """
    from spilo_commons import get_bin_dir, get_binary_version

    try:
        spilo_configuration = yaml.safe_load(os.environ.get('SPILO_CONFIGURATION', ''))
        bin_dir = spilo_configuration.get('postgresql', {}).get('bin_dir')
    except Exception:
        # e.g. SPILO_CONFIGURATION unset/empty -> safe_load returns None
        bin_dir = None
    if not bin_dir and os.environ.get('PGVERSION'):
        bin_dir = get_bin_dir(os.environ['PGVERSION'])
    return get_binary_version(bin_dir)
def check_patroni_api(self, member):
    """Return True if the Patroni REST API of *member* responds with HTTP 200.

    On any failure the error is logged and None (falsy) is returned.
    """
    try:
        response = self.request(member, timeout=2, retries=0)
        return response.status == 200
    except Exception as e:
        # Bug fix: log message previously read "API request to %s name failed",
        # with a stray word between the member name and the verb.
        return logger.error('API request to %s failed: %r', member.name, e)
def toggle_pause(self, paused):
    """Enable/disable Patroni maintenance mode cluster-wide.

    Writes the 'pause' flag into the DCS config and waits up to ttl+1
    seconds for every member to acknowledge it. Returns True on success;
    on failure logs the reason and returns None (falsy).
    """
    from patroni.utils import polling_loop

    cluster = self.dcs.get_cluster()
    config = cluster.config.data.copy()
    if cluster.is_paused() == paused:
        return logger.error('Cluster is %spaused, can not continue', ('' if paused else 'not '))

    config['pause'] = paused
    # Compact JSON (no spaces) to match what Patroni itself writes.
    if not self.dcs.set_config_value(json.dumps(config, separators=(',', ':')), cluster.config.index):
        return logger.error('Failed to pause cluster, can not continue')

    self.paused = paused
    # Snapshot member config indexes so we can tell who picked up the change.
    old = {m.name: m.index for m in cluster.members if m.api_url}
    ttl = cluster.config.data.get('ttl', self.dcs.ttl)
    for _ in polling_loop(ttl + 1):
        cluster = self.dcs.get_cluster()
        if all(m.data.get('pause', False) == paused for m in cluster.members if m.name in old):
            logger.info('Maintenance mode %s', ('enabled' if paused else 'disabled'))
            return True

    remaining = [m.name for m in cluster.members if m.data.get('pause', False) != paused
                 and m.name in old and old[m.name] != m.index]
    if remaining:
        return logger.error("%s members didn't recognized pause state after %s seconds", remaining, ttl)
def resume_cluster(self):
    """Disable maintenance mode again if this run previously enabled it."""
    if not self.paused:
        return
    try:
        logger.info('Disabling maintenance mode')
        self.toggle_pause(False)
    except Exception as e:
        logger.error('Failed to resume cluster: %r', e)
def ensure_replicas_state(self, cluster):
    """
    This method checks the status of all replicas and also tries to open connections
    to all of them and puts them into the `self.replica_connections` dict for future usage.
    Returns True only if every replica passed all checks.
    """
    self.replica_connections = {}
    # Map of replica client address -> replay lag (bytes), as seen by the primary.
    streaming = {a: l for a, l in self.postgresql.query(
        ("SELECT client_addr, pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(),"
         " COALESCE(replay_{1}, '0/0'))::bigint FROM pg_catalog.pg_stat_replication")
        .format(self.postgresql.wal_name, self.postgresql.lsn_name))}

    def ensure_replica_state(member):
        # Validate one member: it must be streaming, with small lag, a healthy
        # Patroni API, and actually in recovery; cache an open cursor on success.
        ip = member.conn_kwargs().get('host')
        lag = streaming.get(ip)
        if lag is None:
            return logger.error('Member %s is not streaming from the primary', member.name)
        if lag > 16*1024*1024:
            return logger.error('Replication lag %s on member %s is too high', lag, member.name)

        if not self.check_patroni_api(member):
            return logger.error('Patroni on %s is not healthy', member.name)

        conn_kwargs = member.conn_kwargs(self.postgresql.config.superuser)
        # Disable statement timeout and pin an empty search_path for upgrade queries.
        conn_kwargs['options'] = '-c statement_timeout=0 -c search_path='
        conn_kwargs.pop('connect_timeout', None)

        conn = psycopg2.connect(**conn_kwargs)
        conn.autocommit = True
        cur = conn.cursor()
        cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
        if not cur.fetchone()[0]:
            return logger.error('Member %s is not running as replica!', member.name)
        self.replica_connections[member.name] = (ip, cur)
        return True

    return all(ensure_replica_state(member) for member in cluster.members if member.name != self.postgresql.name)
def sanity_checks(self, cluster):
    """Verify preconditions for triggering the upgrade on this node.

    Returns a truthy value only when all checks pass; otherwise logs the
    reason and returns None (falsy).
    """
    if not cluster.initialize:
        return logger.error('Upgrade can not be triggered because the cluster is not initialized')
    if len(cluster.members) != self.replica_count:
        return logger.error('Upgrade can not be triggered because the number of replicas does not match (%s != %s)',
                            len(cluster.members), self.replica_count)
    if cluster.is_paused():
        return logger.error('Upgrade can not be triggered because Patroni is in maintenance mode')
    # Only the current leader may orchestrate the upgrade.
    lock_owner = cluster.leader and cluster.leader.name
    if lock_owner != self.postgresql.name:
        return logger.error('Upgrade can not be triggered because the current node does not own the leader lock')
    return self.ensure_replicas_state(cluster)
def remove_initialize_key(self):
from patroni.utils import polling_loop
for _ in polling_loop(10):
cluster = self.dcs.get_cluster()
if cluster.initialize is None:
return True
logging.info('Removing initialize key')
if self.dcs.cancel_initialization():
return True
logger.error('Failed to remove initialize key')
def wait_for_replicas(self, checkpoint_lsn):
from patroni.utils import polling_loop
logger.info('Waiting for replica nodes to catch up with primary')
query = ("SELECT pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(),"
" '0/0')::bigint").format(self.postgresql.wal_name, self.postgresql.lsn_name)
status = {}
for _ in polling_loop(60):
synced = True
for name, (_, cur) in self.replica_connections.items():
prev = status.get(name)
if prev and prev >= checkpoint_lsn:
continue
cur.execute(query)
lsn = cur.fetchone()[0]
status[name] = lsn
if lsn < checkpoint_lsn:
synced = False
if synced:
logger.info('All replicas are ready')
return True
for name in self.replica_connections.keys():
lsn = status.get(name)
if not lsn or lsn < checkpoint_lsn:
logger.error('Node %s did not catched up. Lag=%s', name, checkpoint_lsn - lsn)
    def create_rsyncd_configs(self):
        """Write the rsyncd config and secrets files used to ship PGDATA to replicas.

        Creates /run/rsync/rsyncd.conf exposing the directory that contains
        PGDATA as the [pgroot] module, restricted to the known replica IPs,
        plus a secrets file with the replication password for every replica.
        """
        self.rsyncd_configs_created = True
        self.rsyncd_conf_dir = '/run/rsync'
        # replicas report their rsync exit status into this directory
        # via the `post-xfer exec` hook configured below
        self.rsyncd_feedback_dir = os.path.join(self.rsyncd_conf_dir, 'feedback')
        if not os.path.exists(self.rsyncd_feedback_dir):
            os.makedirs(self.rsyncd_feedback_dir)
        self.rsyncd_conf = os.path.join(self.rsyncd_conf_dir, 'rsyncd.conf')
        secrets_file = os.path.join(self.rsyncd_conf_dir, 'rsyncd.secrets')
        # one rsync user per replica, authenticated with the replication password
        auth_users = ','.join(self.replica_connections.keys())
        replica_ips = ','.join(str(v[0]) for v in self.replica_connections.values())
        with open(self.rsyncd_conf, 'w') as f:
            f.write("""port = {0}
use chroot = false
[pgroot]
path = {1}
read only = true
timeout = 300
post-xfer exec = echo $RSYNC_EXIT_STATUS > {2}/$RSYNC_USER_NAME
auth users = {3}
secrets file = {4}
hosts allow = {5}
hosts deny = *
""".format(RSYNC_PORT, os.path.dirname(self.postgresql.data_dir),
                       self.rsyncd_feedback_dir, auth_users, secrets_file, replica_ips))
        with open(secrets_file, 'w') as f:
            for name in self.replica_connections.keys():
                f.write('{0}:{1}\n'.format(name, self.postgresql.config.replication['password']))
        # rsync refuses world-readable secrets files; keep it private
        os.chmod(secrets_file, 0o600)
def start_rsyncd(self):
self.create_rsyncd_configs()
self.rsyncd = subprocess.Popen(['rsync', '--daemon', '--no-detach', '--config=' + self.rsyncd_conf])
self.rsyncd_started = True
def stop_rsyncd(self):
if self.rsyncd_started:
logger.info('Stopping rsyncd')
try:
self.rsyncd.kill()
self.rsyncd_started = False
except Exception as e:
return logger.error('Failed to kill rsyncd: %r', e)
if self.rsyncd_configs_created and os.path.exists(self.rsyncd_conf_dir):
try:
shutil.rmtree(self.rsyncd_conf_dir)
self.rsyncd_configs_created = False
except Exception as e:
logger.error('Failed to remove %s: %r', self.rsync_conf_dir, e)
def checkpoint(self, member):
name, (_, cur) = member
try:
cur.execute('CHECKPOINT')
return name, True
except Exception as e:
logger.error('CHECKPOINT on % failed: %r', name, e)
return name, False
    def rsync_replicas(self, primary_ip):
        """Ask every remaining replica to rsync the upgraded PGDATA from the primary.

        Replicas are triggered via COPY TO PROGRAM over the existing superuser
        connections; afterwards we wait up to 300 seconds for each replica to
        report its rsync exit status through the rsyncd feedback directory.

        :param primary_ip: address the replicas should rsync from
        :returns: ``True`` only if every replica was triggered and finished
                  its rsync with exit status 0
        """
        from patroni.utils import polling_loop
        logger.info('Notifying replicas %s to start rsync', ','.join(self.replica_connections.keys()))
        ret = True
        status = {}
        for name, (ip, cur) in self.replica_connections.items():
            try:
                cur.execute("SELECT pg_catalog.pg_backend_pid()")
                pid = cur.fetchone()[0]
                # We use the COPY TO PROGRAM "hack" to start the rsync on replicas.
                # There are a few important moments:
                # 1. The script is started as a child process of the postgres backend,
                #    which is running with a clean environment. I.e., the script will
                #    not see values of PGVERSION, SPILO_CONFIGURATION, KUBERNETES_SERVICE_HOST
                # 2. Since access to the DCS might not be possible from the replica,
                #    we pass the primary_ip explicitly
                # 3. The desired_version is passed explicitly to guarantee a 100% match
                #    with the master
                # 4. In order to protect from an accidental "rsync" we pass the pid of
                #    the postgres backend. The script will check that it is the child
                #    of that very specific postgres process.
                cur.execute("COPY (SELECT) TO PROGRAM 'nohup {0} /scripts/inplace_upgrade.py {1} {2} {3}'"
                            .format(sys.executable, self.desired_version, primary_ip, pid))
                conn = cur.connection
                cur.close()
                conn.close()
            except Exception as e:
                logger.error('COPY TO PROGRAM on %s failed: %r', name, e)
                status[name] = False
                ret = False
        # Forget the connections we failed to trigger; they will not rsync.
        for name in status.keys():
            self.replica_connections.pop(name)
        logger.info('Waiting for replicas rsync to complete')
        status.clear()
        for _ in polling_loop(300):
            synced = True
            for name in self.replica_connections.keys():
                # each replica writes its rsync exit status to a file named
                # after itself (see create_rsyncd_configs post-xfer exec)
                feedback = os.path.join(self.rsyncd_feedback_dir, name)
                if name not in status and os.path.exists(feedback):
                    with open(feedback) as f:
                        status[name] = f.read().strip()
                if name not in status:
                    synced = False
            if synced:
                break
        for name in self.replica_connections.keys():
            result = status.get(name)
            if result is None:
                logger.error('Did not received rsync feedback from %s after 300 seconds', name)
                ret = False
            elif not result.startswith('0'):
                logger.error('Rsync on %s finished with code %s', name, result)
                ret = False
        return ret
def wait_replica_restart(self, member):
from patroni.utils import polling_loop
for _ in polling_loop(10):
try:
response = self.request(member, timeout=2, retries=0)
if response.status == 200:
data = json.loads(response.data.decode('utf-8'))
database_system_identifier = data.get('database_system_identifier')
if database_system_identifier and database_system_identifier != self._old_sysid:
return member.name
except Exception:
pass
logger.error('Patroni on replica %s was not restarted in 10 seconds', member.name)
def wait_replicas_restart(self, cluster):
members = [member for member in cluster.members if member.name in self.replica_connections]
logger.info('Waiting for restart of patroni on replicas %s', ', '.join(m.name for m in members))
pool = ThreadPool(len(members))
results = pool.map(self.wait_replica_restart, members)
pool.close()
pool.join()
logger.info(' %s successfully restarted', results)
return all(results)
def reset_custom_statistics_target(self):
from patroni.postgresql.connection import get_connection_cursor
logger.info('Resetting non-default statistics target before analyze')
self._statistics = defaultdict(lambda: defaultdict(dict))
conn_kwargs = self.postgresql.local_conn_kwargs
for d in self.postgresql.query('SELECT datname FROM pg_catalog.pg_database WHERE datallowconn'):
conn_kwargs['dbname'] = d[0]
with get_connection_cursor(**conn_kwargs) as cur:
cur.execute('SELECT attrelid::regclass, quote_ident(attname), attstattarget '
'FROM pg_catalog.pg_attribute WHERE attnum > 0 AND NOT attisdropped AND attstattarget > 0')
for table, column, target in cur.fetchall():
query = 'ALTER TABLE {0} ALTER COLUMN {1} SET STATISTICS -1'.format(table, column)
logger.info("Executing '%s' in the database=%s. Old value=%s", query, d[0], target)
cur.execute(query)
self._statistics[d[0]][table][column] = target
def restore_custom_statistics_target(self):
from patroni.postgresql.connection import get_connection_cursor
if not self._statistics:
return
conn_kwargs = self.postgresql.local_conn_kwargs
logger.info('Restoring default statistics targets after upgrade')
for db, val in self._statistics.items():
conn_kwargs['dbname'] = db
with get_connection_cursor(**conn_kwargs) as cur:
for table, val in val.items():
for column, target in val.items():
query = 'ALTER TABLE {0} ALTER COLUMN {1} SET STATISTICS {2}'.format(table, column, target)
logger.info("Executing '%s' in the database=%s", query, db)
try:
cur.execute(query)
except Exception:
logger.error("Failed to execute '%s'", query)
def reanalyze(self):
from patroni.postgresql.connection import get_connection_cursor
if not self._statistics:
return
conn_kwargs = self.postgresql.local_conn_kwargs
for db, val in self._statistics.items():
conn_kwargs['dbname'] = db
with get_connection_cursor(**conn_kwargs) as cur:
for table in val.keys():
query = 'ANALYZE {0}'.format(table)
logger.info("Executing '%s' in the database=%s", query, db)
try:
cur.execute(query)
except Exception:
logger.error("Failed to execute '%s'", query)
def analyze(self):
try:
self.reset_custom_statistics_target()
except Exception as e:
logger.error('Failed to reset custom statistics targets: %r', e)
self.postgresql.analyze(True)
try:
self.restore_custom_statistics_target()
except Exception as e:
logger.error('Failed to restore custom statistics targets: %r', e)
    def do_upgrade(self):
        """Orchestrate the whole in-place major upgrade.

        High-level flow: sanity checks -> initdb of the new PGDATA ->
        pg_upgrade --check -> pause Patroni -> clean shutdown -> pg_upgrade ->
        rsync PGDATA to replicas -> restart Patroni/postgres -> ANALYZE and
        post-bootstrap -> trigger a new base backup.

        :returns: ``True``/truthy on success, falsy on failure (the reason is
                  logged); the caller is expected to run ``post_cleanup()``
                  afterwards regardless of the outcome.
        """
        from patroni.utils import polling_loop
        if not self.upgrade_required:
            logger.info('Current version=%s, desired version=%s. Upgrade is not required',
                        self.cluster_version, self.desired_version)
            return True
        if not (self.postgresql.is_running() and self.postgresql.is_leader()):
            return logger.error('PostgreSQL is not running or in recovery')
        cluster = self.dcs.get_cluster()
        if not self.sanity_checks(cluster):
            return False
        self._old_sysid = self.postgresql.sysid  # remember old sysid
        logger.info('Cluster %s is ready to be upgraded', self.postgresql.scope)
        if not self.postgresql.prepare_new_pgdata(self.desired_version):
            return logger.error('initdb failed')
        try:
            self.postgresql.drop_possibly_incompatible_extensions()
        except Exception:
            return logger.error('Failed to drop possibly incompatible extensions')
        # dry run first: pg_upgrade --check does not touch the data
        if not self.postgresql.pg_upgrade(check=True):
            return logger.error('pg_upgrade --check failed, more details in the %s_upgrade', self.postgresql.data_dir)
        try:
            self.postgresql.drop_possibly_incompatible_objects()
        except Exception:
            return logger.error('Failed to drop possibly incompatible objects')
        # NOTE(review): uses logging.info while the rest of the file uses the
        # module-level logger -- consider unifying.
        logging.info('Enabling maintenance mode')
        if not self.toggle_pause(True):
            return False
        logger.info('Doing a clean shutdown of the cluster before pg_upgrade')
        downtime_start = time.time()
        if not self.postgresql.stop(block_callbacks=True):
            return logger.error('Failed to stop the cluster before pg_upgrade')
        if self.replica_connections:
            from patroni.postgresql.misc import parse_lsn
            # Make sure we use the pg_controldata from the correct major version
            self.postgresql.set_bin_dir(self.cluster_version)
            controldata = self.postgresql.controldata()
            self.postgresql.set_bin_dir(self.desired_version)
            checkpoint_lsn = controldata.get('Latest checkpoint location')
            if controldata.get('Database cluster state') != 'shut down' or not checkpoint_lsn:
                return logger.error("Cluster wasn't shut down cleanly")
            checkpoint_lsn = parse_lsn(checkpoint_lsn)
            logger.info('Latest checkpoint location: %s', checkpoint_lsn)
            logger.info('Starting rsyncd')
            self.start_rsyncd()
            # replicas must replay up to the shutdown checkpoint before rsync
            if not self.wait_for_replicas(checkpoint_lsn):
                return False
            if not (self.rsyncd.pid and self.rsyncd.poll() is None):
                return logger.error('Failed to start rsyncd')
        if self.replica_connections:
            logger.info('Executing CHECKPOINT on replicas %s', ','.join(self.replica_connections.keys()))
            pool = ThreadPool(len(self.replica_connections))
            # Do CHECKPOINT on replicas in parallel with pg_upgrade.
            # It will reduce the time for shutdown and so downtime.
            results = pool.map_async(self.checkpoint, self.replica_connections.items())
            pool.close()
        if not self.postgresql.pg_upgrade():
            return logger.error('Failed to upgrade cluster from %s to %s', self.cluster_version, self.desired_version)
        self.postgresql.switch_pgdata()
        self.upgrade_complete = True
        logger.info('Updating configuration files')
        envdir = update_configs(self.desired_version)
        ret = True
        if self.replica_connections:
            # Check status of replicas CHECKPOINT and remove connections that are failed.
            pool.join()
            if results.ready():
                for name, status in results.get():
                    if not status:
                        ret = False
                        self.replica_connections.pop(name)
        member = cluster.get_member(self.postgresql.name)
        if self.replica_connections:
            primary_ip = member.conn_kwargs().get('host')
            rsync_start = time.time()
            try:
                if not self.rsync_replicas(primary_ip):
                    ret = False
            except Exception as e:
                logger.error('rsync failed: %r', e)
                ret = False
            logger.info('Rsync took %s seconds', time.time() - rsync_start)
            self.stop_rsyncd()
            time.sleep(2)  # Give replicas a bit of time to switch PGDATA
        # removed twice (before and after kill) to narrow the race window with
        # Patroni re-creating the initialize key for the old sysid
        self.remove_initialize_key()
        kill_patroni()
        self.remove_initialize_key()
        time.sleep(1)
        for _ in polling_loop(10):
            if self.check_patroni_api(member):
                break
        else:
            logger.error('Patroni REST API on primary is not accessible after 10 seconds')
        logger.info('Starting the primary postgres up')
        for _ in polling_loop(10):
            try:
                result = self.request(member, 'post', 'restart', {})
                logger.info(' %s %s', result.status, result.data.decode('utf-8'))
                if result.status < 300:
                    break
            except Exception as e:
                logger.error('POST /restart failed: %r', e)
        else:
            logger.error('Failed to start primary after upgrade')
        logger.info('Upgrade downtime: %s', time.time() - downtime_start)
        # The last attempt to fix initialize key race condition
        cluster = self.dcs.get_cluster()
        if cluster.initialize == self._old_sysid:
            self.dcs.cancel_initialization()
        try:
            self.postgresql.update_extensions()
        except Exception as e:
            logger.error('Failed to update extensions: %r', e)
        # start analyze early
        analyze_thread = Thread(target=self.analyze)
        analyze_thread.start()
        if self.replica_connections:
            self.wait_replicas_restart(cluster)
        self.resume_cluster()
        analyze_thread.join()
        self.reanalyze()
        logger.info('Total upgrade time (with analyze): %s', time.time() - downtime_start)
        self.postgresql.bootstrap.call_post_bootstrap(self.config['bootstrap'])
        self.postgresql.cleanup_old_pgdata()
        if envdir:
            self.start_backup(envdir)
        return ret
def post_cleanup(self):
self.stop_rsyncd()
self.resume_cluster()
if self.new_data_created:
try:
self.postgresql.cleanup_new_pgdata()
except Exception as e:
logger.error('Failed to remove new PGDATA %r', e)
def try_upgrade(self, replica_count):
try:
self.replica_count = replica_count
return self.do_upgrade()
finally:
self.post_cleanup()
    def start_backup(self, envdir):
        """Kick off a new base backup in a forked child process.

        The parent returns immediately; the child blocks on the backup script.
        """
        logger.info('Initiating a new backup...')
        if not os.fork():
            # child process: run the backup script with the given envdir
            # NOTE(review): after subprocess.call returns the child falls
            # through and continues the caller's code path -- confirm this is
            # intended (e.g. the child is expected to terminate with the script)
            subprocess.call(['nohup', 'envdir', envdir, '/scripts/postgres_backup.sh', self.postgresql.data_dir],
                            stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
# this function will be running in a clean environment, therefore we can't rely on DCS connection
def rsync_replica(config, desired_version, primary_ip, pid):
    """Replica-side part of the in-place upgrade (spawned via COPY TO PROGRAM).

    Validates that it was started by the expected postgres backend, stops the
    local cluster, switches PGDATA, rsyncs the upgraded data directory from
    the primary and restarts Patroni once the primary reports a new system id.

    :param config: Patroni Config object
    :param desired_version: target major version (must match the primary)
    :param primary_ip: address of the primary to rsync from
    :param pid: pid of the postgres backend that spawned this script
    :returns: process exit code (0 on success, 1 on failure)
    """
    from pg_upgrade import PostgresqlUpgrade
    from patroni.utils import polling_loop
    me = psutil.Process()
    # check that we are the child of postgres backend
    if me.parent().pid != pid and me.parent().parent().pid != pid:
        return 1
    backend = psutil.Process(pid)
    if 'postgres' not in backend.name():
        return 1
    postgresql = PostgresqlUpgrade(config)
    # already upgraded -- nothing to do
    if postgresql.get_cluster_version() == desired_version:
        return 0
    if os.fork():
        # parent exits immediately so the COPY TO PROGRAM call can return
        return 0
    # Wait until the remote side will close the connection and backend process exits
    for _ in polling_loop(10):
        if not backend.is_running():
            break
    else:
        logger.warning('Backend did not exit after 10 seconds')
    sysid = postgresql.sysid  # remember old sysid
    if not postgresql.stop(block_callbacks=True):
        logger.error('Failed to stop the cluster before rsync')
        return 1
    postgresql.switch_pgdata()
    update_configs(desired_version)
    # authenticate against the primary's rsyncd with the replication password
    env = os.environ.copy()
    env['RSYNC_PASSWORD'] = postgresql.config.replication['password']
    if subprocess.call(['rsync', '--archive', '--delete', '--hard-links', '--size-only', '--omit-dir-times',
                        '--no-inc-recursive', '--include=/data/***', '--include=/data_old/***',
                        '--exclude=/data/pg_xlog/*', '--exclude=/data_old/pg_xlog/*',
                        '--exclude=/data/pg_wal/*', '--exclude=/data_old/pg_wal/*', '--exclude=*',
                        'rsync://{0}@{1}:{2}/pgroot'.format(postgresql.name, primary_ip, RSYNC_PORT),
                        os.path.dirname(postgresql.data_dir)], env=env) != 0:
        logger.error('Failed to rsync from %s', primary_ip)
        postgresql.switch_back_pgdata()
        # XXX: rollback configs?
        return 1
    conn_kwargs = {k: v for k, v in postgresql.config.replication.items() if v is not None}
    if 'username' in conn_kwargs:
        conn_kwargs['user'] = conn_kwargs.pop('username')
    # If we restart Patroni right now there is a chance that it will exit due to the sysid mismatch.
    # Due to the cleaned environment we can't always use DCS on replicas in this script, therefore
    # a running primary with a new system id is the best indicator that the initialize key was
    # deleted/updated after the upgrade.
    for _ in polling_loop(300):
        try:
            with postgresql.get_replication_connection_cursor(primary_ip, **conn_kwargs) as cur:
                cur.execute('IDENTIFY_SYSTEM')
                if cur.fetchone()[0] != sysid:
                    break
        except Exception:
            pass
    # If the cluster was unpaused earlier than we restarted Patroni, it might have created
    # the recovery.conf file and tried (and failed) to start the cluster up using wrong binaries.
    # In case of upgrade to 12+ presence of PGDATA/recovery.conf will not allow postgres to start.
    # We remove the recovery.conf and restart Patroni in order to make sure it is using correct config.
    try:
        postgresql.config.remove_recovery_conf()
    except Exception:
        pass
    kill_patroni()
    try:
        postgresql.config.remove_recovery_conf()
    except Exception:
        pass
    return postgresql.cleanup_old_pgdata()
def main():
    """CLI entry point.

    Invoked either with three arguments (desired_version, primary_ip, backend
    pid) to run the replica-side rsync, or with one argument (replica_count)
    to run the primary-side in-place upgrade.

    :returns: process exit code (0 success, 1 failure, 2 usage error)
    """
    from patroni.config import Config
    from spilo_commons import PATRONI_CONFIG_FILE
    config = Config(PATRONI_CONFIG_FILE)
    argc = len(sys.argv)
    if argc == 4:
        # replica side: spawned on the replica via COPY TO PROGRAM
        return rsync_replica(config, sys.argv[1], sys.argv[2], int(sys.argv[3]))
    if argc == 2:
        # primary side: triggered by an operator with the expected replica count
        upgrade = InplaceUpgrade(config)
        return 0 if upgrade.try_upgrade(int(sys.argv[1])) else 1
    return 2
if __name__ == '__main__':
    # configure root logging before anything runs so library loggers propagate
    logging.basicConfig(format='%(asctime)s inplace_upgrade %(levelname)s: %(message)s', level='INFO')
    sys.exit(main())
| |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import logging
import src.utils as utils
import datasets.nav_env_config as nec
from datasets import factory
def adjust_args_for_mode(args, mode):
  """Adjust the configuration in *args* in place for the given run mode.

  Modes: 'train', 'val1' (validation with training settings), 'val2'
  (deterministic validation without augmentation), 'bench' (final
  benchmarking). Returns the mutated *args*.
  """
  def _no_augment():
    # Data augmentation fully disabled, for deterministic evaluation.
    return utils.Foo(lr_flip=0, delta_angle=0, delta_xy=0, relight=False,
                     relight_fast=False, structured=False)

  if mode == 'train':
    args.control.train = True
  elif mode == 'val1':
    # Same settings as for training, to make sure nothing wonky is happening
    # there.
    args.control.test = True
    args.control.test_mode = 'val'
    args.navtask.task_params.batch_size = 32
  elif mode == 'val2':
    # No data augmentation, not sampling but taking the argmax action, not
    # sampling from the ground truth at all.
    args.control.test = True
    args.arch.action_sample_type = 'argmax'
    args.arch.sample_gt_prob_type = 'zero'
    args.navtask.task_params.data_augment = _no_augment()
    args.control.test_mode = 'val'
    args.navtask.task_params.batch_size = 32
  elif mode == 'bench':
    # Actually testing the agent in settings that are kept same between
    # different runs.
    args.navtask.task_params.batch_size = 16
    args.control.test = True
    args.arch.action_sample_type = 'argmax'
    args.arch.sample_gt_prob_type = 'zero'
    args.navtask.task_params.data_augment = _no_augment()
    args.summary.test_iters = 250
    args.control.only_eval_when_done = True
    args.control.reset_rng_seed = True
    args.control.test_mode = 'test'
  else:
    logging.fatal('Unknown mode: %s.', mode)
    assert(False)
  return args
def get_solver_vars(solver_str):
  """Parse an underscore-separated solver spec into a Foo of named fields.

  Fields, in order: clip (gradient clipping), dlw (data loss weight), long
  (training length), typ (optimizer), rlw (reg loss weight), isdk (isd_k),
  adam_eps, init_lr. Missing trailing fields are filled with defaults.
  """
  vals = [] if solver_str == '' else solver_str.split('_')
  # Bug fix: 'rlw' was missing from this list, so any solver string with 5 or
  # more fields had everything after 'typ' attached to the wrong name, and a
  # fully-specified 8-field string crashed process_solver_str (no .rlw attr).
  ks_all = ['clip', 'dlw', 'long', 'typ', 'rlw', 'isdk', 'adam_eps', 'init_lr']
  defaults = ['noclip', 'dlw20', 'nolong', 'adam2', 'rlw1',
              'isdk415',  # 415, inflexion at 2.5k.
              'aeps1en8', 'lr1en3']
  ks = ks_all[:len(vals)]
  # Fill in defaults for the unspecified trailing fields.
  for k, v in zip(ks_all[len(vals):], defaults[len(vals):]):
    ks.append(k)
    vals.append(v)
  assert(len(vals) == 8)
  out = utils.Foo()
  for k, v in zip(ks, vals):
    setattr(out, k, v)
  logging.error('solver_vars: %s', out)
  return out
def process_solver_str(solver_str):
  """Build the solver configuration Foo from an underscore-separated spec.

  Starts from hard-coded defaults and clobbers them with the fields parsed by
  get_solver_vars (data loss weight, optimizer type, schedule length, etc.).
  """
  solver = utils.Foo(
      seed=0, learning_rate_decay=None, clip_gradient_norm=None, max_steps=None,
      initial_learning_rate=None, momentum=None, steps_per_decay=None,
      logdir=None, sync=False, adjust_lr_sync=True, wt_decay=0.0001,
      data_loss_wt=None, reg_loss_wt=None, freeze_conv=True, num_workers=1,
      task=0, ps_tasks=0, master='local', typ=None, momentum2=None,
      adam_eps=None)
  # Clobber with overrides from solver str.
  solver_vars = get_solver_vars(solver_str)
  # Numeric fields are encoded with 'x' for '.' and 'n' for '-'
  # (e.g. 'dlw20' -> 20.0, 'aeps1en8' -> 1e-8, 'lr1en3' -> 1e-3).
  solver.data_loss_wt = float(solver_vars.dlw[3:].replace('x', '.'))
  solver.adam_eps = float(solver_vars.adam_eps[4:].replace('x', '.').replace('n', '-'))
  solver.initial_learning_rate = float(solver_vars.init_lr[2:].replace('x', '.').replace('n', '-'))
  solver.reg_loss_wt = float(solver_vars.rlw[3:].replace('x', '.'))
  solver.isd_k = float(solver_vars.isdk[4:].replace('x', '.'))
  # NOTE(review): `long` shadows the Python 2 builtin; harmless locally.
  long = solver_vars.long
  if long == 'long':
    solver.steps_per_decay = 40000
    solver.max_steps = 120000
  elif long == 'long2':
    solver.steps_per_decay = 80000
    solver.max_steps = 120000
  elif long == 'nolong' or long == 'nol':
    solver.steps_per_decay = 20000
    solver.max_steps = 60000
  else:
    logging.fatal('solver_vars.long should be long, long2, nolong or nol.')
    assert(False)
  clip = solver_vars.clip
  if clip == 'noclip' or clip == 'nocl':
    solver.clip_gradient_norm = 0
  elif clip[:4] == 'clip':
    solver.clip_gradient_norm = float(clip[4:].replace('x', '.'))
  else:
    logging.fatal('Unknown solver_vars.clip: %s', clip)
    assert(False)
  typ = solver_vars.typ
  if typ == 'adam':
    solver.typ = 'adam'
    solver.momentum = 0.9
    solver.momentum2 = 0.999
    solver.learning_rate_decay = 1.0
  elif typ == 'adam2':
    solver.typ = 'adam'
    solver.momentum = 0.9
    solver.momentum2 = 0.999
    solver.learning_rate_decay = 0.1
  elif typ == 'sgd':
    solver.typ = 'sgd'
    solver.momentum = 0.99
    solver.momentum2 = None
    solver.learning_rate_decay = 0.1
  else:
    logging.fatal('Unknown solver_vars.typ: %s', typ)
    assert(False)
  logging.error('solver: %s', solver)
  return solver
def get_navtask_vars(navtask_str):
  """Parse an underscore-separated navtask spec into a Foo of named fields.

  Fields, in order: dataset_name, modality, task, history, max_dist,
  num_steps, step_size, n_ori, aux_views, data_aug. Missing trailing fields
  are filled with defaults ('straug' = structured data augmentation).
  """
  vals = [] if navtask_str == '' else navtask_str.split('_')
  ks_all = ['dataset_name', 'modality', 'task', 'history', 'max_dist',
            'num_steps', 'step_size', 'n_ori', 'aux_views', 'data_aug']
  defaults = ['sbpd', 'rgb', 'r2r', 'h0', '32', '40', '8', '4', 'nv0', 'straug']
  ks = ks_all[:len(vals)]
  # Fill in defaults for the unspecified trailing fields.
  for k, v in zip(ks_all[len(vals):], defaults[len(vals):]):
    ks.append(k)
    vals.append(v)
  assert(len(vals) == 10)
  for i in range(len(ks)):
    assert(ks[i] == ks_all[i])
  out = utils.Foo()
  for k, v in zip(ks, vals):
    setattr(out, k, v)
  # Log the parsed Foo (previously logged the raw value list), matching
  # get_solver_vars.
  logging.error('navtask_vars: %s', out)
  return out
def process_navtask_str(navtask_str):
  """Build the navigation-task config from an underscore-separated spec.

  Starts from nav_env_base_config() and overrides task parameters according
  to the parsed fields (see get_navtask_vars for the field list).
  """
  navtask = nec.nav_env_base_config()
  # Clobber with overrides from strings.
  navtask_vars = get_navtask_vars(navtask_str)
  navtask.task_params.n_ori = int(navtask_vars.n_ori)
  navtask.task_params.max_dist = int(navtask_vars.max_dist)
  navtask.task_params.num_steps = int(navtask_vars.num_steps)
  navtask.task_params.step_size = int(navtask_vars.step_size)
  navtask.task_params.data_augment.delta_xy = int(navtask_vars.step_size)/2.
  # aux_views like 'nv0': the digit at index 2 is the number of extra views
  # on each side (single-digit only -- presumably sufficient in practice)
  n_aux_views_each = int(navtask_vars.aux_views[2])
  aux_delta_thetas = np.concatenate((np.arange(n_aux_views_each) + 1,
                                     -1 -np.arange(n_aux_views_each)))
  aux_delta_thetas = aux_delta_thetas*np.deg2rad(navtask.camera_param.fov)
  navtask.task_params.aux_delta_thetas = aux_delta_thetas
  if navtask_vars.data_aug == 'aug':
    navtask.task_params.data_augment.structured = False
  elif navtask_vars.data_aug == 'straug':
    navtask.task_params.data_augment.structured = True
  else:
    logging.fatal('Unknown navtask_vars.data_aug %s.', navtask_vars.data_aug)
    assert(False)
  # history like 'h0': number of history frames follows the 'h' prefix
  navtask.task_params.num_history_frames = int(navtask_vars.history[1:])
  navtask.task_params.n_views = 1+navtask.task_params.num_history_frames
  navtask.task_params.goal_channels = int(navtask_vars.n_ori)
  if navtask_vars.task == 'hard':
    navtask.task_params.type = 'rng_rejection_sampling_many'
    navtask.task_params.rejection_sampling_M = 2000
    navtask.task_params.min_dist = 10
  elif navtask_vars.task == 'r2r':
    navtask.task_params.type = 'room_to_room_many'
  elif navtask_vars.task == 'ST':
    # Semantic task at hand.
    navtask.task_params.goal_channels = \
        len(navtask.task_params.semantic_task.class_map_names)
    navtask.task_params.rel_goal_loc_dim = \
        len(navtask.task_params.semantic_task.class_map_names)
    navtask.task_params.type = 'to_nearest_obj_acc'
  else:
    logging.fatal('navtask_vars.task: should be hard or r2r, ST')
    assert(False)
  if navtask_vars.modality == 'rgb':
    navtask.camera_param.modalities = ['rgb']
    navtask.camera_param.img_channels = 3
  elif navtask_vars.modality == 'd':
    navtask.camera_param.modalities = ['depth']
    navtask.camera_param.img_channels = 2
  # NOTE(review): no else branch here -- an unknown modality silently keeps
  # the base-config camera settings; confirm whether it should fatal like
  # the other fields do.
  navtask.task_params.img_height = navtask.camera_param.height
  navtask.task_params.img_width = navtask.camera_param.width
  navtask.task_params.modalities = navtask.camera_param.modalities
  navtask.task_params.img_channels = navtask.camera_param.img_channels
  navtask.task_params.img_fov = navtask.camera_param.fov
  navtask.dataset = factory.get_dataset(navtask_vars.dataset_name)
  return navtask
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .utils import s2n_motorola, s2n_intel, Ratio, get_logger, make_string
from .tags import *
import makernote
import sys
import getopt
import logging
import timeit
import struct
import re
__version__ = '0.2.0'
logger = get_logger()
try:
    basestring
except NameError:
    # Python 3 compatibility: basestring was removed; alias it to str so the
    # isinstance checks below work on both major versions
    basestring = str
class IfdTag(object):
    """
    A single IFD tag: raw values plus a human-readable rendering.
    """
    def __init__(self, printable, tag, field_type, values, field_offset,
                 field_length):
        self._printable = printable        # human-readable version of the values
        self._tag = tag                    # numeric tag id
        self._field_type = field_type      # index into FIELD_TYPES
        self._field_length = field_length  # total byte length of the field data
        self._field_offset = field_offset  # offset of the data within the file
        self._values = values              # raw list of values, or a string

    def __str__(self):
        return self._printable

    def __repr__(self):
        try:
            return '(0x%04X) %s=%s @ %d' % (self._tag,
                                            FIELD_TYPES[self._field_type][2],
                                            self._printable,
                                            self._field_offset)
        except:
            # fall back to plain str() rendering when tag/offset are not ints
            return '(%s) %s=%s @ %s' % (str(self._tag),
                                        FIELD_TYPES[self._field_type][2],
                                        self._printable,
                                        str(self._field_offset))
class ExifHeader(object):
"""
Handle an EXIF header.
"""
def __init__(self, filename, endian, offset, fake_exif, strict,
debug=False, detailed=True):
self._file = filename
self._endian = endian
self._offset = offset
self._fake_exif = fake_exif
self._debug = debug
self._detailed = detailed
self._tags = {}
def s2n(self, offset, length, signed=0):
"""
Convert slice to integer, base on sign and endian flags.
Usually this offset is assumed to be relative to the beginning of
the start of the EXIF information.
For some cameras that use relative tags, this offset may be
relative to some other starting point.
"""
self._file.seek(self._offset + offset)
sliced = self._file.read(length)
if self._endian == 'I':
val = s2n_intel(sliced)
else:
val = s2n_motorola(sliced)
if signed:
msb = 1 << (8 * length - 1)
if val & msb:
val -= (msb << 1)
return val
def n2s(self, offset, length):
"""
Convert offset to string.
"""
s = ''
for i in range(length):
if self._endian == 'I':
s += chr(offset & 0xFF)
else:
s = chr(offset & 0xFF) + s
offset >>= 8
return s
    def _first_ifd(self):
        """
        Return the offset of the first IFD (the 4-byte pointer at header offset 4).
        """
        return self.s2n(4, 4)
def _next_ifd(self, ifd):
"""
Returns the pointer to next IFD.
"""
entries = self.s2n(ifd, 2)
next_ifd = self.s2n(ifd + 2 + 12 * entries, 4)
if next_ifd == ifd:
return 0
else:
return next_ifd
def list_ifd(self):
"""
Return the list of IFDs in the header.
"""
i = self._first_ifd()
ifds = []
while i:
ifds.append(i)
i = self._next_ifd(i)
return ifds
def dump_ifd(self, ifd, ifd_name, tag_dict=EXIF_TAGS, relative=0,
stop_tag=DEFAULT_STOP_TAG):
"""
Return a list of entries in the given IFD.
"""
# make sure we can process the entries
try:
entries = self.s2n(ifd, 2)
except TypeError:
logger.warning("Possibly corrupted IFD: %s" % ifd)
return
for i in range(entries):
# entry is index of start of this IFD in the file
entry = ifd + 2 + 12 * i
tag = self.s2n(entry, 2)
# get tag name early to avoid errors, help debug
tag_entry = tag_dict.get(tag)
if tag_entry:
tag_name = tag_entry[0]
else:
tag_name = 'Tag 0x%04X' % tag
# ignore certain tags for faster processing
if not (not self._detailed and tag in IGNORE_TAGS):
field_type = self.s2n(entry + 2, 2)
# unknown field type
if not 0 < field_type < len(FIELD_TYPES):
if not self.strict:
continue
else:
raise ValueError('Unknown type %d in tag 0x%04X' %
(field_type, tag))
type_length = FIELD_TYPES[field_type][0]
count = self.s2n(entry + 4, 4)
# Adjust for tag id/type/count (2+2+4 bytes)
# Now we point at either the data or the 2nd level offset
offset = entry + 8
# If the value fits in 4 bytes, it is inlined, else we
# need to jump ahead again.
if count * type_length > 4:
# offset is not the value; it's a pointer to the value
# if relative we set things up so s2n will seek to the
# right place when it adds self._offset. Note that this
# 'relative' is for the Nikon type 3 makernote. Other
# cameras may use other relative offsets, which would
# have to be computed here slightly differently.
if relative:
tmp_offset = self.s2n(offset, 4)
offset = tmp_offset + ifd - 8
if self._fake_exif:
offset += 18
else:
offset = self.s2n(offset, 4)
field_offset = offset
values = None
if field_type == 2:
# special case: null-terminated ASCII string
# XXX investigate
# sometimes gets too big to fit in int value
# 2E31 is hardware dependant. --gd
if count != 0: # and count < (2**31):
file_position = self._offset + offset
try:
self._file.seek(file_position)
values = self._file.read(count)
# Drop any garbage after a null.
values = values.split(b'\x00', 1)[0]
if isinstance(values, bytes):
try:
values = values.decode("utf-8")
except UnicodeDecodeError:
logger.warning("Possibly corrupted field %s in %s IFD",
tag_name, ifd_name)
except OverflowError:
logger.warn('OverflowError at position: %s, length: %s',
file_position, count)
values = ''
except MemoryError:
logger.warn('MemoryError at position: %s, length: %s',
file_position, count)
values = ''
else:
values = []
signed = (field_type in [6, 8, 9, 10])
# XXX investigate
# some entries get too big to handle could be malformed
# file or problem with self.s2n
if count < 1000:
for dummy in range(count):
if field_type in (5, 10):
# a ratio
value = Ratio(self.s2n(offset, 4, signed),
self.s2n(offset + 4, 4, signed))
else:
value = self.s2n(offset, type_length, signed)
values.append(value)
offset = offset + type_length
# The test above causes problems with tags that are
# supposed to have long values! Fix up one important case.
elif tag_name in ('MakerNote',
makernote.canon.CAMERA_INFO_TAG_NAME):
for dummy in range(count):
value = self.s2n(offset, type_length, signed)
values.append(value)
offset = offset + type_length
# now 'values' is either a string or an array
if count == 1 and field_type != 2:
printable = str(values[0])
elif count > 50 and len(values) > 20 and not isinstance(values, basestring):
printable = str(values[0:20])[0:-1] + ", ... ]"
else:
try:
printable = str(values)
except UnicodeEncodeError:
printable = unicode(values)
# compute printable version of values
if tag_entry:
# optional 2nd tag element is present
if len(tag_entry) != 1:
if callable(tag_entry[1]):
# call mapping function
printable = tag_entry[1](values)
elif type(tag_entry[1]) is tuple:
ifd_info = tag_entry[1]
try:
logger.debug('%s SubIFD at offset %d:',
ifd_info[0], values[0])
self.dump_ifd(values[0], ifd_info[0],
tag_dict=ifd_info[1],
stop_tag=stop_tag)
except IndexError:
logger.warn('No values found for %s SubIFD',
ifd_info[0])
else:
printable = ''
for i in values:
# use lookup table for this tag
printable += tag_entry[1].get(i, repr(i))
self._tags[ifd_name + ' ' + tag_name] = IfdTag(printable, tag,
field_type,
values,
field_offset,
count * type_length)
try:
tag_value = repr(self._tags[ifd_name + ' ' + tag_name])
# fix for python2's handling of unicode values
except UnicodeEncodeError:
tag_value = unicode(self._tags[ifd_name + ' ' + tag_name])
logger.debug(' %s: %s', tag_name, tag_value)
if tag_name == stop_tag:
break
    def extract_tiff_thumbnail(self, thumb_ifd):
        """
        Extract uncompressed TIFF thumbnail.
        Take advantage of the pre-existing layout in the thumbnail IFD as
        much as possible

        :param thumb_ifd: offset of the thumbnail IFD within the EXIF data.

        Rebuilds a standalone TIFF file (header + IFD + data + pixel strips)
        and stores it in ``self._tags['TIFFThumbnail']``.

        NOTE(review): this method concatenates ``str`` header literals with
        the result of ``self._file.read`` -- Python-2 era string handling;
        confirm the supported Python version before porting.
        """
        # Only an uncompressed TIFF thumbnail can be rebuilt this way.
        thumb = self._tags.get('Thumbnail Compression')
        if not thumb or thumb._printable != 'Uncompressed TIFF':
            return
        entries = self.s2n(thumb_ifd, 2)
        # this is header plus offset to IFD ...
        if self._endian == 'M':
            tiff = 'MM\x00*\x00\x00\x00\x08'
        else:
            tiff = 'II*\x00\x08\x00\x00\x00'
        # ... plus thumbnail IFD data plus a null "next IFD" pointer
        self._file.seek(self._offset + thumb_ifd)
        tiff += self._file.read(entries * 12 + 2) + '\x00\x00\x00\x00'
        # fix up large value offset pointers into data area
        for i in range(entries):
            entry = thumb_ifd + 2 + 12 * i
            tag = self.s2n(entry, 2)
            field_type = self.s2n(entry + 2, 2)
            type_length = FIELD_TYPES[field_type][0]
            count = self.s2n(entry + 4, 4)
            old_offset = self.s2n(entry + 8, 4)
            # start of the 4-byte pointer area in entry
            ptr = i * 12 + 18
            # remember strip offsets location (tag 0x0111 == StripOffsets)
            if tag == 0x0111:
                strip_off = ptr
                strip_len = count * type_length
            # is it in the data area?
            if count * type_length > 4:
                # update offset pointer (nasty "strings are immutable" crap)
                # should be able to say "tiff[ptr:ptr+4]=newoff"
                newoff = len(tiff)
                tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr + 4:]
                # remember strip offsets location
                if tag == 0x0111:
                    strip_off = newoff
                    strip_len = 4
                # get original data and store it
                self._file.seek(self._offset + old_offset)
                tiff += self._file.read(count * type_length)
        # add pixel strips and update strip offset info
        old_offsets = self._tags['Thumbnail StripOffsets'].values
        old_counts = self._tags['Thumbnail StripByteCounts'].values
        for i in range(len(old_offsets)):
            # update offset pointer (more nasty "strings are immutable" crap)
            offset = self.n2s(len(tiff), strip_len)
            tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:]
            strip_off += strip_len
            # add pixel strip to end
            self._file.seek(self._offset + old_offsets[i])
            tiff += self._file.read(old_counts[i])
        self._tags['TIFFThumbnail'] = tiff
def extract_jpeg_thumbnail(self):
"""
Extract JPEG thumbnail.
(Thankfully the JPEG data is stored as a unit.)
"""
thumb_offset = self._tags.get('Thumbnail JPEGInterchangeFormat')
if thumb_offset:
self._file.seek(self._offset + thumb_offset._values[0])
size = self._tags['Thumbnail JPEGInterchangeFormatLength']._values[0]
self._tags['JPEGThumbnail'] = self._file.read(size)
# Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote
# since it's not allowed in a uncompressed TIFF IFD
if 'JPEGThumbnail' not in self._tags:
thumb_offset = self._tags.get('MakerNote JPEGThumbnail')
if thumb_offset:
self._file.seek(self._offset + thumb_offset._values[0])
self._tags['JPEGThumbnail'] = self._file.read(thumb_offset._field_length)
    def decode_maker_note(self):
        """
        Decode all the camera-specific MakerNote formats
        Note is the data that comprises this MakerNote.
        The MakerNote will likely have pointers in it that point to other
        parts of the file. We'll use self._offset as the starting point for
        most of those pointers, since they are relative to the beginning
        of the file.
        If the MakerNote is in a newer format, it may use relative addressing
        within the MakerNote. In that case we'll use relative addresses for
        the pointers.
        As an aside: it's not just to be annoying that the manufacturers use
        relative offsets. It's so that if the makernote has to be moved by the
        picture software all of the offsets don't have to be adjusted.
        Overall, this is probably the right strategy for makernotes, though the
        spec is ambiguous.
        The spec does not appear to imagine that makernotes would
        follow EXIF format internally. Once they did, it's ambiguous whether
        the offsets should be from the header at the start of all the EXIF
        info, or from the header at the start of the makernote.

        Dispatches on the 'Image Make' tag and dumps the vendor-specific
        sub-IFD into ``self._tags`` under the 'MakerNote' prefix.
        """
        note = self._tags['EXIF MakerNote']
        # Some apps use MakerNote tags but do not use a format for which we
        # have a description, so just do a raw dump for these.
        make = self._tags['Image Make']._printable
        # Nikon
        # The maker note usually starts with the word Nikon, followed by the
        # type of the makernote (1 or 2, as a short). If the word Nikon is
        # not at the start of the makernote, it's probably type 2, since some
        # cameras work that way.
        if 'NIKON' in make:
            # [78, 105, 107, 111, 110] is 'Nikon' as byte values.
            if note._values[0:7] == [78, 105, 107, 111, 110, 0, 1]:
                logger.debug("Looks like a type 1 Nikon MakerNote.")
                self.dump_ifd(note._field_offset + 8, 'MakerNote',
                              tag_dict=makernote.nikon.TAGS_OLD)
            elif note._values[0:7] == [78, 105, 107, 111, 110, 0, 2]:
                logger.debug("Looks like a labeled type 2 Nikon MakerNote")
                # A TIFF header (the '42' magic) must follow the label.
                if note._values[12:14] != [0, 42] and note._values[12:14] != [42, 0]:
                    raise ValueError("Missing marker tag '42' in MakerNote.")
                # skip the Makernote label and the TIFF header
                self.dump_ifd(note._field_offset + 10 + 8, 'MakerNote',
                              tag_dict=makernote.nikon.TAGS_NEW, relative=1)
            else:
                # E99x or D1
                logger.debug("Looks like an unlabeled type 2 Nikon MakerNote")
                self.dump_ifd(note._field_offset, 'MakerNote',
                              tag_dict=makernote.nikon.TAGS_NEW)
            return
        # Olympus
        if make.startswith('OLYMPUS'):
            self.dump_ifd(note._field_offset + 8, 'MakerNote',
                          tag_dict=makernote.olympus.TAGS)
            # TODO
            # for i in (('MakerNote Tag 0x2020', makernote.OLYMPUS_TAG_0x2020)):
            #    self.decode_olympus_tag(self._tags[i[0]].values, i[1])
            # return
        # Casio
        if 'CASIO' in make or 'Casio' in make:
            self.dump_ifd(note._field_offset, 'MakerNote',
                          tag_dict=makernote.casio.TAGS)
            return
        # Fujifilm
        if make == 'FUJIFILM':
            # bug: everything else is "Motorola" endian, but the MakerNote
            # is "Intel" endian
            endian = self._endian
            self._endian = 'I'
            # bug: IFD offsets are from beginning of MakerNote, not
            # beginning of file header
            offset = self._offset
            self._offset += note._field_offset
            # process note with bogus values (note is actually at offset 12)
            self.dump_ifd(12, 'MakerNote', tag_dict=makernote.fujifilm.TAGS)
            # reset to correct values
            self._endian = endian
            self._offset = offset
            return
        # Apple
        # NOTE(review): this branch reads ``note.values`` while the Nikon
        # branches read ``note._values`` -- presumably the tag class exposes
        # both; confirm against the IfdTag definition.
        if (make == 'Apple' and note.values[0:10] == [
            65, 112, 112, 108, 101, 32, 105, 79, 83, 0
        ]):
            t = self._offset
            self._offset += note._field_offset+14
            self.dump_ifd(0, 'MakerNote',
                          tag_dict=makernote.apple.TAGS)
            self._offset = t
            return
        # Canon
        if make == 'Canon':
            self.dump_ifd(note._field_offset, 'MakerNote',
                          tag_dict=makernote.canon.TAGS)
            # Decode the variable-offset sub-blocks, then drop the raw tags.
            for i in (('MakerNote Tag 0x0001', makernote.canon.CAMERA_SETTINGS),
                      ('MakerNote Tag 0x0002', makernote.canon.FOCAL_LENGTH),
                      ('MakerNote Tag 0x0004', makernote.canon.SHOT_INFO),
                      ('MakerNote Tag 0x0026', makernote.canon.AF_INFO_2),
                      ('MakerNote Tag 0x0093', makernote.canon.FILE_INFO)):
                if i[0] in self._tags:
                    logger.debug('Canon ' + i[0])
                    self._canon_decode_tag(self._tags[i[0]].values, i[1])
                    del self._tags[i[0]]
            if makernote.canon.CAMERA_INFO_TAG_NAME in self._tags:
                tag = self._tags[makernote.canon.CAMERA_INFO_TAG_NAME]
                logger.debug('Canon CameraInfo')
                self._canon_decode_camera_info(tag)
                del self._tags[makernote.canon.CAMERA_INFO_TAG_NAME]
            return
def _olympus_decode_tag(self, value, mn_tags):
""" TODO Decode Olympus MakerNote tag based on offset within tag."""
pass
def _canon_decode_tag(self, value, mn_tags):
"""
Decode Canon MakerNote tag based on offset within tag.
See http://www.burren.cx/david/canon.html by David Burren
"""
for i in range(1, len(value)):
tag = mn_tags.get(i, ('Unknown', ))
name = tag[0]
if len(tag) > 1:
val = tag[1].get(value[i], 'Unknown')
else:
val = value[i]
try:
logger.debug(" %s %s %s", i, name, hex(value[i]))
except TypeError:
logger.debug(" %s %s %s", i, name, value[i])
# it's not a real IFD Tag but we fake one to make everybody
# happy. this will have a "proprietary" type
self._tags['MakerNote ' + name] = IfdTag(str(val), None, 0, None,
None, None)
def _canon_decode_camera_info(self, camera_info_tag):
"""
Decode the variable length encoded camera info section.
"""
model = self._tags.get('Image Model', None)
if not model:
return
model = str(model.values)
camera_info_tags = None
for (model_name_re, tag_desc) in makernote.canon.CAMERA_INFO_MODEL_MAP.items():
if re.search(model_name_re, model):
camera_info_tags = tag_desc
break
else:
return
# We are assuming here that these are all unsigned bytes (Byte or
# Unknown)
if camera_info_tag.field_type not in (1, 7):
return
camera_info = struct.pack('<%dB' % len(camera_info_tag.values),
*camera_info_tag.values)
# Look for each data value and decode it appropriately.
for offset, tag in camera_info_tags.items():
tag_format = tag[1]
tag_size = struct.calcsize(tag_format)
if len(camera_info) < offset + tag_size:
continue
packed_tag_value = camera_info[offset:offset + tag_size]
tag_value = struct.unpack(tag_format, packed_tag_value)[0]
tag_name = tag[0]
if len(tag) > 2:
if callable(tag[2]):
tag_value = tag[2](tag_value)
else:
tag_value = tag[2].get(tag_value, tag_value)
logger.debug(" %s %s", tag_name, tag_value)
self._tags['MakerNote ' + tag_name] = IfdTag(str(tag_value), None,
0, None, None, None)
def parse_xmp(self, xmp_string):
import xml.dom.minidom
logger.debug('XMP cleaning data')
xml = xml.dom.minidom.parseString(xmp_string)
pretty = xml.toprettyxml()
cleaned = []
for line in pretty.splitlines():
if line.strip():
cleaned.append(line)
self._tags['Image ApplicationNotes'] = IfdTag('\n'.join(cleaned), None,
1, None, None, None)
def increment_base(data, base):
    """
    Return the distance to the next JPEG segment.

    ``data[base + 2]`` and ``data[base + 3]`` hold the current segment's
    big-endian 16-bit length (which excludes the 2-byte marker), so the next
    segment starts ``length + 2`` bytes past the length field.

    :param data: segment data as ``str`` (historical usage) or ``bytes``.
    :param base: offset of the current segment marker within ``data``.
    :return: increment to add to ``base`` to reach the next segment.
    """
    hi, lo = data[base + 2], data[base + 3]
    # str indexing yields 1-char strings; bytes indexing yields ints already.
    if isinstance(hi, str):
        hi, lo = ord(hi), ord(lo)
    return hi * 256 + lo + 2
def process_file(f, stop_tag=DEFAULT_STOP_TAG, details=True, strict=False, debug=False):
    """
    Process an image file (expects an open file object).
    This is the function that has to deal with all the arbitrary nasty bits
    of the EXIF standard.

    :param f: open file object positioned at the start of the image data.
    :param stop_tag: tag name at which IFD dumping stops early.
    :param details: if True, also decode MakerNotes and extract thumbnails.
    :param strict: if True, raise on malformed fields instead of skipping.
    :param debug: if True (with details), also search for and parse XMP data.
    :return: dict mapping tag names to tag objects; empty dict when no EXIF
        data is found or the file format is not recognized.

    NOTE(review): the ``ord(data[i])`` calls below require ``data`` to be a
    Python 2 ``str``; under Python 3 ``f.read`` returns ``bytes`` whose items
    are already ints -- confirm the supported Python version before porting.
    """
    # by default do not fake an EXIF beginning
    fake_exif = 0
    # determine whether it's a JPEG or TIFF
    data = f.read(12)
    if data[0:4] in [b'II*\x00', b'MM\x00*']:
        # it's a TIFF file
        logger.debug("TIFF format recognized in data[0:4]")
        f.seek(0)
        endian = f.read(1)
        f.read(1)
        offset = 0
    elif data[0:2] == b'\xFF\xD8':
        # it's a JPEG file
        logger.debug("JPEG format recognized data[0:2]=0x%X%X", ord(data[0]),
                     ord(data[1]))
        base = 2
        logger.debug("data[2]=0x%X data[3]=0x%X data[6:10]=%s",
                     ord(data[2]), ord(data[3]), data[6:10])
        # Skip leading APP0-style segments (JFIF etc.) before the EXIF APP1.
        while ord(data[2]) == 0xFF and data[6:10] in (b'JFIF', b'JFXX',
                                                      b'OLYM', b'Phot'):
            length = ord(data[4]) * 256 + ord(data[5])
            logger.debug(" Length offset is %s", length)
            f.read(length - 8)
            # fake an EXIF beginning of file
            # I don't think this is used. --gd
            data = b'\xFF\x00' + f.read(10)
            fake_exif = 1
            if base > 2:
                logger.debug(" Added to base")
                base = base + length + 4 - 2
            else:
                logger.debug(" Added to zero")
                base = length + 4
            logger.debug(" Set segment base to 0x%X", base)
        # Big ugly patch to deal with APP2 (or other) data coming before APP1
        f.seek(0)
        # in theory, this could be insufficient since 64K is the maximum size--gd
        data = f.read(base + 4000)
        # base = 2
        # Walk the JPEG segment chain until the EXIF APP1 segment is found.
        while 1:
            logger.debug(" Segment base 0x%X", base)
            if data[base:base + 2] == b'\xFF\xE1':
                # APP1
                logger.debug("  APP1 at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord(data[base + 2]),
                             ord(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                if data[base + 4:base + 8] == b"Exif":
                    logger.debug("  Decrement base by 2 to get to pre-segment header (for compatibility with later code)")
                    base -= 2
                    break
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xE0':
                # APP0
                logger.debug("  APP0 at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord(data[base + 2]),
                             ord(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xE2':
                # APP2
                logger.debug("  APP2 at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord(data[base + 2]),
                             ord(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xEE':
                # APP14
                logger.debug("  APP14 Adobe segment at base 0x%X", base)
                logger.debug("  Length: 0x%X 0x%X", ord(data[base + 2]),
                             ord(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
                logger.debug("  There is useful EXIF-like data here, but we "
                             "have no parser for it.")
            elif data[base:base + 2] == b'\xFF\xDB':
                logger.debug("  JPEG image data at base 0x%X No more segments "
                             "are expected.",
                             base)
                break
            elif data[base:base + 2] == b'\xFF\xD8':
                # APP12
                logger.debug("  FFD8 segment at base 0x%X", base)
                logger.debug("  Got 0x%X 0x%X and %s instead",
                             ord(data[base]),
                             ord(data[base + 1]),
                             data[4 + base:10 + base])
                logger.debug("  Length: 0x%X 0x%X", ord(data[base + 2]),
                             ord(data[base + 3]))
                logger.debug("  Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
            elif data[base:base + 2] == b'\xFF\xEC':
                # APP12
                logger.debug("  APP12 XMP (Ducky) or Pictureinfo segment at "
                             "base 0x%X",
                             base)
                logger.debug("  Got 0x%X and 0x%X instead", ord(data[base]),
                             ord(data[base + 1]))
                logger.debug("  Length: 0x%X 0x%X",
                             ord(data[base + 2]),
                             ord(data[base + 3]))
                logger.debug("Code: %s", data[base + 4:base + 8])
                increment = increment_base(data, base)
                logger.debug(" Increment base by %s", increment)
                base += increment
                logger.debug(
                    "  There is useful EXIF-like data here (quality, comment, "
                    "copyright), but we have no parser for it.")
            else:
                # Unknown segment: try to skip it; bail out on truncated data.
                try:
                    increment = increment_base(data, base)
                    logger.debug("  Got 0x%X and 0x%X instead",
                                 ord(data[base]),
                                 ord(data[base + 1]))
                except IndexError:
                    logger.debug("  Unexpected/unhandled segment type or file"
                                 " content.")
                    return {}
                else:
                    logger.debug(" Increment base by %s", increment)
                    base += increment
        # Position the file just past the EXIF header code.
        f.seek(base + 12)
        if ord(data[2 + base]) == 0xFF and data[6 + base:10 + base] == b'Exif':
            # detected EXIF header
            offset = f.tell()
            endian = f.read(1)
            # HACK TEST:  endian = 'M'
        elif ord(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Ducky':
            # detected Ducky header.
            logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and"
                         " %s", ord(data[2 + base]), data[6 + base:10 + base + 1])
            offset = f.tell()
            endian = f.read(1)
        elif ord(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Adobe':
            # detected APP14 (Adobe)
            logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and "
                         "%s", ord(data[2 + base]), data[6 + base:10 + base + 1])
            offset = f.tell()
            endian = f.read(1)
        else:
            # no EXIF information
            logger.debug("No EXIF header expected data[2+base]==0xFF and "
                         "data[6+base:10+base]===Exif (or Duck)")
            logger.debug("Did get 0x%X and %s",
                         ord(data[2 + base]), data[6 + base:10 + base + 1])
            return {}
    else:
        # file format not recognized
        logger.debug("File format not recognized.")
        return {}
    endian = chr(ord(endian[0]))
    # deal with the EXIF info we found
    logger.debug("Endian format is %s (%s)", endian, {
        'I': 'Intel',
        'M': 'Motorola',
        '\x01': 'Adobe Ducky',
        'd': 'XMP/Adobe unknown'
    }[endian])
    hdr = ExifHeader(f, endian, offset, fake_exif, strict, debug, details)
    ifd_list = hdr.list_ifd()
    thumb_ifd = False
    ctr = 0
    # First IFD is the main image, second (if any) is the thumbnail.
    for ifd in ifd_list:
        if ctr == 0:
            ifd_name = 'Image'
        elif ctr == 1:
            ifd_name = 'Thumbnail'
            thumb_ifd = ifd
        else:
            ifd_name = 'IFD %d' % ctr
        logger.debug('IFD %d (%s) at offset %s:', ctr, ifd_name, ifd)
        hdr.dump_ifd(ifd, ifd_name, stop_tag=stop_tag)
        ctr += 1
    # EXIF IFD
    exif_off = hdr._tags.get('Image ExifOffset')
    if exif_off:
        logger.debug('Exif SubIFD at offset %s:', exif_off._values[0])
        hdr.dump_ifd(exif_off._values[0], 'EXIF', stop_tag=stop_tag)
    # deal with MakerNote contained in EXIF IFD
    # (Some apps use MakerNote tags but do not use a format for which we
    # have a description, do not process these).
    if details and 'EXIF MakerNote' in hdr._tags and 'Image Make' in hdr._tags:
        hdr.decode_maker_note()
    # extract thumbnails
    if details and thumb_ifd:
        hdr.extract_tiff_thumbnail(thumb_ifd)
        hdr.extract_jpeg_thumbnail()
    # parse XMP tags (experimental)
    if debug and details:
        xmp_string = b''
        # Easy we already have them
        # NOTE(review): uses ``hdr.tags`` here but ``hdr._tags`` everywhere
        # else in this function -- presumably the class exposes both; confirm.
        if 'Image ApplicationNotes' in hdr.tags:
            logger.debug('XMP present in Exif')
            xmp_string = make_string(hdr.tags['Image ApplicationNotes'].values)
        # We need to look in the entire file for the XML
        else:
            logger.debug('XMP not in Exif, searching file for XMP info...')
            xml_started = False
            xml_finished = False
            for line in f:
                open_tag = line.find(b'<x:xmpmeta')
                close_tag = line.find(b'</x:xmpmeta>')
                if open_tag != -1:
                    xml_started = True
                    line = line[open_tag:]
                    logger.debug('XMP found opening tag at line '
                                 'position %s' % open_tag)
                if close_tag != -1:
                    logger.debug('XMP found closing tag at line '
                                 'position %s' % close_tag)
                    line_offset = 0
                    if open_tag != -1:
                        line_offset = open_tag
                    # keep up to and including '</x:xmpmeta>' (12 chars)
                    line = line[:(close_tag - line_offset) + 12]
                    xml_finished = True
                if xml_started:
                    xmp_string += line
                if xml_finished:
                    break
            logger.debug('XMP Finished searching for info')
        if xmp_string:
            hdr.parse_xmp(xmp_string)
    return hdr._tags
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
API_VERSION = "v1"  # API version of the services under test
GCP_CONN_ID = "google_cloud_default"  # Airflow connection id used to build the hook
class TestGoogleDisplayVideo360Hook(TestCase):
    """Unit tests for ``GoogleDisplayVideo360Hook``.

    Each test patches the hook's connection factory (``get_conn`` or
    ``get_conn_to_display_video``) and verifies that the hook issues the
    expected Google API client calls and returns the expected values.
    """

    def setUp(self):
        # Patch the base hook __init__ so no real GCP connection is required.
        with mock.patch(
            "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = GoogleDisplayVideo360Hook(gcp_conn_id=GCP_CONN_ID)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook._authorize"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks." "display_video.build"
    )
    def test_gen_conn(self, mock_build, mock_authorize):
        result = self.hook.get_conn()
        mock_build.assert_called_once_with(
            "doubleclickbidmanager",
            API_VERSION,
            http=mock_authorize.return_value,
            cache_discovery=False,
        )
        self.assertEqual(mock_build.return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook._authorize"
    )
    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks." "display_video.build"
    )
    def test_get_conn_to_display_video(self, mock_build, mock_authorize):
        result = self.hook.get_conn_to_display_video()
        mock_build.assert_called_once_with(
            "displayvideo",
            API_VERSION,
            http=mock_authorize.return_value,
            cache_discovery=False,
        )
        self.assertEqual(mock_build.return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_create_query(self, get_conn_mock):
        body = {"body": "test"}
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.createquery.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.create_query(query=body)
        get_conn_mock.return_value.queries.return_value.createquery.assert_called_once_with(
            body=body
        )
        self.assertEqual(return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_delete_query(self, get_conn_mock):
        query_id = "QUERY_ID"
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.deletequery.return_value.execute.return_value = (
            return_value
        )
        self.hook.delete_query(query_id=query_id)
        get_conn_mock.return_value.queries.return_value.deletequery.assert_called_once_with(
            queryId=query_id
        )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_get_query(self, get_conn_mock):
        query_id = "QUERY_ID"
        return_value = "TEST"
        get_conn_mock.return_value.queries.return_value.getquery.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.get_query(query_id=query_id)
        get_conn_mock.return_value.queries.return_value.getquery.assert_called_once_with(
            queryId=query_id
        )
        self.assertEqual(return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_list_queries(self, get_conn_mock):
        queries = ["test"]
        return_value = {"queries": queries}
        get_conn_mock.return_value.queries.return_value.listqueries.return_value.execute.return_value = (
            return_value
        )
        result = self.hook.list_queries()
        get_conn_mock.return_value.queries.return_value.listqueries.assert_called_once_with()
        self.assertEqual(queries, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_run_query(self, get_conn_mock):
        query_id = "QUERY_ID"
        params = {"params": "test"}
        self.hook.run_query(query_id=query_id, params=params)
        get_conn_mock.return_value.queries.return_value.runquery.assert_called_once_with(
            queryId=query_id, body=params
        )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_download_line_items_should_be_called_once(self, get_conn_mock):
        request_body = {
            "filterType": "filter_type",
            "filterIds": [],
            "format": "format",
            "fileSpec": "file_spec"
        }
        self.hook.download_line_items(request_body=request_body)
        get_conn_mock.return_value\
            .lineitems.return_value\
            .downloadlineitems.assert_called_once()

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_download_line_items_should_be_called_with_params(self, get_conn_mock):
        request_body = {
            "filterType": "filter_type",
            "filterIds": [],
            "format": "format",
            "fileSpec": "file_spec"
        }
        self.hook.download_line_items(request_body=request_body)
        get_conn_mock.return_value \
            .lineitems.return_value \
            .downloadlineitems.assert_called_once_with(body=request_body)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_download_line_items_should_return_equal_values(self, get_conn_mock):
        line_item = ["holy_hand_grenade"]
        response = {"lineItems": line_item}
        request_body = {
            "filterType": "filter_type",
            "filterIds": [],
            "format": "format",
            "fileSpec": "file_spec"
        }
        get_conn_mock.return_value \
            .lineitems.return_value \
            .downloadlineitems.return_value.execute.return_value = response
        result = self.hook.download_line_items(request_body)
        self.assertEqual(line_item, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_upload_line_items_should_be_called_once(self, get_conn_mock):
        line_items = ["this", "is", "super", "awesome", "test"]
        self.hook.upload_line_items(line_items)
        get_conn_mock.return_value \
            .lineitems.return_value \
            .uploadlineitems.assert_called_once()

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_upload_line_items_should_be_called_with_params(self, get_conn_mock):
        line_items = "I spent too much time on this"
        request_body = {
            "lineItems": line_items,
            "dryRun": False,
            "format": "CSV",
        }
        self.hook.upload_line_items(line_items)
        get_conn_mock.return_value \
            .lineitems.return_value \
            .uploadlineitems.assert_called_once_with(body=request_body)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn"
    )
    def test_upload_line_items_should_return_equal_values(self, get_conn_mock):
        line_items = {
            "lineItems": "string",
            "format": "string",
            "dryRun": False
        }
        return_value = "TEST"
        get_conn_mock.return_value \
            .lineitems.return_value \
            .uploadlineitems.return_value \
            .execute.return_value = return_value
        result = self.hook.upload_line_items(line_items)
        self.assertEqual(return_value, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_create_sdf_download_tasks_called_with_params(
        self, get_conn_to_display_video
    ):
        body_request = {
            "version": "version",
            "partnerId": "partner_id",
            "advertiserId": "advertiser_id",
            "parentEntityFilter": "parent_entity_filter",
            "idFilter": "id_filter",
            "inventorySourceFilter": "inventory_source_filter",
        }
        self.hook.create_sdf_download_operation(body_request=body_request)
        get_conn_to_display_video.return_value.sdfdownloadtasks.return_value.create.assert_called_once_with(
            body=body_request
        )

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_create_sdf_download_tasks_called_once(self, get_conn_to_display_video):
        body_request = {
            "version": "version",
            "partnerId": "partner_id",
            "advertiserId": "advertiser_id",
            "parentEntityFilter": "parent_entity_filter",
            "idFilter": "id_filter",
            "inventorySourceFilter": "inventory_source_filter",
        }
        self.hook.create_sdf_download_operation(body_request=body_request)
        get_conn_to_display_video.return_value.sdfdownloadtasks.return_value.create.assert_called_once()

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_create_sdf_download_tasks_return_equal_values(
        self, get_conn_to_display_video
    ):
        response = ["name"]
        body_request = {
            "version": "version",
            "partnerId": "partner_id",
            "advertiserId": "advertiser_id",
            "parentEntityFilter": "parent_entity_filter",
            "idFilter": "id_filter",
            "inventorySourceFilter": "inventory_source_filter",
        }
        get_conn_to_display_video.return_value.\
            sdfdownloadtasks.return_value.\
            create.return_value\
            .execute.return_value = response
        result = self.hook.create_sdf_download_operation(body_request=body_request)
        self.assertEqual(response, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_get_sdf_download_tasks_called_with_params(self, get_conn_to_display_video):
        operation_name = "operation_name"
        self.hook.get_sdf_download_operation(operation_name=operation_name)
        get_conn_to_display_video.return_value.\
            sdfdownloadtasks.return_value.\
            operation.return_value.\
            get.assert_called_once_with(name=operation_name)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_get_sdf_download_tasks_called_once(self, get_conn_to_display_video):
        operation_name = "name"
        self.hook.get_sdf_download_operation(operation_name=operation_name)
        get_conn_to_display_video.return_value.\
            sdfdownloadtasks.return_value.\
            operation.return_value.\
            get.assert_called_once()

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_get_sdf_download_tasks_return_equal_values(self, get_conn_to_display_video):
        # BUG FIX: this test originally lacked the ``test_`` prefix (so it was
        # never collected), replaced the mocked ``get`` attribute instead of
        # configuring its ``execute`` chain, and asserted against the
        # operation name rather than the mocked response.
        operation_name = "operation"
        response = "response"
        get_conn_to_display_video.return_value.\
            sdfdownloadtasks.return_value.\
            operation.return_value.\
            get.return_value.execute.return_value = response
        result = self.hook.get_sdf_download_operation(operation_name=operation_name)
        self.assertEqual(response, result)

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_download_media_called_once(self, get_conn_to_display_video):
        resource_name = "resource_name"
        self.hook.download_media(resource_name=resource_name)
        get_conn_to_display_video.return_value.\
            media.return_value.\
            download_media.assert_called_once()

    @mock.patch(
        "airflow.providers.google.marketing_platform.hooks."
        "display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
    )
    def test_download_media_called_once_with_params(self, get_conn_to_display_video):
        resource_name = "resource_name"
        self.hook.download_media(resource_name=resource_name)
        get_conn_to_display_video.return_value.\
            media.return_value.\
            download_media.assert_called_once_with(resource_name=resource_name)
| |
#-------------------------------------------------------------------------------
#
#
# Written by: David C. Morrill (based on similar routines written by Eric Jones)
#
# Date: 2007-05-01
#
# (c) Copyright 2002-7 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
""" Tick generator classes and helper functions for calculating axis
tick-related values (i.e., bounds and intervals).
"""
# Major library imports
from numpy import arange, argsort, array, ceil, concatenate, equal, finfo, \
float64, floor, linspace, log10, minimum, ndarray, newaxis, \
putmask, shape
# Enthought library imports
from traits.api import HasTraits, Any
class AbstractTickGenerator(HasTraits):
    """ Abstract class for tick generators.

    Subclasses must implement get_ticks().
    """
    def get_ticks(self, data_low, data_high, bounds_low, bounds_high, interval,
                  use_endpoints=False, scale='linear'):
        """ Returns a list of ticks points in data space.
        Parameters
        ----------
        data_low, data_high : float
            The actual minimum and maximum of index values of the entire
            dataset.
        bounds_low, bounds_high : "auto", "fit", float
            The range for which ticks should be generated.
        interval : "auto", float
            If the value is a positive number, it specifies the length
            of the tick interval; a negative integer specifies the
            number of tick intervals; 'auto' specifies that the number and
            length of the tick intervals are automatically calculated, based
            on the range of the axis.
        use_endpoints : Boolean
            If True, the lower and upper bounds of the data are used as the
            lower and upper end points of the axis. If False, the end points
            might not fall exactly on the bounds.
        scale : 'linear' or 'log'
            The type of scale the ticks are for.
        Returns
        -------
        tick_list : array of floats
            Where ticks are to be placed.
        Raises
        ------
        NotImplementedError
            Always; subclasses must override this method.
        Example
        -------
        If the range of x-values in a line plot span from -15.0 to +15.0, but
        the plot is currently displaying only the region from 3.1 to 6.83, and
        the user wants the interval to be automatically computed to be some
        nice value, then call get_ticks() thusly::
            get_ticks(-15.0, 15.0, 3.1, 6.83, "auto")
        A reasonable return value in this case would be::
            [3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5]
        """
        raise NotImplementedError
class DefaultTickGenerator(AbstractTickGenerator):
    """ An implementation of AbstractTickGenerator that simply uses the
    auto_ticks() and log_auto_ticks() functions.
    """
    def get_ticks(self, data_low, data_high, bounds_low,
                  bounds_high, interval, use_endpoints=False,
                  scale='linear'):
        """ Compute tick positions; see AbstractTickGenerator.get_ticks.

        Dispatches to auto_ticks() for linear scales and log_auto_ticks()
        for log scales, returning the result as a float64 array.
        """
        if scale == 'linear':
            # BUG FIX: use_endpoints was previously hard-coded to False,
            # silently ignoring the caller's argument.
            return array(auto_ticks(data_low, data_high, bounds_low,
                                    bounds_high, interval,
                                    use_endpoints=use_endpoints), float64)
        elif scale == 'log':
            return array(log_auto_ticks(data_low, data_high, bounds_low,
                                        bounds_high, interval,
                                        use_endpoints=use_endpoints), float64)
        # Any other scale falls through and returns None, matching the
        # original behavior.
class ShowAllTickGenerator(AbstractTickGenerator):
    """ Uses the abstract interface, but returns all "positions" instead
    of decimating the ticks.
    You must provide a sequence of values as a *positions* keyword argument
    to the constructor.
    """
    # A sequence of positions for ticks.
    positions = Any
    def get_ticks(self, data_low, data_high, bounds_low, bounds_high, interval,
                  use_endpoints=False, scale='linear'):
        """ Returns an array based on **positions**.
        """
        # ignore all the high, low, etc. data and just return every position,
        # converted to a float64 array.
        return array(self.positions, float64)
#-------------------------------------------------------------------------------
# Code imported from plt/plot_utility.py:
#-------------------------------------------------------------------------------
def auto_ticks ( data_low, data_high, bound_low, bound_high, tick_interval,
                 use_endpoints = True):
    """ Finds locations for axis tick marks.
    Calculates the locations for tick marks on an axis. The *bound_low*,
    *bound_high*, and *tick_interval* parameters specify how the axis end
    points and tick interval are calculated.
    Parameters
    ----------
    data_low, data_high : number
        The minimum and maximum values of the data along this axis.
        If any of the bound settings are 'auto' or 'fit', the axis
        traits are calculated automatically from these values.
    bound_low, bound_high : 'auto', 'fit', or a number.
        The lower and upper bounds of the axis. If the value is a number,
        that value is used for the corresponding end point. If the value is
        'auto', then the end point is calculated automatically. If the
        value is 'fit', then the axis bound is set to the corresponding
        *data_low* or *data_high* value.
    tick_interval : can be 'auto' or a number
        If the value is a positive number, it specifies the length
        of the tick interval; a negative integer specifies the
        number of tick intervals; 'auto' specifies that the number and
        length of the tick intervals are automatically calculated, based
        on the range of the axis.
    use_endpoints : Boolean
        If True, the lower and upper bounds of the data are used as the
        lower and upper end points of the axis. If False, the end points
        might not fall exactly on the bounds.
    Returns
    -------
    An array of tick mark locations. The first and last tick entries are the
    axis end points.
    """
    is_auto_low = (bound_low == 'auto')
    is_auto_high = (bound_high == 'auto')
    # NOTE(review): `basestring` is Python 2 only; this module predates
    # Python 3 and would need `str` there.
    if isinstance(bound_low, basestring):
        lower = data_low
    else:
        lower = float( bound_low )
    if isinstance(bound_high, basestring):
        upper = data_high
    else:
        upper = float( bound_high )
    if (tick_interval == 'auto') or (tick_interval == 0.0):
        rng = abs( upper - lower )
        # Degenerate range: widen by 0.5 on each side so ticks exist.
        if rng == 0.0:
            tick_interval = 0.5
            lower = data_low - 0.5
            upper = data_high + 0.5
        elif is_base2( rng ) and is_base2( upper ) and rng > 4:
            if rng == 2:
                tick_interval = 1
            elif rng == 4:
                tick_interval = 4
            else:
                tick_interval = rng / 4   # maybe we want it 8?
        else:
            tick_interval = auto_interval( lower, upper )
    elif tick_interval < 0:
        # Negative interval means "this many tick intervals".
        intervals = -tick_interval
        tick_interval = tick_intervals( lower, upper, intervals )
        if is_auto_low and is_auto_high:
            is_auto_low = is_auto_high = False
            lower = tick_interval * floor( lower / tick_interval )
            # Slide the window down while it still covers the upper bound.
            while ((abs( lower ) >= tick_interval) and
                   ((lower + tick_interval * (intervals - 1)) >= upper)):
                lower -= tick_interval
            upper = lower + tick_interval * intervals
    # If the lower or upper bound are set to 'auto',
    # calculate them based on the newly chosen tick_interval:
    if is_auto_low or is_auto_high:
        # delta nudges identical low/high apart so auto_bounds gets a range.
        delta = 0.01 * tick_interval * (data_low == data_high)
        auto_lower, auto_upper = auto_bounds( data_low - delta,
                                              data_high + delta, tick_interval )
        if is_auto_low:
            lower = auto_lower
        if is_auto_high:
            upper = auto_upper
    # Compute the range of ticks values:
    start = floor( lower / tick_interval ) * tick_interval
    end = floor( upper / tick_interval ) * tick_interval
    # If we return the same value for the upper bound and lower bound, the
    # layout code will not be able to lay out the tick marks (divide by zero).
    if start == end:
        lower = start = start - tick_interval
        # NOTE(review): subtracting again leaves end one interval BELOW the
        # (already decremented) start, so start > end here; `+ tick_interval`
        # looks like the intent -- confirm against upstream before changing.
        upper = end = start - tick_interval
    if upper > end:
        end += tick_interval
    # end + interval/2 makes arange include `end` despite float rounding.
    ticks = arange( start, end + (tick_interval / 2.0), tick_interval )
    if len( ticks ) < 2:
        ticks = array( ( ( lower - lower * 1.0e-7 ), lower ) )
    if (not is_auto_low) and use_endpoints:
        ticks[0] = lower
    if (not is_auto_high) and use_endpoints:
        ticks[-1] = upper
    # NOTE(review): when bound_low/bound_high are the strings 'auto'/'fit',
    # these comparisons rely on Python 2 mixed-type ordering (numbers sort
    # before strings) and raise TypeError on Python 3 -- confirm.
    return [tick for tick in ticks if tick >= bound_low and tick <= bound_high]
#--------------------------------------------------------------------------------
# Determine if a number is a power of 2:
#--------------------------------------------------------------------------------
def is_base2 ( range ):
    """ Return True when *range* is a positive power of two greater
    than 1 (2, 4, 8, 16, ...), and False otherwise.
    """
    if range <= 0.0:
        return False
    exponent = log2( range )
    # A power of two has an integral base-2 log; exclude 2**0 == 1.
    return (exponent == floor( exponent )) and (exponent > 0.0)
#--------------------------------------------------------------------------------
# Compute the base-2 logarithm:
#--------------------------------------------------------------------------------
def log2 ( num ):
    """ Returns the base 2 logarithm of a number (or array).

    Zeros are replaced with 1.0e-16 before taking the logarithm so that
    log10 never sees a zero argument.

    Parameters
    ----------
    num : float or ndarray
        Value(s) to take the logarithm of.  NOTE: an ndarray argument
        containing zeros is modified in place (putmask), matching the
        original behavior.
    """
    # BUG FIX: the ndarray check must come first.  `num == 0.0` on an
    # ndarray is an element-wise comparison, and using the resulting
    # boolean array in `if` raises ValueError for arrays with more than
    # one element.
    if type( num ) is ndarray:
        # !! 1e-16 is here to prevent errors when log is 0
        putmask( num, equal( num, 0.0), 1.0e-16 )
    elif num == 0.0:
        num += 1.0e-16
    return log10( num ) / log10( 2 )
#--------------------------------------------------------------------------------
# Compute the best tick interval for a specified data range:
#--------------------------------------------------------------------------------
def heckbert_interval(data_low, data_high, numticks=8):
    """ Return (graphmin, graphmax, interval): a "nice" axis range and
    tick interval for the given data range and preferred tick count.
    Based on Paul Heckbert's axis-labeling algorithm from Graphics Gems.
    """
    span = _nice(data_high - data_low)
    step = _nice(span / (numticks - 1), round=True)
    graph_min = floor(data_low / step) * step
    graph_max = ceil(data_high / step) * step
    return graph_min, graph_max, step
def _nice(x, round=False):
""" if round is False, then use ceil(range) """
expv = floor(log10(x))
f = x / pow(10, expv)
if round:
if f < 1.5:
nf = 1.0
elif f < 3.0:
nf = 2.0
elif f < 7.0:
nf = 5.0;
else:
nf = 10.0
else:
if f <= 1.0:
nf = 1.0
elif f <= 2.0:
nf = 2.0
elif f <= 5.0:
nf = 5.0
else:
nf = 10.0
return nf * pow(10, expv)
def auto_interval ( data_low, data_high ):
    """ Calculates the tick interval for a range.
    The boundaries for the data to be plotted on the axis are::
        data_bounds = (data_low,data_high)
    The function chooses the number of tick marks, which can be between
    3 and 9 marks (including end points), and chooses tick intervals at
    1, 2, 2.5, 5, 10, 20, ...
    Returns
    -------
    interval : float
        tick mark interval for axis
    """
    range = float( data_high ) - float( data_low )
    # We'll choose from between 2 and 8 tick marks.
    # Preference is given to more ticks:
    # Note reverse order and see kludge below...
    divisions = arange( 8.0, 2.0, -1.0 ) # ( 7, 6, ..., 3 )
    # Calculate the intervals for the divisions:
    candidate_intervals = range / divisions
    # Get magnitudes and mantissas for each candidate:
    magnitudes = 10.0 ** floor( log10( candidate_intervals ) )
    mantissas = candidate_intervals / magnitudes
    # List of "pleasing" intervals between ticks on graph.
    # Only the first magnitude are listed, higher mags others are inferred:
    magic_intervals = array( ( 1.0, 2.0, 2.5, 5.0, 10.0 ) )
    # Calculate the absolute differences between the candidates
    # (with magnitude removed) and the magic intervals:
    differences = abs( magic_intervals[:,newaxis] - mantissas )
    # Find the division and magic interval combo that produce the
    # smallest differences:
    # KLUDGE: 'argsort' doesn't preserve the order of equal values,
    # so we subtract a small, index dependent amount from each difference
    # to force correct ordering.
    sh = shape( differences )
    small = 2.2e-16 * arange( sh[1] ) * arange( sh[0] )[:,newaxis]
    small = small[::-1,::-1] #reverse the order
    differences = differences - small
    # ? Numeric should allow keyword "axis" ? comment out for now
    #best_mantissa = minimum.reduce(differences,axis=0)
    #best_magic = minimum.reduce(differences,axis=-1)
    # Reduce along each axis to find the best match in each direction.
    best_mantissa = minimum.reduce( differences, 0 )
    best_magic = minimum.reduce( differences, -1 )
    magic_index = argsort( best_magic )[0]
    mantissa_index = argsort( best_mantissa )[0]
    # The best interval is the magic_interval multiplied by the magnitude
    # of the best mantissa:
    interval = magic_intervals[ magic_index ]
    magnitude = magnitudes[ mantissa_index ]
    result = interval * magnitude
    # Guard against a zero interval (e.g. zero-width data range), which
    # would cause divide-by-zero downstream; return machine epsilon instead.
    if result == 0.0:
        result = finfo(float).eps
    return result
#--------------------------------------------------------------------------------
# Compute the best tick interval length to achieve a specified number of tick
# intervals:
#--------------------------------------------------------------------------------
def tick_intervals ( data_low, data_high, intervals ):
""" Computes the best tick interval length to achieve a specified number of
tick intervals.
Parameters
----------
data_low, data_high : number
The minimum and maximum values of the data along this axis.
If any of the bound settings are 'auto' or 'fit', the axis
traits are calculated automatically from these values.
intervals : number
The desired number of intervals
Returns
-------
Returns a float indicating the tick interval length.
"""
range = float( data_high - data_low )
if range == 0.0:
range = 1.0
interval = range / intervals
factor = 10.0 ** floor( log10( interval ) )
interval /= factor
if interval < 2.0:
interval = 2.0
index = 0
elif interval < 2.5:
interval = 2.5
index = 1
elif interval < 5.0:
interval = 5.0
index = 2
else:
interval = 10.0
index = 3
while True:
result = interval * factor
if ((floor( data_low / result ) * result) + (intervals * result) >=
data_high):
return result
index = (index + 1) % 4
interval *= ( 2.0, 1.25, 2.0, 2.0 )[ index ]
def log_auto_ticks(data_low, data_high,
                   bound_low, bound_high,
                   tick_interval, use_endpoints = True):
    """Like auto_ticks(), but for log scales."""
    # Target number of ticks when choosing between candidate layouts.
    tick_goal = 15
    # Multipliers tried inside each decade when space allows.
    magic_numbers = [1, 2, 5]
    explicit_ticks = False
    # Log scale is undefined for non-positive data.
    if data_low<=0.0:
        return []
    if tick_interval != 'auto':
        if tick_interval < 0:
            tick_goal = -tick_interval
        else:
            magic_numbers = [tick_interval]
            explicit_ticks = True
    if data_low>data_high:
        data_low, data_high = data_high, data_low
    log_low = log10(data_low)
    log_high = log10(data_high)
    log_interval = log_high-log_low
    if log_interval < 1.0:
        # If less than a factor of 10 separates the data, just use the normal
        # linear approach
        return auto_ticks(data_low, data_high,
                          bound_low, bound_high,
                          tick_interval,
                          use_endpoints = False)
    elif log_interval < (tick_goal+1)/2 or explicit_ticks:
        # If there's enough space, try to put lines at the magic number multipliers
        # inside each power of ten
        # Try each interval to see how many ticks we get
        for interval in magic_numbers:
            ticklist = []
            for exp in range(int(floor(log_low)), int(ceil(log_high))):
                # NOTE(review): round() is passed as linspace's `num` and
                # `endpoint=1` relies on truthiness; works, but on Python 2
                # round() returns a float -- confirm target version.
                for multiplier in linspace(interval, 10.0, round(10.0/interval),
                                           endpoint=1):
                    tick = 10**exp*multiplier
                    if tick >= data_low and tick <= data_high:
                        ticklist.append(tick)
            # Accept the first interval that stays under the tick budget.
            if len(ticklist)<tick_goal+3 or explicit_ticks:
                return ticklist
        # NOTE(review): if no interval is accepted the function falls off the
        # end and returns None -- confirm callers tolerate that.
    else:
        # We put lines at every power of ten or less
        startlog = ceil(log_low)
        endlog = floor(log_high)
        interval = ceil((endlog-startlog)/9.0)
        expticks = arange(startlog, endlog, interval)
        # There's no function that is like arange but inclusive, so
        # we have to check whether the endpoint should be included.
        if (endlog-startlog) % interval == 0.0:
            expticks = concatenate([expticks, [endlog]])
        return 10**expticks
#-------------------------------------------------------------------------------
# Compute the best lower and upper axis bounds for a range of data:
#-------------------------------------------------------------------------------
def auto_bounds ( data_low, data_high, tick_interval ):
    """ Return (lower, upper) axis bounds for the given data range.

    Each bound lands either exactly on the corresponding data value or on
    the first tick mark just beyond it.
    """
    lower = calc_bound( data_low, tick_interval, False )
    upper = calc_bound( data_high, tick_interval, True )
    return ( lower, upper )
#-------------------------------------------------------------------------------
# Compute the best axis endpoint for a specified data value:
#-------------------------------------------------------------------------------
def calc_bound ( end_point, tick_interval, is_upper ):
    """ Return an axis end point that includes the value *end_point*.

    If a tick mark lands (almost) exactly on *end_point*, it is returned
    unchanged; otherwise the tick location just beyond it is returned.
    *is_upper* selects whether the bound extends upward (True) or
    downward (False) from the data value.
    """
    quotient, remainder = divmod( end_point, tick_interval )
    # Treat a remainder within a tiny relative tolerance of a full
    # interval as "exactly on a tick".
    exact = remainder == 0.0
    nearly = ((tick_interval - remainder) / tick_interval) < 0.00001
    if exact or nearly:
        return end_point
    above = (quotient + 1.0) * tick_interval
    below = quotient * tick_interval
    return max( above, below ) if is_upper else min( above, below )
| |
# TODO: rewrite the functions in this module to be more Pythonic
import enum as _enum
from spgl.system.error import error
@_enum.unique
class TokenType(_enum.IntEnum):
    """The enumerated values of the <code>get_token_type</code> method."""
    SEPARATOR = 0   # whitespace between tokens
    WORD = 1        # run of word characters (letters, digits, extras)
    NUMBER = 2      # numeric literal
    STRING = 3      # quoted string literal
    OPERATOR = 4    # anything else (operator characters)
class TokenScanner():
    """Splits an input stream into tokens: words, numbers, quoted strings
    and operators.

    The scanner pulls characters one at a time from *input_stream* (any
    seekable text stream with ``read(1)``, ``tell()`` and ``seek()``, e.g.
    ``io.StringIO``).  Word scanning is always on; number, string and
    comment handling are controlled by the ``_scan_numbers``,
    ``_scan_strings`` and ``_ignore_comments`` flags.
    """
    def __init__(self, input_stream):
        self.input_stream = input_stream
        self._init_scanner()
    def set_input(self, input_stream):
        """Replace the stream being scanned."""
        self.input_stream = input_stream
    def has_more_tokens(self):
        """Return True if another (non-empty) token is available."""
        # Peek by scanning one token ahead, then push it back.
        token = self.next_token()
        self.save_token(token)
        return token != ""
    def next_token(self):
        """Return the next token, or '' at end of input."""
        if self._saved_tokens:
            return self._saved_tokens.pop(0)
        while True:
            # NOTE(review): `self.ignore_whitespace` is a bound method and
            # is therefore always truthy, so whitespace is always skipped
            # and the `_ignore_whitespace` flag is never consulted.  Kept
            # as-is to preserve established behavior -- TODO confirm.
            if self.ignore_whitespace:
                self._skip_spaces()
            ch = self.input_stream.read(1)
            if ch == '/' and self._ignore_comments:
                ch = self.input_stream.read(1)
                if ch == '/':
                    # Line comment: consume to end of line and rescan.
                    while True:
                        ch = self.input_stream.read(1)
                        if ch in ('', '\r', '\n'):
                            break
                    continue
                elif ch == '*':
                    # Block comment: consume through the closing '*/'.
                    prev = ''
                    while True:
                        ch = self.input_stream.read(1)
                        if not ch or (prev == '*' and ch == '/'):
                            break
                        prev = ch
                    continue
                # Not a comment: restore the lookahead character and
                # treat the '/' as an ordinary character.
                if ch:
                    self.unget_char(ch)
                ch = '/'
            if not ch:
                return ''
            if ch in ('"', "'") and self._scan_strings:
                self.unget_char(ch)
                return self._scan_string()
            if ch.isdigit() and self._scan_numbers:
                self.unget_char(ch)
                return self._scan_number()
            if self.is_word_character(ch):
                self.unget_char(ch)
                return self._scan_word()
            # Operator: greedily extend while some registered operator
            # still has the accumulated text as a prefix...
            op = ch
            while self._is_operator_prefix(op):
                ch = self.input_stream.read(1)
                if not ch:
                    break
                op += ch
            # ...then back off until an exact operator (or a single
            # character) remains.
            while len(op) > 1 and not self._is_operator(op):
                self.unget_char(op[-1])
                op = op[:-1]
            return op
    def save_token(self, token):
        """Push *token* back so it is returned by the next next_token()."""
        self._saved_tokens.append(token)
    def get_position(self):
        """Not implemented.  TODO: report the current scan position."""
        pass
    def ignore_whitespace(self):
        """Not implemented.  TODO: enable whitespace skipping via flag."""
        pass
    def ignore_comments(self, python_style=True, c_style=False):
        """Not implemented.  TODO: enable comment skipping via flag."""
        pass
    def scan_numbers(self):
        """Not implemented.  TODO: enable number scanning via flag."""
        pass
    def scan_strings(self):
        """Not implemented.  TODO: enable string scanning via flag."""
        pass
    def add_word_characters(self, characters):
        """Treat every character in *characters* as a word character."""
        self._word_characters += characters
    def is_word_character(self, ch):
        """Return True if *ch* may appear inside a word token."""
        return ch.isalnum() or ch in self._word_characters
    def add_operator(self, op):
        """Register the multi-character operator *op*."""
        self._operators.append(op)
    def verify_token(self, expected):
        """Consume the next token and error if it is not *expected*."""
        token = self.next_token()
        if token != expected:
            # TODO: error w/ buffer
            error('Found "{}" when expecting "{}"'.format(token, expected))
    def get_token_type(self, token):
        """Classify *token* as a TokenType based on its first character."""
        if not token:
            error('Empty token: TODO(EOF)?')
        ch = token[0]
        if ch.isspace():
            return TokenType.SEPARATOR
        elif ch == '"' or (ch == "'" and len(token) > 1):
            return TokenType.STRING
        elif ch.isdigit():
            return TokenType.NUMBER
        elif self.is_word_character(ch):
            return TokenType.WORD
        else:
            return TokenType.OPERATOR
    def get_char(self):
        """Read and return one character from the input stream."""
        return self.input_stream.read(1)
    def unget_char(self, ch):
        """Step the stream back one character.

        NOTE: *ch* itself is ignored; this simply seeks back one
        position, so it only works on seekable streams.
        """
        # TODO: char must match current location
        current = self.input_stream.tell()
        if current > 0:
            self.input_stream.seek(current - 1)
    def get_string_value(self, token):
        """Return *token* with surrounding quotes removed and escape
        sequences (\\n, \\t, octal \\nnn, hex \\xnn, ...) expanded.
        """
        out = ''
        start = 0
        finish = len(token)
        if finish > 1 and (token[0] == '"' or token[0] == "'"):
            start = 1
            finish -= 1
        # BUG FIX: this used `for i in range(...)` with manual `i += 1`
        # inside the body; the loop variable was reset each iteration, so
        # characters consumed by an escape were processed twice.  A while
        # loop lets the escape handler actually advance the cursor.
        i = start
        while i < finish:
            ch = token[i]
            if ch == '\\':
                i += 1
                ch = token[i]
                if ch.isdigit() or ch == 'x':
                    base = 8
                    if ch == 'x':
                        base = 16
                        i += 1
                    result = 0
                    digit = 0
                    while i < finish:
                        ch = token[i]
                        if ch.isdigit():
                            # BUG FIX: was ord(0) (the int 0, a TypeError);
                            # the digit value needs ord('0').
                            digit = ord(ch) - ord('0')
                        elif ch.isalpha():
                            digit = ord(ch.upper()) - ord('A') + 10
                        else:
                            digit = base
                        if digit >= base:
                            break
                        result = base * result + digit
                        i += 1
                    ch = chr(result)
                    i -= 1
                else:
                    if ch == 'a': ch = '\a'
                    elif ch == 'b': ch = '\b'
                    elif ch == 'f': ch = '\f'
                    elif ch == 'n': ch = '\n'
                    elif ch == 'r': ch = '\r'
                    elif ch == 't': ch = '\t'
                    elif ch == 'v': ch = '\v'
                    # TODO: other delims?
            out += ch
            i += 1
        return out
    # Private
    def _init_scanner(self):
        """Initialize all scanner state (flags, buffers, tables)."""
        self._buffer = None
        self._isp = None
        self._string_input = False
        self._ignore_whitespace = False
        self._ignore_comments = False
        self._scan_numbers = False
        self._scan_strings = False
        self._word_characters = []
        self._saved_tokens = []
        self._operators = []
    def _skip_spaces(self):
        """Consume whitespace up to the next non-space character or EOF."""
        while True:
            ch = self.input_stream.read(1)
            if not ch:
                return
            if not ch.isspace():
                self.unget_char(ch)
                return
    def _scan_word(self):
        """Scan a maximal run of word characters."""
        token = ''
        while True:
            ch = self.input_stream.read(1)
            if not ch:
                break
            if not self.is_word_character(ch):
                self.unget_char(ch)
                break
            token += ch
        return token
    def _scan_number(self):
        """Scan a numeric literal (integer, decimal, optional exponent)."""
        token = ''
        state = _NumberScannerState.INITIAL_STATE
        while state != _NumberScannerState.FINAL_STATE:
            ch = self.input_stream.read(1)
            if state == _NumberScannerState.INITIAL_STATE:
                if not ch.isdigit():
                    error('internal error: illegal call')
                state = _NumberScannerState.BEFORE_DECIMAL_POINT
            elif state == _NumberScannerState.BEFORE_DECIMAL_POINT:
                if ch == '.':
                    state = _NumberScannerState.AFTER_DECIMAL_POINT
                elif ch in ('e', 'E'):
                    state = _NumberScannerState.STARTING_EXPONENT
                elif not ch.isdigit():
                    if ch:
                        self.unget_char(ch)
                    state = _NumberScannerState.FINAL_STATE
            elif state == _NumberScannerState.AFTER_DECIMAL_POINT:
                if ch in ('e', 'E'):
                    state = _NumberScannerState.STARTING_EXPONENT
                elif not ch.isdigit():
                    if ch:
                        self.unget_char(ch)
                    state = _NumberScannerState.FINAL_STATE
            elif state == _NumberScannerState.STARTING_EXPONENT:
                if ch in ('-', '+'):
                    state = _NumberScannerState.FOUND_EXPONENT_SIGN
                elif ch.isdigit():
                    state = _NumberScannerState.SCANNING_EXPONENT
                else:
                    # Not an exponent after all: push back ch and the
                    # consumed 'e'/'E', and drop the 'e' from the token.
                    # (BUG FIX: previously called the nonexistent
                    # input_stream.unget_char and left the 'e' in the
                    # token while double-rewinding the stream.)
                    if ch:
                        self.unget_char(ch)
                    self.unget_char(token[-1])
                    token = token[:-1]
                    state = _NumberScannerState.FINAL_STATE
            elif state == _NumberScannerState.FOUND_EXPONENT_SIGN:
                if ch.isdigit():
                    state = _NumberScannerState.SCANNING_EXPONENT
                else:
                    # Push back ch, the sign and the 'e'/'E'; drop the
                    # sign and 'e' from the token.
                    if ch:
                        self.unget_char(ch)
                    self.unget_char(token[-1])
                    self.unget_char(token[-2])
                    token = token[:-2]
                    state = _NumberScannerState.FINAL_STATE
            elif state == _NumberScannerState.SCANNING_EXPONENT:
                # BUG FIX: this branch previously compared the undefined
                # name `case`, raising NameError whenever it was reached.
                if not ch.isdigit():
                    if ch:
                        self.unget_char(ch)
                    state = _NumberScannerState.FINAL_STATE
            else:
                state = _NumberScannerState.FINAL_STATE
            if state != _NumberScannerState.FINAL_STATE:
                token += ch
        return token
    def _scan_string(self):
        """Scan a quoted string, honoring backslash escapes; the returned
        token includes the opening quote."""
        token = ''
        delim = self.input_stream.read(1)
        token += delim
        escape = False
        while True:
            ch = self.input_stream.read(1)
            if not ch:
                error('found unterminated string')  # TODO: fn name
            if ch == delim and not escape:
                break
            escape = ch == '\\' and not escape
            token += ch
        return token
    def _is_operator(self, op):
        """Return True if *op* is exactly a registered operator."""
        return op in self._operators
    def _is_operator_prefix(self, op):
        """Return True if *op* is a prefix of any registered operator."""
        return any(operator.startswith(op) for operator in self._operators)
@_enum.unique
class _NumberScannerState(_enum.IntEnum):
    """States of the number-scanning state machine in _scan_number()."""
    INITIAL_STATE = 0         # nothing consumed yet; expect a digit
    BEFORE_DECIMAL_POINT = 1  # scanning the integer part
    AFTER_DECIMAL_POINT = 2   # scanning the fractional part
    STARTING_EXPONENT = 3     # just saw 'e'/'E'
    FOUND_EXPONENT_SIGN = 4   # saw 'e'/'E' followed by '+'/'-'
    SCANNING_EXPONENT = 5     # scanning exponent digits
    FINAL_STATE = 6           # token complete
def _test():
    """Placeholder for module self-tests; currently does nothing."""
    pass
if __name__ == '__main__':
    # Ad-hoc demo: tokenize a sample string containing words, a number,
    # a quoted string and a custom '>>' operator, printing each token
    # and its type.
    import io as _io
    source = _io.StringIO('hello 3.14 "world" is this weird >> then I think so')
    scanner = TokenScanner(source)
    scanner.add_operator('>>')
    # NOTE(review): these poke private flags directly; `_scan_words` is not
    # an attribute the scanner defines, so that line has no effect (word
    # scanning is always on) -- TODO expose a public API for these flags.
    scanner._scan_numbers = True
    scanner._scan_words = True
    while scanner.has_more_tokens():
        token = scanner.next_token()
        print(token)
        print(scanner.get_token_type(token))
| |
"""Unit test for Zookeeper to FS module
"""
import collections
import glob
import os
import shutil
import tempfile
import unittest
from tests.testutils import mockzk
import kazoo
import mock
from treadmill import fs
from treadmill import zksync
class ZkSyncTest(mockzk.MockZookeeperTestCase):
    """Mock test for treadmill.zksync"""
    def setUp(self):
        """Setup common test variables"""
        super(ZkSyncTest, self).setUp()
        # Scratch directory standing in for the zk2fs sync root.
        self.root = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the scratch directory created by setUp."""
        if self.root and os.path.isdir(self.root):
            shutil.rmtree(self.root)
        super(ZkSyncTest, self).tearDown()
    def _check_file(self, fpath, content=None):
        """Check that file exists and content matches (if specified)."""
        self.assertTrue(os.path.exists(os.path.join(self.root, fpath)))
        if content is not None:
            with open(os.path.join(self.root, fpath)) as f:
                self.assertTrue(content == f.read())
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    def test_sync_children(self):
        """Test zk2fs sync with no data."""
        # Disable W0212: accessing protected members.
        # pylint: disable=W0212
        zk_content = {
            'a': {
                'x': b'1',
                'y': b'2',
                'z': b'3',
            },
        }
        self.make_mock_zk(zk_content)
        zk2fs_sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        fs.mkdir_safe(os.path.join(self.root, 'a'))
        # watch_data=False: children are mirrored once, no per-node
        # data watches are installed.
        zk2fs_sync._children_watch('/a', ['x', 'y', 'z'],
                                   False,
                                   zk2fs_sync._default_on_add,
                                   zk2fs_sync._default_on_del)
        self._check_file('a/x', '1')
        self._check_file('a/y', '2')
        self._check_file('a/z', '3')
        self.assertNotIn('/a/x', zk2fs_sync.watches)
        # Common files are ignored in sync, 'x' content will not be updated.
        zk_content['a']['x'] = b'123'
        zk_content['a']['q'] = b'qqq'
        zk2fs_sync._children_watch('/a', ['x', 'y', 'z', 'q'],
                                   False,
                                   zk2fs_sync._default_on_add,
                                   zk2fs_sync._default_on_del)
        self._check_file('a/x', '1')
        self._check_file('a/q', 'qqq')
        # Removing node from zk will delete it from file system.
        del zk_content['a']['x']
        zk2fs_sync._children_watch('/a', ['y', 'z', 'q'],
                                   False,
                                   zk2fs_sync._default_on_add,
                                   zk2fs_sync._default_on_del)
        self.assertFalse(os.path.exists(os.path.join(self.root, 'a/x')))
    @mock.patch('glob.glob', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    def test_sync_children_unordered(self):
        """Test zk2fs sync with unordered data."""
        # Disable W0212: accessing protected members.
        # pylint: disable=W0212
        zk_content = {
            'a': {
                'z': b'1',
                'x': b'2',
                'y': b'3',
            },
        }
        self.make_mock_zk(zk_content)
        # Pre-existing filesystem entries; 'a/b' and 'a/a' have no zk
        # counterpart and must be removed.
        glob.glob.return_value = ['a/b', 'a/a', 'a/y']
        add = []
        rm = []
        zk2fs_sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        zk2fs_sync._children_watch('/a', ['z', 'x', 'y'],
                                   False,
                                   lambda x: add.append(os.path.basename(x)),
                                   lambda x: rm.append(os.path.basename(x)))
        # y first because its common
        self.assertSequenceEqual(['y', 'x', 'z'], add)
        self.assertSequenceEqual(['a', 'b'], rm)
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    def test_sync_children_datawatch(self):
        """Test data sync."""
        # accessing protected members.
        # pylint: disable=W0212
        zk_content = {
            'a': {
                'x': b'1',
                'y': b'2',
                'z': b'3',
            },
        }
        self.make_mock_zk(zk_content)
        zk2fs_sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        fs.mkdir_safe(os.path.join(self.root, 'a'))
        # watch_data=True: a data watch is registered for every child.
        zk2fs_sync._children_watch('/a', ['x', 'y', 'z'],
                                   True,
                                   zk2fs_sync._default_on_add,
                                   zk2fs_sync._default_on_del)
        self._check_file('a/x', '1')
        self._check_file('a/y', '2')
        self._check_file('a/z', '3')
        self.assertIn('/a/x', zk2fs_sync.watches)
        self.assertIn('/a/y', zk2fs_sync.watches)
        self.assertIn('/a/z', zk2fs_sync.watches)
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    def test_sync_data(self):
        """Test data sync."""
        # accessing protected members.
        # pylint: disable=W0212
        zk_content = {
            'a': {
                'x': b'1',
                'y': b'2',
                'z': b'3',
            },
        }
        mock_stat = collections.namedtuple('ZkStat', ['last_modified'])(0)
        self.make_mock_zk(zk_content)
        zk2fs_sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        fs.mkdir_safe(os.path.join(self.root, 'a'))
        event = kazoo.protocol.states.WatchedEvent(
            'CREATED', 'CONNECTED', '/a/x')
        zk2fs_sync._data_watch('/a/x', b'aaa', mock_stat, event)
        self._check_file('a/x', 'aaa')
        event = kazoo.protocol.states.WatchedEvent(
            'DELETED', 'CONNECTED', '/a/x')
        # NOTE(review): the data argument is a str here (vs bytes above);
        # presumably unused on DELETED events -- confirm.
        zk2fs_sync._data_watch('/a/x', 'aaa', mock_stat, event)
        self.assertFalse(os.path.exists(os.path.join(self.root, 'a/x')))
        event = kazoo.protocol.states.WatchedEvent(
            'CREATED', 'CONNECTED', '/a/x')
        zk2fs_sync._data_watch('/a/x', b'aaa', mock_stat, event)
        self._check_file('a/x', 'aaa')
        # All-None invocation (node gone, no event) must also remove the file.
        zk2fs_sync._data_watch('/a/x', None, None, None)
        self.assertFalse(os.path.exists(os.path.join(self.root, 'a/x')))
    @mock.patch('kazoo.client.KazooClient.get', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
    @mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
    def test_sync_children_immutable(self):
        """Test zk2fs sync with no watch needed."""
        # Disable W0212: accessing protected members.
        # pylint: disable=W0212
        zk_content = {
            'a': {
                'x': b'1',
                'y': b'2',
                'z': b'3',
            },
        }
        self.make_mock_zk(zk_content)
        zk2fs_sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        fs.mkdir_safe(os.path.join(self.root, 'a'))
        zk2fs_sync.sync_children('/a',
                                 watch_data=False,
                                 on_add=zk2fs_sync._default_on_add,
                                 on_del=zk2fs_sync._default_on_del,
                                 need_watch_predicate=lambda *args: False,
                                 cont_watch_predicate=lambda *args: False)
        self._check_file('a/x', '1')
        self._check_file('a/y', '2')
        self._check_file('a/z', '3')
        self.assertNotIn('/a/x', zk2fs_sync.watches)
        self.assertNotIn('/a', zk2fs_sync.watches)
        # '.done' marker signals the directory is fully synced / immutable.
        self.assertTrue(os.path.exists(os.path.join(self.root, 'a', '.done')))
        kazoo.client.KazooClient.get_children.reset_mock()
        # Second sync of an immutable directory must not hit Zookeeper again.
        zk2fs_sync.sync_children('/a',
                                 watch_data=False,
                                 on_add=zk2fs_sync._default_on_add,
                                 on_del=zk2fs_sync._default_on_del,
                                 need_watch_predicate=lambda *args: False,
                                 cont_watch_predicate=lambda *args: False)
        self.assertFalse(kazoo.client.KazooClient.get_children.called)
    def test_write_data(self):
        """Tests writing data to filesystem."""
        path_ok = os.path.join(self.root, 'a')
        zksync.write_data(path_ok, None, 12345)
        self.assertTrue(os.path.exists(path_ok))
        # A component longer than the OS filename limit must fail with
        # OSError by default...
        path_too_long = os.path.join(self.root, 'a' * 1024)
        self.assertRaises(
            OSError,
            zksync.write_data, path_too_long, None, 12345)
        self.assertFalse(os.path.exists(path_too_long))
        # ...and be silently ignored with raise_err=False.
        zksync.write_data(path_too_long, None, 12345, raise_err=False)
        self.assertFalse(os.path.exists(path_too_long))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| |
"""
In-memory storage for JobControl state.
This is mostly a reference implementation, and to be used
for testing purposes.
"""
from collections import defaultdict
from datetime import datetime, timedelta
from itertools import count
import copy
from jobcontrol.interfaces import StorageBase
from jobcontrol.exceptions import NotFound
from jobcontrol.utils import ExceptionPlaceholder
class MemoryStorage(StorageBase):
def __init__(self):
# Does nothing in default implementation, but in others
# migth get arguments / do stuff.
self._init_vars()
@classmethod
def from_url(cls, url):
# No need for an URL -- it's just an in-memory storage!
return cls()
def _init_vars(self):
self._jobs = {}
self._builds = {}
self._log_messages = defaultdict(list) # build: messages
# self._jobs_seq = count()
self._builds_seq = count()
# ------------------------------------------------------------
# Installation methods.
# For resource initialization, if needed.
# ------------------------------------------------------------
def install(self):
self._init_vars()
def uninstall(self):
self._init_vars()
def get_job_builds(self, job_id, started=None, finished=None,
success=None, skipped=None, order='asc', limit=100):
filters = [lambda x: x['job_id'] == job_id]
if started is not None:
filters.append(lambda x: x['started'] is started)
if finished is not None:
filters.append(lambda x: x['finished'] is finished)
if success is not None:
filters.append(lambda x: x['success'] is success)
if skipped is not None:
filters.append(lambda x: x['skipped'] is skipped)
if order == 'asc':
order_func = lambda x: sorted(x, key=lambda y: y[1]['id'])
elif order == 'desc':
order_func = lambda x: reversed(
sorted(x, key=lambda y: y[1]['id']))
else:
raise ValueError("Invalid order direction: {0}"
.format(order))
for build_id, build in order_func(self._builds.iteritems()):
if (limit is not None) and limit <= 0:
return
if all(f(build) for f in filters):
yield copy.deepcopy(build)
if limit is not None:
limit -= 1
# ------------------------------------------------------------
# Build CRUD methods
# ------------------------------------------------------------
def create_build(self, job_id, config=None):
build_id = self._builds_seq.next()
build = self._normalize_build_info({
'id': build_id,
'job_id': job_id,
'config': config or {},
# Progress is stored in a dict; then we'll have to rebuild it
# into a proper tree.
'progress_info': {},
})
self._builds[build_id] = build
return build_id
def get_build(self, build_id):
if build_id not in self._builds:
raise NotFound('No such build: {0}'.format(build_id))
return copy.deepcopy(self._builds[build_id])
def delete_build(self, build_id):
self._log_messages.pop(build_id, None)
self._builds.pop(build_id, None)
def start_build(self, build_id):
if build_id not in self._builds:
raise NotFound('No such build: {0}'.format(build_id))
self._builds[build_id]['started'] = True
self._builds[build_id]['start_time'] = datetime.now()
def finish_build(self, build_id, success=True, skipped=False, retval=None,
exception=None, exception_tb=None):
if build_id not in self._builds:
raise NotFound('No such build: {0}'.format(build_id))
# So we can fail coherently if it is not serializable
self.pack(retval)
try:
self.pack(exception)
except:
exception = ExceptionPlaceholder(exception)
self._builds[build_id]['finished'] = True
self._builds[build_id]['end_time'] = datetime.now()
self._builds[build_id]['success'] = success
self._builds[build_id]['skipped'] = skipped
self._builds[build_id]['retval'] = retval
self._builds[build_id]['exception'] = exception
self._builds[build_id]['exception_tb'] = exception_tb
def report_build_progress(self, build_id, current, total, group_name=None,
status_line=''):
try:
build = self._builds[build_id]
except KeyError:
raise NotFound("Build {0} not found".format(build_id))
if not group_name:
group_name = None
if group_name is not None:
if isinstance(group_name, list):
group_name = tuple(group_name)
if not isinstance(group_name, tuple):
raise TypeError('group_name must be a tuple (or None)')
build['progress_info'][group_name] = {
'current': current,
'total': total,
'status_line': status_line,
}
    def get_build_progress_info(self, build_id):
        """Return the build's progress entries as a flat list of dicts.

        Each item is a copy of a stored progress record with its
        ``group_name`` key folded in, so callers get self-contained dicts.

        :raises NotFound: propagated from :meth:`get_build` for unknown ids.
        """
        items = []
        build = self.get_build(build_id)
        # iteritems: this module targets Python 2.
        for group_name, item in build['progress_info'].iteritems():
            _item = item.copy()
            _item['group_name'] = group_name
            items.append(_item)
        return items
    def log_message(self, build_id, record):
        """Store a log record for a build.

        The record is normalized by ``_prepare_log_record`` (returns a
        dict, judging by the item assignment below) and tagged with the
        build id before being appended to the per-build message list.
        """
        record = self._prepare_log_record(record)
        record['build_id'] = build_id
        # NOTE(review): assumes self._log_messages auto-creates the list for
        # new build ids (e.g. a defaultdict) -- confirm against __init__.
        self._log_messages[build_id].append(record)
    def prune_log_messages(self, build_id=None, max_age=None,
                           level=None):
        """Delete stored log messages matching ALL of the given criteria.

        A message is removed only when every active filter matches it
        (matching build_id AND older than max_age AND below level); all
        other messages are kept.

        :param build_id: only prune messages of this build.
        :param max_age: prune messages older than this many seconds.
        :param level: prune messages with levelno strictly below this.
        """
        filters = []
        if build_id is not None:
            filters.append(lambda x: x['build_id'] == build_id)
        if max_age is not None:
            expire_date = datetime.now() - timedelta(seconds=max_age)
            filters.append(lambda x: x['created'] < expire_date)
        if level is not None:
            filters.append(lambda x: x['record'].levelno < level)
        # NOTE(review): when build_id is None this indexes the None key of
        # _log_messages rather than pruning across all builds -- confirm
        # whether that is intended.
        self._log_messages[build_id] = [
            msg for msg in self._log_messages[build_id]
            if not (all(f(msg) for f in filters))
        ]
def iter_log_messages(self, build_id=None, max_date=None,
min_date=None, min_level=None):
filters = []
if build_id is not None:
filters.append(lambda x: x.build_id == build_id)
if max_date is not None:
filters.append(lambda x: x.created < max_date)
if min_date is not None:
filters.append(lambda x: x.created >= min_date)
if min_level is not None:
filters.append(lambda x: x.levelno >= min_level)
for msg in self._log_messages[build_id]:
if all(f(msg) for f in filters):
yield msg
| |
from __future__ import division, print_function, absolute_import
from tempfile import mkdtemp, mktemp
import os
import shutil
import numpy as np
from numpy import array, transpose, pi
from numpy.testing import (assert_equal,
assert_array_equal, assert_array_almost_equal,
assert_raises)
import scipy.sparse
from scipy.io.mmio import mminfo, mmread, mmwrite
class TestMMIOArray(object):
    """Round-trip tests for dense Matrix Market I/O (mmwrite/mminfo/mmread)."""
    def setup_method(self):
        # Fresh private temp dir per test; each case writes the same .mtx path.
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check(self, a, info):
        # Write `a`, verify the parsed header, read back and compare
        # approximately (float/complex data).
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a, b)
    def check_exact(self, a, info):
        # As check(), but require bit-exact equality (integer data).
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a, b)
    def test_simple_integer(self):
        self.check_exact([[1, 2], [3, 4]],
                         (2, 2, 4, 'array', 'integer', 'general'))
    def test_32bit_integer(self):
        a = array([[2**31-1, 2**31-2], [2**31-3, 2**31-4]], dtype=np.int32)
        self.check_exact(a, (2, 2, 4, 'array', 'integer', 'general'))
    def test_64bit_integer(self):
        a = array([[2**31, 2**32], [2**63-2, 2**63-1]], dtype=np.int64)
        # On 32-bit platforms values beyond 2**31 cannot be written.
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'array', 'integer', 'general'))
    def test_simple_upper_triangle_integer(self):
        self.check_exact([[0, 1], [0, 0]],
                         (2, 2, 4, 'array', 'integer', 'general'))
    def test_simple_lower_triangle_integer(self):
        self.check_exact([[0, 0], [1, 0]],
                         (2, 2, 4, 'array', 'integer', 'general'))
    def test_simple_rectangular_integer(self):
        self.check_exact([[1, 2, 3], [4, 5, 6]],
                         (2, 3, 6, 'array', 'integer', 'general'))
    def test_simple_rectangular_float(self):
        self.check([[1, 2], [3.5, 4], [5, 6]],
                   (3, 2, 6, 'array', 'real', 'general'))
    def test_simple_float(self):
        self.check([[1, 2], [3, 4.0]],
                   (2, 2, 4, 'array', 'real', 'general'))
    def test_simple_complex(self):
        self.check([[1, 2], [3, 4j]],
                   (2, 2, 4, 'array', 'complex', 'general'))
    def test_simple_symmetric_integer(self):
        self.check_exact([[1, 2], [2, 4]],
                         (2, 2, 4, 'array', 'integer', 'symmetric'))
    def test_simple_skew_symmetric_integer(self):
        self.check_exact([[1, 2], [-2, 4]],
                         (2, 2, 4, 'array', 'integer', 'skew-symmetric'))
    def test_simple_skew_symmetric_float(self):
        self.check(array([[1, 2], [-2.0, 4]], 'f'),
                   (2, 2, 4, 'array', 'real', 'skew-symmetric'))
    def test_simple_hermitian_complex(self):
        self.check([[1, 2+3j], [2-3j, 4]],
                   (2, 2, 4, 'array', 'complex', 'hermitian'))
    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        # a + a.T is symmetric by construction.
        a = a + transpose(a)
        self.check(a, (20, 20, 400, 'array', 'real', 'symmetric'))
    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        self.check(a, (20, 15, 300, 'array', 'real', 'general'))
class TestMMIOSparseCSR(TestMMIOArray):
    """Same round-trip cases as TestMMIOArray, driven through CSR sparse
    matrices (coordinate format on disk); overrides check/check_exact to
    densify before comparing."""
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check(self, a, info):
        # Sparse round-trip with approximate comparison of dense views.
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(a.todense(), b.todense())
    def check_exact(self, a, info):
        # Sparse round-trip with exact comparison of dense views.
        mmwrite(self.fn, a)
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_equal(a.todense(), b.todense())
    def test_simple_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [3, 4]]),
                         (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_32bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**31-1, -2**31+2],
                                           [2**31-3, 2**31-4]],
                                          dtype=np.int32))
        self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_64bit_integer(self):
        a = scipy.sparse.csr_matrix(array([[2**32+1, 2**32+1],
                                           [-2**63+2, 2**63-2]],
                                          dtype=np.int64))
        # On 32-bit platforms these entries overflow on write.
        if (np.intp(0).itemsize < 8):
            assert_raises(OverflowError, mmwrite, self.fn, a)
        else:
            self.check_exact(a, (2, 2, 4, 'coordinate', 'integer', 'general'))
    def test_simple_upper_triangle_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[0, 1], [0, 0]]),
                         (2, 2, 1, 'coordinate', 'integer', 'general'))
    def test_simple_lower_triangle_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[0, 0], [1, 0]]),
                         (2, 2, 1, 'coordinate', 'integer', 'general'))
    def test_simple_rectangular_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2, 3], [4, 5, 6]]),
                         (2, 3, 6, 'coordinate', 'integer', 'general'))
    def test_simple_rectangular_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3.5, 4], [5, 6]]),
                   (3, 2, 6, 'coordinate', 'real', 'general'))
    def test_simple_float(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4.0]]),
                   (2, 2, 4, 'coordinate', 'real', 'general'))
    def test_simple_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2], [3, 4j]]),
                   (2, 2, 4, 'coordinate', 'complex', 'general'))
    def test_simple_symmetric_integer(self):
        # Symmetric storage: only 3 entries on disk for the 2x2 matrix.
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [2, 4]]),
                         (2, 2, 3, 'coordinate', 'integer', 'symmetric'))
    def test_simple_skew_symmetric_integer(self):
        self.check_exact(scipy.sparse.csr_matrix([[1, 2], [-2, 4]]),
                         (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'))
    def test_simple_skew_symmetric_float(self):
        self.check(scipy.sparse.csr_matrix(array([[1, 2], [-2.0, 4]], 'f')),
                   (2, 2, 3, 'coordinate', 'real', 'skew-symmetric'))
    def test_simple_hermitian_complex(self):
        self.check(scipy.sparse.csr_matrix([[1, 2+3j], [2-3j, 4]]),
                   (2, 2, 3, 'coordinate', 'complex', 'hermitian'))
    def test_random_symmetric_float(self):
        sz = (20, 20)
        a = np.random.random(sz)
        a = a + transpose(a)
        a = scipy.sparse.csr_matrix(a)
        # 210 = upper triangle incl. diagonal of a 20x20 matrix.
        self.check(a, (20, 20, 210, 'coordinate', 'real', 'symmetric'))
    def test_random_rectangular_float(self):
        sz = (20, 15)
        a = np.random.random(sz)
        a = scipy.sparse.csr_matrix(a)
        self.check(a, (20, 15, 300, 'coordinate', 'real', 'general'))
    def test_simple_pattern(self):
        # 'pattern' field stores only the nonzero structure, not values.
        a = scipy.sparse.csr_matrix([[0, 1.5], [3.0, 2.5]])
        p = np.zeros_like(a.todense())
        p[a.todense() > 0] = 1
        info = (2, 2, 3, 'coordinate', 'pattern', 'general')
        mmwrite(self.fn, a, field='pattern')
        assert_equal(mminfo(self.fn), info)
        b = mmread(self.fn)
        assert_array_almost_equal(p, b.todense())
# Literal Matrix Market files used by TestMMIOReadLargeIntegers below.
# Dense 2x2 array whose entries all fit in 32-bit integers.
_32bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483647
2147483646
2147483647
2147483646
'''
# Sparse symmetric matrix, 32-bit entries on the diagonal.
_32bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483647
2 2 2147483646
'''
# Dense array with entries requiring 64-bit integers.
_64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483648
-9223372036854775806
-2147483648
9223372036854775807
'''
# Sparse general matrix with 64-bit entries.
_64bit_integer_sparse_general_example = '''\
%%MatrixMarket matrix coordinate integer general
2 2 3
1 1 2147483648
1 2 9223372036854775807
2 2 9223372036854775807
'''
# Sparse symmetric matrix with 64-bit entries.
_64bit_integer_sparse_symmetric_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 3
1 1 2147483648
1 2 -9223372036854775807
2 2 9223372036854775807
'''
# Sparse skew-symmetric matrix with 64-bit entries.
_64bit_integer_sparse_skew_example = '''\
%%MatrixMarket matrix coordinate integer skew-symmetric
2 2 3
1 1 2147483648
1 2 -9223372036854775807
2 2 9223372036854775807
'''
# Dense array whose last entry (2**63) exceeds int64 -> must overflow.
_over64bit_integer_dense_example = '''\
%%MatrixMarket matrix array integer general
2 2
2147483648
9223372036854775807
2147483648
9223372036854775808
'''
# Sparse matrix with an entry far beyond int64 -> must overflow.
_over64bit_integer_sparse_example = '''\
%%MatrixMarket matrix coordinate integer symmetric
2 2 2
1 1 2147483648
2 2 19223372036854775808
'''
class TestMMIOReadLargeIntegers(object):
    """Reading of integer Matrix Market files near and beyond the int64
    range, including the expected OverflowError cases."""
    def setup_method(self):
        self.tmpdir = mkdtemp()
        self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
    def teardown_method(self):
        shutil.rmtree(self.tmpdir)
    def check_read(self, example, a, info, dense, over32, over64):
        # Write the literal example, then either expect OverflowError
        # (values over int64, or over int32 on a 32-bit platform) or
        # verify exact contents.
        with open(self.fn, 'w') as f:
            f.write(example)
        assert_equal(mminfo(self.fn), info)
        if (over32 and (np.intp(0).itemsize < 8)) or over64:
            assert_raises(OverflowError, mmread, self.fn)
        else:
            b = mmread(self.fn)
            if not dense:
                b = b.todense()
            assert_equal(a, b)
    def test_read_32bit_integer_dense(self):
        a = array([[2**31-1, 2**31-1],
                   [2**31-2, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=False,
                        over64=False)
    def test_read_32bit_integer_sparse(self):
        a = array([[2**31-1, 0],
                   [0, 2**31-2]], dtype=np.int64)
        self.check_read(_32bit_integer_sparse_example,
                        a,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=False,
                        over64=False)
    def test_read_64bit_integer_dense(self):
        a = array([[2**31, -2**31],
                   [-2**63+2, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_dense_example,
                        a,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=True,
                        over64=False)
    def test_read_64bit_integer_sparse_general(self):
        a = array([[2**31, 2**63-1],
                   [0, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_general_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'general'),
                        dense=False,
                        over32=True,
                        over64=False)
    def test_read_64bit_integer_sparse_symmetric(self):
        # Symmetric storage mirrors the (1,2) entry to (2,1).
        a = array([[2**31, -2**63+1],
                   [-2**63+1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_symmetric_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=True,
                        over64=False)
    def test_read_64bit_integer_sparse_skew(self):
        # Skew-symmetric storage mirrors (1,2) to (2,1) with sign flip.
        a = array([[2**31, -2**63+1],
                   [2**63-1, 2**63-1]], dtype=np.int64)
        self.check_read(_64bit_integer_sparse_skew_example,
                        a,
                        (2, 2, 3, 'coordinate', 'integer', 'skew-symmetric'),
                        dense=False,
                        over32=True,
                        over64=False)
    def test_read_over64bit_integer_dense(self):
        self.check_read(_over64bit_integer_dense_example,
                        None,
                        (2, 2, 4, 'array', 'integer', 'general'),
                        dense=True,
                        over32=True,
                        over64=True)
    def test_read_over64bit_integer_sparse(self):
        self.check_read(_over64bit_integer_sparse_example,
                        None,
                        (2, 2, 2, 'coordinate', 'integer', 'symmetric'),
                        dense=False,
                        over32=True,
                        over64=True)
_general_example = '''\
%%MatrixMarket matrix coordinate real general
%=================================================================================
%
% This ASCII file represents a sparse MxN matrix with L
% nonzeros in the following Matrix Market format:
%
% +----------------------------------------------+
% |%%MatrixMarket matrix coordinate real general | <--- header line
% |% | <--+
% |% comments | |-- 0 or more comment lines
% |% | <--+
% | M N L | <--- rows, columns, entries
% | I1 J1 A(I1, J1) | <--+
% | I2 J2 A(I2, J2) | |
% | I3 J3 A(I3, J3) | |-- L lines
% | . . . | |
% | IL JL A(IL, JL) | <--+
% +----------------------------------------------+
%
% Indices are 1-based, i.e. A(1,1) is the first element.
%
%=================================================================================
5 5 8
1 1 1.000e+00
2 2 1.050e+01
3 3 1.500e-02
1 4 6.000e+00
4 2 2.505e+02
4 4 -2.800e+02
4 5 3.332e+01
5 5 1.200e+01
'''
_hermitian_example = '''\
%%MatrixMarket matrix coordinate complex hermitian
5 5 7
1 1 1.0 0
2 2 10.5 0
4 2 250.5 22.22
3 3 1.5e-2 0
4 4 -2.8e2 0
5 5 12. 0
5 4 0 33.32
'''
_skew_example = '''\
%%MatrixMarket matrix coordinate real skew-symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 0
'''
_symmetric_example = '''\
%%MatrixMarket matrix coordinate real symmetric
5 5 7
1 1 1.0
2 2 10.5
4 2 250.5
3 3 1.5e-2
4 4 -2.8e2
5 5 12.
5 4 8
'''
_symmetric_pattern_example = '''\
%%MatrixMarket matrix coordinate pattern symmetric
5 5 7
1 1
2 2
4 2
3 3
4 4
5 5
5 4
'''
class TestMMIOCoordinate(object):
def setup_method(self):
self.tmpdir = mkdtemp()
self.fn = os.path.join(self.tmpdir, 'testfile.mtx')
def teardown_method(self):
shutil.rmtree(self.tmpdir)
def check_read(self, example, a, info):
f = open(self.fn, 'w')
f.write(example)
f.close()
assert_equal(mminfo(self.fn), info)
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_read_general(self):
a = [[1, 0, 0, 6, 0],
[0, 10.5, 0, 0, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 33.32],
[0, 0, 0, 0, 12]]
self.check_read(_general_example, a,
(5, 5, 8, 'coordinate', 'real', 'general'))
def test_read_hermitian(self):
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, 250.5 - 22.22j, 0],
[0, 0, .015, 0, 0],
[0, 250.5 + 22.22j, 0, -280, -33.32j],
[0, 0, 0, 33.32j, 12]]
self.check_read(_hermitian_example, a,
(5, 5, 7, 'coordinate', 'complex', 'hermitian'))
def test_read_skew(self):
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, -250.5, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 0],
[0, 0, 0, 0, 12]]
self.check_read(_skew_example, a,
(5, 5, 7, 'coordinate', 'real', 'skew-symmetric'))
def test_read_symmetric(self):
a = [[1, 0, 0, 0, 0],
[0, 10.5, 0, 250.5, 0],
[0, 0, .015, 0, 0],
[0, 250.5, 0, -280, 8],
[0, 0, 0, 8, 12]]
self.check_read(_symmetric_example, a,
(5, 5, 7, 'coordinate', 'real', 'symmetric'))
def test_read_symmetric_pattern(self):
a = [[1, 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 0, 1, 1]]
self.check_read(_symmetric_pattern_example, a,
(5, 5, 7, 'coordinate', 'pattern', 'symmetric'))
def test_empty_write_read(self):
# http://projects.scipy.org/scipy/ticket/883
b = scipy.sparse.coo_matrix((10, 10))
mmwrite(self.fn, b)
assert_equal(mminfo(self.fn),
(10, 10, 0, 'coordinate', 'real', 'symmetric'))
a = b.todense()
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_bzip2_py3(self):
# test if fix for #2152 works
try:
# bz2 module isn't always built when building Python.
import bz2
except:
return
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
fn_bzip2 = "%s.bz2" % self.fn
with open(self.fn, 'rb') as f_in:
f_out = bz2.BZ2File(fn_bzip2, 'wb')
f_out.write(f_in.read())
f_out.close()
a = mmread(fn_bzip2).todense()
assert_array_almost_equal(a, b.todense())
def test_gzip_py3(self):
# test if fix for #2152 works
try:
# gzip module can be missing from Python installation
import gzip
except:
return
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
fn_gzip = "%s.gz" % self.fn
with open(self.fn, 'rb') as f_in:
f_out = gzip.open(fn_gzip, 'wb')
f_out.write(f_in.read())
f_out.close()
a = mmread(fn_gzip).todense()
assert_array_almost_equal(a, b.todense())
def test_real_write_read(self):
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
assert_equal(mminfo(self.fn),
(5, 5, 8, 'coordinate', 'real', 'general'))
a = b.todense()
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_complex_write_read(self):
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
b = scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5))
mmwrite(self.fn, b)
assert_equal(mminfo(self.fn),
(5, 5, 8, 'coordinate', 'complex', 'general'))
a = b.todense()
b = mmread(self.fn).todense()
assert_array_almost_equal(a, b)
def test_sparse_formats(self):
mats = []
I = array([0, 0, 1, 2, 3, 3, 3, 4])
J = array([0, 3, 1, 2, 1, 3, 4, 4])
V = array([1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0])
mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
V = array([1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j,
250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j])
mats.append(scipy.sparse.coo_matrix((V, (I, J)), shape=(5, 5)))
for mat in mats:
expected = mat.todense()
for fmt in ['csr', 'csc', 'coo']:
fn = mktemp(dir=self.tmpdir) # safe, we own tmpdir
mmwrite(fn, mat.asformat(fmt))
result = mmread(fn).todense()
assert_array_almost_equal(result, expected)
def test_precision(self):
test_values = [pi] + [10**(i) for i in range(0, -10, -1)]
test_precisions = range(1, 10)
for value in test_values:
for precision in test_precisions:
# construct sparse matrix with test value at last main diagonal
n = 10**precision + 1
A = scipy.sparse.dok_matrix((n, n))
A[n-1, n-1] = value
# write matrix with test precision and read again
mmwrite(self.fn, A, precision=precision)
A = scipy.io.mmread(self.fn)
# check for right entries in matrix
assert_array_equal(A.row, [n-1])
assert_array_equal(A.col, [n-1])
assert_array_almost_equal(A.data,
[float('%%.%dg' % precision % value)])
| |
#
# Cassandra Cluster Management lib
#
from __future__ import absolute_import
import copy
import fnmatch
import logging
import os
import platform
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import time
from distutils.version import LooseVersion #pylint: disable=import-error, no-name-in-module
import yaml
from six import print_
# Well-known directory names inside a Cassandra/DSE/OpsCenter install tree.
BIN_DIR = "bin"
CASSANDRA_CONF_DIR = "conf"
DSE_CASSANDRA_CONF_DIR = "resources/cassandra/conf"
OPSCENTER_CONF_DIR = "conf"
# Well-known configuration/logging file names shipped with Cassandra.
CASSANDRA_CONF = "cassandra.yaml"
JVM_OPTS = "jvm.options"
LOG4J_CONF = "log4j-server.properties"
LOG4J_TOOL_CONF = "log4j-tools.properties"
LOGBACK_CONF = "logback.xml"
LOGBACK_TOOLS_CONF = "logback-tools.xml"
CASSANDRA_ENV = "cassandra-env.sh"
CASSANDRA_WIN_ENV = "cassandra-env.ps1"
CASSANDRA_SH = "cassandra.in.sh"
# Name of ccm's own config file inside the ccm config directory.
CONFIG_FILE = "config"
# Environment variable that overrides where ccm keeps its state.
CCM_CONFIG_DIR = "CCM_CONFIG_DIR"
# NOTE: configures the root logger at import time, which affects any
# application embedding this module.
logging.basicConfig(format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.DEBUG
                    )
LOG = logging.getLogger('ccm')
def error(msg):
    """Log *msg* at ERROR level on the shared 'ccm' logger."""
    LOG.error(msg)
def warning(msg):
    """Log *msg* at WARNING level on the shared 'ccm' logger."""
    LOG.warning(msg)
def info(msg):
    """Log *msg* at INFO level on the shared 'ccm' logger."""
    LOG.info(msg)
def debug(msg):
    """Log *msg* at DEBUG level on the shared 'ccm' logger."""
    LOG.debug(msg)
class CCMError(Exception):
    """Base class for all ccm-specific errors."""
    pass
class LoadError(CCMError):
    """Raised when loading a cluster/node definition fails."""
    pass
class ArgumentError(CCMError):
    """Raised on invalid user-supplied arguments or paths."""
    pass
class UnavailableSocketError(CCMError):
    """Raised when a required network address cannot be bound or resolved."""
    pass
class TimeoutError(Exception):
    """Raised when an operation exceeds its allotted time.

    NOTE: deliberately not a CCMError subclass; shadows the builtin
    TimeoutError on Python 3.
    """
    def __init__(self, data):
        Exception.__init__(self, str(data))
class LogPatternToVersion(object):
    """Map a Cassandra version to the log pattern that applies to it.

    Holds a dict of {version: pattern}; calling the instance with a version
    returns the pattern of the highest mapped version not exceeding it,
    falling back to ``default_pattern`` when no mapped version applies.
    """
    def __init__(self, versions_to_patterns, default_pattern=None):
        self.versions_to_patterns, self.default_pattern = versions_to_patterns, default_pattern
    def __call__(self, version):
        # NOTE(review): `k <= version` compares keys with plain <= (string
        # comparison when keys are strings, e.g. "2.10" <= "2.9" is True),
        # while the max() below converts to LooseVersion -- confirm the keys
        # are chosen so this discrepancy never matters.
        keys_less_than_version = [k for k in self.versions_to_patterns if k <= version]
        if not keys_less_than_version:
            if self.default_pattern is not None:
                return self.default_pattern
            else:
                raise ValueError("Some kind of default pattern must be specified!")
        return self.versions_to_patterns[max(keys_less_than_version, key=lambda v: LooseVersion(v) if not isinstance(v, LooseVersion) else v)]
    def __repr__(self):
        return str(self.__class__) + "(versions_to_patterns={}, default_pattern={})".format(self.versions_to_patterns, self.default_pattern)
    @property
    def patterns(self):
        """All known patterns, including the default when set."""
        patterns = list(self.versions_to_patterns.values())
        if self.default_pattern is not None:
            patterns = patterns + [self.default_pattern]
        return patterns
    @property
    def versions(self):
        """The mapped version keys."""
        return list(self.versions_to_patterns)
def get_default_path():
    """Return ccm's configuration directory, creating it if needed.

    Honours the CCM_CONFIG_DIR environment variable when set to a
    non-empty value; otherwise falls back to ~/.ccm.
    """
    configured = os.environ.get(CCM_CONFIG_DIR)
    default_path = configured if configured else os.path.join(get_user_home(), '.ccm')
    if not os.path.exists(default_path):
        os.mkdir(default_path)
    return default_path
def get_default_path_display_name():
    """Return the default ccm path, abbreviated with '~' when under $HOME."""
    default_path = get_default_path().lower()
    home = get_user_home().lower()
    if default_path.startswith(home):
        return os.path.join('~', default_path[len(home) + 1:])
    return default_path
def get_user_home():
    """Return the current user's home directory for the active platform."""
    if is_win():
        if sys.platform == "cygwin":
            # Need the fully qualified directory
            # NOTE(review): Popen.communicate() yields bytes on Python 3,
            # so this returns bytes under cygwin -- confirm callers cope.
            output = subprocess.Popen(["cygpath", "-m", os.path.expanduser('~')], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0].rstrip()
            return output
        else:
            return os.environ['USERPROFILE']
    else:
        return os.path.expanduser('~')
def get_config():
    """Load and return ccm's user config file as a dict ({} when absent)."""
    config_path = os.path.join(get_default_path(), CONFIG_FILE)
    if not os.path.exists(config_path):
        return {}
    with open(config_path, 'r') as f:
        # safe_load: the config is plain YAML, and yaml.load without an
        # explicit Loader is deprecated and can execute arbitrary tags.
        return yaml.safe_load(f)
def now_ms():
    """Current wall-clock time as integer milliseconds since the epoch."""
    return int(round(1000 * time.time()))
def parse_interface(itf, default_port):
    """Parse 'host' or 'host:port' into a (host, port) tuple.

    :param itf: interface string; whitespace around pieces is stripped.
    :param default_port: port used when *itf* carries none.
    :raises ValueError: when *itf* contains more than one ':'.
    """
    pieces = [p.strip() for p in itf.split(':')]
    if len(pieces) == 1:
        return (pieces[0], default_port)
    if len(pieces) == 2:
        return (pieces[0], int(pieces[1]))
    raise ValueError("Invalid interface definition: " + itf)
def current_cluster_name(path):
    """Return the active cluster name from <path>/CURRENT, or None.

    None is returned when the marker file cannot be opened.
    """
    marker = os.path.join(path, 'CURRENT')
    try:
        f = open(marker, 'r')
    except IOError:
        return None
    with f:
        return f.readline().strip()
def switch_cluster(path, new_name):
    """Record *new_name* as the active cluster in <path>/CURRENT."""
    marker = os.path.join(path, 'CURRENT')
    with open(marker, 'w') as f:
        f.write('%s\n' % new_name)
def replace_in_file(file, regexp, replace):
    """Convenience wrapper: apply a single (regexp, replace) pair to *file*.

    See :func:`replaces_in_file` for the replacement semantics.
    """
    replaces_in_file(file, [(regexp, replace)])
def replaces_in_file(file, replacement_list):
    """Rewrite *file*, replacing matching lines wholesale.

    Each (regexp, replacement) pair is applied in order; whenever a pattern
    matches anywhere in a line, the entire line is replaced by the
    replacement text. The file is rewritten atomically via a pid-suffixed
    temp file in the same directory.
    """
    compiled = [(re.compile(pattern), repl) for pattern, repl in replacement_list]
    tmp_name = '{0}.{1}.tmp'.format(file, os.getpid())
    with open(file, 'r') as src:
        with open(tmp_name, 'w') as dst:
            for line in src:
                for pattern, repl in compiled:
                    if pattern.search(line):
                        line = repl + "\n"
                dst.write(line)
    shutil.move(tmp_name, file)
def replace_or_add_into_file_tail(file, regexp, replace):
    """Convenience wrapper: replace-or-append a single (regexp, replace) pair.

    See :func:`replaces_or_add_into_file_tail` for the semantics.
    """
    replaces_or_add_into_file_tail(file, [(regexp, replace)])
def replaces_or_add_into_file_tail(file, replacement_list, add_config_close=True):
    """Rewrite *file*, replacing matching lines or appending at the tail.

    Like :func:`replaces_in_file`, but when no pattern matched any line the
    replacement text is appended at the end. Existing '</configuration>'
    lines are dropped and, when *add_config_close* is true, a single
    closing tag is re-emitted at EOF (so appended lines land inside it).

    NOTE(review): the append branch writes only `replace` from the last
    loop iteration, i.e. only the LAST pair of replacement_list is added
    when nothing matched -- confirm multi-pair callers expect this.
    """
    rs = [(re.compile(regexp), repl) for (regexp, repl) in replacement_list]
    is_line_found = False
    file_tmp = file + "." + str(os.getpid()) + ".tmp"
    with open(file, 'r') as f:
        with open(file_tmp, 'w') as f_tmp:
            for line in f:
                for r, replace in rs:
                    match = r.search(line)
                    if match:
                        line = replace + "\n"
                        is_line_found = True
                if "</configuration>" not in line:
                    f_tmp.write(line)
            # In case, entry is not found, and need to be added
            if not is_line_found:
                f_tmp.write('\n' + replace + "\n")
            # We are moving the closing tag to the end of the file.
            # Previously, we were having an issue where new lines we wrote
            # were appearing after the closing tag, and thus being ignored.
            if add_config_close:
                f_tmp.write("</configuration>\n")
    shutil.move(file_tmp, file)
def rmdirs(path):
    """Recursively delete *path*.

    On Windows the extended-length '\\\\?\\' prefix is used so trees deeper
    than the 255-character path limit can still be removed.
    """
    target = (u"\\\\?\\" + path) if is_win() else path
    shutil.rmtree(target)
def make_cassandra_env(install_dir, node_path, update_conf=True):
    """Build the environment dict for launching Cassandra for one node.

    Copies the install's env script (cassandra-env.ps1 on modern Windows,
    cassandra.in.sh elsewhere) into the node directory, optionally rewrites
    its HOME/CONF settings, appends any cluster-wide cassandra.in.sh, and
    returns os.environ augmented with the Cassandra variables.

    :param install_dir: root of the Cassandra installation.
    :param node_path: per-node directory owned by ccm.
    :param update_conf: rewrite CASSANDRA_HOME/CASSANDRA_CONF in the copy.
    """
    if is_win() and get_version_from_build(node_path=node_path) >= '2.1':
        sh_file = os.path.join(CASSANDRA_CONF_DIR, CASSANDRA_WIN_ENV)
    else:
        sh_file = os.path.join(BIN_DIR, CASSANDRA_SH)
    orig = os.path.join(install_dir, sh_file)
    dst = os.path.join(node_path, sh_file)
    # On Windows keep an existing copy (it may carry local modifications).
    if not is_win() or not os.path.exists(dst):
        shutil.copy(orig, dst)
    if update_conf and not (is_win() and get_version_from_build(node_path=node_path) >= '2.1'):
        replacements = [
            ('CASSANDRA_HOME=', '\tCASSANDRA_HOME=%s' % install_dir),
            ('CASSANDRA_CONF=', '\tCASSANDRA_CONF=%s' % os.path.join(node_path, 'conf'))
        ]
        replaces_in_file(dst, replacements)
    # If a cluster-wide cassandra.in.sh file exists in the parent
    # directory, append it to the node specific one:
    cluster_sh_file = os.path.join(node_path, os.path.pardir, 'cassandra.in.sh')
    if os.path.exists(cluster_sh_file):
        append = open(cluster_sh_file).read()
        with open(dst, 'a') as f:
            f.write('\n\n### Start Cluster wide config ###\n')
            f.write(append)
            f.write('\n### End Cluster wide config ###\n\n')
    env = os.environ.copy()
    env['CASSANDRA_INCLUDE'] = os.path.join(dst)
    # Heap sizes are overridable via CCM_* environment variables.
    env['MAX_HEAP_SIZE'] = os.environ.get('CCM_MAX_HEAP_SIZE', '500M')
    env['HEAP_NEWSIZE'] = os.environ.get('CCM_HEAP_NEWSIZE', '50M')
    env['CASSANDRA_HOME'] = install_dir
    env['CASSANDRA_CONF'] = os.path.join(node_path, 'conf')
    return env
def make_dse_env(install_dir, node_path, node_ip):
    """Build the environment dict for launching a DSE node.

    Returns os.environ augmented with DSE/Cassandra/Hadoop/Spark/Hive/...
    locations rooted at *install_dir* (binaries) and *node_path*
    (per-node config, logs, and scratch space).

    :param node_ip: IP bound by this node; exported as SPARK_LOCAL_IP.
    """
    env = os.environ.copy()
    env['MAX_HEAP_SIZE'] = os.environ.get('CCM_MAX_HEAP_SIZE', '500M')
    env['HEAP_NEWSIZE'] = os.environ.get('CCM_HEAP_NEWSIZE', '50M')
    env['SPARK_WORKER_MEMORY'] = os.environ.get('SPARK_WORKER_MEMORY', '1024M')
    env['SPARK_WORKER_CORES'] = os.environ.get('SPARK_WORKER_CORES', '2')
    env['DSE_HOME'] = os.path.join(install_dir)
    env['DSE_CONF'] = os.path.join(node_path, 'resources', 'dse', 'conf')
    env['CASSANDRA_HOME'] = os.path.join(install_dir, 'resources', 'cassandra')
    env['CASSANDRA_CONF'] = os.path.join(node_path, 'resources', 'cassandra', 'conf')
    env['HIVE_CONF_DIR'] = os.path.join(node_path, 'resources', 'hive', 'conf')
    env['SQOOP_CONF_DIR'] = os.path.join(node_path, 'resources', 'sqoop', 'conf')
    env['TOMCAT_HOME'] = os.path.join(node_path, 'resources', 'tomcat')
    env['TOMCAT_CONF_DIR'] = os.path.join(node_path, 'resources', 'tomcat', 'conf')
    env['PIG_CONF_DIR'] = os.path.join(node_path, 'resources', 'pig', 'conf')
    env['MAHOUT_CONF_DIR'] = os.path.join(node_path, 'resources', 'mahout', 'conf')
    env['SPARK_CONF_DIR'] = os.path.join(node_path, 'resources', 'spark', 'conf')
    env['SHARK_CONF_DIR'] = os.path.join(node_path, 'resources', 'shark', 'conf')
    env['GREMLIN_CONSOLE_CONF_DIR'] = os.path.join(node_path, 'resources', 'graph', 'gremlin-console', 'conf')
    env['SPARK_WORKER_DIR'] = os.path.join(node_path, 'spark', 'worker')
    env['SPARK_LOCAL_DIRS'] = os.path.join(node_path, 'spark', 'rdd')
    env['SPARK_WORKER_LOG_DIR'] = os.path.join(node_path, 'logs', 'spark', 'worker')
    env['SPARK_MASTER_LOG_DIR'] = os.path.join(node_path, 'logs', 'spark', 'master')
    env['DSE_LOG_ROOT'] = os.path.join(node_path, 'logs', 'dse')
    env['CASSANDRA_LOG_DIR'] = os.path.join(node_path, 'logs')
    env['SPARK_LOCAL_IP'] = '' + node_ip
    # DSE 5.0+ splits hadoop1/hadoop2 client configuration.
    if get_version_from_build(node_path=node_path) >= '5.0':
        env['HADOOP1_CONF_DIR'] = os.path.join(node_path, 'resources', 'hadoop', 'conf')
        env['HADOOP2_CONF_DIR'] = os.path.join(node_path, 'resources', 'hadoop2-client', 'conf')
    else:
        env['HADOOP_CONF_DIR'] = os.path.join(node_path, 'resources', 'hadoop', 'conf')
    return env
def check_win_requirements():
    """Abort with sys.exit when the Windows environment cannot run ccm.

    Verifies that ant.bat is executable and that the Python interpreter's
    word size matches the OS architecture; no-op off Windows.
    """
    if is_win():
        # Make sure ant.bat is in the path and executable before continuing
        try:
            subprocess.Popen('ant.bat', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except Exception:
            sys.exit("ERROR! Could not find or execute ant.bat. Please fix this before attempting to run ccm on Windows.")
        # Confirm matching architectures
        # 32-bit python distributions will launch 32-bit cmd environments, losing PowerShell execution privileges on a 64-bit system
        if sys.maxsize <= 2 ** 32 and platform.machine().endswith('64'):
            sys.exit("ERROR! 64-bit os and 32-bit python distribution found. ccm requires matching architectures.")
def is_win():
    """True when running under native Windows or cygwin."""
    return sys.platform == "win32" or sys.platform == "cygwin"
def is_modern_windows_install(version):
    """
    The 2.1 release line was when Cassandra received beta windows support.
    Many features are gated based on that added compatibility.
    Handles floats, strings, and LooseVersions by first converting all three types to a string, then to a LooseVersion.
    """
    # Convert unconditionally (as the original did) so malformed versions
    # fail the same way regardless of platform.
    parsed = LooseVersion(str(version))
    return is_win() and parsed >= LooseVersion('2.1')
def is_ps_unrestricted():
    """Return True when PowerShell's execution policy is 'Unrestricted'.

    :raises CCMError: when called off Windows.

    Fix: when powershell could not be spawned, the old code printed an
    error and then fell through to ``p.communicate()`` with ``p`` unbound,
    dying with NameError; we now return False in that case.
    """
    if not is_win():
        raise CCMError("Can only check PS Execution Policy on Windows")
    try:
        p = subprocess.Popen(['powershell', 'Get-ExecutionPolicy'], stdout=subprocess.PIPE)
    # pylint: disable=E0602
    except WindowsError:
        print_("ERROR: Could not find powershell. Is it in your path?")
        return False
    return "Unrestricted" in str(p.communicate()[0])
def join_bin(root, dir, executable):
    """Build <root>/<dir>/<executable>, adding '.bat' on Windows."""
    return os.path.join(root, dir, platform_binary(executable))
def platform_binary(input):
    """Append '.bat' to *input* on Windows; return it unchanged elsewhere."""
    return input + ".bat" if is_win() else input
def platform_pager():
    """Pager command appropriate for the current platform."""
    return "more" if sys.platform == "win32" else "less"
def add_exec_permission(path, executable):
    """Make *executable* (inside directory *path*) executable under cygwin.

    No-op on every other platform.
    """
    # 1) os.chmod on Windows can't add executable permissions
    # 2) chmod from other folders doesn't work in cygwin, so we have to navigate the shell
    # to the folder with the executable with it and then chmod it from there
    if sys.platform == "cygwin":
        # NOTE(review): builds a shell string from the raw path -- breaks
        # (or worse) on paths containing spaces or shell metacharacters.
        cmd = "cd " + path + "; chmod u+x " + executable
        os.system(cmd)
def parse_path(executable):
    """Return the directory portion of *executable*'s path."""
    # win32 needs the separator escaped for use as a regex.
    sep = "\\\\" if sys.platform == "win32" else os.sep
    parts = re.split(sep, executable)
    return os.sep.join(parts[:-1])
def parse_bin(executable):
    """Return the final path component (the binary name) of *executable*."""
    return re.split(os.sep, executable)[-1]
def get_stress_bin(install_dir):
    """Locate the (cassandra-)stress binary under *install_dir* and ensure
    it is executable.

    :return: path of the first existing candidate location.
    :raises Exception: when no candidate exists (stress not compiled).
    """
    candidates = [
        os.path.join(install_dir, 'contrib', 'stress', 'bin', 'stress'),
        os.path.join(install_dir, 'tools', 'stress', 'bin', 'stress'),
        os.path.join(install_dir, 'tools', 'bin', 'stress'),
        os.path.join(install_dir, 'tools', 'bin', 'cassandra-stress'),
        os.path.join(install_dir, 'resources', 'cassandra', 'tools', 'bin', 'cassandra-stress')
    ]
    candidates = [platform_binary(s) for s in candidates]
    for candidate in candidates:
        if os.path.exists(candidate):
            stress = candidate
            break
    else:
        raise Exception("Cannot find stress binary (maybe it isn't compiled)")
    # make sure it's executable -> win32 doesn't care
    if sys.platform == "cygwin":
        # Yes, we're unwinding the path join from above.
        path = parse_path(stress)
        short_bin = parse_bin(stress)
        add_exec_permission(path, short_bin)
    elif not os.access(stress, os.X_OK):
        try:
            # try to add user execute permissions
            # Fix: the old code re-checked for cygwin here and called
            # add_exec_permission with an unbound `path`; that branch was
            # unreachable (cygwin is handled above) and would have raised
            # NameError, so it has been removed. Also narrowed the bare
            # except to OSError (what os.chmod/os.stat raise).
            os.chmod(stress, os.stat(stress).st_mode | stat.S_IXUSR)
        except OSError:
            raise Exception("stress binary is not executable: %s" % (stress,))
    return stress
def isDse(install_dir):
    """True when *install_dir* looks like a DSE install (has bin/dse).

    :raises ArgumentError: when install_dir is None or has no bin dir.
    """
    if install_dir is None:
        raise ArgumentError('Undefined installation directory')
    bin_dir = os.path.join(install_dir, BIN_DIR)
    if not os.path.exists(bin_dir):
        raise ArgumentError('Installation directory does not contain a bin directory: %s' % install_dir)
    return os.path.exists(os.path.join(bin_dir, 'dse'))
def isOpscenter(install_dir):
    """True when *install_dir* looks like an OpsCenter install.

    :raises ArgumentError: when install_dir is None or has no bin dir.
    """
    if install_dir is None:
        raise ArgumentError('Undefined installation directory')
    bin_dir = os.path.join(install_dir, BIN_DIR)
    if not os.path.exists(bin_dir):
        raise ArgumentError('Installation directory does not contain a bin directory')
    return os.path.exists(os.path.join(bin_dir, 'opscenter'))
def validate_install_dir(install_dir):
    """Validate that *install_dir* is a usable Cassandra/DSE/OpsCenter tree.

    Checks for the expected bin and conf directories (and cassandra.yaml,
    except for OpsCenter).

    :raises ArgumentError: when any required piece is missing.
    """
    if install_dir is None:
        raise ArgumentError('Undefined installation directory')
    # Windows requires absolute pathing on installation dir - abort if specified cygwin style
    if is_win():
        # NOTE(review): the message below is missing its closing ')' -- a
        # cosmetic typo in a user-facing string.
        if ':' not in install_dir:
            raise ArgumentError('%s does not appear to be a cassandra or dse installation directory. Please use absolute pathing (e.g. C:/cassandra.' % install_dir)
    bin_dir = os.path.join(install_dir, BIN_DIR)
    # Pick the conf dir layout matching the detected product.
    if isDse(install_dir):
        conf_dir = os.path.join(install_dir, DSE_CASSANDRA_CONF_DIR)
    elif isOpscenter(install_dir):
        conf_dir = os.path.join(install_dir, OPSCENTER_CONF_DIR)
    else:
        conf_dir = os.path.join(install_dir, CASSANDRA_CONF_DIR)
    cnd = os.path.exists(bin_dir)
    cnd = cnd and os.path.exists(conf_dir)
    if not isOpscenter(install_dir):
        cnd = cnd and os.path.exists(os.path.join(conf_dir, CASSANDRA_CONF))
    if not cnd:
        raise ArgumentError('%s does not appear to be a cassandra or dse installation directory' % install_dir)
def assert_socket_available(itf):
    """Verify the (address, port) pair itf can be bound locally.

    Returns True on success; raises UnavailableSocketError when the address
    cannot be resolved or the bind fails (e.g. a cluster already running).
    """
    host, port = itf
    info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
    if not info:
        raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
    family, socktype, _proto, _canonname, sockaddr = info[0]
    sock = socket.socket(family, socktype)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock.bind(sockaddr)
    except socket.error as msg:
        sock.close()
        raise UnavailableSocketError("Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (host, port, msg))
    sock.close()
    return True
def check_socket_listening(itf, timeout=60):
    """Poll itf (host, port) until something accepts a TCP connection.

    Retries every 200ms until roughly timeout seconds elapse. Returns True
    as soon as a connection succeeds, False once the deadline passes.
    """
    deadline = time.time() + timeout
    while time.time() <= deadline:
        sock = socket.socket()
        try:
            sock.connect(itf)
            sock.close()
            return True
        except socket.error:
            sock.close()
            # Try again in another 200ms
            time.sleep(.2)
    return False
def interface_is_ipv6(itf):
    """Return True when the (host, port) interface resolves to an IPv6 address."""
    info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
    if not info:
        raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
    family = info[0][0]
    return family == socket.AF_INET6
# note: does not handle collapsing hextets with leading zeros
def normalize_interface(itf):
    """Expand a '::'-compressed IPv6 address in itf to its full 8-hextet form.

    itf is an (address, port) tuple; falsy values and addresses without '::'
    pass through unchanged (as a tuple).
    """
    if not itf:
        return itf
    address, port = itf
    parts = address.partition('::')
    if '::' not in parts:
        return (address, port)
    # Number of zero hextets elided by '::' (a full address has 7 colons).
    missing_hextets = 9 - address.count(':')
    filler = '0'.join([':'] * missing_hextets)
    expanded = ''.join('0' if p == '' else filler if p == '::' else p
                       for p in parts)
    return (expanded, port)
def parse_settings(args, literal_yaml=False):
    """Turn a list of 'key: value' strings (or YAML documents) into a dict.

    Dotted keys (e.g. 'a.b.c') build nested dictionaries. Values are coerced
    to bool or int where possible; everything else stays a string. With
    literal_yaml each arg is parsed as YAML and merged into the result.
    """
    settings = {}
    if literal_yaml:
        for arg in args:
            settings = dict(settings, **yaml.load(arg))
        return settings
    for arg in args:
        # Allow for absolute path on Windows for value in key/value pair
        pieces = arg.split(':', 1) if is_win() else arg.split(':')
        if len(pieces) != 2:
            raise ArgumentError("A new setting should be of the form 'key: value', got " + arg)
        key = pieces[0].strip()
        val = pieces[1].strip()
        # ok, that's not super beautiful
        if val.lower() == "true":
            val = True
        elif val.lower() == "false":
            val = False
        else:
            try:
                val = int(val)
            except ValueError:
                pass
        path = key.split('.')
        if len(path) < 2:
            settings[key] = val
            continue
        # Dotted key: walk/create nested dicts, then set the leaf value.
        node = settings
        for part in path[:-1]:
            if part not in node:
                node[part] = {}
            node = node[part]
        node[path[-1]] = val
    return settings
#
# Copy file from source to destination with reasonable error handling
#
def copy_file(src_file, dst_file):
    """Copy src_file to dst_file (metadata included); exit(1) on failure."""
    try:
        shutil.copy2(src_file, dst_file)
    except (IOError, shutil.Error) as e:
        # Callers rely on the copy succeeding: report and abort.
        print_(str(e), file=sys.stderr)
        exit(1)
def copy_directory(src_dir, dst_dir):
    """Copy the plain files at the top level of src_dir into dst_dir.

    Subdirectories are skipped; no recursion is performed.
    """
    for entry in os.listdir(src_dir):
        src_path = os.path.join(src_dir, entry)
        if not os.path.isfile(src_path):
            continue
        shutil.copy(src_path, dst_dir)
def get_version_from_build(install_dir=None, node_path=None):
    """Determine the Cassandra/DSE version of an installation.

    Tries, in order: the 0.version.txt of a binary install, the version
    encoded in a dse*.jar name, then the base.version property in a source
    tree's build.xml.

    :param install_dir: installation to inspect; when None it is derived
        from node_path's cluster.conf.
    :param node_path: node directory used to locate the install dir.
    :return: LooseVersion of the detected version.
    :raises CCMError: when no version can be determined.
    """
    if install_dir is None and node_path is not None:
        install_dir = get_install_dir_from_cluster_conf(node_path)
    if install_dir is not None:
        # Binary cassandra installs will have a 0.version.txt file
        version_file = os.path.join(install_dir, '0.version.txt')
        if os.path.exists(version_file):
            with open(version_file) as f:
                return LooseVersion(f.read().strip())
        # For DSE look for a dse*.jar and extract the version number
        dse_version = get_dse_version(install_dir)
        if dse_version is not None:
            return LooseVersion(dse_version)
        # Source cassandra installs we can read from build.xml
        build = os.path.join(install_dir, 'build.xml')
        with open(build) as f:
            for line in f:
                # Raw string: '\.' is an invalid escape in a normal string.
                match = re.search(r'name="base\.version" value="([0-9.]+)[^"]*"', line)
                if match:
                    return LooseVersion(match.group(1))
    raise CCMError("Cannot find version")
def get_dse_version(install_dir):
    """Walk install_dir looking for a dse-<version>.jar or dse-core-<version>.jar.

    :param install_dir: root directory to search recursively.
    :return: the version string (e.g. '4.8.0'), or None when no jar matches.
    """
    # Raw string fixes the invalid '\.' escape; compiled once, not per file.
    jar_pattern = re.compile(r'^dse(?:-core)?-([0-9.]+)(?:-.*)?\.jar')
    for root, dirs, files in os.walk(install_dir):
        for filename in files:
            match = jar_pattern.search(filename)
            if match:
                return match.group(1)
    return None
def get_dse_cassandra_version(install_dir):
    """Extract the Cassandra version bundled inside a DSE installation.

    Scans resources/cassandra/lib for a cassandra-all-<version>.jar.

    :return: LooseVersion of the bundled Cassandra.
    :raises ArgumentError: when no matching jar is present.
    """
    clib = os.path.join(install_dir, 'resources', 'cassandra', 'lib')
    for filename in os.listdir(clib):
        if fnmatch.fnmatch(filename, 'cassandra-all*.jar'):
            # Raw string: avoids the invalid '\.' escape of the old version.
            match = re.search(r'cassandra-all-([0-9.]+)(?:-.*)?\.jar', filename)
            if match:
                return LooseVersion(match.group(1))
    raise ArgumentError("Unable to determine Cassandra version in: " + install_dir)
def get_install_dir_from_cluster_conf(node_path):
    """Read install_dir from the cluster.conf beside node_path's parent dir.

    Returns the configured path, or None when no install_dir line is found.
    """
    conf_file = os.path.join(os.path.dirname(node_path), "cluster.conf")
    with open(conf_file) as f:
        for line in f:
            m = re.search('install_dir: (.*?)$', line)
            if m:
                return m.group(1)
    return None
def is_dse_cluster(path):
    """Return whether the currently-active cluster under path is a DSE cluster.

    Reads the CURRENT file for the active cluster name, then checks that
    cluster's cluster.conf for a dse_dir entry. Now returns an explicit
    False (instead of the old implicit None) when the entry is missing.

    :param path: ccm configuration directory containing CURRENT.
    :return: True when cluster.conf defines dse_dir, False otherwise
        (including when the files cannot be read).
    """
    try:
        with open(os.path.join(path, 'CURRENT'), 'r') as f:
            name = f.readline().strip()
        cluster_path = os.path.join(path, name)
        filename = os.path.join(cluster_path, 'cluster.conf')
        with open(filename, 'r') as f:
            data = yaml.load(f)
        return 'dse_dir' in data
    except IOError:
        return False
def invalidate_cache():
    """Delete the downloaded-artifact repository under the default ccm path."""
    repository_dir = os.path.join(get_default_path(), 'repository')
    rmdirs(repository_dir)
def get_jdk_version():
    """Return the running JDK's version prefix (e.g. '1.8') as a string.

    Parses the first quoted "<major>.<minor>" token from `java -version`
    output (which java prints on stderr, hence the STDOUT redirect).
    """
    version = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
    # Raw string: '\d' and '\"' are invalid escapes in a normal string.
    ver_pattern = r'\"(\d+\.\d+).*\"'
    return re.search(ver_pattern, str(version)).groups()[0]
def assert_jdk_valid_for_cassandra_version(cassandra_version):
    """Exit with an error when Cassandra >= 3.0 runs on a pre-1.8 JDK.

    NOTE(review): both comparisons are plain string comparisons; they hold
    for '1.x'-style JDK version strings but would misorder e.g. '10.0' —
    confirm callers pass comparable values if newer JDKs matter.
    """
    # Hoisted: the old code spawned `java -version` twice (check + message).
    jdk_version = get_jdk_version()
    if cassandra_version >= '3.0' and jdk_version < '1.8':
        print_('ERROR: Cassandra 3.0+ requires Java >= 1.8, found Java {}'.format(jdk_version))
        exit(1)
def merge_configuration(original, changes, delete_empty=True):
    """Recursively merge changes into a deep copy of original.

    Nested dicts are merged key-by-key; scalar changes overwrite. When
    delete_empty is True, a None/empty-string change value removes the key
    from the result instead of storing it.

    :return: the merged configuration; original is never mutated.
    """
    if not isinstance(original, dict):
        # Non-dict originals are simply replaced by the changes.
        return changes
    merged = copy.deepcopy(original)
    for key, value in changes.items():
        is_empty = value is None or (isinstance(value, str) and len(value) == 0)
        if delete_empty and key in merged and merged[key] is not None and is_empty:
            # Empty change value: drop the key entirely.
            del merged[key]
        elif key in merged and isinstance(value, dict):
            # Key present in both and the change is a dict: merge recursively.
            merged[key] = merge_configuration(merged[key], value, delete_empty)
        else:
            merged[key] = value
    return merged
def is_intlike(obj):
    """Return True when obj can be converted with int(), False otherwise.

    Also catches ValueError: int('abc') raises ValueError, which the old
    version (catching only TypeError) let escape to the caller. The old
    unreachable trailing `raise RuntimeError` has been removed.
    """
    try:
        int(obj)
        return True
    except (TypeError, ValueError):
        return False
def wait_for_any_log(nodes, pattern, timeout, filename='system.log'):
    """
    Look for a pattern in the given log file of any node in a list.

    @param nodes The list of nodes whose logs to scan
    @param pattern The target pattern
    @param timeout Maximum number of one-second polling rounds; this caps
                   attempts rather than wall-clock time, assuming the
                   grepping itself is quick.
    @param filename Name of the log file to grep on each node
    @return The first node in whose log the pattern was found
    """
    for _attempt in range(timeout):
        for candidate in nodes:
            if candidate.grep_log(pattern, filename=filename):
                return candidate
        time.sleep(1)
    raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) +
                       " Unable to find: " + repr(pattern) + " in any node log within " + str(timeout) + "s")
def get_default_signals():
    """Map the signal aliases '1' and '9' to the platform's stop signals.

    On Windows both map to SIGTERM because the cluster is killed forcefully
    (TASKKILL) regardless of the requested signal.
    """
    if is_win():
        return {'1': signal.SIGTERM, '9': signal.SIGTERM}
    return {'1': signal.SIGHUP, '9': signal.SIGKILL}
| |
#!/usr/bin/env python
#
# Simple JSON editor that allows strings to be edited with embedded new lines
from __future__ import generators, unicode_literals, print_function
import sys
if sys.version_info[0] < 3:
str = unicode
import Tkinter as tkinter
import ttk
import tkSimpleDialog as simpledialog
import tkFileDialog as filedialog
else:
import tkinter
import tkinter.ttk as ttk
import tkinter.simpledialog as simpledialog
import tkinter.filedialog as filedialog
import json
import os
import GUIApplication
import webbrowser
def augment(text, augmentation):
    """Join text and augmentation with ' : ', or return augmentation alone
    when text is empty."""
    return text + ' : ' + augmentation if text else augmentation
class Model(object):
    """The JSON document being edited, plus the filename it maps to."""

    def __init__(self):
        # A fresh, unsaved document starts out empty.
        self.filename = "new.json"
        self.object = {}

    def load(self, sourcefile):
        """Populate the model from a path or an already-open file object."""
        if type(sourcefile) == str:
            sourcefile = open(sourcefile, 'r')
        self.filename = sourcefile.name
        text = sourcefile.read()
        sourcefile.close()
        self.loads(text)

    def loads(self, json_text):
        """Replace the document with the parsed JSON text."""
        self.object = json.loads(json_text)

    def save(self, filename=None):
        """Write the document as pretty-printed JSON and remember the name."""
        target = self.filename if filename is None else filename
        with open(target, 'w') as out:
            json.dump(self.object, out, sort_keys=True, indent=4,
                      separators=(',', ': '))
        self.filename = target
class ViewModel(object):
    """Glue between the JSONEdit view and the Model.

    Binds menu/context-menu entries and treeview events to command methods,
    mirrors the JSON document into the treeview (object_to_tree) and
    serializes the tree back into JSON text (tree_to_json). The Python type
    of every tree item is tracked in self.item_type so values can be cast
    and re-serialized correctly.
    """

    def __init__(self, view):
        """Bind all menu entries and widget events; load the file named on
        the command line when present, otherwise start a new document."""
        self.filetypes = (('JSON files', '*.json'),
                          ('All files', '*.*'))
        # Tree item currently shown in the editor pane (None = nothing).
        self.item = None
        self.view = view
        cm = self.view.context_menu
        self.bind_menu(view.menu_file, 'New', command=self.cmd_new)
        self.bind_menu(view.menu_file, 'Open ...', command=self.cmd_open)
        self.bind_menu(view.menu_file, 'Save', command=self.cmd_save)
        self.bind_menu(view.menu_file, 'Save As ...', command=self.cmd_save_as)
        self.bind_menu(view.menu_file, 'Quit', command=view.cmd_quit)
        self.bind_menu(view.menu_help, 'Documentation', command=self.cmd_documentation)
        self.bind_menu(cm, 'Add object', command=self.cmd_add_object)
        self.bind_menu(cm, 'Rename', command=self.cmd_rename)
        self.bind_menu(cm, 'Add array', command=self.cmd_add_array)
        self.bind_menu(cm, 'Move up', command=self.cmd_move_up)
        self.bind_menu(cm, 'Move down', command=self.cmd_move_down)
        self.bind_menu(cm, 'Add string', command=self.cmd_add_string)
        self.bind_menu(cm, 'Add boolean', command=self.cmd_add_boolean)
        self.bind_menu(cm, 'Add number', command=self.cmd_add_number)
        self.bind_menu(cm, 'Add null', command=self.cmd_add_null)
        self.bind_menu(cm, 'Delete', command=self.cmd_delete)
        self.bind_menu(cm, 'Unfold subtree', command=self.cmd_unfold_subtree)
        self.bind_menu(cm, 'Fold subtree', command=self.cmd_fold_subtree)
        self.view.treeview.bind('<<TreeviewSelect>>', self.on_treeview_select)
        # Button-2 and Button-3 cover middle/right click across platforms.
        self.view.treeview.bind('<Button-2>', self.on_show_menu)
        self.view.treeview.bind('<Button-3>', self.on_show_menu)
        self.view.treeview.bind('<Button-1>', self.on_treeview_button)
        self.view.parent_label.bind('<Button-1>', self.on_hide_menu)
        self.view.parent_name.bind('<Button-1>', self.on_hide_menu)
        self.view.item_text.bind_class('KeyUp', '<Key>', self.on_item_keyup)
        self.view.item_text.bind('<Button-1>', self.on_hide_menu)
        self.view.root.bind('<FocusOut>', self.on_hide_menu)
        if len(sys.argv) > 1:
            self.model = Model()
            self.model.load(str(sys.argv[1]))
            self.new_tree()
        else:
            self.cmd_new()
        # JSON escape sequences applied when serializing strings in
        # tree_to_json.
        self.transform = {
            '\b': "\\b",
            '\f': "\\f",
            '\n': "\\n",
            '\r': "\\r",
            '\t': "\\t",
            '\\': "\\\\",
            '"': "\\\""
        }

    def cmd_add_object(self):
        """Insert a new empty JSON object under the selected container."""
        self.new_node({})

    def cmd_rename(self):
        """Prompt for a new key name and apply it to the selected item."""
        name = simpledialog.askstring('Rename', 'Name:')
        if not name:
            return
        selected = self.selected()
        old_value = self.view.treeview.item(selected, 'values')
        # Keep the displayed value suffix; swap only the key prefix.
        new_text = name + self.view.treeview.item(selected, 'text')[len(old_value[0]):]
        self.view.treeview.item(selected, text=new_text, values=(name, old_value[1]))
        self.view.cmd_dirty()

    def cmd_add_array(self):
        """Insert a new empty JSON array under the selected container."""
        self.new_node([])

    def cmd_move_up(self):
        """Move the selected item one position up among its siblings."""
        self.move_selected(-1)

    def cmd_move_down(self):
        """Move the selected item one position down among its siblings."""
        self.move_selected(1)

    def cmd_add_string(self):
        """Insert a new empty string value under the selected container."""
        self.new_node('')

    def cmd_add_boolean(self):
        """Insert a new boolean value (True) under the selected container."""
        self.new_node(True)

    def cmd_add_number(self):
        """Insert a new number value (0.0) under the selected container."""
        self.new_node(0.0)

    def cmd_add_null(self):
        """Insert a new null value under the selected container."""
        self.new_node(None)

    def cmd_delete(self):
        """Delete the selected item (the root item cannot be deleted)."""
        selected = self.selected()
        parent = self.view.treeview.parent(selected)
        if parent == '':
            return
        del self.item_type[selected]
        self.view.treeview.delete(selected)
        self.view.cmd_dirty()

    def _unfold_subtree(self, item, unfold=True):
        """Recursively open (or close, when unfold=False) item's subtree."""
        self.view.treeview.item(item, open=unfold)
        for child in self.view.treeview.get_children(item):
            self._unfold_subtree(child, unfold)

    def cmd_unfold_subtree(self):
        """Open the selected item and all of its descendants."""
        selected = self.selected()
        self._unfold_subtree(selected)

    def cmd_fold_subtree(self):
        """Close the selected item and all of its descendants."""
        selected = self.selected()
        self._unfold_subtree(selected, False)

    def cmd_new(self):
        """Discard the current document and start a new empty one."""
        self.model = Model()
        self.view.cmd_dirty()
        self.new_tree()

    def cmd_open(self):
        """Prompt for a JSON file and load it into a fresh model and tree."""
        file = filedialog.askopenfile(
            filetypes=self.filetypes,
            title='Open JSON File',
            parent=self.view.root)
        if file:
            self.model = Model()
            self.model.load(file)
            self.view.cmd_clean()
            self.new_tree()

    def cmd_save(self):
        """Serialize the tree back to JSON and save to the current file."""
        self.model.loads(self.tree_to_json())
        self.model.save()
        self.view.cmd_clean()
        self.update_title()

    def cmd_save_as(self):
        """Serialize the tree and save to a filename chosen by the user."""
        filename = filedialog.asksaveasfilename(
            filetypes=self.filetypes,
            title='Save JSON As',
            parent=self.view.root)
        if filename:
            self.model.loads(self.tree_to_json())
            self.model.save(filename)
            self.view.cmd_clean()
            self.update_title()

    def cmd_documentation(self):
        """Open the online documentation page in the default browser."""
        webbrowser.open_new('https://www.intrepiduniverse.com/projects/jsonEditor.html')

    def on_item_keyup(self, event):
        """Mirror edits in the text pane back into the selected tree item,
        casting the text according to the item's recorded type."""
        if not self.item is None:
            # Drop the trailing newline the Text widget always appends.
            text = self.view.item_text.get(1.0, tkinter.END)[:-1]
            # NOTE: 'type' shadows the builtin within this method.
            type = self.item_type[self.item]
            if type == bool:
                cast = lambda x : x.lower().strip() in ['true', '1', 't', 'y', 'yes']
            elif type in (int, float):
                def to_number(text):
                    # Unparseable numbers fall back to 0 while typing.
                    try:
                        return type(text)
                    except ValueError:
                        return 0
                cast = to_number
            else:
                cast = lambda x : str(x)
            value = str(cast(text))
            values = self.view.treeview.item(self.item, 'values')
            self.view.treeview.item(self.item, text=augment(values[0], value), values=(values[0], value))
            self.view.cmd_dirty()

    def on_treeview_button(self, event):
        """Hide the context menu; block drags on column separators."""
        self.on_hide_menu(event)
        if self.view.treeview.identify_region(event.x, event.y) == "separator":
            return "break"

    def on_treeview_select(self, event):
        """Load the newly selected item into the editor pane."""
        selected = self.selected()
        if selected:
            self.edit(selected)

    def on_show_menu(self, event):
        """Select the item under the cursor and pop up the context menu."""
        if self.view.root.focus_get() is None:
            return
        item = self.event_to_item(event)
        self.menu_for_item(item)
        self.view.treeview.selection_set(item)
        self.view.context_menu.post(event.x_root, event.y_root)

    def on_hide_menu(self, event):
        """Dismiss the context menu."""
        self.view.context_menu.unpost()

    def bind_menu(self, menu, entry, **kwargs):
        """Configure the menu entry with the given label (e.g. its command)."""
        index = menu.index(entry)
        menu.entryconfig(index, **kwargs)

    def object_to_tree(self, obj, container_id='', key_id=None):
        """Populate the treeview from a Python object (recursively).

        With the default container_id the whole tree is rebuilt under a new
        'root' item. container_id is only compared against '' here; the
        recursive calls pass the parent's item dict, which is never ''.
        """
        if container_id == '':
            self.view.treeview.delete(*self.view.treeview.get_children())
            key_id = self.view.treeview.insert(container_id, 'end', text='root')
            self.item_type = {'': 'root', key_id: dict}
        key_item = self.view.treeview.item(key_id)
        key_text = key_item['text']
        if isinstance(obj, dict):
            self.view.treeview.item(key_id, text=augment(key_text, '{ ... }'), values=(key_text, dict))
            self.item_type[key_id] = dict
            for key in sorted(obj):
                inner_key_id = self.view.treeview.insert(key_id, 'end', text=key)
                self.object_to_tree(obj[key], key_item, inner_key_id)
        elif isinstance(obj, list):
            self.view.treeview.item(key_id, text=augment(key_text, '[ ,,, ]'), values=(key_text, list))
            self.item_type[key_id] = list
            # List elements have no key, hence the empty text.
            for item in obj:
                inner_key_id = self.view.treeview.insert(key_id, 'end', text='')
                self.object_to_tree(item, key_item, inner_key_id)
        else:
            if obj is None:
                value_text = '<null>'
            elif type(obj) in (bool, int, float):
                value_text = str(obj)
            else:
                # Anything else is coerced to str, so item_type records str.
                obj = str(obj)
                value_text = str(obj)
            self.view.treeview.item(key_id, text=augment(key_text, value_text), values=(key_text, value_text))
            self.item_type[key_id] = type(obj)
            # Nulls get the sentinel 'null' instead of NoneType.
            if obj is None:
                self.item_type[key_id] = 'null'

    def tree_to_json(self, node_id=''):
        """Serialize the subtree rooted at node_id back into JSON text.

        With the default node_id serialization starts at the root item.
        """
        if not node_id:
            node_id = self.view.treeview.get_children()[0]
        # NOTE: 'type' shadows the builtin within this method.
        type = self.item_type[node_id]
        tree = self.view.treeview
        if type == dict:
            inner = ''
            for key_id in tree.get_children(node_id):
                if inner:
                    inner += ', '
                value = str(self.tree_to_json(key_id))
                inner += '"' + str(tree.item(key_id)['values'][0]) + '": ' + value
            return '{' + inner + '}'
        elif type == list:
            inner = ''
            for key_id in tree.get_children(node_id):
                if inner:
                    inner += ', '
                inner += str(self.tree_to_json(key_id))
            return '[' + inner + ']'
        elif type in (int, float):
            return tree.item(node_id)['values'][1]
        elif type == bool:
            # JSON booleans are lowercase ('true'/'false').
            return tree.item(node_id)['values'][1].lower()
        elif type == 'null':
            return 'null'
        else:
            # String: apply JSON escapes; other control chars are dropped.
            string = str()
            for c in str(tree.item(node_id)['values'][1]):
                transformed = self.transform.get(c)
                if transformed:
                    string += transformed
                elif ord(c) < 32:
                    pass
                else:
                    string += c
            return '"' + string + '"'

    def new_tree(self):
        """Rebuild the treeview from the model and reset the editor pane."""
        self.object_to_tree(self.model.object)
        self.item = None
        self.set_parent_name('')
        self.view.item_text.delete(1.0, tkinter.END)
        self.update_title()

    def new_node(self, value):
        """Insert value as a child of the selected dict or list item.

        NOTE(review): simpledialog.askstring returns None on cancel, and
        str(None) is the truthy 'None', so the `if not key` guard never
        triggers on cancel — confirm intended. key_id is also unbound when
        the selected container is neither dict nor list.
        """
        container_id = self.selected()
        if self.item_type[container_id] == dict:
            key = str(simpledialog.askstring('Key name', 'Name:'))
            if not key:
                return
            # Reject duplicate keys within the same object.
            for child_id in self.view.treeview.get_children(container_id):
                if key == self.view.treeview.item(child_id, 'values')[0]:
                    raise Exception('Key already exists : ' + key)
            key_id = self.view.treeview.insert(container_id, 'end', text=key)
            self.object_to_tree(value, container_id, key_id)
        elif self.item_type[container_id] == list:
            key_id = self.view.treeview.insert(container_id, 'end', text='')
            self.object_to_tree(value, container_id, key_id)
        self.view.treeview.selection_set(key_id)
        self.view.treeview.see(key_id)
        self.view.cmd_dirty()

    def edit(self, item_id):
        """Show a leaf item's value in the editor pane (containers and
        nulls are not editable)."""
        type = self.item_type[item_id]
        if type not in (dict, list, 'null'):
            values = self.view.treeview.item(item_id, 'values')
            value_text = self.view.treeview.item(item_id, 'text').replace(values[0] + ' : ', '')
            self.set_parent_name(str(type) + ' ' + values[0])
            self.view.item_text.delete(1.0, tkinter.END)
            self.view.item_text.insert(1.0, value_text)
            self.item = item_id

    def set_parent_name(self, text):
        """Show text in the read-only key-name entry above the editor."""
        self.view.parent_name.configure(state=tkinter.NORMAL)
        self.view.parent_name.delete(0, tkinter.END)
        self.view.parent_name.insert(0, text)
        self.view.parent_name.configure(state=tkinter.DISABLED)

    def update_title(self):
        """Set the window title from the (possibly truncated) filename."""
        filename = self.model.filename[-50:]
        if filename != self.model.filename:
            filename = '... ' + filename
        self.view.title("JSONEdit " + filename)

    def menu_for_item(self, item_id):
        """Enable/disable context-menu entries for the given tree item.

        Each row lists a state code per menu index (separators included):
        0=disabled, 1=enabled, 2=separator position (matches no branch
        below, so the entry is left untouched), 3=enabled only when the
        parent is a list, 4=only when the parent is a dict, 5=enabled when
        the item has children and is folded, 6=when it has children and is
        unfolded.
        """
        type = self.item_type[item_id]
        context_matrix = {
            'root' : [0,0,0,0,0,0,0,0,0,2,0,2,5,6],
            dict : [1,4,1,3,3,1,1,1,1,2,1,2,5,6],
            list : [1,4,1,3,3,1,1,1,1,2,1,2,5,6],
            str : [0,4,0,3,3,0,0,0,0,2,1,2,0,6],
            int : [0,4,0,3,3,0,0,0,0,2,1,2,0,0],
            float : [0,4,0,3,3,0,0,0,0,2,1,2,0,0],
            bool : [0,4,0,3,3,0,0,0,0,2,1,2,0,0],
            'null' : [0,4,0,3,3,0,0,0,0,2,1,2,0,0],
        }
        menu = self.view.context_menu
        for i in range(len(context_matrix[type])):
            state = context_matrix[type][i]
            parent = self.view.treeview.parent(item_id)
            parent_type = self.item_type[parent]
            if state == 0:
                menu.entryconfigure(i, state=tkinter.DISABLED)
            elif state == 1:
                menu.entryconfigure(i, state=tkinter.NORMAL)
            elif state == 3:
                if parent_type == list:
                    menu.entryconfigure(i, state=tkinter.NORMAL)
                else:
                    menu.entryconfigure(i, state=tkinter.DISABLED)
            elif state == 4:
                if parent_type == dict:
                    menu.entryconfigure(i, state=tkinter.NORMAL)
                else:
                    menu.entryconfigure(i, state=tkinter.DISABLED)
            elif state == 5:
                if len(self.view.treeview.get_children(item_id)) > 0 and self.view.treeview.item(item_id,'open') == False:
                    menu.entryconfigure(i, state=tkinter.NORMAL)
                else:
                    menu.entryconfigure(i, state=tkinter.DISABLED)
            elif state == 6:
                if len(self.view.treeview.get_children(item_id)) > 0 and self.view.treeview.item(item_id,'open'):
                    menu.entryconfigure(i, state=tkinter.NORMAL)
                else:
                    menu.entryconfigure(i, state=tkinter.DISABLED)

    def move_selected(self, offset):
        """Move the selected item by offset within its parent's children."""
        selected = self.selected()
        parent = self.view.treeview.parent(selected)
        index = self.view.treeview.index(selected)
        index = index + offset
        self.view.treeview.move(selected, parent, index)
        self.view.cmd_dirty()

    def selected(self):
        """Return the single selected tree item id, or None."""
        selection = self.view.treeview.selection()
        if len(selection) == 1:
            return selection[0]
        return None

    def event_to_item(self, event):
        """Return the tree item id under the mouse event's coordinates."""
        return self.view.treeview.identify('item', event.x, event.y)
class JSONEdit(GUIApplication.GUIApplication):
    """Main application window: menu bar, context menu, a document-tree pane
    and an editor pane for the selected item's value."""

    def __init__(self, root):
        """Build the widgets, apply styling and attach the ViewModel."""
        super(JSONEdit, self).__init__(root, 'JSONEdit')
        self.create_widgets()
        self.apply_style(root, 'white')
        self.viewmodel = ViewModel(self)

    def create_menu(self):
        """Create the File and Help menus; commands are bound later by
        ViewModel.bind_menu, so entries are created without callbacks."""
        self.menu = tkinter.Menu(self.root)
        self.menu_file = tkinter.Menu(self.menu, tearoff=False)
        self.menu_file.add_command(label='New')
        self.menu_file.add_command(label='Open ...')
        self.menu_file.add_separator()
        self.menu_file.add_command(label='Save')
        self.menu_file.add_command(label='Save As ...')
        self.menu_file.add_separator()
        self.menu_file.add_command(label='Quit')
        self.menu_help = tkinter.Menu(self.menu, tearoff=False)
        self.menu_help.add_command(label='Version 1.0.0', state=tkinter.DISABLED)
        self.menu_help.add_command(label='Documentation')
        self.root.config(menu=self.menu)
        self.menu.add_cascade(label='File', menu=self.menu_file)
        self.menu.add_cascade(label='Help', menu=self.menu_help)

    def create_context_menu(self):
        """Create the right-click menu for tree items; the entry order must
        match the state rows in ViewModel.menu_for_item."""
        menu = tkinter.Menu(self.root, tearoff=False)
        menu.add_command(label='Add object')
        menu.add_command(label='Rename')
        menu.add_command(label='Add array')
        menu.add_command(label='Move up')
        menu.add_command(label='Move down')
        menu.add_command(label='Add string')
        menu.add_command(label='Add boolean')
        menu.add_command(label='Add number')
        menu.add_command(label='Add null')
        menu.add_separator()
        menu.add_command(label='Delete')
        menu.add_separator()
        menu.add_command(label='Unfold subtree')
        menu.add_command(label='Fold subtree')
        self.context_menu = menu

    def create_widgets(self):
        """Lay out the paned window: scrolled treeview on the left, key
        entry plus scrolled text editor on the right."""
        self.create_menu()
        self.create_context_menu()
        self.pane = tkinter.PanedWindow(self.root, orient=tkinter.HORIZONTAL, sashwidth=4, showhandle=True)
        self.pane.pack(fill=tkinter.BOTH, expand=True)
        self.treeview, self.treeview_scrolled = self.create_scrolled(
            self.root, ttk.Treeview, True, True)
        self.pane.add(self.treeview_scrolled)
        self.treeview.heading('#0', text='Document Tree')
        self.object_frame = tkinter.Frame(self.pane, bg='lightgrey')
        self.object_frame.grid()
        self.pane.add(self.object_frame)
        self.parent_label = tkinter.Label(
            self.object_frame, text='Key :', foreground='blue', anchor=tkinter.W)
        self.parent_label.grid(column=0, row=0)
        # Read-only entry showing the selected item's key name.
        self.parent_name = tkinter.Entry(self.object_frame)
        self.parent_name.grid(column=1, row=0, sticky=tkinter.EW)
        self.parent_name.config(state=tkinter.DISABLED)
        self.item_text, self.item_text_scrolled = self.create_scrolled(
            self.object_frame, tkinter.Text, True, True)
        self.extend_bindtags(self.item_text)
        self.item_text_scrolled.grid(
            column=0, row=1, columnspan=2, sticky=tkinter.NSEW)
        self.grid_weights(self.object_frame, [0, 1], [0, 1])
        self.grid_weights(self.root, [0, 1], [1])
# Script entry point: delegate startup to the shared GUIApplication bootstrap.
if __name__ == '__main__':
    GUIApplication.main(JSONEdit)
| |
# ------------------------------------------------------------------------
#
# Copyright 2005-2015 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# ------------------------------------------------------------------------
import subprocess
import os
import socket
from plugins.contracts import ICartridgeAgentPlugin
from modules.util.log import LogFactory
from entity import *
from config import Config
class WSO2StartupHandler(ICartridgeAgentPlugin):
    """
    Configures and starts configurator, carbon server
    """
    log = LogFactory().get_log(__name__)

    # class constants
    # Keys looked up in the payload 'values' dict passed to run_plugin().
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    # Service-type suffixes distinguishing worker/manager/management roles.
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"
    # Names of the port mappings parsed out of PORT_MAPPINGS.
    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PORT_MAPPING_PT_HTTP_TRANSPORT = "pt-http"
    CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT = "pt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    # Default clustering membership scheme when none is configured.
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PRODUCT = "CEP"
    # Service names whose clusters are looked up in the topology.
    SERVICES = ["wso2cep-400-worker", "wso2cep-400-presenter"]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_PT_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'
    ENV_CONFIG_PARAM_HOST_IP = 'CONFIG_PARAM_HOST_IP'

    # clustering related environment variables read from payload_parameters
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
    def run_plugin(self, values):
        """Configure and start the WSO2 CEP server for this cartridge member.

        Reads port mappings, clustering settings and the PCA topology from
        the payload `values`, exports the derived CONFIG_PARAM_* environment
        variables consumed by the configurator templates, runs the
        configurator, then starts the carbon server (as a worker node when
        the service type ends with 'worker').
        """
        # read from 'values'
        port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[self.CONST_APPLICATION_ID]
        mb_ip = values[self.CONST_MB_IP]
        service_type = values[self.CONST_SERVICE_NAME]
        my_cluster_id = values[self.CONST_CLUSTER_ID]
        clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
        # read topology from PCA TopologyContext
        topology = TopologyContext.topology
        # log above values
        WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2StartupHandler.log.info("Application ID: %s" % app_id)
        WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2StartupHandler.log.info("Service Name: %s" % service_type)
        WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
        WSO2StartupHandler.log.info("Clustering: %s" % clustering)
        WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)
        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
                                                   self.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
                                                    self.CONST_PROTOCOL_HTTPS)
        pt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_PT_HTTP_TRANSPORT,
                                                  self.CONST_PROTOCOL_HTTP)
        pt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT,
                                                   self.CONST_PROTOCOL_HTTPS)
        self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT, pt_http_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT, pt_https_proxy_port)
        # if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, set the private-paas membership scheme as default one
        if clustering == 'true' and membership_scheme is None:
            membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
            self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)
        # check if clustering is enabled
        if clustering == 'true':
            # set hostnames
            self.export_host_names(topology, app_id)
            # check if membership scheme is set to 'private-paas'
            if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
                # export Cluster_Ids as Env. variables - used in axis2.xml
                self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
                # export mb_ip as Env.variable - used in jndi.properties
                self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)
        # set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        private_ip = self.get_member_private_ip(topology, Config.service_name, Config.cluster_id, Config.member_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, private_ip)
        # set sub-domain ('mgt' for managers, 'worker' for workers)
        sub_domain = None
        if service_type.endswith(self.CONST_MANAGER):
            sub_domain = self.CONST_MGT
        elif service_type.endswith(self.CONST_WORKER):
            sub_domain = self.CONST_WORKER
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_IP, private_ip)
        # if sub_domain is not None:
        self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)
        # start configurator
        WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        # communicate() blocks until the configurator finishes.
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)
        # start server
        WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
        if service_type.endswith(self.CONST_WORKER):
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
        else:
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)
def get_member_private_ip(self, topology, service_name, cluster_id, member_id):
"""
return member private ip
:return: local_ip
"""
service = topology.get_service(service_name)
if service is None:
raise Exception("Service not found in topology [service] %s" % service_name)
cluster = service.get_cluster(cluster_id)
if cluster is None:
raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
member = cluster.get_member(member_id)
if member is None:
raise Exception("Member id not found in topology [member] %s" % member_id)
if member.member_default_private_ip and not member.member_default_private_ip.isspace():
WSO2StartupHandler.log.info(
"Member private ip read from the topology: %s" % member.member_default_private_ip)
return member.member_default_private_ip
else:
local_ip = socket.gethostbyname(socket.gethostname())
WSO2StartupHandler.log.info(
"Member private ip not found in the topology. Reading from the socket interface: %s" % local_ip)
return local_ip
def export_host_names(self, topology, app_id):
"""
Set hostnames of services read from topology for worker manager instances
exports MgtHostName and HostName
:return: void
"""
mgt_host_name = None
host_name = None
for service_name in self.SERVICES:
if service_name.endswith(self.CONST_MANAGER):
mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
if mgr_cluster is not None:
mgt_host_name = mgr_cluster.hostnames[0]
elif service_name.endswith(self.CONST_WORKER):
worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
if worker_cluster is not None:
host_name = worker_cluster.hostnames[0]
self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
"""
Set clusterIds of services read from topology for worker manager instances
else use own clusterId
:return: void
"""
cluster_ids = []
cluster_id_of_service = None
if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
for service_name in self.SERVICES:
cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
if cluster_of_service is not None:
cluster_id_of_service = cluster_of_service.cluster_id
if cluster_id_of_service is not None:
cluster_ids.append(cluster_id_of_service)
else:
cluster_ids.append(my_cluster_id)
# If clusterIds are available, export them as environment variables
if cluster_ids:
cluster_ids_string = ",".join(cluster_ids)
self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)
@staticmethod
def get_cluster_of_service(topology, service_name, app_id):
cluster_obj = None
clusters = None
if topology is not None:
if topology.service_exists(service_name):
service = topology.get_service(service_name)
if service is not None:
clusters = service.get_clusters()
else:
WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
else:
WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
else:
WSO2StartupHandler.log.warn("Topology is empty.")
if clusters is not None:
for cluster in clusters:
if cluster.app_id == app_id:
cluster_obj = cluster
return cluster_obj
@staticmethod
def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
"""
returns proxy port of the requested port mapping
:return: void
"""
# port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
# NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
# NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
# NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort
if port_mappings_str is not None:
port_mappings_array = port_mappings_str.split(";")
if port_mappings_array:
for port_mapping in port_mappings_array:
# WSO2StartupHandler.log.debug("port_mapping: %s" % port_mapping)
name_value_array = port_mapping.split("|")
name = name_value_array[0].split(":")[1]
protocol = name_value_array[1].split(":")[1]
proxy_port = name_value_array[3].split(":")[1]
# If PROXY_PORT is not set, set PORT as the proxy port (ex:Kubernetes),
if proxy_port == '0':
proxy_port = name_value_array[2].split(":")[1]
if name == port_mapping_name and protocol == port_mapping_protocol:
return proxy_port
@staticmethod
def export_env_var(variable, value):
"""
exports key value pairs as env. variables
:return: void
"""
if value is not None:
os.environ[variable] = value
WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
else:
WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
| |
"""
Support for interfacing with the XBMC/Kodi JSON-RPC API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.kodi/
"""
import asyncio
from collections import OrderedDict
from functools import wraps
import logging
import urllib
import re
import os
import aiohttp
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP,
SUPPORT_TURN_OFF, SUPPORT_PLAY, SUPPORT_VOLUME_STEP, SUPPORT_SHUFFLE_SET,
MediaPlayerDevice, PLATFORM_SCHEMA, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW,
MEDIA_TYPE_VIDEO, MEDIA_TYPE_PLAYLIST, MEDIA_PLAYER_SCHEMA, DOMAIN,
SUPPORT_TURN_ON)
from homeassistant.const import (
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME,
CONF_PORT, CONF_PROXY_SSL, CONF_USERNAME, CONF_PASSWORD,
CONF_TIMEOUT, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import script, config_validation as cv
from homeassistant.helpers.template import Template
from homeassistant.util.yaml import dump
REQUIREMENTS = ['jsonrpc-async==0.6', 'jsonrpc-websocket==0.5']
_LOGGER = logging.getLogger(__name__)
# Event fired on the bus with the result of every kodi_call_method call.
EVENT_KODI_CALL_METHOD_RESULT = 'kodi_call_method_result'
# Platform configuration keys.
CONF_TCP_PORT = 'tcp_port'
CONF_TURN_ON_ACTION = 'turn_on_action'
CONF_TURN_OFF_ACTION = 'turn_off_action'
CONF_ENABLE_WEBSOCKET = 'enable_websocket'
# Configuration defaults.
DEFAULT_NAME = 'Kodi'
DEFAULT_PORT = 8080
DEFAULT_TCP_PORT = 9090
DEFAULT_TIMEOUT = 5
DEFAULT_PROXY_SSL = False
DEFAULT_ENABLE_WEBSOCKET = True
# Legacy string-valued turn_off actions mapped to the JSON-RPC method that
# implements them (converted by _check_deprecated_turn_off).
DEPRECATED_TURN_OFF_ACTIONS = {
    None: None,
    'quit': 'Application.Quit',
    'hibernate': 'System.Hibernate',
    'suspend': 'System.Suspend',
    'reboot': 'System.Reboot',
    'shutdown': 'System.Shutdown'
}
# https://github.com/xbmc/xbmc/blob/master/xbmc/media/MediaType.h
MEDIA_TYPES = {
    'music': MEDIA_TYPE_MUSIC,
    'artist': MEDIA_TYPE_MUSIC,
    'album': MEDIA_TYPE_MUSIC,
    'song': MEDIA_TYPE_MUSIC,
    'video': MEDIA_TYPE_VIDEO,
    'set': MEDIA_TYPE_PLAYLIST,
    'musicvideo': MEDIA_TYPE_VIDEO,
    'movie': MEDIA_TYPE_VIDEO,
    'tvshow': MEDIA_TYPE_TVSHOW,
    'season': MEDIA_TYPE_TVSHOW,
    'episode': MEDIA_TYPE_TVSHOW,
}
# Features every Kodi device supports; TURN_ON/TURN_OFF are added
# per-device when the matching scripts are configured (supported_features).
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
    SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_SHUFFLE_SET | \
    SUPPORT_PLAY | SUPPORT_VOLUME_STEP
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port,
    vol.Optional(CONF_PROXY_SSL, default=DEFAULT_PROXY_SSL): cv.boolean,
    vol.Optional(CONF_TURN_ON_ACTION, default=None): cv.SCRIPT_SCHEMA,
    vol.Optional(CONF_TURN_OFF_ACTION):
        vol.Any(cv.SCRIPT_SCHEMA, vol.In(DEPRECATED_TURN_OFF_ACTIONS)),
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    vol.Inclusive(CONF_USERNAME, 'auth'): cv.string,
    vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string,
    vol.Optional(CONF_ENABLE_WEBSOCKET, default=DEFAULT_ENABLE_WEBSOCKET):
        cv.boolean,
})
# Custom services registered by this platform.
SERVICE_ADD_MEDIA = 'kodi_add_to_playlist'
SERVICE_CALL_METHOD = 'kodi_call_method'
DATA_KODI = 'kodi'
ATTR_MEDIA_TYPE = 'media_type'
ATTR_MEDIA_NAME = 'media_name'
ATTR_MEDIA_ARTIST_NAME = 'artist_name'
ATTR_MEDIA_ID = 'media_id'
ATTR_METHOD = 'method'
MEDIA_PLAYER_ADD_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_MEDIA_TYPE): cv.string,
    vol.Optional(ATTR_MEDIA_ID): cv.string,
    vol.Optional(ATTR_MEDIA_NAME): cv.string,
    vol.Optional(ATTR_MEDIA_ARTIST_NAME): cv.string,
})
MEDIA_PLAYER_CALL_METHOD_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
    vol.Required(ATTR_METHOD): cv.string,
}, extra=vol.ALLOW_EXTRA)
# Maps each custom service to the KodiDevice coroutine implementing it.
SERVICE_TO_METHOD = {
    SERVICE_ADD_MEDIA: {
        'method': 'async_add_media_to_playlist',
        'schema': MEDIA_PLAYER_ADD_MEDIA_SCHEMA},
    SERVICE_CALL_METHOD: {
        'method': 'async_call_method',
        'schema': MEDIA_PLAYER_CALL_METHOD_SCHEMA},
}
def _check_deprecated_turn_off(hass, turn_off_action):
    """Create an equivalent script for old turn off actions."""
    if not isinstance(turn_off_action, str):
        # Already a script sequence (or None): nothing to convert.
        return turn_off_action
    method = DEPRECATED_TURN_OFF_ACTIONS[turn_off_action]
    data_template = OrderedDict(
        [('entity_id', '{{ entity_id }}'),
         ('method', method)])
    new_config = OrderedDict(
        [('service', '{}.{}'.format(DOMAIN, SERVICE_CALL_METHOD)),
         ('data_template', data_template)])
    # Show the user the YAML they should migrate to.
    example_conf = dump(OrderedDict(
        [(CONF_TURN_OFF_ACTION, new_config)]))
    _LOGGER.warning(
        "The '%s' action for turn off Kodi is deprecated and "
        "will cease to function in a future release. You need to "
        "change it for a generic Home Assistant script sequence, "
        "which is, for this turn_off action, like this:\n%s",
        turn_off_action, example_conf)
    new_config['data_template'] = OrderedDict(
        [(key, Template(value, hass))
         for key, value in new_config['data_template'].items()])
    return [new_config]
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    """Set up the Kodi platform.

    Creates one KodiDevice entity from the validated config and, on the
    first entity only, registers the platform-wide services
    (kodi_add_to_playlist / kodi_call_method).
    """
    if DATA_KODI not in hass.data:
        hass.data[DATA_KODI] = []
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    tcp_port = config.get(CONF_TCP_PORT)
    encryption = config.get(CONF_PROXY_SSL)
    websocket = config.get(CONF_ENABLE_WEBSOCKET)
    entity = KodiDevice(
        hass,
        name=name,
        host=host, port=port, tcp_port=tcp_port, encryption=encryption,
        username=config.get(CONF_USERNAME),
        password=config.get(CONF_PASSWORD),
        turn_on_action=config.get(CONF_TURN_ON_ACTION),
        turn_off_action=config.get(CONF_TURN_OFF_ACTION),
        timeout=config.get(CONF_TIMEOUT), websocket=websocket)
    hass.data[DATA_KODI].append(entity)
    async_add_devices([entity], update_before_add=True)
    @asyncio.coroutine
    def async_service_handler(service):
        """Map services to methods on MediaPlayerDevice."""
        method = SERVICE_TO_METHOD.get(service.service)
        if not method:
            return
        # Forward every service-data field except the entity selector.
        params = {key: value for key, value in service.data.items()
                  if key != 'entity_id'}
        entity_ids = service.data.get('entity_id')
        if entity_ids:
            target_players = [player for player in hass.data[DATA_KODI]
                              if player.entity_id in entity_ids]
        else:
            target_players = hass.data[DATA_KODI]
        update_tasks = []
        for player in target_players:
            yield from getattr(player, method['method'])(**params)
        # Push a state refresh for polled players after the calls complete.
        for player in target_players:
            if player.should_poll:
                update_coro = player.async_update_ha_state(True)
                update_tasks.append(update_coro)
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)
    # Services are global: register them only once, on the first entity.
    if hass.services.has_service(DOMAIN, SERVICE_ADD_MEDIA):
        return
    descriptions = yield from hass.async_add_job(
        load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml'))
    for service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[service]['schema']
        hass.services.async_register(
            DOMAIN, service, async_service_handler,
            description=descriptions.get(service), schema=schema)
def cmd(func):
    """Decorator that catches transport errors raised by command coroutines."""
    @wraps(func)
    @asyncio.coroutine
    def wrapper(obj, *args, **kwargs):
        """Wrap all command methods."""
        import jsonrpc_base
        try:
            yield from func(obj, *args, **kwargs)
        except jsonrpc_base.jsonrpc.TransportError as exc:
            # If Kodi is off, we expect calls to fail.
            log_function = (
                _LOGGER.info if obj.state == STATE_OFF else _LOGGER.error)
            log_function("Error calling %s on entity %s: %r",
                         func.__name__, obj.entity_id, exc)
    return wrapper
class KodiDevice(MediaPlayerDevice):
    """Representation of a XBMC/Kodi device.

    Talks to Kodi over its JSON-RPC API via HTTP and, optionally, a
    websocket connection used to receive push notifications.
    """
    def __init__(self, hass, name, host, port, tcp_port, encryption=False,
                 username=None, password=None,
                 turn_on_action=None, turn_off_action=None,
                 timeout=DEFAULT_TIMEOUT, websocket=True):
        """Initialize the Kodi device."""
        import jsonrpc_async
        import jsonrpc_websocket
        self.hass = hass
        self._name = name
        kwargs = {
            'timeout': timeout,
            'session': async_get_clientsession(hass),
        }
        if username is not None:
            kwargs['auth'] = aiohttp.BasicAuth(username, password)
            # Credentials are embedded in the image URL, which is fetched
            # with a plain GET rather than through the JSON-RPC servers.
            image_auth_string = "{}:{}@".format(username, password)
        else:
            image_auth_string = ""
        http_protocol = 'https' if encryption else 'http'
        ws_protocol = 'wss' if encryption else 'ws'
        self._http_url = '{}://{}:{}/jsonrpc'.format(http_protocol, host, port)
        self._image_url = '{}://{}{}:{}/image'.format(
            http_protocol, image_auth_string, host, port)
        self._ws_url = '{}://{}:{}/jsonrpc'.format(ws_protocol, host, tcp_port)
        self._http_server = jsonrpc_async.Server(self._http_url, **kwargs)
        if websocket:
            # Setup websocket connection
            self._ws_server = jsonrpc_websocket.Server(self._ws_url, **kwargs)
            # Register notification listeners
            self._ws_server.Player.OnPause = self.async_on_speed_event
            self._ws_server.Player.OnPlay = self.async_on_speed_event
            self._ws_server.Player.OnSpeedChanged = self.async_on_speed_event
            self._ws_server.Player.OnStop = self.async_on_stop
            self._ws_server.Application.OnVolumeChanged = \
                self.async_on_volume_changed
            self._ws_server.System.OnQuit = self.async_on_quit
            self._ws_server.System.OnRestart = self.async_on_quit
            self._ws_server.System.OnSleep = self.async_on_quit
            def on_hass_stop(event):
                """Close websocket connection when hass stops."""
                self.hass.async_add_job(self._ws_server.close())
            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STOP, on_hass_stop)
        else:
            self._ws_server = None
        # Script creation for the turn on/off config options
        if turn_on_action is not None:
            # NOTE(review): the last argument is a coroutine object
            # (async_update_ha_state(True)), not a callable -- confirm this
            # is what script.Script expects as its change listener.
            turn_on_action = script.Script(
                self.hass, turn_on_action,
                "{} turn ON script".format(self.name),
                self.async_update_ha_state(True))
        if turn_off_action is not None:
            turn_off_action = script.Script(
                self.hass, _check_deprecated_turn_off(hass, turn_off_action),
                "{} turn OFF script".format(self.name))
        self._turn_on_action = turn_on_action
        self._turn_off_action = turn_off_action
        self._enable_websocket = websocket
        # _players is None when Kodi is off, [] when idle (see state).
        self._players = list()
        self._properties = {}
        self._item = {}
        self._app_properties = {}
    @callback
    def async_on_speed_event(self, sender, data):
        """Handle player changes between playing and paused."""
        self._properties['speed'] = data['player']['speed']
        # NOTE(review): data['item'] is a dict, so hasattr(..., 'id') is
        # always False and force_refresh is always True here; presumably
        # "'id' in data['item']" was intended -- confirm.
        if not hasattr(data['item'], 'id'):
            # If no item id is given, perform a full update
            force_refresh = True
        else:
            # If a new item is playing, force a complete refresh
            force_refresh = data['item']['id'] != self._item.get('id')
        self.hass.async_add_job(self.async_update_ha_state(force_refresh))
    @callback
    def async_on_stop(self, sender, data):
        """Handle the stop of the player playback."""
        # Prevent stop notifications which are sent after quit notification
        if self._players is None:
            return
        self._players = []
        self._properties = {}
        self._item = {}
        self.hass.async_add_job(self.async_update_ha_state())
    @callback
    def async_on_volume_changed(self, sender, data):
        """Handle the volume changes."""
        self._app_properties['volume'] = data['volume']
        self._app_properties['muted'] = data['muted']
        self.hass.async_add_job(self.async_update_ha_state())
    @callback
    def async_on_quit(self, sender, data):
        """Reset the player state on quit action."""
        # Setting _players to None (not []) marks the device as OFF.
        self._players = None
        self._properties = {}
        self._item = {}
        self._app_properties = {}
        self.hass.async_add_job(self._ws_server.close())
    @asyncio.coroutine
    def _get_players(self):
        """Return the active player objects or None."""
        import jsonrpc_base
        try:
            return (yield from self.server.Player.GetActivePlayers())
        except jsonrpc_base.jsonrpc.TransportError:
            # Only log the first failure; afterwards _players is None.
            if self._players is not None:
                _LOGGER.info("Unable to fetch kodi data")
                _LOGGER.debug("Unable to fetch kodi data", exc_info=True)
            return None
    @property
    def state(self):
        """Return the state of the device."""
        if self._players is None:
            return STATE_OFF
        if not self._players:
            return STATE_IDLE
        if self._properties['speed'] == 0 and not self._properties['live']:
            return STATE_PAUSED
        return STATE_PLAYING
    @asyncio.coroutine
    def async_ws_connect(self):
        """Connect to Kodi via websocket protocol."""
        import jsonrpc_base
        try:
            ws_loop_future = yield from self._ws_server.ws_connect()
        except jsonrpc_base.jsonrpc.TransportError:
            _LOGGER.info("Unable to connect to Kodi via websocket")
            _LOGGER.debug(
                "Unable to connect to Kodi via websocket", exc_info=True)
            return
        @asyncio.coroutine
        def ws_loop_wrapper():
            """Catch exceptions from the websocket loop task."""
            try:
                yield from ws_loop_future
            except jsonrpc_base.TransportError:
                # Kodi abruptly ends ws connection when exiting. We will try
                # to reconnect on the next poll.
                pass
            # Update HA state after Kodi disconnects
            self.hass.async_add_job(self.async_update_ha_state())
        # Create a task instead of adding a tracking job, since this task will
        # run until the websocket connection is closed.
        self.hass.loop.create_task(ws_loop_wrapper())
    @asyncio.coroutine
    def async_update(self):
        """Retrieve latest state."""
        self._players = yield from self._get_players()
        if self._players is None:
            self._properties = {}
            self._item = {}
            self._app_properties = {}
            return
        if self._enable_websocket and not self._ws_server.connected:
            self.hass.async_add_job(self.async_ws_connect())
        self._app_properties = \
            yield from self.server.Application.GetProperties(
                ['volume', 'muted']
            )
        if self._players:
            player_id = self._players[0]['playerid']
            assert isinstance(player_id, int)
            self._properties = yield from self.server.Player.GetProperties(
                player_id,
                ['time', 'totaltime', 'speed', 'live']
            )
            self._item = (yield from self.server.Player.GetItem(
                player_id,
                ['title', 'file', 'uniqueid', 'thumbnail', 'artist',
                 'albumartist', 'showtitle', 'album', 'season', 'episode']
            ))['item']
        else:
            # NOTE(review): this also clears _app_properties that were just
            # fetched above -- confirm that is intentional for idle players.
            self._properties = {}
            self._item = {}
            self._app_properties = {}
    @property
    def server(self):
        """Active server for json-rpc requests."""
        # Prefer the websocket connection when it is up.
        if self._enable_websocket and self._ws_server.connected:
            return self._ws_server
        return self._http_server
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def should_poll(self):
        """Return True if entity has to be polled for state."""
        # With a live websocket, Kodi pushes state changes to us.
        return not (self._enable_websocket and self._ws_server.connected)
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        if 'volume' in self._app_properties:
            return self._app_properties['volume'] / 100.0
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._app_properties.get('muted')
    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self._item.get('uniqueid', None)
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPES.get(self._item.get('type'))
    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        if self._properties.get('live'):
            # Live streams have no meaningful duration.
            return None
        total_time = self._properties.get('totaltime')
        if total_time is None:
            return None
        return (
            total_time['hours'] * 3600 +
            total_time['minutes'] * 60 +
            total_time['seconds'])
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        thumbnail = self._item.get('thumbnail')
        if thumbnail is None:
            return None
        url_components = urllib.parse.urlparse(thumbnail)
        if url_components.scheme == 'image':
            # Kodi returns an image:// URI; serve it through its web image
            # endpoint (with credentials embedded when configured).
            return '{}/{}'.format(
                self._image_url,
                urllib.parse.quote_plus(thumbnail))
    @property
    def media_title(self):
        """Title of current playing media."""
        # find a string we can use as a title
        return self._item.get(
            'title', self._item.get('label', self._item.get('file')))
    @property
    def media_series_title(self):
        """Title of series of current playing media, TV show only."""
        return self._item.get('showtitle')
    @property
    def media_season(self):
        """Season of current playing media, TV show only."""
        return self._item.get('season')
    @property
    def media_episode(self):
        """Episode of current playing media, TV show only."""
        return self._item.get('episode')
    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return self._item.get('album')
    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        artists = self._item.get('artist', [])
        if artists:
            return artists[0]
        return None
    @property
    def media_album_artist(self):
        """Album artist of current playing media, music track only."""
        artists = self._item.get('albumartist', [])
        if artists:
            return artists[0]
        return None
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        supported_features = SUPPORT_KODI
        if self._turn_on_action is not None:
            supported_features |= SUPPORT_TURN_ON
        if self._turn_off_action is not None:
            supported_features |= SUPPORT_TURN_OFF
        return supported_features
    @cmd
    @asyncio.coroutine
    def async_turn_on(self):
        """Execute turn_on_action to turn on media player."""
        if self._turn_on_action is not None:
            yield from self._turn_on_action.async_run(
                variables={"entity_id": self.entity_id})
        else:
            _LOGGER.warning("turn_on requested but turn_on_action is none")
    @cmd
    @asyncio.coroutine
    def async_turn_off(self):
        """Execute turn_off_action to turn off media player."""
        if self._turn_off_action is not None:
            yield from self._turn_off_action.async_run(
                variables={"entity_id": self.entity_id})
        else:
            _LOGGER.warning("turn_off requested but turn_off_action is none")
    @cmd
    @asyncio.coroutine
    def async_volume_up(self):
        """Volume up the media player."""
        assert (
            yield from self.server.Input.ExecuteAction('volumeup')) == 'OK'
    @cmd
    @asyncio.coroutine
    def async_volume_down(self):
        """Volume down the media player."""
        assert (
            yield from self.server.Input.ExecuteAction('volumedown')) == 'OK'
    @cmd
    def async_set_volume_level(self, volume):
        """Set volume level, range 0..1.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.server.Application.SetVolume(int(volume * 100))
    @cmd
    def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.server.Application.SetMute(mute)
    @asyncio.coroutine
    def async_set_play_state(self, state):
        """Handle play/pause/toggle."""
        players = yield from self._get_players()
        if players is not None and players:
            yield from self.server.Player.PlayPause(
                players[0]['playerid'], state)
    @cmd
    def async_media_play_pause(self):
        """Pause media on media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_set_play_state('toggle')
    @cmd
    def async_media_play(self):
        """Play media.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_set_play_state(True)
    @cmd
    def async_media_pause(self):
        """Pause the media player.
        This method must be run in the event loop and returns a coroutine.
        """
        return self.async_set_play_state(False)
    @cmd
    @asyncio.coroutine
    def async_media_stop(self):
        """Stop the media player."""
        players = yield from self._get_players()
        if players:
            yield from self.server.Player.Stop(players[0]['playerid'])
    @asyncio.coroutine
    def _goto(self, direction):
        """Handle for previous/next track."""
        players = yield from self._get_players()
        if players:
            if direction == 'previous':
                # First seek to position 0. Kodi goes to the beginning of the
                # current track if the current track is not at the beginning.
                yield from self.server.Player.Seek(players[0]['playerid'], 0)
            yield from self.server.Player.GoTo(
                players[0]['playerid'], direction)
    @cmd
    def async_media_next_track(self):
        """Send next track command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self._goto('next')
    @cmd
    def async_media_previous_track(self):
        """Send previous track command.
        This method must be run in the event loop and returns a coroutine.
        """
        return self._goto('previous')
    @cmd
    @asyncio.coroutine
    def async_media_seek(self, position):
        """Send seek command."""
        players = yield from self._get_players()
        # Convert the float position (seconds) into Kodi's time dict.
        time = {}
        time['milliseconds'] = int((position % 1) * 1000)
        position = int(position)
        time['seconds'] = int(position % 60)
        position /= 60
        time['minutes'] = int(position % 60)
        position /= 60
        time['hours'] = int(position)
        if players:
            yield from self.server.Player.Seek(players[0]['playerid'], time)
    @cmd
    def async_play_media(self, media_type, media_id, **kwargs):
        """Send the play_media command to the media player.
        This method must be run in the event loop and returns a coroutine.
        """
        if media_type == "CHANNEL":
            return self.server.Player.Open(
                {"item": {"channelid": int(media_id)}})
        elif media_type == "PLAYLIST":
            return self.server.Player.Open(
                {"item": {"playlistid": int(media_id)}})
        return self.server.Player.Open(
            {"item": {"file": str(media_id)}})
    @asyncio.coroutine
    def async_set_shuffle(self, shuffle):
        """Set shuffle mode, for the first player."""
        if len(self._players) < 1:
            raise RuntimeError("Error: No active player.")
        yield from self.server.Player.SetShuffle(
            {"playerid": self._players[0]['playerid'], "shuffle": shuffle})
    @asyncio.coroutine
    def async_call_method(self, method, **kwargs):
        """Run Kodi JSONRPC API method with params."""
        import jsonrpc_base
        _LOGGER.debug("Run API method %s, kwargs=%s", method, kwargs)
        result_ok = False
        try:
            result = yield from getattr(self.server, method)(**kwargs)
            result_ok = True
        except jsonrpc_base.jsonrpc.ProtocolError as exc:
            result = exc.args[2]['error']
            _LOGGER.error("Run API method %s.%s(%s) error: %s",
                          self.entity_id, method, kwargs, result)
        except jsonrpc_base.jsonrpc.TransportError:
            result = None
            _LOGGER.warning("TransportError trying to run API method "
                            "%s.%s(%s)", self.entity_id, method, kwargs)
        # Fire the result on the event bus so automations can react to it.
        if isinstance(result, dict):
            event_data = {'entity_id': self.entity_id,
                          'result': result,
                          'result_ok': result_ok,
                          'input': {'method': method, 'params': kwargs}}
            _LOGGER.debug("EVENT kodi_call_method_result: %s", event_data)
            self.hass.bus.async_fire(EVENT_KODI_CALL_METHOD_RESULT,
                                     event_data=event_data)
        return result
    @asyncio.coroutine
    def async_add_media_to_playlist(
            self, media_type, media_id=None, media_name='ALL', artist_name=''):
        """Add a media to default playlist (i.e. playlistid=0).
        First the media type must be selected, then
        the media can be specified in terms of id or
        name and optionally artist name.
        All the albums of an artist can be added with
        media_name="ALL"
        """
        import jsonrpc_base
        params = {"playlistid": 0}
        if media_type == "SONG":
            if media_id is None:
                media_id = yield from self.async_find_song(
                    media_name, artist_name)
            if media_id:
                params["item"] = {"songid": int(media_id)}
        elif media_type == "ALBUM":
            if media_id is None:
                if media_name == "ALL":
                    yield from self.async_add_all_albums(artist_name)
                    return
                media_id = yield from self.async_find_album(
                    media_name, artist_name)
            if media_id:
                params["item"] = {"albumid": int(media_id)}
        else:
            raise RuntimeError("Unrecognized media type.")
        if media_id is not None:
            try:
                yield from self.server.Playlist.Add(params)
            except jsonrpc_base.jsonrpc.ProtocolError as exc:
                result = exc.args[2]['error']
                _LOGGER.error("Run API method %s.Playlist.Add(%s) error: %s",
                              self.entity_id, media_type, result)
            except jsonrpc_base.jsonrpc.TransportError:
                _LOGGER.warning("TransportError trying to add playlist to %s",
                                self.entity_id)
        else:
            _LOGGER.warning("No media detected for Playlist.Add")
    @asyncio.coroutine
    def async_add_all_albums(self, artist_name):
        """Add all albums of an artist to default playlist (i.e. playlistid=0).
        The artist is specified in terms of name.
        """
        artist_id = yield from self.async_find_artist(artist_name)
        albums = yield from self.async_get_albums(artist_id)
        for alb in albums['albums']:
            yield from self.server.Playlist.Add(
                {"playlistid": 0, "item": {"albumid": int(alb['albumid'])}})
    @asyncio.coroutine
    def async_clear_playlist(self):
        """Clear default playlist (i.e. playlistid=0)."""
        return self.server.Playlist.Clear({"playlistid": 0})
    @asyncio.coroutine
    def async_get_artists(self):
        """Get artists list."""
        return (yield from self.server.AudioLibrary.GetArtists())
    @asyncio.coroutine
    def async_get_albums(self, artist_id=None):
        """Get albums list."""
        if artist_id is None:
            return (yield from self.server.AudioLibrary.GetAlbums())
        return (yield from self.server.AudioLibrary.GetAlbums(
            {"filter": {"artistid": int(artist_id)}}))
    @asyncio.coroutine
    def async_find_artist(self, artist_name):
        """Find artist by name."""
        artists = yield from self.async_get_artists()
        try:
            out = self._find(
                artist_name, [a['artist'] for a in artists['artists']])
            return artists['artists'][out[0][0]]['artistid']
        except KeyError:
            _LOGGER.warning("No artists were found: %s", artist_name)
            return None
    @asyncio.coroutine
    def async_get_songs(self, artist_id=None):
        """Get songs list."""
        if artist_id is None:
            return (yield from self.server.AudioLibrary.GetSongs())
        return (yield from self.server.AudioLibrary.GetSongs(
            {"filter": {"artistid": int(artist_id)}}))
    @asyncio.coroutine
    def async_find_song(self, song_name, artist_name=''):
        """Find song by name and optionally artist name."""
        artist_id = None
        if artist_name != '':
            artist_id = yield from self.async_find_artist(artist_name)
        songs = yield from self.async_get_songs(artist_id)
        if songs['limits']['total'] == 0:
            return None
        out = self._find(song_name, [a['label'] for a in songs['songs']])
        return songs['songs'][out[0][0]]['songid']
    @asyncio.coroutine
    def async_find_album(self, album_name, artist_name=''):
        """Find album by name and optionally artist name."""
        artist_id = None
        if artist_name != '':
            artist_id = yield from self.async_find_artist(artist_name)
        albums = yield from self.async_get_albums(artist_id)
        try:
            out = self._find(
                album_name, [a['label'] for a in albums['albums']])
            return albums['albums'][out[0][0]]['albumid']
        except KeyError:
            _LOGGER.warning("No albums were found with artist: %s, album: %s",
                            artist_name, album_name)
            return None
    @staticmethod
    def _find(key_word, words):
        """Rank *words* by how many whole tokens of *key_word* they contain.

        Returns [index, score] pairs sorted by descending score, so the
        best match for the search phrase is first.
        """
        key_word = key_word.split(' ')
        patt = [re.compile(
            '(^| )' + k + '( |$)', re.IGNORECASE) for k in key_word]
        out = [[i, 0] for i in range(len(words))]
        for i in range(len(words)):
            mtc = [p.search(words[i]) for p in patt]
            rate = [m is not None for m in mtc].count(True)
            out[i][1] = rate
        return sorted(out, key=lambda out: out[1], reverse=True)
| |
import datetime
import unittest2
import webtest
import json
import md5
import webapp2
import api_main
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.auth_type import AuthType
from consts.event_type import EventType
from controllers.api.api_event_controller import ApiEventController
from models.api_auth_access import ApiAuthAccess
from models.award import Award
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.team import Team
class TestApiTrustedController(unittest2.TestCase):
def setUp(self):
self.testapp = webtest.TestApp(api_main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_taskqueue_stub(root_path=".")
self.teams_auth = ApiAuthAccess(id='tEsT_id_0',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_TEAMS])
self.matches_auth = ApiAuthAccess(id='tEsT_id_1',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES])
self.rankings_auth = ApiAuthAccess(id='tEsT_id_2',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_RANKINGS])
self.alliances_auth = ApiAuthAccess(id='tEsT_id_3',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_ALLIANCES])
self.awards_auth = ApiAuthAccess(id='tEsT_id_4',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_AWARDS])
self.video_auth = ApiAuthAccess(id='tEsT_id_5',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.MATCH_VIDEO])
self.event = Event(
id='2014casj',
event_type_enum=EventType.REGIONAL,
event_short='casj',
year=2014,
)
self.event.put()
    def tearDown(self):
        """Deactivate the testbed so stubs do not leak between tests."""
        self.testbed.deactivate()
def test_auth(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
# Fail
response = self.testapp.post(request_path, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail
request_body = json.dumps([])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
self.rankings_auth.put()
self.matches_auth.put()
# Pass
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Fail; bad X-TBA-Auth-Id
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'badTestAuthId', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail; bad sig
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': '123abc'}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail; bad sig due to wrong body
body2 = json.dumps([{}])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, body2, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail; bad event
request_path2 = '/api/trusted/v1/event/2014cama/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path2, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail; insufficient auth_types_enum
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
    def test_alliance_selections_update(self):
        """POST a full 8-alliance selection list and verify it round-trips onto the event."""
        self.alliances_auth.put()  # grant alliance-selection write access for this event

        alliances = [['frc971', 'frc254', 'frc1662'],
                     ['frc1678', 'frc368', 'frc4171'],
                     ['frc2035', 'frc192', 'frc4990'],
                     ['frc1323', 'frc846', 'frc2135'],
                     ['frc2144', 'frc1388', 'frc668'],
                     ['frc1280', 'frc604', 'frc100'],
                     ['frc114', 'frc852', 'frc841'],
                     ['frc2473', 'frc3256', 'frc1868']]
        request_body = json.dumps(alliances)

        request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # Every submitted alliance should be stored, in order, as that
        # selection's 'picks' list.
        for i, selection in enumerate(self.event.alliance_selections):
            self.assertEqual(alliances[i], selection['picks'])
    def test_awards_update(self):
        """POST awards twice and verify the second list fully replaces the first."""
        self.awards_auth.put()  # grant awards write access for this event

        awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
                  {'name_str': 'Winner', 'team_key': 'frc604'},
                  {'name_str': 'Volunteer Blahblah', 'team_key': 'frc1', 'awardee': 'Bob Bobby'}]
        request_body = json.dumps(awards)

        request_path = '/api/trusted/v1/event/2014casj/awards/update'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # The two 'Winner' entries collapse into one Award record (2014casj_1);
        # the volunteer award becomes 2014casj_5.
        db_awards = Award.query(Award.event == self.event.key).fetch(None)
        self.assertEqual(len(db_awards), 2)
        self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
        self.assertTrue('2014casj_5' in [a.key.id() for a in db_awards])

        # A second update replaces the previous set; the volunteer award is gone.
        awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
                  {'name_str': 'Winner', 'team_key': 'frc604'}]
        request_body = json.dumps(awards)
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        db_awards = Award.query(Award.event == self.event.key).fetch(None)
        self.assertEqual(len(db_awards), 1)
        self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
def test_matches_update(self):
self.matches_auth.put()
update_request_path = '/api/trusted/v1/event/2014casj/matches/update'
delete_request_path = '/api/trusted/v1/event/2014casj/matches/delete'
# add one match
matches = [{
'comp_level': 'qm',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 25},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 26},
},
'time_string': '9:00 AM',
'time_utc': '2014-08-31T16:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 1)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
# add another match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260},
},
'time_string': '10:00 AM',
'time_utc': '2014-08-31T17:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
# add a match and delete a match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
keys_to_delete = ['qm1']
request_body = json.dumps(keys_to_delete)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json['keys_deleted'], ['qm1'])
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m2' in [m.key.id() for m in db_matches])
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m2' in [m.key.id() for m in db_matches])
# verify match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
    def test_rankings_update(self):
        """POST rankings with custom breakdown columns and verify the stored table."""
        self.rankings_auth.put()  # grant rankings write access for this event

        rankings = {
            'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C'],
            'rankings': [
                {'team_key': 'frc254', 'rank': 1, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
                {'team_key': 'frc971', 'rank': 2, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
            ],
        }
        request_body = json.dumps(rankings)

        request_path = '/api/trusted/v1/event/2014casj/rankings/update'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # Row 0 is the generated header (breakdowns in the middle); the
        # following rows hold per-team values with the 'frc' prefix stripped.
        self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'DQ', 'Played'])
        self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, 0, 10])
    def test_rankings_wlt_update(self):
        """POST rankings that include wins/losses/ties and verify they are
        folded into a single 'Record (W-L-T)' column."""
        self.rankings_auth.put()  # grant rankings write access for this event

        rankings = {
            'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
            'rankings': [
                {'team_key': 'frc254', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
                {'team_key': 'frc971', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
            ],
        }
        request_body = json.dumps(rankings)

        request_path = '/api/trusted/v1/event/2014casj/rankings/update'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # The individual wins/losses/ties breakdowns are replaced by one
        # combined 'Record (W-L-T)' column formatted as 'W-L-T'.
        self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
        self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, '10-0-0', 0, 10])
    def test_eventteams_update(self):
        """POST a team list twice; the second list replaces the first."""
        self.teams_auth.put()  # grant team-list write access for this event

        team_list = ['frc254', 'frc971', 'frc604']
        request_body = json.dumps(team_list)

        # Insert teams into db, otherwise they won't get added (see 072058b)
        Team(id='frc254', team_number=254).put()
        Team(id='frc971', team_number=971).put()
        Team(id='frc604', team_number=604).put()
        Team(id='frc100', team_number=100).put()

        request_path = '/api/trusted/v1/event/2014casj/team_list/update'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
        self.assertEqual(len(db_eventteams), 3)
        self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
        self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
        self.assertTrue('2014casj_frc604' in [et.key.id() for et in db_eventteams])

        # Posting a different list replaces the old EventTeam set entirely.
        team_list = ['frc254', 'frc100']
        request_body = json.dumps(team_list)
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
        self.assertEqual(len(db_eventteams), 2)
        self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
        self.assertTrue('2014casj_frc100' in [et.key.id() for et in db_eventteams])
    def test_eventteams_unknown(self):
        """Team keys without a corresponding Team record are silently skipped."""
        self.teams_auth.put()  # grant team-list write access for this event

        team_list = ['frc254', 'frc971', 'frc604']
        request_body = json.dumps(team_list)

        # Insert teams into db, otherwise they won't get added (see 072058b).
        # frc604 and frc100 are intentionally NOT inserted here.
        Team(id='frc254', team_number=254).put()
        Team(id='frc971', team_number=971).put()

        request_path = '/api/trusted/v1/event/2014casj/team_list/update'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # Only the two teams with Team records were attached.
        db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
        self.assertEqual(len(db_eventteams), 2)
        self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
        self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
        self.assertTrue('2014casj_frc604' not in [et.key.id() for et in db_eventteams])

        team_list = ['frc254', 'frc100']
        request_body = json.dumps(team_list)
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # frc100 has no Team record, so only frc254 remains.
        db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
        self.assertEqual(len(db_eventteams), 1)
        self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
        self.assertTrue('2014casj_frc100' not in [et.key.id() for et in db_eventteams])
    def test_match_videos_add(self):
        """POST YouTube keys for two matches; new keys merge with existing videos."""
        self.video_auth.put()  # grant match-video write access for this event

        match1 = Match(
            id="2014casj_qm1",
            alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
            comp_level="qm",
            event=ndb.Key(Event, '2014casj'),
            year=2014,
            set_number=1,
            match_number=1,
            team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
            youtube_videos=["abcdef"]  # pre-existing video that must survive the add
        )
        match1.put()

        match2 = Match(
            id="2014casj_sf1m1",
            alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
            comp_level="sf",
            event=ndb.Key(Event, '2014casj'),
            year=2014,
            set_number=1,
            match_number=1,
            team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
        )
        match2.put()

        # Keys are partial match keys (event prefix omitted) -> video id.
        match_videos = {'qm1': 'aFZy8iibMD0', 'sf1m1': 'RpSgUrsghv4'}
        request_body = json.dumps(match_videos)

        request_path = '/api/trusted/v1/event/2014casj/match_videos/add'
        # Trusted-API signature: md5 of secret + path + body.
        sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
        response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_5', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
        self.assertEqual(response.status_code, 200)

        # New keys are appended; match1 keeps its original video too.
        self.assertEqual(set(Match.get_by_id('2014casj_qm1').youtube_videos), {'abcdef', 'aFZy8iibMD0'})
        self.assertEqual(set(Match.get_by_id('2014casj_sf1m1').youtube_videos), {'RpSgUrsghv4'})
| |
# -*- coding: utf-8 -*-
from __future__ import division

from math import floor
from math import sqrt
from operator import itemgetter

import matplotlib
# Force matplotlib to not use any Xwindows backend.
# FIX: the backend must be selected *before* matplotlib.pyplot is imported;
# the original called use('Agg') after the pyplot import, which has no
# effect once a backend has already been loaded.
matplotlib.use('Agg')
from matplotlib import mlab
import matplotlib.pyplot as plt
def truncate(x, d):
    """Truncate x to d decimal places (toward zero), without rounding."""
    scale = 10.0 ** d
    return int(x * scale) / scale
class Corpus_Information:
    def __init__(self, WordLists, lFiles):
        """
        takes in wordlists and converts them completely to statistics and gives anomalies (about file size)

        :param WordLists: an array containing dictionaries mapping word to word count;
                          each dictionary is the word count of a particular file
        :param lFiles: a parallel array to WordLists containing the file objects
                       (their .name attribute is used to label anomalies)
        """
        # initialize
        NumFile = len(WordLists)
        FileAnomalyStdE = {}
        FileAnomalyIQR = {}
        FileSizes = {}

        # total word count of each file, keyed by the file object
        for i in range(NumFile):
            FileSizes.update({lFiles[i]: sum(WordLists[i].values())})

        # 1 standard error analysis
        Average_FileSize = sum(FileSizes.values()) / NumFile
        # calculate the StdE (population standard deviation of file sizes)
        StdE_FileSize = 0
        for filesize in FileSizes.values():
            StdE_FileSize += (filesize - Average_FileSize) ** 2
        StdE_FileSize /= NumFile
        StdE_FileSize = sqrt(StdE_FileSize)
        # calculate the anomaly: flag files more than 2 StdE from the mean
        for file in lFiles:
            if FileSizes[file] > Average_FileSize + 2 * StdE_FileSize:
                FileAnomalyStdE.update({file.name: 'large'})
            elif FileSizes[file] < Average_FileSize - 2 * StdE_FileSize:
                FileAnomalyStdE.update({file.name: 'small'})

        # 2 IQR analysis
        TempList = sorted(FileSizes.items(), key=itemgetter(1))  # sorted by file size
        Mid = TempList[int(NumFile / 2)][1]
        Q3 = TempList[int(NumFile * 3 / 4)][1]
        Q1 = TempList[int(NumFile / 4)][1]
        IQR = Q3 - Q1
        # calculate the anomaly: flag files beyond 1.5 * IQR of the median
        for file in lFiles:
            if FileSizes[file] > Mid + 1.5 * IQR:
                FileAnomalyIQR.update({file.name: 'large'})
            elif FileSizes[file] < Mid - 1.5 * IQR:
                FileAnomalyIQR.update({file.name: 'small'})

        # pack the data
        self.NumFile = NumFile  # number of files
        self.FileSizes = FileSizes  # dict mapping file to its total word count
        self.Average = Average_FileSize  # average file size
        self.StdE = StdE_FileSize  # standard error of file size
        self.FileAnomalyStdE = FileAnomalyStdE
        # dictionary mapping anomalous file names to how they differ from the
        # others ('large' or 'small'), analyzed using standard error
        self.Q1 = Q1  # Q1 of all the file sizes
        self.Median = Mid  # median of all the file sizes
        self.Q3 = Q3  # Q3 of all the file sizes
        self.IQR = IQR  # IQR of all the file sizes
        self.FileAnomalyIQR = FileAnomalyIQR
        # dictionary mapping anomalous file names to how they differ from the
        # others ('large' or 'small'), analyzed using IQR

    def list(self):
        """
        print all the statistics in a good manner
        """
        print
        print 'average:', self.Average, ' standard error:', self.StdE
        print 'file size anomaly calculated using standard error:', self.FileAnomalyStdE
        print 'median:', self.Median, ' Q1:', self.Q1, ' Q3:', self.Q3, ' IQR', self.IQR
        print 'file size anomaly calculated using IQR:', self.FileAnomalyIQR

    def plot(self, path):
        """
        plot a bar chart to represent the statistics and save it to `path`

        x is the file name
        y is the file size (using word count to represent it)
        """
        plt.bar(range(self.NumFile), self.FileSizes.values(), align='center')
        plt.xticks(range(self.NumFile), self.FileSizes.keys())
        plt.xticks(rotation=50)
        plt.xlabel('File Name')
        plt.ylabel('File Size(in term of word count)')
        plt.savefig(path)
        plt.close()

    def returnstatistics(self):
        """
        :return: a dictionary mapping each statistic name to the actual statistic
        """
        return {'average': truncate(self.Average, 3),
                'StdE': self.StdE,
                'fileanomalyStdE': self.FileAnomalyStdE,
                'median': self.Median,
                'Q1': self.Q1,
                'Q3': self.Q3,
                'IQR': self.IQR,
                'fileanomalyIQR': self.FileAnomalyIQR}
class File_Information:
    def __init__(self, WordList, FileName):
        """
        takes a WordList of a file and the file name of that file to give statistics of that particular file

        :param WordList: a dictionary mapping word to word count, representing the word count of a particular file
        :param FileName: the file name of that file
        """
        # initialize
        NumWord = len(WordList)  # number of distinct words
        TotalWordCount = sum(WordList.values())

        # 1 standard error analysis
        AverageWordCount = TotalWordCount / NumWord
        # calculate the StdE (population standard deviation of the counts)
        StdEWordCount = 0
        for WordCount in WordList.values():
            StdEWordCount += (WordCount - AverageWordCount) ** 2
        StdEWordCount /= NumWord
        StdEWordCount = sqrt(StdEWordCount)

        # 2 IQR analysis
        TempList = sorted(WordList.items(), key=itemgetter(1))  # sorted by count
        Mid = TempList[int(NumWord / 2)][1]
        Q3 = TempList[int(NumWord * 3 / 4)][1]
        Q1 = TempList[int(NumWord / 4)][1]
        IQR = Q3 - Q1

        # pack the data
        self.FileName = FileName
        self.NumWord = NumWord
        self.TotalWordCount = TotalWordCount
        self.WordCount = WordList
        self.Average = AverageWordCount
        self.StdE = StdEWordCount
        self.Q1 = Q1
        self.Median = Mid
        self.Q3 = Q3
        self.IQR = IQR
        # hapax legomena: the number of words that occur exactly once
        self.Hapax = (WordList.values().count(1))

    def list(self):
        """
        print all the statistics in a good manner
        """
        print
        print 'information for', "'" + self.FileName + "'"
        print 'total word count:', self.TotalWordCount
        print '1. in term of word count:'
        print ' average:', self.Average, ' standard error:', self.StdE
        print ' median:', self.Median, ' Q1:', self.Q1, ' Q3:', self.Q3, ' IQR', self.IQR
        print '2. in term of probability'
        print ' average:', self.Average / self.TotalWordCount, ' standard error:', self.StdE / self.TotalWordCount
        print ' median:', self.Median / self.TotalWordCount, ' Q1:', self.Q1 / self.TotalWordCount, \
            ' Q3:', self.Q3 / self.TotalWordCount, ' IQR', self.IQR / self.TotalWordCount

    def plot(self, path, num_bins=0):
        """
        draw a histogram of the word counts and save it to `path`

        :param num_bins: number of bars; default is (number of different
                         words in the file) / 2, capped at 50 when that
                         would be too large
        """
        # plot data
        mu = self.Average  # mean of distribution
        sigma = self.StdE  # standard deviation of distribution
        if num_bins == 0:  # default of num_bins
            num_bins = min([round(self.NumWord / 2), 50])
        # the histogram of the data (normed so the fit line below is comparable)
        n, bins, patches = plt.hist(self.WordCount.values(), num_bins, normed=1, facecolor='green', alpha=0.5)
        # add a 'best fit' line
        y = mlab.normpdf(bins, mu, sigma)
        plt.plot(bins, y, 'r--')
        plt.xlabel('Word Count')
        plt.ylabel('Probability(how many words have this word count)')
        plt.title(r'Histogram of word count: $\mu=' + str(self.Average) + '$, $\sigma=' + str(self.StdE) + '$')
        # Tweak spacing to prevent clipping of ylabel
        plt.subplots_adjust(left=0.15)
        plt.savefig(path)
        plt.close()

    def returnstatistics(self):
        """
        :return: a dictionary mapping each statistic name to the actual statistic
        """
        return {'name': self.FileName,
                'numUniqueWords': int(self.NumWord),
                'totalwordCount': int(round(self.TotalWordCount, 2)),
                'median': self.Median,
                'Q1': self.Q1,
                'Q3': self.Q3,
                'IQR': self.IQR,
                'average': truncate(self.Average, 1),
                'stdE': self.StdE,
                'Hapax': self.Hapax}
| |
"""
sentry.models.user
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import warnings
from bitfield import BitField
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.core.urlresolvers import reverse
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField
from sentry.models import LostPasswordHash
from sentry.utils.http import absolute_uri
audit_logger = logging.getLogger('sentry.audit.user')
class UserManager(BaseManager, UserManager):
    # Combines sentry's caching BaseManager with Django's stock UserManager
    # (create_user / create_superuser helpers). Intentionally shadows the
    # imported django UserManager name.
    pass
class User(BaseModel, AbstractBaseUser):
    """Sentry user account, persisted in the legacy ``auth_user`` table."""

    __core__ = True

    id = BoundedAutoField(primary_key=True)
    username = models.CharField(_('username'), max_length=128, unique=True)
    # this column is called first_name for legacy reasons, but it is the entire
    # display name
    name = models.CharField(_('name'), max_length=200, blank=True, db_column='first_name')
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.')
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as '
            'active. Unselect this instead of deleting accounts.'
        )
    )
    is_superuser = models.BooleanField(
        _('superuser status'),
        default=False,
        help_text=_(
            'Designates that this user has all permissions without '
            'explicitly assigning them.'
        )
    )
    is_managed = models.BooleanField(
        _('managed'),
        default=False,
        help_text=_(
            'Designates whether this user should be treated as '
            'managed. Select this to disallow the user from '
            'modifying their account (username, password, etc).'
        )
    )
    is_sentry_app = models.NullBooleanField(
        _('is sentry app'),
        null=True,
        default=None,
        help_text=_(
            'Designates whether this user is the entity used for Permissions'
            'on behalf of a Sentry App. Cannot login or use Sentry like a'
            'normal User would.'
        )
    )
    is_password_expired = models.BooleanField(
        _('password expired'),
        default=False,
        help_text=_(
            'If set to true then the user needs to change the '
            'password on next sign in.'
        )
    )
    last_password_change = models.DateTimeField(
        _('date of last password change'),
        null=True,
        help_text=_('The date the password was changed last.')
    )

    flags = BitField(
        flags=(
            (
                'newsletter_consent_prompt',
                'Do we need to ask this user for newsletter consent?'
            ),
        ),
        default=0,
        null=True,
    )

    # per-user nonce used for session invalidation (see refresh_session_nonce)
    session_nonce = models.CharField(max_length=12, null=True)
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    last_active = models.DateTimeField(_('last active'), default=timezone.now, null=True)

    objects = UserManager(cache_fields=['pk'])

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        app_label = 'sentry'
        db_table = 'auth_user'
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def delete(self):
        # The 'sentry' user backs internal functionality and must never be removed.
        if self.username == 'sentry':
            raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
        # Clean up the user's avatar record (if any) before deleting the row.
        avatar = self.avatar.first()
        if avatar:
            avatar.delete()
        return super(User, self).delete()

    def save(self, *args, **kwargs):
        # Fall back to the email address as the username when none was given.
        if not self.username:
            self.username = self.email
        return super(User, self).save(*args, **kwargs)

    def has_perm(self, perm_name):
        # Legacy Django permission hook; sentry only distinguishes superusers here.
        warnings.warn('User.has_perm is deprecated', DeprecationWarning)
        return self.is_superuser

    def has_module_perms(self, app_label):
        # Legacy Django permission hook; sentry only distinguishes superusers here.
        warnings.warn('User.has_module_perms is deprecated', DeprecationWarning)
        return self.is_superuser

    def get_unverified_emails(self):
        return self.emails.filter(is_verified=False)

    def get_verified_emails(self):
        return self.emails.filter(is_verified=True)

    def has_unverified_emails(self):
        return self.get_unverified_emails().exists()

    def get_label(self):
        # Best available human-readable identifier, falling back to the pk.
        return self.email or self.username or self.id

    def get_display_name(self):
        return self.name or self.email or self.username

    def get_full_name(self):
        return self.name

    def get_short_name(self):
        return self.username

    def get_salutation_name(self):
        # Derive a first name: prefer the display name; otherwise take the
        # local part of an email-like username up to the first dot.
        name = self.name or self.username.split('@', 1)[0].split('.', 1)[0]
        first_name = name.split(' ', 1)[0]
        return first_name.capitalize()

    def get_avatar_type(self):
        avatar = self.avatar.first()
        if avatar:
            return avatar.get_avatar_type_display()
        return 'letter_avatar'  # default when no avatar record exists

    def send_confirm_email_singular(self, email, is_new_user=False):
        """Send a confirmation message for a single email address."""
        from sentry import options
        from sentry.utils.email import MessageBuilder

        # Ensure the email row carries a valid validation hash before linking to it.
        if not email.hash_is_valid():
            email.set_hash()
            email.save()

        context = {
            'user':
            self,
            'url':
            absolute_uri(
                reverse('sentry-account-confirm-email', args=[self.id, email.validation_hash])
            ),
            'confirm_email':
            email.email,
            'is_new_user':
            is_new_user,
        }
        msg = MessageBuilder(
            subject='%sConfirm Email' % (options.get('mail.subject-prefix'), ),
            template='sentry/emails/confirm_email.txt',
            html_template='sentry/emails/confirm_email.html',
            type='user.confirm_email',
            context=context,
        )
        msg.send_async([email.email])

    def send_confirm_emails(self, is_new_user=False):
        # Send a confirmation email for every address still unverified.
        email_list = self.get_unverified_emails()
        for email in email_list:
            self.send_confirm_email_singular(email, is_new_user)

    def merge_to(from_user, to_user):
        """Move all of ``from_user``'s relations onto ``to_user``.

        NOTE: deliberately declared without ``self`` -- ``from_user`` is the
        instance when this is invoked as a method.
        """
        # TODO: we could discover relations automatically and make this useful
        from sentry import roles
        from sentry.models import (
            Activity, AuditLogEntry, AuthIdentity, Authenticator, GroupAssignee, GroupBookmark, GroupSeen,
            GroupShare, GroupSubscription, OrganizationMember, OrganizationMemberTeam, UserAvatar,
            UserEmail, UserOption,
        )

        audit_logger.info(
            'user.merge', extra={
                'from_user_id': from_user.id,
                'to_user_id': to_user.id,
            }
        )

        for obj in OrganizationMember.objects.filter(user=from_user):
            # Re-point the membership; an IntegrityError means to_user is
            # already a member of that organization, which is fine.
            try:
                with transaction.atomic():
                    obj.update(user=to_user)
            except IntegrityError:
                pass

            # identify the highest priority membership
            # (the surviving membership keeps the more privileged role)
            to_member = OrganizationMember.objects.get(
                organization=obj.organization_id,
                user=to_user,
            )
            if roles.get(obj.role).priority > roles.get(to_member.role).priority:
                to_member.update(role=obj.role)

            # carry over team memberships; existing duplicates are ignored
            for team in obj.teams.all():
                try:
                    with transaction.atomic():
                        OrganizationMemberTeam.objects.create(
                            organizationmember=to_member,
                            team=team,
                        )
                except IntegrityError:
                    pass

        # re-point simple user-owned rows; rows that would violate a unique
        # constraint on to_user are silently skipped
        model_list = (
            Authenticator, GroupAssignee, GroupBookmark, GroupSeen, GroupShare,
            GroupSubscription, UserAvatar, UserEmail, UserOption,
        )
        for model in model_list:
            for obj in model.objects.filter(user=from_user):
                try:
                    with transaction.atomic():
                        obj.update(user=to_user)
                except IntegrityError:
                    pass

        Activity.objects.filter(
            user=from_user,
        ).update(user=to_user)
        AuditLogEntry.objects.filter(
            actor=from_user,
        ).update(actor=to_user)
        AuditLogEntry.objects.filter(
            target_user=from_user,
        ).update(target_user=to_user)

        # remove any duplicate identities that exist on the current user that
        # might conflict w/ the new users existing SSO
        AuthIdentity.objects.filter(
            user=from_user,
            auth_provider__organization__in=AuthIdentity.objects.filter(
                user=to_user,
            ).values('auth_provider__organization')
        ).delete()
        AuthIdentity.objects.filter(
            user=from_user,
        ).update(user=to_user)

    def set_password(self, raw_password):
        # Record when the password changed and clear any forced-expiry flag.
        super(User, self).set_password(raw_password)
        self.last_password_change = timezone.now()
        self.is_password_expired = False

    def refresh_session_nonce(self, request=None):
        # Rotate the per-user nonce; when a request is given, store the new
        # nonce on its session so the current session stays valid.
        from django.utils.crypto import get_random_string
        self.session_nonce = get_random_string(12)
        if request is not None:
            request.session['_nonce'] = self.session_nonce

    def get_orgs(self):
        # All visible organizations this user is a member of.
        from sentry.models import (Organization, OrganizationMember, OrganizationStatus)
        return Organization.objects.filter(
            status=OrganizationStatus.VISIBLE,
            id__in=OrganizationMember.objects.filter(
                user=self,
            ).values('organization'),
        )

    def get_orgs_require_2fa(self):
        # Visible member organizations whose require_2fa flag bit is set.
        from sentry.models import (Organization, OrganizationStatus)
        return Organization.objects.filter(
            flags=models.F('flags').bitor(Organization.flags.require_2fa),
            status=OrganizationStatus.VISIBLE,
            member_set__user=self,
        )

    def clear_lost_passwords(self):
        # Invalidate any outstanding password-recovery hashes for this user.
        LostPasswordHash.objects.filter(user=self).delete()
# HACK(dcramer): last_login needs nullable for Django 1.8
# (patched in place at import time so accounts that have never logged in
# can still be stored)
User._meta.get_field('last_login').null = True
| |
'''
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
'''
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
# TL; DR, the main training process starts on line: 250,
# you may want to start reading the code from there
##############################################################################
# parameters #################################################################
##############################################################################
# A, paths
train = '/home/laomie/projects/python/data/avazu_train.csv'  # path to training file
test = '/home/laomie/projects/python/data/avazu_test.csv'  # path to testing file
submission = '/home/laomie/submission1234.csv'  # path of to-be-outputted submission file

# B, model hyper-parameters
alpha = .1  # learning rate
beta = 1.  # smoothing parameter for adaptive learning rate
L1 = 1.  # L1 regularization, larger value means more regularized
L2 = 1.  # L2 regularization, larger value means more regularized

# C, feature/hash trick
# D = 2 ** 20  # number of weights to use (full-size run)
D = 2 ** 4  # number of weights to use -- NOTE(review): tiny value, presumably
            # left in for quick local testing; restore 2 ** 20 for a real run
interaction = False  # whether to enable poly2 feature interactions

# D, training/validation
epoch = 1  # learn training data for N passes
holdafter = 26  # data after day N (exclusive) are used as validation
holdout = None  # use every N training instance for holdout validation
##############################################################################
# class, function, generator definitions #####################################
##############################################################################
class ftrl_proximal(object):
''' Our main algorithm: Follow the regularized leader - proximal
In short,
this is an adaptive-learning-rate sparse logistic-regression with
efficient L1-L2-regularization
Reference:
http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf
'''
def __init__(self, alpha, beta, L1, L2, D, interaction):
# parameters
self.alpha = alpha
self.beta = beta
self.L1 = L1
self.L2 = L2
# feature related parameters
self.D = D
self.interaction = interaction
# model
# n: squared sum of past gradients
# z: weights
# w: lazy weights
self.n = [0.] * D
self.z = [0.] * D
self.w = {}
def _indices(self, x):
''' A helper generator that yields the indices in x
The purpose of this generator is to make the following
code a bit cleaner when doing feature interaction.
'''
# first yield index of the bias term
yield 0
# then yield the normal indices
for index in x:
yield index
# now yield interactions (if applicable)
if self.interaction:
D = self.D
L = len(x)
x = sorted(x)
for i in range(L):
for j in range(i+1, L):
# one-hot encode interactions with hash trick
yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D
def predict(self, x):
''' Get probability estimation on x
INPUT:
x: features
OUTPUT:
probability of p(y = 1 | x; w)
'''
# parameters
alpha = self.alpha
beta = self.beta
L1 = self.L1
L2 = self.L2
# model
n = self.n
z = self.z
w = {}
# wTx is the inner product of w and x
wTx = 0.
for i in self._indices(x):
sign = -1. if z[i] < 0 else 1. # get sign of z[i]
# build w on the fly using z and n, hence the name - lazy weights
# we are doing this at prediction instead of update time is because
# this allows us for not storing the complete w
if sign * z[i] <= L1:
# w[i] vanishes due to L1 regularization
w[i] = 0.
else:
# apply prediction time L1, L2 regularization to z and get w
w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)
wTx += w[i]
# cache the current w for update stage
self.w = w
# bounded sigmoid function, this is the probability estimation
return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))
def update(self, x, p, y):
    """Fold one training example (x, p, y) into the model.

    Must be called right after predict(x): it reuses the lazy weights
    self.w cached there.  Mutates self.n (squared-gradient sums) and
    self.z (accumulated weights).
    """
    alpha = self.alpha
    n, z, w = self.n, self.z, self.w

    # gradient of the logloss w.r.t. the raw score
    g = p - y
    g2 = g * g

    for i in self._indices(x):
        # per-coordinate learning-rate adjustment
        sigma = (sqrt(n[i] + g2) - sqrt(n[i])) / alpha
        z[i] += g - sigma * w[i]
        n[i] += g2
def logloss(p, y):
    """Bounded logarithmic loss of prediction p against label y.

    p is clipped away from 0 and 1 so log() can never overflow.
    """
    eps = 10e-15
    clipped = max(min(p, 1. - eps), eps)
    if y == 1.:
        return -log(clipped)
    return -log(1. - clipped)
def data(path, D):
    ''' GENERATOR: Apply hash-trick to the original csv row
        and for simplicity, we one-hot-encode everything

        INPUT:
            path: path to training or testing file
            D: the max index that we can hash to

        YIELDS:
            t: running instance counter
            date: day-of-month extracted from the YYMMDDHH 'hour' field
            ID: id of the instance, mainly useless
            x: a list of hashed and one-hot-encoded 'indices'
               we only need the index since all values are either 0 or 1
            y: y = 1. if we have a click, else we have y = 0.
    '''
    # BUG FIX: the file handle was previously opened and never closed
    # (open(path) passed straight to DictReader); the with-block closes
    # it deterministically when the generator is exhausted or closed.
    with open(path) as csvfile:
        for t, row in enumerate(DictReader(csvfile)):
            # process id
            ID = row['id']
            del row['id']

            # process clicks ('click' column is absent in test data)
            y = 0.
            if 'click' in row:
                if row['click'] == '1':
                    y = 1.
                del row['click']

            # extract date
            date = int(row['hour'][4:6])

            # turn hour really into hour, it was originally YYMMDDHH
            row['hour'] = row['hour'][6:]

            # build x: one-hot encode everything with the hash trick
            x = []
            for key in row:
                value = row[key]
                index = abs(hash(key + '_' + value)) % D
                x.append(index)

            yield t, date, ID, x, y
##############################################################################
# start training #############################################################
##############################################################################

start = datetime.now()

# initialize ourselves a learner
learner = ftrl_proximal(alpha, beta, L1, L2, D, interaction)

# start training
for e in range(epoch):
    loss = 0.
    count = 0

    for t, date, ID, x, y in data(train, D):  # data is a generator
        #    t: just a instance counter
        # date: day of month, used for the holdafter validation split
        #   ID: id provided in original data
        #    x: features
        #    y: label (click)

        # step 1, get prediction from learner
        p = learner.predict(x)

        if (holdafter and date > holdafter) or (holdout and t % holdout == 0):
            # step 2-1, calculate validation loss
            # we do not train with the validation data so that our
            # validation loss is an accurate estimation
            #
            # holdafter: train instances from day 1 to day N
            #            validate with instances from day N + 1 and after
            #
            # holdout: validate with every N instance, train with others
            loss += logloss(p, y)
            count += 1
        else:
            # step 2-2, update learner with label (click) information
            learner.update(x, p, y)

    # BUG FIX: when neither holdafter nor holdout selected any instance,
    # count stays 0 and the old `loss/count` raised ZeroDivisionError;
    # report NaN instead of crashing after a full training epoch.
    avg_logloss = loss / count if count else float('nan')
    print('Epoch %d finished, validation logloss: %f, elapsed time: %s' % (
        e, avg_logloss, str(datetime.now() - start)))


##############################################################################
# start testing, and build Kaggle's submission file ##########################
##############################################################################
with open(submission, 'w') as outfile:
    outfile.write('id,click\n')
    for t, date, ID, x, y in data(test, D):
        p = learner.predict(x)
        outfile.write('%s,%s\n' % (ID, str(p)))
| |
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import logging
import os
import shutil
import sys
import tempfile
import zmq.auth
from zmq.auth.ioloop import IOLoopAuthenticator
from zmq.auth.thread import ThreadAuthenticator
from zmq.eventloop import ioloop, zmqstream
from zmq.tests import (BaseZMQTestCase, SkipTest)
class BaseAuthTestCase(BaseZMQTestCase):
    """Shared fixture for ZAP authentication tests.

    Skips itself when libzmq lacks security/CURVE support, starts an
    authenticator (built by the subclass's make_auth) and generates
    throwaway CURVE certificates for each test run.
    """

    def setUp(self):
        # security (ZAP) only exists in libzmq >= 4.0
        if zmq.zmq_version_info() < (4,0):
            raise SkipTest("security is new in libzmq 4.0")
        # curve_keypair raises when libzmq was built without curve support
        try:
            zmq.curve_keypair()
        except zmq.ZMQError:
            raise SkipTest("security requires libzmq to have curve support")
        super(BaseAuthTestCase, self).setUp()
        # enable debug logging while we run tests
        logging.getLogger('zmq.auth').setLevel(logging.DEBUG)
        self.auth = self.make_auth()
        self.auth.start()
        self.base_dir, self.public_keys_dir, self.secret_keys_dir = self.create_certs()

    def make_auth(self):
        """Return an Authenticator instance; implemented by subclasses."""
        raise NotImplementedError()

    def tearDown(self):
        # tests may have already stopped and cleared the authenticator
        if self.auth:
            self.auth.stop()
        self.auth = None
        self.remove_certs(self.base_dir)
        super(BaseAuthTestCase, self).tearDown()

    def create_certs(self):
        """Create CURVE certificates for a test"""
        # Create temporary CURVE keypairs for this test run. We create all keys in a
        # temp directory and then move them into the appropriate private or public
        # directory.
        base_dir = tempfile.mkdtemp()
        keys_dir = os.path.join(base_dir, 'certificates')
        public_keys_dir = os.path.join(base_dir, 'public_keys')
        secret_keys_dir = os.path.join(base_dir, 'private_keys')

        os.mkdir(keys_dir)
        os.mkdir(public_keys_dir)
        os.mkdir(secret_keys_dir)

        server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server")
        client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client")

        # public halves (*.key) go to the public directory ...
        for key_file in os.listdir(keys_dir):
            if key_file.endswith(".key"):
                shutil.move(os.path.join(keys_dir, key_file),
                            os.path.join(public_keys_dir, '.'))
        # ... and secret halves (*.key_secret) to the private directory
        for key_file in os.listdir(keys_dir):
            if key_file.endswith(".key_secret"):
                shutil.move(os.path.join(keys_dir, key_file),
                            os.path.join(secret_keys_dir, '.'))

        return (base_dir, public_keys_dir, secret_keys_dir)

    def remove_certs(self, base_dir):
        """Remove certificates for a test"""
        shutil.rmtree(base_dir)

    def load_certs(self, secret_keys_dir):
        """Return server and client certificate keys"""
        # load_certificate returns (public, secret) from a *.key_secret file
        server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
        client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
        server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
        client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
        return server_public, server_secret, client_public, client_secret
class TestThreadAuthentication(BaseAuthTestCase):
    """Test authentication running in a thread"""

    def make_auth(self):
        # ZAP requests are handled in a background thread
        return ThreadAuthenticator(self.context)

    def can_connect(self, server, client):
        """Check if client can connect to server using tcp transport"""
        result = False
        iface = 'tcp://127.0.0.1'
        port = server.bind_to_random_port(iface)
        client.connect("%s:%i" % (iface, port))
        msg = [b"Hello World"]
        # 1s polls keep a rejected connection from hanging the suite;
        # a denied client simply never receives the message
        if server.poll(1000, zmq.POLLOUT):
            server.send_multipart(msg)
            if client.poll(1000):
                rcvd_msg = client.recv_multipart()
                self.assertEqual(rcvd_msg, msg)
                result = True
        return result

    def test_null(self):
        """threaded auth - NULL"""
        # A default NULL connection should always succeed, and not
        # go through our authentication infrastructure at all.
        self.auth.stop()
        self.auth = None
        # use a new context, so ZAP isn't inherited
        self.context = self.Context()

        server = self.socket(zmq.PUSH)
        client = self.socket(zmq.PULL)
        self.assertTrue(self.can_connect(server, client))

        # By setting a domain we switch on authentication for NULL sockets,
        # though no policies are configured yet. The client connection
        # should still be allowed.
        server = self.socket(zmq.PUSH)
        server.zap_domain = b'global'
        client = self.socket(zmq.PULL)
        self.assertTrue(self.can_connect(server, client))

    def test_blacklist(self):
        """threaded auth - Blacklist"""
        # Blacklist 127.0.0.1, connection should fail
        self.auth.deny('127.0.0.1')
        server = self.socket(zmq.PUSH)
        # By setting a domain we switch on authentication for NULL sockets,
        # though no policies are configured yet.
        server.zap_domain = b'global'
        client = self.socket(zmq.PULL)
        self.assertFalse(self.can_connect(server, client))

    def test_whitelist(self):
        """threaded auth - Whitelist"""
        # Whitelist 127.0.0.1, connection should pass
        self.auth.allow('127.0.0.1')
        server = self.socket(zmq.PUSH)
        # By setting a domain we switch on authentication for NULL sockets,
        # though no policies are configured yet.
        server.zap_domain = b'global'
        client = self.socket(zmq.PULL)
        self.assertTrue(self.can_connect(server, client))

    def test_plain(self):
        """threaded auth - PLAIN"""

        # Try PLAIN authentication - without configuring server, connection should fail
        server = self.socket(zmq.PUSH)
        server.plain_server = True
        client = self.socket(zmq.PULL)
        client.plain_username = b'admin'
        client.plain_password = b'Password'
        self.assertFalse(self.can_connect(server, client))

        # Try PLAIN authentication - with server configured, connection should pass
        server = self.socket(zmq.PUSH)
        server.plain_server = True
        client = self.socket(zmq.PULL)
        client.plain_username = b'admin'
        client.plain_password = b'Password'
        self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})
        self.assertTrue(self.can_connect(server, client))

        # Try PLAIN authentication - with bogus credentials, connection should fail
        server = self.socket(zmq.PUSH)
        server.plain_server = True
        client = self.socket(zmq.PULL)
        client.plain_username = b'admin'
        client.plain_password = b'Bogus'
        self.assertFalse(self.can_connect(server, client))

        # Remove authenticator and check that a normal connection works
        self.auth.stop()
        self.auth = None

        server = self.socket(zmq.PUSH)
        client = self.socket(zmq.PULL)
        self.assertTrue(self.can_connect(server, client))
        client.close()
        server.close()

    def test_curve(self):
        """threaded auth - CURVE"""
        self.auth.allow('127.0.0.1')
        certs = self.load_certs(self.secret_keys_dir)
        server_public, server_secret, client_public, client_secret = certs

        # Try CURVE authentication - without configuring server, connection should fail
        server = self.socket(zmq.PUSH)
        server.curve_publickey = server_public
        server.curve_secretkey = server_secret
        server.curve_server = True
        client = self.socket(zmq.PULL)
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret
        client.curve_serverkey = server_public
        self.assertFalse(self.can_connect(server, client))

        # Try CURVE authentication - with server configured to CURVE_ALLOW_ANY, connection should pass
        self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
        server = self.socket(zmq.PUSH)
        server.curve_publickey = server_public
        server.curve_secretkey = server_secret
        server.curve_server = True
        client = self.socket(zmq.PULL)
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret
        client.curve_serverkey = server_public
        self.assertTrue(self.can_connect(server, client))

        # Try CURVE authentication - with server configured, connection should pass
        self.auth.configure_curve(domain='*', location=self.public_keys_dir)
        server = self.socket(zmq.PUSH)
        server.curve_publickey = server_public
        server.curve_secretkey = server_secret
        server.curve_server = True
        client = self.socket(zmq.PULL)
        client.curve_publickey = client_public
        client.curve_secretkey = client_secret
        client.curve_serverkey = server_public
        self.assertTrue(self.can_connect(server, client))

        # Remove authenticator and check that a normal connection works
        self.auth.stop()
        self.auth = None

        # Try connecting using NULL and no authentication enabled, connection should pass
        server = self.socket(zmq.PUSH)
        client = self.socket(zmq.PULL)
        self.assertTrue(self.can_connect(server, client))
def with_ioloop(method, expect_success=True):
    """decorator for running tests with an IOLoop

    The wrapped test method only *configures* sockets/auth; this wrapper
    then schedules the connection attempt and message send on the loop
    and decides success or failure via the fixture's callbacks.
    """
    def test_method(self):
        r = method(self)

        loop = self.io_loop
        # expected-success tests fail on timeout; expected-failure tests
        # fail when a message actually arrives
        if expect_success:
            on_recv, on_timeout = self.on_message_succeed, self.on_test_timeout_fail
        else:
            on_recv, on_timeout = self.on_message_fail, self.on_test_timeout_succeed

        self.pullstream.on_recv(on_recv)
        loop.call_later(1, self.attempt_connection)
        loop.call_later(1.2, self.send_msg)
        loop.call_later(2, on_timeout)

        loop.start()

        if self.fail_msg:
            self.fail(self.fail_msg)
        return r
    return test_method
def should_auth(method):
    """Mark a test whose connection is expected to authenticate."""
    return with_ioloop(method, expect_success=True)


def should_not_auth(method):
    """Mark a test whose connection must be rejected."""
    return with_ioloop(method, expect_success=False)
class TestIOLoopAuthentication(BaseAuthTestCase):
    """Test authentication running in ioloop"""

    def setUp(self):
        # fail_msg is filled in by the on_* callbacks below and checked
        # by the with_ioloop decorator after the loop stops
        self.fail_msg = None
        self.io_loop = ioloop.IOLoop()
        super(TestIOLoopAuthentication, self).setUp()
        # one PUSH/PULL pair per test; streams make recv callback-driven
        self.server = self.socket(zmq.PUSH)
        self.client = self.socket(zmq.PULL)
        self.pushstream = zmqstream.ZMQStream(self.server, self.io_loop)
        self.pullstream = zmqstream.ZMQStream(self.client, self.io_loop)

    def make_auth(self):
        # ZAP requests are handled as callbacks on this test's ioloop
        return IOLoopAuthenticator(self.context, io_loop=self.io_loop)

    def tearDown(self):
        if self.auth:
            self.auth.stop()
        self.auth = None
        self.io_loop.close(all_fds=True)
        super(TestIOLoopAuthentication, self).tearDown()

    def attempt_connection(self):
        """Check if client can connect to server using tcp transport"""
        iface = 'tcp://127.0.0.1'
        port = self.server.bind_to_random_port(iface)
        self.client.connect("%s:%i" % (iface, port))

    def send_msg(self):
        """Send a message from server to a client"""
        msg = [b"Hello World"]
        self.pushstream.send_multipart(msg)

    def on_message_succeed(self, frames):
        """A message was received, as expected."""
        if frames != [b"Hello World"]:
            self.fail_msg = "Unexpected message received"
        self.io_loop.stop()

    def on_message_fail(self, frames):
        """A message was received, unexpectedly."""
        self.fail_msg = 'Received messaged unexpectedly, security failed'
        self.io_loop.stop()

    def on_test_timeout_succeed(self):
        """Test timer expired, indicates test success"""
        self.io_loop.stop()

    def on_test_timeout_fail(self):
        """Test timer expired, indicates test failure"""
        self.fail_msg = 'Test timed out'
        self.io_loop.stop()

    @should_auth
    def test_none(self):
        """ioloop auth - NONE"""
        # A default NULL connection should always succeed, and not
        # go through our authentication infrastructure at all.
        # no auth should be running
        self.auth.stop()
        self.auth = None

    @should_auth
    def test_null(self):
        """ioloop auth - NULL"""
        # By setting a domain we switch on authentication for NULL sockets,
        # though no policies are configured yet. The client connection
        # should still be allowed.
        self.server.zap_domain = b'global'

    @should_not_auth
    def test_blacklist(self):
        """ioloop auth - Blacklist"""
        # Blacklist 127.0.0.1, connection should fail
        self.auth.deny('127.0.0.1')
        self.server.zap_domain = b'global'

    @should_auth
    def test_whitelist(self):
        """ioloop auth - Whitelist"""
        # Whitelist 127.0.0.1, which overrides the blacklist, connection should pass
        self.auth.allow('127.0.0.1')
        # setsockopt form is equivalent to the zap_domain attribute
        self.server.setsockopt(zmq.ZAP_DOMAIN, b'global')

    @should_not_auth
    def test_plain_unconfigured_server(self):
        """ioloop auth - PLAIN, unconfigured server"""
        self.client.plain_username = b'admin'
        self.client.plain_password = b'Password'
        # Try PLAIN authentication - without configuring server, connection should fail
        self.server.plain_server = True

    @should_auth
    def test_plain_configured_server(self):
        """ioloop auth - PLAIN, configured server"""
        self.client.plain_username = b'admin'
        self.client.plain_password = b'Password'
        # Try PLAIN authentication - with server configured, connection should pass
        self.server.plain_server = True
        self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})

    @should_not_auth
    def test_plain_bogus_credentials(self):
        """ioloop auth - PLAIN, bogus credentials"""
        self.client.plain_username = b'admin'
        self.client.plain_password = b'Bogus'
        self.server.plain_server = True
        self.auth.configure_plain(domain='*', passwords={'admin': 'Password'})

    @should_not_auth
    def test_curve_unconfigured_server(self):
        """ioloop auth - CURVE, unconfigured server"""
        certs = self.load_certs(self.secret_keys_dir)
        server_public, server_secret, client_public, client_secret = certs
        self.auth.allow('127.0.0.1')
        self.server.curve_publickey = server_public
        self.server.curve_secretkey = server_secret
        self.server.curve_server = True
        self.client.curve_publickey = client_public
        self.client.curve_secretkey = client_secret
        self.client.curve_serverkey = server_public

    @should_auth
    def test_curve_allow_any(self):
        """ioloop auth - CURVE, CURVE_ALLOW_ANY"""
        certs = self.load_certs(self.secret_keys_dir)
        server_public, server_secret, client_public, client_secret = certs
        self.auth.allow('127.0.0.1')
        self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY)
        self.server.curve_publickey = server_public
        self.server.curve_secretkey = server_secret
        self.server.curve_server = True
        self.client.curve_publickey = client_public
        self.client.curve_secretkey = client_secret
        self.client.curve_serverkey = server_public

    @should_auth
    def test_curve_configured_server(self):
        """ioloop auth - CURVE, configured server"""
        self.auth.allow('127.0.0.1')
        certs = self.load_certs(self.secret_keys_dir)
        server_public, server_secret, client_public, client_secret = certs
        self.auth.configure_curve(domain='*', location=self.public_keys_dir)
        self.server.curve_publickey = server_public
        self.server.curve_secretkey = server_secret
        self.server.curve_server = True
        self.client.curve_publickey = client_public
        self.client.curve_secretkey = client_secret
        self.client.curve_serverkey = server_public
| |
"""
The test cases in bluebottle_salesforce are intended to be used for integration
with Django ORM and Salesforce for Onepercentclub.
"""
import logging
from bluebottle.bluebottle_utils.tests import UserTestsMixin
import requests
from datetime import datetime
from django.test import TestCase
from django.conf import settings
from django.core.management import call_command
from salesforce import auth
from requests.exceptions import ConnectionError
from django.utils import unittest
from .models import SalesforceOrganization, SalesforceContact, SalesforceDonation, SalesforceProject
logger = logging.getLogger(__name__)
# Define variables
test_email = 'TestEmail@1procentclub.nl'

# Test some settings and skip tests if these settings are not available.
try:
    # Test if a Salesforce database is configured.
    salesforce_db_conf = getattr(settings, 'DATABASES').get('salesforce')
    # when 'salesforce' is missing, salesforce_db_conf is None and the
    # first .get() below raises AttributeError, caught at the bottom
    salesforce_db_conf.get('ENGINE')
    salesforce_db_conf.get('CONSUMER_KEY')
    salesforce_db_conf.get('CONSUMER_SECRET')
    salesforce_db_conf.get('USER')
    salesforce_db_conf.get('PASSWORD')
    # Test if the salesforce server is reachable to see if we're online.
    requests.get(salesforce_db_conf.get('HOST'))
    run_salesforce_tests = True
except (ConnectionError, AttributeError):
    # offline or not configured: the skipUnless decorators below disable
    # every Salesforce test class
    run_salesforce_tests = False
@unittest.skipUnless(run_salesforce_tests, 'Salesforce settings not set or not online')
class OAuthTest(TestCase):
    """
    Test cases verify authentication is working using Django-Salesforce auth with oauth 2.0
    """

    def validate_oauth(self, d):
        """Fail the test unless d looks like a complete oauth 2.0 payload."""
        required_keys = ('access_token', 'id', 'instance_url', 'issued_at', 'signature')
        for key in required_keys:
            if key not in d:
                self.fail("Missing %s key in returned oauth data." % key)
            elif not d[key]:
                self.fail("Empty value for %s key in returned oauth data." % key)

    def test_token_renewal(self):
        """Authenticate with Salesforce in real life using oauth 2.0."""
        db_conf = settings.DATABASES[settings.SALESFORCE_DB_ALIAS]
        auth.authenticate(db_conf)
        self.validate_oauth(auth.oauth_data)
        old_data = auth.oauth_data

        # expiring the token must clear the cached oauth data ...
        auth.expire_token()
        self.assertEqual(auth.oauth_data, None)

        # ... and re-authenticating must yield the same access token
        auth.authenticate(db_conf)
        self.validate_oauth(auth.oauth_data)
        self.assertEqual(old_data['access_token'], auth.oauth_data['access_token'])
@unittest.skipUnless(run_salesforce_tests, 'Salesforce settings not set or not online')
class SalesforceOrganizationTest(TestCase):
    """
    Test cases for Salesforce account object.
    """

    def setUp(self):
        """Create our test account record."""
        self.test_organization = SalesforceOrganization(
            name="UserAccount",
            description="Unittest Account",
            email_address=test_email,
            organization_type="Business")
        self.test_organization.save()

    def test_organization_retrieve(self):
        """Get the test account record."""
        organization = SalesforceOrganization.objects.get(email_address=test_email)
        self.assertEqual(organization.name, 'UserAccount')
        self.assertEqual(organization.description, 'Unittest Account')
        self.assertEqual(organization.organization_type, 'Business')

    def tearDown(self):
        """Clean up our test account record."""
        self.test_organization.delete()
@unittest.skipUnless(run_salesforce_tests, 'Salesforce settings not set or not online')
class SalesforceContactTest(TestCase):
    """
    Test cases for Salesforce Contact object.
    """

    def setUp(self):
        """Create our test Contact record."""
        self.test_contact = SalesforceContact(
            first_name="User",
            last_name="Unittest Contact",
            email=test_email)
        # In the future the below will be used
        #Account = "ORG_INDIVIDUAL"
        self.test_contact.save()

    def test_contact_retrieve(self):
        """Get the test Contact record."""
        contact = SalesforceContact.objects.get(email=test_email)
        self.assertEqual(contact.first_name, 'User')
        self.assertEqual(contact.last_name, 'Unittest Contact')

    def tearDown(self):
        """Clean up our test contact record."""
        self.test_contact.delete()
@unittest.skipUnless(run_salesforce_tests, 'Salesforce settings not set or not online')
class SalesforceDonationTest(TestCase):
    """
    Test cases for Salesforce Opportunity object.
    """

    def setUp(self):
        """Create our test Opportunity record."""
        # datetime(2008, 5, 5) is the same instant as
        # strptime("2008-05-05", "%Y-%m-%d")
        self.test_donation = SalesforceDonation(
            name="Donation name",
            close_date=datetime(2008, 5, 5),
            stage_name="New")
        self.test_donation.save()

    def test_donation_retrieve(self):
        """Get the test Opportunity record."""
        donation = SalesforceDonation.objects.get(name="Donation name")
        self.assertEqual(donation.name, 'Donation name')
        self.assertEqual(donation.stage_name, 'New')

    def tearDown(self):
        """Clean up our test Opportunity record."""
        self.test_donation.delete()
@unittest.skipUnless(run_salesforce_tests, 'Salesforce settings not set or not online')
class SalesforceProjectTest(TestCase):
    """
    Test cases for Salesforce project object.
    """

    def setUp(self):
        """Create our test project record."""
        self.test_project = SalesforceProject(
            project_name="ProjectTest",
            project_url="http://tweakers.net",
            external_id="2468")
        self.test_project.save()

    def test_project_retrieve(self):
        """Get the test project record."""
        project = SalesforceProject.objects.get(external_id=2468)
        self.assertEqual(project.project_name, 'ProjectTest')
        self.assertEqual(project.project_url, 'http://tweakers.net')

    def tearDown(self):
        """Clean up our test project record."""
        self.test_project.delete()
@unittest.skipUnless(run_salesforce_tests, 'Salesforce settings not set or not online')
class SyncToSalesforceIntegrationTest(UserTestsMixin, TestCase):
    # NOTE(review): the method below deliberately does not start with
    # 'test_', so the default unittest runner skips it -- see the FIXME;
    # rename it to 'test_...' once the required fixtures exist.

    def smoke_test_sync_script(self):
        #FIXME: We need more objects created here for the test to work.
        # Need to have data for each model that we want to run the smoke test on.
        self.create_user()

        # Run the sync test.
        call_command('sync_to_salesforce', test_run=True)
| |
from __future__ import absolute_import
from six.moves import range
__author__ = 'noe'
from pyemma._ext.sklearn.base import BaseEstimator as _BaseEstimator
from pyemma._ext.sklearn.parameter_search import ParameterGrid
from pyemma.util.log import getLogger
from pyemma.util import types as _types
import inspect
import joblib
# imports for external usage
from pyemma._ext.sklearn.base import clone as clone_estimator
def get_estimator(estimator):
    """ Returns an estimator object given an estimator object or class

    Parameters
    ----------
    estimator : Estimator class or object

    Returns
    -------
    estimator : Estimator object
    """
    # a class is instantiated with default settings; instances pass through
    return estimator() if inspect.isclass(estimator) else estimator
def param_grid(pargrid):
    """ Generates an iterable over all possible parameter combinations from the grid

    Parameters
    ----------
    pargrid : dictionary with lists where multiple values are wanted

    Examples
    --------
    Generates parameter sets with different lag times:

    >>> grid = param_grid({'lag':[1,2,5,10,20,50]})
    >>> for p in grid: print(p)
    {'lag': 1}
    {'lag': 2}
    {'lag': 5}
    {'lag': 10}
    {'lag': 20}
    {'lag': 50}

    Generates parameter sets with all combinations of several parameter values:

    >>> grid = param_grid({'lag':[1,10,100], 'reversible':[False,True]})
    >>> for p in grid: print(p)
    {'reversible': False, 'lag': 1}
    {'reversible': True, 'lag': 1}
    {'reversible': False, 'lag': 10}
    {'reversible': True, 'lag': 10}
    {'reversible': False, 'lag': 100}
    {'reversible': True, 'lag': 100}
    """
    # ParameterGrid lazily expands the cartesian product of all list
    # values; it also supports len() and indexing, which
    # estimate_param_scan relies on
    return ParameterGrid(pargrid)
def _call_member(obj, name, args=None, failfast=True):
""" Calls the specified method, property or attribute of the given object
Parameters
----------
obj : object
The object that will be used
name : str
Name of method, property or attribute
args : dict, optional, default=None
Arguments to be passed to the method (if any)
failfast : bool
If True, will raise an exception when trying a method that doesn't exist. If False, will simply return None
in that case
"""
try:
method = getattr(obj, name)
except AttributeError as e:
if failfast:
raise e
else:
return None
if str(type(method)) == '<type \'instancemethod\'>': # call function
if args is None:
return method()
else:
return method(*args)
elif str(type(method)) == '<type \'property\'>': # call property
return method
else: # now it's an Attribute, so we can just return its value
return method
def _estimate_param_scan_worker(estimator, params, X, evaluate, evaluate_args,
failfast, progress_reporter=None):
# run estimation
model = estimator.estimate(X, **params)
# deal with results
res = []
# deal with result
if evaluate is None: # we want full models
res.append(model)
elif _types.is_iterable(evaluate): # we want to evaluate function(s) of the model
values = [] # the function values the model
for ieval in range(len(evaluate)):
# get method/attribute name and arguments to be evaluated
name = evaluate[ieval]
args = None
if evaluate_args is not None:
args = evaluate_args[ieval]
# evaluate
try:
value = _call_member(model, name, args=args) # try calling method/property/attribute
except AttributeError as e: # couldn't find method/property/attribute
if failfast:
raise e # raise an AttributeError
else:
value = None # we just ignore it and return None
values.append(value)
# if we only have one value, unpack it
if len(values) == 1:
values = values[0]
else:
raise ValueError('Invalid setting for evaluate: '+str(evaluate))
if len(res) == 1:
res = res[0]
return res
def estimate_param_scan(estimator, X, param_sets, evaluate=None, evaluate_args=None, failfast=True,
                        return_estimators=False, n_jobs=1, progress_reporter=None):
    # TODO: parallelize. For options see http://scikit-learn.org/stable/modules/grid_search.html
    # TODO: allow to specify method parameters in evaluate
    """ Runs multiple estimations using a list of parameter settings

    Parameters
    ----------
    estimator : Estimator object or class
        An estimator object that provides an estimate(X, **params) function.
        If only a class is provided here, the Estimator objects will be
        constructed with default parameter settings, and the parameter settings
        from param_sets for each estimation. If you want to specify other
        parameter settings for those parameters not specified in param_sets,
        construct an Estimator before and pass the object.
    param_sets : iterable over dictionaries
        An iterable that provides parameter settings. Each element defines a
        parameter set, for which an estimation will be run using these
        parameters in estimate(X, **params). All other parameter settings will
        be taken from the default settings in the estimator object.
    evaluate : str or list of str
        The given methods or properties will be called on the estimated
        models, and their results will be returned instead of the full models.
        This may be useful for reducing memory overhead.
    failfast : bool
        If True, will raise an exception when trying a method that doesn't
        exist. If False, will simply return None.

    Return
    ------
    models : list of model objects or evaluated function values
        A list of estimated models in the same order as param_sets. If evaluate
        is given, each element will contain the results from these method
        evaluations.
    estimators (optional) : list of estimator ojbects. These are returned only
        if return_estimators=True

    Examples
    --------

    Estimate a maximum likelihood Markov model at lag times 1, 2, 3.

    >>> from pyemma.msm.estimators import MaximumLikelihoodMSM
    >>>
    >>> dtraj = [0,0,1,2,1,0,1,0,1,2,2,0,0,0,1,1,2,1,0,0,1,2,1,0,0,0,1,1,0,1,2] # mini-trajectory
    >>> param_sets=param_grid({'lag': [1,2,3]})
    >>>
    >>> estimate_param_scan(MaximumLikelihoodMSM, dtraj, param_sets, evaluate='timescales')
    [array([ 1.24113167,  0.77454377]), array([ 2.65266703,  1.42909841]), array([ 5.34810395,  1.14784446])]

    Try also getting samples of the timescales

    >>> estimate_param_scan(MaximumLikelihoodMSM, dtraj, param_sets, evaluate=['timescales', 'timescales_samples'])
    [[array([ 1.24113167,  0.77454377]), None], [array([ 2.65266703,  1.42909841]), None], [array([ 5.34810395,  1.14784446]), None],

    We get Nones because the MaximumLikelihoodMSM estimator doesn't provide timescales_samples. Use for example
    a Bayesian estimator for that.

    """
    # make sure we have an estimator object
    estimator = get_estimator(estimator)
    # if we want to return estimators, make clones. Otherwise just copy references.
    # For parallel processing we always need clones
    # NOTE(review): param_sets is used with len() and indexing below, so it
    # must be a sequence (e.g. ParameterGrid), not a one-shot generator.
    if return_estimators or n_jobs > 1 or n_jobs is None:
        estimators = [clone_estimator(estimator) for _ in param_sets]
    else:
        estimators = [estimator for _ in param_sets]

    # if we evaluate, make sure we have a list of functions to evaluate
    if _types.is_string(evaluate):
        evaluate = [evaluate]

    # iterate over parameter settings; joblib dispatches one worker per set
    pool = joblib.Parallel(n_jobs=n_jobs)
    task_iter = (joblib.delayed(_estimate_param_scan_worker)(estimators[i],
                                                             param_sets[i], X,
                                                             evaluate,
                                                             evaluate_args,
                                                             failfast,
                                                             progress_reporter)
                 for i in range(len(param_sets)))

    # container for model or function evaluations
    res = pool(task_iter)

    # done
    if return_estimators:
        return res, estimators
    else:
        return res
class Estimator(_BaseEstimator):
    """ Base class for pyEMMA estimators

    Subclasses implement _estimate(X); estimate()/fit() drive it and
    cache the resulting model on self._model.
    """

    def __create_logger(self):
        # include the memory address so concurrent instances of the same
        # class get distinct logger names
        name = "%s[%s]" % (self.__class__.__name__, hex(id(self)))
        self._logger = getLogger(name)

    @property
    def logger(self):
        """ The logger for this Estimator """
        try:
            return self._logger
        except AttributeError:
            # created lazily on first access
            self.__create_logger()
            return self._logger

    def estimate(self, X, **params):
        """ Estimates the model given the data X

        Parameters
        ----------
        X : object
            A reference to the data from which the model will be estimated
        **params : New estimation parameter values. The parameters must that have been announced in the
            __init__ method of this estimator. The present settings will overwrite the settings of parameters
            given in the __init__ method, i.e. the parameter values after this call will be those that have been
            used for this estimation. Use this option if only one or a few parameters change with respect to
            the __init__ settings for this run, and if you don't need to remember the original settings of these
            changed parameters.

        Returns
        -------
        model : object
            The estimated model.
        """
        # overwrite announced parameters before estimating
        if params:
            self.set_params(**params)
        self._model = self._estimate(X)
        return self._model

    def _estimate(self, X):
        raise NotImplementedError('You need to overload the _estimate() method in your Estimator implementation!')

    def fit(self, X):
        """ For compatibility with sklearn.

        BUG FIX: sklearn's fit() contract requires returning self so that
        estimators can be chained (e.g. Estimator().fit(X).model); this
        previously returned None.

        :param X: the data to estimate from
        :return: self
        """
        self.estimate(X)
        return self

    @property
    def model(self):
        """The estimated model; raises AttributeError before estimation."""
        try:
            return self._model
        except AttributeError:
            raise AttributeError('Model has not yet been estimated. Call estimate(X) or fit(X) first')
| |
from basic import S, C
from expr import Expr
from sympify import _sympify, sympify
from cache import cacheit
# from add import Add /cyclic/
# from mul import Mul /cyclic/
# from function import Lambda, WildFunction /cyclic/
class AssocOp(Expr):
""" Associative operations, can separate noncommutative and
commutative parts.
(a op b) op c == a op (b op c) == a op b op c.
Base class for Add and Mul.
"""
# for performance reason, we don't let is_commutative go to assumptions,
# and keep it right here
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, *args, **assumptions):
    """Construct an Add/Mul, flattening and canonicalizing args unless
    evaluate=False is passed in assumptions."""
    # evaluate=False: skip flatten() and build the raw expression
    if assumptions.get('evaluate') is False:
        # NOTE(review): map() returns a list on Python 2 (this module's
        # vintage, cf. the relative imports); on Python 3 it would be a
        # one-shot iterator consumed by Expr.__new__ before the all(...)
        # below -- confirm the target interpreter.
        args = map(_sympify, args)
        obj = Expr.__new__(cls, *args, **assumptions)
        obj.is_commutative = all(arg.is_commutative for arg in args)
        return obj
    # zero args -> identity element (S.One for Mul, S.Zero for Add)
    if len(args)==0:
        return cls.identity()
    if len(args)==1:
        return _sympify(args[0])
    # split into commutative and noncommutative parts
    c_part, nc_part, order_symbols = cls.flatten(map(_sympify, args))
    if len(c_part) + len(nc_part) <= 1:
        # at most one surviving term: unwrap it instead of building an op
        if c_part: obj = c_part[0]
        elif nc_part: obj = nc_part[0]
        else: obj = cls.identity()
    else:
        obj = Expr.__new__(cls, *(c_part + nc_part), **assumptions)
        # the whole expression is commutative iff there is no nc part
        obj.is_commutative = not nc_part
    # wrap in an Order term when flatten() collected order symbols
    if order_symbols is not None:
        obj = C.Order(obj, *order_symbols)
    return obj
def _new_rawargs(self, *args):
"""create new instance of own class with args exactly as provided by caller
This is handy when we want to optimize things, e.g.
>>> from sympy import Mul, symbols
>>> from sympy.abc import x, y
>>> e = Mul(3,x,y)
>>> e.args
(3, x, y)
>>> Mul(*e.args[1:])
x*y
>>> e._new_rawargs(*e.args[1:]) # the same as above, but faster
x*y
"""
obj = Expr.__new__(type(self), *args) # NB no assumptions for Add/Mul
obj.is_commutative = self.is_commutative
return obj
@classmethod
def identity(cls):
from mul import Mul
from add import Add
if cls is Mul: return S.One
if cls is Add: return S.Zero
if cls is C.Composition:
from symbol import Symbol
s = Symbol('x',dummy=True)
return Lambda(s,s)
raise NotImplementedError("identity not defined for class %r" % (cls.__name__))
@classmethod
def flatten(cls, seq):
# apply associativity, no commutativity property is used
new_seq = []
while seq:
o = seq.pop(0)
if o.__class__ is cls: # classes must match exactly
seq = list(o[:]) + seq
continue
new_seq.append(o)
# c_part, nc_part, order_symbols
return [], new_seq, None
_eval_subs = Expr._seq_subs
def _matches_commutative(self, expr, repl_dict={}, evaluate=False):
"""
Matches Add/Mul "pattern" to an expression "expr".
repl_dict ... a dictionary of (wild: expression) pairs, that get
returned with the results
evaluate .... if True, then repl_dict is first substituted into the
pattern, and then _matches_commutative is run
This function is the main workhorse for Add/Mul.
For instance:
>> from sympy import symbols, Wild, sin
>> a = Wild("a")
>> b = Wild("b")
>> c = Wild("c")
>> x, y, z = symbols("x y z")
>> (a+b*c)._matches_commutative(x+y*z)
{a_: x, b_: y, c_: z}
In the example above, "a+b*c" is the pattern, and "x+y*z" is the
expression. Some more examples:
>> (a+b*c)._matches_commutative(sin(x)+y*z)
{a_: sin(x), b_: y, c_: z}
>> (a+sin(b)*c)._matches_commutative(x+sin(y)*z)
{a_: x, b_: y, c_: z}
The repl_dict contains parts, that were already matched, and the
"evaluate=True" kwarg tells _matches_commutative to substitute this
repl_dict into pattern. For example here:
>> (a+b*c)._matches_commutative(x+y*z, repl_dict={a: x}, evaluate=True)
{a_: x, b_: y, c_: z}
_matches_commutative substitutes "x" for "a" in the pattern and calls
itself again with the new pattern "x+b*c" and evaluate=False (default):
>> (x+b*c)._matches_commutative(x+y*z, repl_dict={a: x})
{a_: x, b_: y, c_: z}
the only function of the repl_dict now is just to return it in the
result, e.g. if you omit it:
>> (x+b*c)._matches_commutative(x+y*z)
{b_: y, c_: z}
the "a: x" is not returned in the result, but otherwise it is
equivalent.
"""
# apply repl_dict to pattern to eliminate fixed wild parts
if evaluate:
return self.subs(repl_dict.items()).matches(expr, repl_dict)
# handle simple patterns
d = self._matches_simple(expr, repl_dict)
if d is not None:
return d
# eliminate exact part from pattern: (2+a+w1+w2).matches(expr) -> (w1+w2).matches(expr-a-2)
wild_part = []
exact_part = []
from function import WildFunction
from symbol import Wild
for p in self.args:
if p.atoms(Wild, WildFunction):
# not all Wild should stay Wilds, for example:
# (w2+w3).matches(w1) -> (w1+w3).matches(w1) -> w3.matches(0)
if (not p in repl_dict) and (not p in expr):
wild_part.append(p)
continue
exact_part.append(p)
if exact_part:
newpattern = self.__class__(*wild_part)
newexpr = self.__class__._combine_inverse(expr, self.__class__(*exact_part))
return newpattern.matches(newexpr, repl_dict)
# now to real work ;)
if isinstance(expr, self.__class__):
expr_list = list(expr.args)
else:
expr_list = [expr]
while expr_list:
last_op = expr_list.pop()
tmp = wild_part[:]
while tmp:
w = tmp.pop()
d1 = w.matches(last_op, repl_dict)
if d1 is not None:
d2 = self.subs(d1.items()).matches(expr, d1)
if d2 is not None:
return d2
return
def _eval_template_is_attr(self, is_attr):
# return True if all elements have the property
r = True
for t in self.args:
a = getattr(t, is_attr)
if a is None: return
if r and not a: r = False
return r
_eval_evalf = Expr._seq_eval_evalf
class ShortCircuit(Exception):
    """Internal control-flow exception raised when an absorbing element is met."""
class LatticeOp(AssocOp):
    """
    Join/meet operations of an algebraic lattice[1].

    These binary operations are associative (op(op(a, b), c) = op(a, op(b, c))),
    commutative (op(a, b) = op(b, a)) and idempotent (op(a, a) = op(a) = a).
    Common examples are AND, OR, Union, Intersection, max or min. They have an
    identity element (op(identity, a) = a) and an absorbing element
    conventionally called zero (op(zero, a) = zero).

    This is an abstract base class, concrete derived classes must declare
    attributes zero and identity. All defining properties are then respected.

    >>> from sympy import Integer
    >>> from sympy.core.operations import LatticeOp
    >>> class my_join(LatticeOp):
    ...     zero = Integer(0)
    ...     identity = Integer(1)
    >>> my_join(2, 3) == my_join(3, 2)
    True
    >>> my_join(2, my_join(3, 4)) == my_join(2, 3, 4)
    True
    >>> my_join(0, 1, 4, 2, 3, 4)
    0
    >>> my_join(1, 2)
    2

    References:

    [1] - http://en.wikipedia.org/wiki/Lattice_(order)
    """
    is_commutative = True
    def __new__(cls, *args, **assumptions):
        args = (sympify(arg) for arg in args)
        try:
            # frozenset gives idempotency (duplicates collapse) and
            # order-independence (commutativity) for free.
            _args = frozenset(cls._new_args_filter(args))
        except ShortCircuit:
            # An absorbing element ("zero") was encountered among the args.
            return cls.zero
        if not _args:
            return cls.identity
        elif len(_args) == 1:
            # Extract the single member out of the frozenset.
            return set(_args).pop()
        else:
            # NOTE(review): the frozenset is passed to Expr.__new__ as a
            # single positional argument (not unpacked) -- confirm this is
            # the intended args representation for this class.
            obj = Expr.__new__(cls, _args, **assumptions)
            obj._argset = _args
            return obj
    @classmethod
    def _new_args_filter(cls, arg_sequence):
        """Generator filtering args: drops identities, raises ShortCircuit on
        the absorbing element, and inlines nested applications of the same op."""
        for arg in arg_sequence:
            if arg == cls.zero:
                # Absorbing element short-circuits the whole operation.
                raise ShortCircuit(arg)
            elif arg == cls.identity:
                # Identity contributes nothing.
                continue
            elif arg.func == cls:
                # Associativity: flatten nested instances of the same op.
                for x in arg.iter_basic_args():
                    yield x
            else:
                yield arg
    @property
    def args(self):
        # NOTE(review): frozenset iteration order is arbitrary, so the order
        # of this tuple is not deterministic across runs.
        return tuple(self._argset)
    @staticmethod
    def _compare_pretty(a, b):
        # Compare by string form. NOTE: cmp() exists only in Python 2.
        return cmp(str(a), str(b))
| |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.chat.v1.service.user.user_channel import UserChannelList
class UserList(ListResource):
    """List resource for the Users of a Chat service: create, page, stream, list."""

    def __init__(self, version, service_sid):
        """
        Initialize the UserList

        :param Version version: Version that contains the resource
        :param service_sid: The service_sid

        :returns: twilio.rest.ip_messaging.v1.service.user.UserList
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserList
        """
        super(UserList, self).__init__(version)
        # Path Solution
        self._solution = {
            'service_sid': service_sid,
        }
        self._uri = '/Services/{service_sid}/Users'.format(**self._solution)
    def create(self, identity, role_sid=values.unset, attributes=values.unset,
               friendly_name=values.unset):
        """
        Create a new UserInstance

        :param unicode identity: The identity
        :param unicode role_sid: The role_sid
        :param unicode attributes: The attributes
        :param unicode friendly_name: The friendly_name

        :returns: Newly created UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        # values.of() strips parameters left at values.unset.
        data = values.of({
            'Identity': identity,
            'RoleSid': role_sid,
            'Attributes': attributes,
            'FriendlyName': friendly_name,
        })
        payload = self._version.create(
            'POST',
            self._uri,
            data=data,
        )
        return UserInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
        )
    def stream(self, limit=None, page_size=None):
        """
        Streams UserInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.ip_messaging.v1.service.user.UserInstance]
        """
        limits = self._version.read_limits(limit, page_size)
        # Fetch only the first page eagerly; the version streams the rest.
        page = self.page(
            page_size=limits['page_size'],
        )
        return self._version.stream(page, limits['limit'], limits['page_limit'])
    def list(self, limit=None, page_size=None):
        """
        Lists UserInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.ip_messaging.v1.service.user.UserInstance]
        """
        return list(self.stream(
            limit=limit,
            page_size=page_size,
        ))
    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of UserInstance records from the API.
        Request is executed immediately

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserPage
        """
        params = values.of({
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page(
            'GET',
            self._uri,
            params=params,
        )
        return UserPage(self._version, response, self._solution)
    def get(self, sid):
        """
        Constructs a UserContext

        :param sid: The sid

        :returns: twilio.rest.ip_messaging.v1.service.user.UserContext
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserContext
        """
        return UserContext(
            self._version,
            service_sid=self._solution['service_sid'],
            sid=sid,
        )
    def __call__(self, sid):
        """
        Constructs a UserContext

        :param sid: The sid

        :returns: twilio.rest.ip_messaging.v1.service.user.UserContext
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserContext
        """
        # Same behavior as get(): calling the list is shorthand for lookup.
        return UserContext(
            self._version,
            service_sid=self._solution['service_sid'],
            sid=sid,
        )
    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Chat.V1.UserList>'
class UserPage(Page):
    """A single page of UserInstance results returned by the Users list API."""

    def __init__(self, version, response, solution):
        """
        Initialize the UserPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param service_sid: The service_sid

        :returns: twilio.rest.ip_messaging.v1.service.user.UserPage
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserPage
        """
        super(UserPage, self).__init__(version, response)
        # Path solution carried over from the owning UserList.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of UserInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.ip_messaging.v1.service.user.UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        service_sid = self._solution['service_sid']
        return UserInstance(self._version, payload, service_sid=service_sid)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Chat.V1.UserPage>'
class UserContext(InstanceContext):
    """Context for one User: fetch, update, delete, and dependent user_channels."""

    def __init__(self, version, service_sid, sid):
        """
        Initialize the UserContext

        :param Version version: Version that contains the resource
        :param service_sid: The service_sid
        :param sid: The sid

        :returns: twilio.rest.ip_messaging.v1.service.user.UserContext
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserContext
        """
        super(UserContext, self).__init__(version)
        # Path Solution
        self._solution = {
            'service_sid': service_sid,
            'sid': sid,
        }
        self._uri = '/Services/{service_sid}/Users/{sid}'.format(**self._solution)
        # Dependents (lazily constructed, see user_channels property)
        self._user_channels = None
    def fetch(self):
        """
        Fetch a UserInstance

        :returns: Fetched UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        params = values.of({})
        payload = self._version.fetch(
            'GET',
            self._uri,
            params=params,
        )
        return UserInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )
    def delete(self):
        """
        Deletes the UserInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)
    def update(self, role_sid=values.unset, attributes=values.unset,
               friendly_name=values.unset):
        """
        Update the UserInstance

        :param unicode role_sid: The role_sid
        :param unicode attributes: The attributes
        :param unicode friendly_name: The friendly_name

        :returns: Updated UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        # values.of() strips parameters left at values.unset.
        data = values.of({
            'RoleSid': role_sid,
            'Attributes': attributes,
            'FriendlyName': friendly_name,
        })
        payload = self._version.update(
            'POST',
            self._uri,
            data=data,
        )
        return UserInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )
    @property
    def user_channels(self):
        """
        Access the user_channels

        :returns: twilio.rest.ip_messaging.v1.service.user.user_channel.UserChannelList
        :rtype: twilio.rest.ip_messaging.v1.service.user.user_channel.UserChannelList
        """
        # Lazily construct and cache the dependent list resource.
        if self._user_channels is None:
            self._user_channels = UserChannelList(
                self._version,
                service_sid=self._solution['service_sid'],
                user_sid=self._solution['sid'],
            )
        return self._user_channels
    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Chat.V1.UserContext {}>'.format(context)
class UserInstance(InstanceResource):
    """Instance representation of a Chat User; actions are proxied to UserContext."""

    def __init__(self, version, payload, service_sid, sid=None):
        """
        Initialize the UserInstance

        :returns: twilio.rest.ip_messaging.v1.service.user.UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        super(UserInstance, self).__init__(version)
        # Marshaled Properties
        self._properties = {
            'sid': payload['sid'],
            'account_sid': payload['account_sid'],
            'service_sid': payload['service_sid'],
            'attributes': payload['attributes'],
            'friendly_name': payload['friendly_name'],
            'role_sid': payload['role_sid'],
            'identity': payload['identity'],
            'is_online': payload['is_online'],
            'is_notifiable': payload['is_notifiable'],
            'date_created': deserialize.iso8601_datetime(payload['date_created']),
            'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
            'joined_channels_count': deserialize.integer(payload['joined_channels_count']),
            'links': payload['links'],
            'url': payload['url'],
        }
        # Context (lazily constructed by the _proxy property)
        self._context = None
        # sid may be absent when the instance came from a list payload;
        # fall back to the sid carried in the payload itself.
        self._solution = {
            'service_sid': service_sid,
            'sid': sid or self._properties['sid'],
        }
    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions.  All instance actions are proxied to the context

        :returns: UserContext for this UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserContext
        """
        if self._context is None:
            self._context = UserContext(
                self._version,
                service_sid=self._solution['service_sid'],
                sid=self._solution['sid'],
            )
        return self._context
    @property
    def sid(self):
        """
        :returns: The sid
        :rtype: unicode
        """
        return self._properties['sid']
    @property
    def account_sid(self):
        """
        :returns: The account_sid
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def service_sid(self):
        """
        :returns: The service_sid
        :rtype: unicode
        """
        return self._properties['service_sid']
    @property
    def attributes(self):
        """
        :returns: The attributes
        :rtype: unicode
        """
        return self._properties['attributes']
    @property
    def friendly_name(self):
        """
        :returns: The friendly_name
        :rtype: unicode
        """
        return self._properties['friendly_name']
    @property
    def role_sid(self):
        """
        :returns: The role_sid
        :rtype: unicode
        """
        return self._properties['role_sid']
    @property
    def identity(self):
        """
        :returns: The identity
        :rtype: unicode
        """
        return self._properties['identity']
    @property
    def is_online(self):
        """
        :returns: The is_online
        :rtype: bool
        """
        return self._properties['is_online']
    @property
    def is_notifiable(self):
        """
        :returns: The is_notifiable
        :rtype: bool
        """
        return self._properties['is_notifiable']
    @property
    def date_created(self):
        """
        :returns: The date_created
        :rtype: datetime
        """
        return self._properties['date_created']
    @property
    def date_updated(self):
        """
        :returns: The date_updated
        :rtype: datetime
        """
        return self._properties['date_updated']
    @property
    def joined_channels_count(self):
        """
        :returns: The joined_channels_count
        :rtype: unicode
        """
        return self._properties['joined_channels_count']
    @property
    def links(self):
        """
        :returns: The links
        :rtype: unicode
        """
        return self._properties['links']
    @property
    def url(self):
        """
        :returns: The url
        :rtype: unicode
        """
        return self._properties['url']
    def fetch(self):
        """
        Fetch a UserInstance

        :returns: Fetched UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        return self._proxy.fetch()
    def delete(self):
        """
        Deletes the UserInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()
    def update(self, role_sid=values.unset, attributes=values.unset,
               friendly_name=values.unset):
        """
        Update the UserInstance

        :param unicode role_sid: The role_sid
        :param unicode attributes: The attributes
        :param unicode friendly_name: The friendly_name

        :returns: Updated UserInstance
        :rtype: twilio.rest.ip_messaging.v1.service.user.UserInstance
        """
        return self._proxy.update(
            role_sid=role_sid,
            attributes=attributes,
            friendly_name=friendly_name,
        )
    @property
    def user_channels(self):
        """
        Access the user_channels

        :returns: twilio.rest.ip_messaging.v1.service.user.user_channel.UserChannelList
        :rtype: twilio.rest.ip_messaging.v1.service.user.user_channel.UserChannelList
        """
        return self._proxy.user_channels
    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Chat.V1.UserInstance {}>'.format(context)
| |
import cgi
import html

from mitmproxy import flow
from mitmproxy import version
from mitmproxy import connections  # noqa
from mitmproxy.net import http
from mitmproxy.net import tcp
class HTTPRequest(http.Request):
    """
    A mitmproxy HTTP request.
    """

    # This is a very thin wrapper on top of :py:class:`mitmproxy.net.http.Request` and
    # may be removed in the future.

    def __init__(
        self,
        first_line_format,
        method,
        scheme,
        host,
        port,
        path,
        http_version,
        headers,
        content,
        timestamp_start=None,
        timestamp_end=None,
        is_replay=False,
    ):
        http.Request.__init__(
            self,
            first_line_format,
            method,
            scheme,
            host,
            port,
            path,
            http_version,
            headers,
            content,
            timestamp_start,
            timestamp_end,
        )

        # Is this request replayed?
        self.is_replay = is_replay
        self.stream = None

    def get_state(self):
        # Extend the base state with the replay flag so it round-trips
        # through (de)serialization.
        state = super().get_state()
        state.update(
            is_replay=self.is_replay
        )
        return state

    def set_state(self, state):
        self.is_replay = state.pop("is_replay")
        super().set_state(state)

    @classmethod
    def wrap(cls, request):
        """
        Wraps an existing :py:class:`mitmproxy.net.http.Request`.
        """
        # Fixed: the first parameter of a classmethod is the class itself, so
        # it is named ``cls`` (it was misleadingly named ``self``), and the
        # instance is built via ``cls(...)`` so subclasses wrap into their
        # own type instead of being hard-coded to HTTPRequest.
        return cls(
            first_line_format=request.data.first_line_format,
            method=request.data.method,
            scheme=request.data.scheme,
            host=request.data.host,
            port=request.data.port,
            path=request.data.path,
            http_version=request.data.http_version,
            headers=request.data.headers,
            content=request.data.content,
            timestamp_start=request.data.timestamp_start,
            timestamp_end=request.data.timestamp_end,
        )

    def __hash__(self):
        # Identity hash: two requests with equal content still hash differently.
        return id(self)
class HTTPResponse(http.Response):
    """
    A mitmproxy HTTP response.
    """

    # This is a very thin wrapper on top of :py:class:`mitmproxy.net.http.Response` and
    # may be removed in the future.

    def __init__(
        self,
        http_version,
        status_code,
        reason,
        headers,
        content,
        timestamp_start=None,
        timestamp_end=None,
        is_replay=False
    ):
        http.Response.__init__(
            self,
            http_version,
            status_code,
            reason,
            headers,
            content,
            timestamp_start=timestamp_start,
            timestamp_end=timestamp_end,
        )

        # Is this request replayed?
        self.is_replay = is_replay
        self.stream = None

    @classmethod
    def wrap(cls, response):
        """
        Wraps an existing :py:class:`mitmproxy.net.http.Response`.
        """
        # Fixed: classmethod first parameter renamed from ``self`` to ``cls``,
        # and the instance is built via ``cls(...)`` so subclasses wrap into
        # their own type instead of being hard-coded to HTTPResponse.
        return cls(
            http_version=response.data.http_version,
            status_code=response.data.status_code,
            reason=response.data.reason,
            headers=response.data.headers,
            content=response.data.content,
            timestamp_start=response.data.timestamp_start,
            timestamp_end=response.data.timestamp_end,
        )
class HTTPFlow(flow.Flow):
    """
    An HTTPFlow is a collection of objects representing a single HTTP
    transaction.
    """
    def __init__(self, client_conn, server_conn, live=None, mode="regular"):
        super().__init__("http", client_conn, server_conn, live)
        self.request = None  # type: HTTPRequest
        """ :py:class:`HTTPRequest` object """
        self.response = None  # type: HTTPResponse
        """ :py:class:`HTTPResponse` object """
        self.error = None  # type: flow.Error
        """ :py:class:`Error` object

        Note that it's possible for a Flow to have both a response and an error
        object. This might happen, for instance, when a response was received
        from the server, but there was an error sending it back to the client.
        """
        self.server_conn = server_conn  # type: connections.ServerConnection
        """ :py:class:`ServerConnection` object """
        self.client_conn = client_conn  # type: connections.ClientConnection
        """:py:class:`ClientConnection` object """
        self.intercepted = False  # type: bool
        """ Is this flow currently being intercepted? """
        self.mode = mode
        """ What mode was the proxy layer in when receiving this request? """
    # Extra attributes (beyond the base Flow's) that participate in
    # stateobject (de)serialization.
    _stateobject_attributes = flow.Flow._stateobject_attributes.copy()
    _stateobject_attributes.update(
        request=HTTPRequest,
        response=HTTPResponse,
        mode=str
    )
    def __repr__(self):
        # Build a "{flow.attr}" template for each attribute that is set,
        # then expand it against self via str.format at the end.
        s = "<HTTPFlow"
        for a in ("request", "response", "error", "client_conn", "server_conn"):
            if getattr(self, a, False):
                s += "\r\n  %s = {flow.%s}" % (a, a)
        s += ">"
        return s.format(flow=self)
    def copy(self):
        # Base copy() duplicates the flow; request/response need an explicit
        # deep-ish copy of their own.
        f = super().copy()
        if self.request:
            f.request = self.request.copy()
        if self.response:
            f.response = self.response.copy()
        return f
    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in both request and
        response of the flow. Encoded content will be decoded before
        replacement, and re-encoded afterwards.

        Returns the number of replacements made.
        """
        c = self.request.replace(pattern, repl, *args, **kwargs)
        if self.response:
            c += self.response.replace(pattern, repl, *args, **kwargs)
        return c
def make_error_response(status_code, message, headers=None):
    """Build an HTTPResponse with a minimal HTML body describing an error.

    :param status_code: numeric HTTP status code for the response
    :param message: human-readable detail; HTML-escaped before embedding
    :param headers: optional http.Headers; when falsy, a default closing
        header set (Server/Connection/Content-Length/Content-Type) is used
    :return: HTTPResponse carrying the rendered HTML error page
    """
    reason = http.status_codes.RESPONSES.get(status_code, "Unknown")
    # html.escape replaces the removed-in-3.8 cgi.escape; it additionally
    # escapes quotes, which is strictly safer for HTML embedding.
    body = """
        <html>
            <head>
                <title>%d %s</title>
            </head>
            <body>%s</body>
        </html>
    """.strip() % (status_code, reason, html.escape(message))
    body = body.encode("utf8", "replace")

    if not headers:
        headers = http.Headers(
            Server=version.MITMPROXY,
            Connection="close",
            Content_Length=str(len(body)),
            Content_Type="text/html"
        )

    return HTTPResponse(
        b"HTTP/1.1",
        status_code,
        reason,
        headers,
        body,
    )
def make_connect_request(address):
    """Build a CONNECT request (authority form) for the given address."""
    addr = tcp.Address.wrap(address)
    empty_headers = http.Headers()
    return HTTPRequest(
        "authority", b"CONNECT", None, addr.host, addr.port, None,
        b"HTTP/1.1", empty_headers, b""
    )
def make_connect_response(http_version):
    """Build a bare 200 response acknowledging a CONNECT request.

    Do not send any response headers as it breaks proxying non-80 ports on
    Android emulators using the -http-proxy option.
    """
    empty_headers = http.Headers()
    return HTTPResponse(http_version, 200, b"Connection established", empty_headers, b"")
# Canned interim response sent when a client announces "Expect: 100-continue".
expect_continue_response = HTTPResponse(
    b"HTTP/1.1", 100, b"Continue", http.Headers(), b""
)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne 03/26/2015 Initial development
#
"""
Host related classes for manipulating MarkLogic hosts
"""
from __future__ import unicode_literals, print_function, absolute_import
import json, logging, time
from marklogic.connection import Connection
from marklogic.exceptions import *
class Host:
"""
The Host class encapsulates a MarkLogic host.
"""
    def __init__(self,name=None,connection=None,save_connection=True):
        """
        Create a host.

        :param name: The host name, or None for an empty configuration.
        :param connection: A Connection to a MarkLogic server.
        :param save_connection: If True, remember the connection for later calls.
        """
        if name is None:
            self._config = {}
        else:
            self._config = {'host-name': name}
        self.name = name
        # etag is filled in by read()/update() for optimistic concurrency.
        self.etag = None
        if save_connection:
            self.connection = connection
        else:
            self.connection = None
        self.logger = logging.getLogger("marklogic.host")
        self._just_initialized = False
    def host_name(self):
        """
        Returns the host name of the cluster member

        :return: The member host name
        """
        return self._config['host-name']
    def set_host_name(self, name):
        # Update the configured host name; takes effect on the server
        # only after update() is called.
        self._config['host-name'] = name
    def group_name(self):
        """
        The cluster member's group

        :return: Host's Group
        """
        return self._config['group']
    def set_group_name(self, name):
        # Setter for the 'group' property.
        self._config['group'] = name
    def bind_port(self):
        """
        The bind port of the cluster member

        :return: The host's bind port
        """
        return self._config['bind-port']
    def set_bind_port(self, port):
        # Setter for the 'bind-port' property.
        self._config['bind-port'] = port
    def foreign_bind_port(self):
        """
        The foreign bind port.

        :return: The Host's foreign bind port
        """
        return self._config['foreign-bind-port']
    def set_foreign_bind_port(self, port):
        # Setter for the 'foreign-bind-port' property.
        self._config['foreign-bind-port'] = port
    def zone(self):
        """
        The zone

        :return: The zone
        """
        return self._config['zone']
    def set_zone(self, zone):
        # Setter for the 'zone' property.
        self._config['zone'] = zone
def bootstrap_host(self):
"""
Indicates if this is the bootstrap host
:return:Bootstrap host indicator
"""
return self._config['boostrap-host']
    def just_initialized(self):
        """
        Indicates if this host was just initialized. This method will
        only return True if the host was just initialized (i.e, returned
        by MarkLogic.instance_init()).

        :return: True or False
        """
        return self._just_initialized
    def _set_just_initialized(self):
        """
        Internal method used to specify that the host was just initialized.

        :return: The host object
        """
        self._just_initialized = True
        return self
    def read(self, connection=None):
        """
        Loads the host from the MarkLogic server. This will refresh
        the properties of the object.

        :param connection: The connection to a MarkLogic server;
            defaults to the connection saved at construction time.
        :return: The host object
        """
        if connection is None:
            connection = self.connection
        host = Host.lookup(connection, self.name)
        # lookup() returns None when the host does not exist; in that case
        # this object's current state is left untouched.
        if host is not None:
            self._config = host._config
            self.etag = host.etag
        return self
    def update(self, connection=None):
        """
        Save the configuration changes with the given connection.

        :param connection: The server connection; defaults to the saved one.
        :return: The host object
        """
        if connection is None:
            connection = self.connection
        uri = connection.uri("hosts", self.name)
        struct = self.marshal()
        # etag enables optimistic concurrency control on the PUT.
        response = connection.put(uri, payload=struct, etag=self.etag)
        # In case we renamed it
        self.name = self._config['host-name']
        if 'etag' in response.headers:
            self.etag = response.headers['etag']
        return self
def restart(self, connection=None):
"""
Restart the host.
:param connection:The server connection
:return: The host object
"""
if connection is None:
connection = self.connection
uri = connection.uri("hosts", self.name, properties=None)
struct = {'operation':'restart'}
response = connection.post(uri, payload=struct)
return self
def shutdown(self, connection=None):
"""
Shutdown the host.
:param connection:The server connection
:return: None
"""
if connection is None:
connection = self.connection
uri = connection.uri("hosts", self.name, properties=None)
struct = {'operation':'shutdown'}
response = connection.post(uri, payload=struct)
return None
    @classmethod
    def lookup(cls, connection, name):
        """
        Look up an individual host within the cluster.

        :param connection: A connection to a MarkLogic server
        :param name: The name of the host
        :return: The Host object, or None if the host was not found
        """
        uri = connection.uri("hosts", name)
        response = connection.get(uri)
        if response.status_code == 200:
            result = Host.unmarshal(json.loads(response.text))
            # Preserve the etag for later optimistic-concurrency updates.
            if 'etag' in response.headers:
                result.etag = response.headers['etag']
            return result
        else:
            # Any non-200 (typically 404) is treated as "not found".
            return None
    @classmethod
    def list(cls, connection):
        """
        Lists the names of hosts available on this cluster.

        :param connection: A connection to a MarkLogic server
        :return: A list of host names (possibly empty)
        :raises UnexpectedManagementAPIResponse: on a non-200 response
        """
        uri = connection.uri("hosts")
        response = connection.get(uri)
        if response.status_code == 200:
            response_json = json.loads(response.text)
            host_count = response_json['host-default-list']['list-items']['list-count']['value']
            result = []
            # 'list-item' is only present when the count is non-zero.
            if host_count > 0:
                for item in response_json['host-default-list']['list-items']['list-item']:
                    result.append(item['nameref'])
        else:
            raise UnexpectedManagementAPIResponse(response.text)
        return result
@classmethod
def unmarshal(cls, config):
result = Host()
result._config = config
result.name = result._config['host-name']
return result
def marshal(self):
struct = { }
for key in self._config:
struct[key] = self._config[key]
return struct
    def join_cluster(self, cluster, cluster_connection=None):
        """
        Join this host to the given cluster via the two-phase handshake:
        fetch this host's server config, post it to the cluster to obtain
        a cluster-config ZIP, then post that ZIP back to this host.

        :param cluster: The cluster object to join
        :param cluster_connection: Connection to the cluster; defaults to
            the cluster's own saved connection
        """
        if cluster_connection is None:
            cluster_connection = cluster.connection
        xml = self._get_server_config()
        cfgzip = cluster._post_server_config(xml,cluster_connection)
        # Talk to this host directly, reusing the cluster connection's auth.
        connection = Connection(self.host_name(), cluster_connection.auth)
        self._post_cluster_config(cfgzip,connection)
    def _get_server_config(self):
        """
        Obtain the server configuration. This is the data necessary for
        the first part of the handshake necessary to join a host to a
        cluster. The returned data is not intended for introspection.

        :return: The config. This is always XML.
        :raises UnexpectedManagementAPIResponse: on a non-200 response
        """
        # No credentials: the admin bootstrap endpoint is unauthenticated.
        connection = Connection(self.host_name(), None)
        uri = "http://{0}:8001/admin/v1/server-config".format(connection.host)
        response = connection.get(uri, accept="application/xml")
        if response.status_code != 200:
            raise UnexpectedManagementAPIResponse(response.text)
        return response.text # this is always XML
    def _post_cluster_config(self,cfgzip,connection):
        """
        Send the cluster configuration to the server that's joining
        the cluster. This is the second half of
        the handshake necessary to join a host to a cluster.

        :param connection: The connection credentials to use
        :param cfgzip: The ZIP payload from post_server_config()
        :raises UnexpectedManagementAPIResponse: if the server does not
            answer 202 Accepted
        """
        uri = "{0}://{1}:8001/admin/v1/cluster-config" \
              .format(connection.protocol, connection.host)
        response = connection.post(uri, payload=cfgzip,
                                   content_type="application/zip")
        if response.status_code != 202:
            raise UnexpectedManagementAPIResponse(response.text)
        # NOTE(review): the parsed response is assigned but not used here;
        # presumably consumed by code beyond this view, or droppable — verify.
        data = json.loads(response.text)
| |
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from antlr4.BufferedTokenStream import TokenStream
from antlr4.CommonTokenFactory import TokenFactory
from antlr4.error.ErrorStrategy import DefaultErrorStrategy
from antlr4.InputStream import InputStream
from antlr4.Recognizer import Recognizer
from antlr4.RuleContext import RuleContext
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.Token import Token
from antlr4.Lexer import Lexer
from antlr4.atn.ATNDeserializer import ATNDeserializer
from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions
from antlr4.error.Errors import UnsupportedOperationException, RecognitionException
from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
from antlr4.tree.Tree import ParseTreeListener, TerminalNode, ErrorNode
class TraceListener(ParseTreeListener):
    """Parse-tree listener that prints rule entry/exit and token consumption,
    used by Parser.setTrace for quick-and-dirty debugging."""

    def __init__(self, parser):
        self._parser = parser

    def enterEveryRule(self, ctx):
        # Announce the rule being entered and the upcoming lookahead token.
        print("enter %s, LT(1)=%s" % (self._parser.ruleNames[ctx.getRuleIndex()], self._parser._input.LT(1).text))

    def visitTerminal(self, node):
        # Report each matched token together with the rule consuming it.
        print("consume %s rule %s" % (node.symbol, self._parser.ruleNames[self._parser._ctx.getRuleIndex()]))

    def visitErrorNode(self, node):
        # Error nodes are not traced.
        pass

    def exitEveryRule(self, ctx):
        # Announce the rule being exited and the upcoming lookahead token.
        print("exit %s, LT(1)=%s" % (self._parser.ruleNames[ctx.getRuleIndex()], self._parser._input.LT(1).text))
# This is all the parsing support code essentially; most of it is error recovery stuff.#
class Parser (Recognizer):
    # This field maps from the serialized ATN string to the deserialized {@link ATN} with
    # bypass alternatives.
    #
    # @see ATNDeserializationOptions#isGenerateRuleBypassTransitions()
    #
    # NOTE: class-level attribute, so this cache is shared by every Parser instance.
    bypassAltsAtnCache = dict()
    def __init__(self, input:TokenStream):
        """Initialize all parser state and attach the given token stream."""
        super().__init__()
        # The input stream.
        self._input = None
        # The error handling strategy for the parser. The default value is a new
        # instance of {@link DefaultErrorStrategy}.
        self._errHandler = DefaultErrorStrategy()
        self._precedenceStack = list()
        self._precedenceStack.append(0)
        # The {@link ParserRuleContext} object for the currently executing rule.
        # This is always non-null during the parsing process.
        self._ctx = None
        # Specifies whether or not the parser should construct a parse tree during
        # the parsing process. The default value is {@code true}.
        self.buildParseTrees = True
        # When {@link #setTrace}{@code (true)} is called, a reference to the
        # {@link TraceListener} is stored here so it can be easily removed in a
        # later call to {@link #setTrace}{@code (false)}. The listener itself is
        # implemented as a parser listener so this field is not directly used by
        # other parser methods.
        self._tracer = None
        # The list of {@link ParseTreeListener} listeners registered to receive
        # events during the parse.
        self._parseListeners = None
        # The number of syntax errors reported during parsing. This value is
        # incremented each time {@link #notifyErrorListeners} is called.
        self._syntaxErrors = 0
        self.setInputStream(input)
    def __setattr__(self, key, value):
        # Delegates straight to object.__setattr__, i.e. identical to the default
        # behavior. NOTE(review): presumably kept as an override/instrumentation
        # hook — confirm before removing.
        object.__setattr__(self, key, value)
# reset the parser's state#
def reset(self):
if self._input is not None:
self._input.seek(0)
self._errHandler.reset(self)
self._ctx = None
self._syntaxErrors = 0
self.setTrace(False)
self._precedenceStack = list()
self._precedenceStack.append(0)
if self._interp is not None:
self._interp.reset()
# Match current input symbol against {@code ttype}. If the symbol type
# matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are
# called to complete the match process.
#
# <p>If the symbol type does not match,
# {@link ANTLRErrorStrategy#recoverInline} is called on the current error
# strategy to attempt recovery. If {@link #getBuildParseTree} is
# {@code true} and the token index of the symbol returned by
# {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
# the parse tree by calling {@link ParserRuleContext#addErrorNode}.</p>
#
# @param ttype the token type to match
# @return the matched symbol
# @throws RecognitionException if the current input symbol did not match
# {@code ttype} and the error strategy could not recover from the
# mismatched symbol
def match(self, ttype:int):
t = self.getCurrentToken()
if t.type==ttype:
self._errHandler.reportMatch(self)
self.consume()
else:
t = self._errHandler.recoverInline(self)
if self.buildParseTrees and t.tokenIndex==-1:
# we must have conjured up a new token during single token insertion
# if it's not the current symbol
self._ctx.addErrorNode(t)
return t
# Match current input symbol as a wildcard. If the symbol type matches
# (i.e. has a value greater than 0), {@link ANTLRErrorStrategy#reportMatch}
# and {@link #consume} are called to complete the match process.
#
# <p>If the symbol type does not match,
# {@link ANTLRErrorStrategy#recoverInline} is called on the current error
# strategy to attempt recovery. If {@link #getBuildParseTree} is
# {@code true} and the token index of the symbol returned by
# {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
# the parse tree by calling {@link ParserRuleContext#addErrorNode}.</p>
#
# @return the matched symbol
# @throws RecognitionException if the current input symbol did not match
# a wildcard and the error strategy could not recover from the mismatched
# symbol
def matchWildcard(self):
t = self.getCurrentToken()
if t.type > 0:
self._errHandler.reportMatch(self)
self.consume()
else:
t = self._errHandler.recoverInline(self)
if self._buildParseTrees and t.tokenIndex == -1:
# we must have conjured up a new token during single token insertion
# if it's not the current symbol
self._ctx.addErrorNode(t)
return t
def getParseListeners(self):
return list() if self._parseListeners is None else self._parseListeners
# Registers {@code listener} to receive events during the parsing process.
#
# <p>To support output-preserving grammar transformations (including but not
# limited to left-recursion removal, automated left-factoring, and
# optimized code generation), calls to listener methods during the parse
# may differ substantially from calls made by
# {@link ParseTreeWalker#DEFAULT} used after the parse is complete. In
# particular, rule entry and exit events may occur in a different order
# during the parse than after the parser. In addition, calls to certain
# rule entry methods may be omitted.</p>
#
# <p>With the following specific exceptions, calls to listener events are
# <em>deterministic</em>, i.e. for identical input the calls to listener
# methods will be the same.</p>
#
# <ul>
# <li>Alterations to the grammar used to generate code may change the
# behavior of the listener calls.</li>
# <li>Alterations to the command line options passed to ANTLR 4 when
# generating the parser may change the behavior of the listener calls.</li>
# <li>Changing the version of the ANTLR Tool used to generate the parser
# may change the behavior of the listener calls.</li>
# </ul>
#
# @param listener the listener to add
#
# @throws NullPointerException if {@code} listener is {@code null}
#
def addParseListener(self, listener:ParseTreeListener):
if listener is None:
raise ReferenceError("listener")
if self._parseListeners is None:
self._parseListeners = []
self._parseListeners.append(listener)
#
# Remove {@code listener} from the list of parse listeners.
#
# <p>If {@code listener} is {@code null} or has not been added as a parse
# listener, self method does nothing.</p>
# @param listener the listener to remove
#
def removeParseListener(self, listener:ParseTreeListener):
if self._parseListeners is not None:
self._parseListeners.remove(listener)
if len(self._parseListeners)==0:
self._parseListeners = None
    # Remove all parse listeners.
    def removeParseListeners(self):
        # Drop the whole list; getParseListeners() treats None as "empty".
        self._parseListeners = None
# Notify any parse listeners of an enter rule event.
def triggerEnterRuleEvent(self):
if self._parseListeners is not None:
for listener in self._parseListeners:
listener.enterEveryRule(self._ctx)
self._ctx.enterRule(listener)
#
# Notify any parse listeners of an exit rule event.
#
# @see #addParseListener
#
def triggerExitRuleEvent(self):
if self._parseListeners is not None:
# reverse order walk of listeners
for listener in reversed(self._parseListeners):
self._ctx.exitRule(listener)
listener.exitEveryRule(self._ctx)
    def getTokenFactory(self):
        # The factory lives on the token source that feeds our token stream.
        return self._input.tokenSource._factory
    # Tell our token source and error strategy about a new way to create tokens.#
    # NOTE(review): despite the comment above, only the token source is updated
    # here — the error strategy is not touched. Confirm whether that is intended.
    def setTokenFactory(self, factory:TokenFactory):
        self._input.tokenSource._factory = factory
# The ATN with bypass alternatives is expensive to create so we create it
# lazily.
#
# @throws UnsupportedOperationException if the current parser does not
# implement the {@link #getSerializedATN()} method.
#
def getATNWithBypassAlts(self):
serializedAtn = self.getSerializedATN()
if serializedAtn is None:
raise UnsupportedOperationException("The current parser does not support an ATN with bypass alternatives.")
result = self.bypassAltsAtnCache.get(serializedAtn, None)
if result is None:
deserializationOptions = ATNDeserializationOptions()
deserializationOptions.generateRuleBypassTransitions = True
result = ATNDeserializer(deserializationOptions).deserialize(serializedAtn)
self.bypassAltsAtnCache[serializedAtn] = result
return result
# The preferred method of getting a tree pattern. For example, here's a
# sample use:
#
# <pre>
# ParseTree t = parser.expr();
# ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
# ParseTreeMatch m = p.match(t);
# String id = m.get("ID");
# </pre>
#
def compileParseTreePattern(self, pattern:str, patternRuleIndex:int, lexer:Lexer = None):
if lexer is None:
if self.getTokenStream() is not None:
tokenSource = self.getTokenStream().getTokenSource()
if isinstance( tokenSource, Lexer ):
lexer = tokenSource
if lexer is None:
raise UnsupportedOperationException("Parser can't discover a lexer to use")
m = ParseTreePatternMatcher(lexer, self)
return m.compile(pattern, patternRuleIndex)
    def getInputStream(self):
        # Alias for getTokenStream(): a parser's input is its token stream.
        return self.getTokenStream()
    def setInputStream(self, input:InputStream):
        self.setTokenStream(input)
    def getTokenStream(self):
        return self._input
    # Set the token stream and reset the parser.#
    def setTokenStream(self, input:TokenStream):
        # Detach first so reset() does not seek() the outgoing stream.
        self._input = None
        self.reset()
        self._input = input
    # Match needs to return the current input symbol, which gets put
    # into the label for the associated token ref; e.g., x=ID.
    #
    def getCurrentToken(self):
        return self._input.LT(1)
def notifyErrorListeners(self, msg:str, offendingToken:Token = None, e:RecognitionException = None):
if offendingToken is None:
offendingToken = self.getCurrentToken()
self._syntaxErrors += 1
line = offendingToken.line
column = offendingToken.column
listener = self.getErrorListenerDispatch()
listener.syntaxError(self, offendingToken, line, column, msg, e)
#
# Consume and return the {@linkplain #getCurrentToken current symbol}.
#
# <p>E.g., given the following input with {@code A} being the current
# lookahead symbol, self function moves the cursor to {@code B} and returns
# {@code A}.</p>
#
# <pre>
# A B
# ^
# </pre>
#
# If the parser is not in error recovery mode, the consumed symbol is added
# to the parse tree using {@link ParserRuleContext#addChild(Token)}, and
# {@link ParseTreeListener#visitTerminal} is called on any parse listeners.
# If the parser <em>is</em> in error recovery mode, the consumed symbol is
# added to the parse tree using
# {@link ParserRuleContext#addErrorNode(Token)}, and
# {@link ParseTreeListener#visitErrorNode} is called on any parse
# listeners.
#
def consume(self):
o = self.getCurrentToken()
if o.type != Token.EOF:
self.getInputStream().consume()
hasListener = self._parseListeners is not None and len(self._parseListeners)>0
if self.buildParseTrees or hasListener:
if self._errHandler.inErrorRecoveryMode(self):
node = self._ctx.addErrorNode(o)
else:
node = self._ctx.addTokenNode(o)
if hasListener:
for listener in self._parseListeners:
listener.visitTerminal(node)
return o
    def addContextToParseTree(self):
        """Attach the current context to its parent's child list, if it has one."""
        # add current context to parent if we have a parent
        if self._ctx.parentCtx is not None:
            self._ctx.parentCtx.addChild(self._ctx)
# Always called by generated parsers upon entry to a rule. Access field
# {@link #_ctx} get the current context.
#
def enterRule(self, localctx:ParserRuleContext , state:int , ruleIndexint ):
self.state = state
self._ctx = localctx
self._ctx.start = self._input.LT(1)
if self.buildParseTrees:
self.addContextToParseTree()
if self._parseListeners is not None:
self.triggerEnterRuleEvent()
    def exitRule(self):
        """Leave the current rule: stamp its stop token, fire exit events, and
        pop back to the invoking state and parent context."""
        self._ctx.stop = self._input.LT(-1)
        # trigger event on _ctx, before it reverts to parent
        if self._parseListeners is not None:
            self.triggerExitRuleEvent()
        self.state = self._ctx.invokingState
        self._ctx = self._ctx.parentCtx
def enterOuterAlt(self, localctx:ParserRuleContext, altNum:int):
# if we have new localctx, make sure we replace existing ctx
# that is previous child of parse tree
if self.buildParseTrees and self._ctx != localctx:
if self._ctx.parentCtx is not None:
self._ctx.parentCtx.removeLastChild()
self._ctx.parentCtx.addChild(localctx)
self._ctx = localctx
# Get the precedence level for the top-most precedence rule.
#
# @return The precedence level for the top-most precedence rule, or -1 if
# the parser context is not nested within a precedence rule.
#
def getPrecedence(self):
if len(self._precedenceStack)==0:
return -1
else:
return self._precedenceStack[-1]
    def enterRecursionRule(self, localctx:ParserRuleContext, state:int, ruleIndex:int, precedence:int):
        """Begin a left-recursive rule: push its precedence, install localctx
        as the current context and stamp its start token."""
        self.state = state
        self._precedenceStack.append(precedence)
        self._ctx = localctx
        self._ctx.start = self._input.LT(1)
        if self._parseListeners is not None:
            self.triggerEnterRuleEvent() # simulates rule entry for left-recursive rules
    #
    # Like {@link #enterRule} but for recursive rules.
    #
    def pushNewRecursionContext(self, localctx:ParserRuleContext, state:int, ruleIndex:int):
        """Re-parent the previous context beneath localctx and make localctx
        current. Statement order matters: the old context is finalized
        (parent, invoking state, stop token) before the switch."""
        previous = self._ctx
        previous.parentCtx = localctx
        previous.invokingState = state
        previous.stop = self._input.LT(-1)
        self._ctx = localctx
        # the new context covers the same span start as the one it wraps
        self._ctx.start = previous.start
        if self.buildParseTrees:
            self._ctx.addChild(previous)
        if self._parseListeners is not None:
            self.triggerEnterRuleEvent() # simulates rule entry for left-recursive rules
    def unrollRecursionContexts(self, parentCtx:ParserRuleContext):
        """Finish a left-recursive rule: pop its precedence entry, unwind the
        context chain back to parentCtx (firing exit events along the way when
        listeners are registered), and attach the finished context to the tree."""
        self._precedenceStack.pop()
        self._ctx.stop = self._input.LT(-1)
        retCtx = self._ctx # save current ctx (return value)
        # unroll so _ctx is as it was before call to recursive method
        if self._parseListeners is not None:
            while self._ctx is not parentCtx:
                self.triggerExitRuleEvent()
                self._ctx = self._ctx.parentCtx
        else:
            self._ctx = parentCtx
        # hook into tree
        retCtx.parentCtx = parentCtx
        if self.buildParseTrees and parentCtx is not None:
            # add return ctx into invoking rule's tree
            parentCtx.addChild(retCtx)
def getInvokingContext(self, ruleIndex:int):
ctx = self._ctx
while ctx is not None:
if ctx.ruleIndex == ruleIndex:
return ctx
ctx = ctx.parentCtx
return None
    def precpred(self, localctx:RuleContext , precedence:int):
        # True when the candidate precedence is at least the minimum currently
        # required (top of the precedence stack).
        return precedence >= self._precedenceStack[-1]
    def inContext(self, context:str):
        # TODO: useful in parser?
        return False
    #
    # Checks whether or not {@code symbol} can follow the current state in the
    # ATN. The behavior of this method is equivalent to the following, but is
    # implemented such that the complete context-sensitive follow set does not
    # need to be explicitly constructed.
    #
    # <pre>
    # return getExpectedTokens().contains(symbol);
    # </pre>
    #
    # @param symbol the symbol type to check
    # @return {@code true} if {@code symbol} can follow the current state in
    # the ATN, otherwise {@code false}.
    #
    def isExpectedToken(self, symbol:int):
        atn = self._interp.atn
        ctx = self._ctx
        s = atn.states[self.state]
        following = atn.nextTokens(s)
        if symbol in following:
            return True
        # EPSILON in the follow set means the current rule can complete here,
        # so the answer depends on what may follow the invoking rule(s).
        if not Token.EPSILON in following:
            return False
        # walk up the invocation chain, checking the follow set after each call site
        while ctx is not None and ctx.invokingState>=0 and Token.EPSILON in following:
            invokingState = atn.states[ctx.invokingState]
            rt = invokingState.transitions[0]
            following = atn.nextTokens(rt.followState)
            if symbol in following:
                return True
            ctx = ctx.parentCtx
        # at the outermost level, EOF is acceptable if the start rule may end here
        if Token.EPSILON in following and symbol == Token.EOF:
            return True
        else:
            return False
    # Computes the set of input symbols which could follow the current parser
    # state and context, as given by {@link #getState} and {@link #getContext},
    # respectively.
    #
    # @see ATN#getExpectedTokens(int, RuleContext)
    #
    def getExpectedTokens(self):
        return self._interp.atn.getExpectedTokens(self.state, self._ctx)
    def getExpectedTokensWithinCurrentRule(self):
        # Follow set of the current ATN state only, ignoring calling contexts.
        atn = self._interp.atn
        s = atn.states[self.state]
        return atn.nextTokens(s)
# Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.#
def getRuleIndex(self, ruleName:str):
ruleIndex = self.getRuleIndexMap().get(ruleName, None)
if ruleIndex is not None:
return ruleIndex
else:
return -1
# Return List<String> of the rule names in your parser instance
# leading up to a call to the current rule. You could override if
# you want more details such as the file/line info of where
# in the ATN a rule is invoked.
#
# this is very useful for error messages.
#
def getRuleInvocationStack(self, p:RuleContext=None):
if p is None:
p = self._ctx
stack = list()
while p is not None:
# compute what follows who invoked us
ruleIndex = p.getRuleIndex()
if ruleIndex<0:
stack.append("n/a")
else:
stack.append(self.ruleNames[ruleIndex])
p = p.parentCtx
return stack
    # For debugging and other purposes.#
    def getDFAStrings(self):
        # One rendered string per decision DFA in the interpreter.
        return [ str(dfa) for dfa in self._interp.decisionToDFA]
# For debugging and other purposes.#
def dumpDFA(self):
seenOne = False
for i in range(0, len(self._interp.decisionToDFA)):
dfa = self._interp.decisionToDFA[i]
if len(dfa.states)>0:
if seenOne:
print()
print("Decision " + str(dfa.decision) + ":")
print(dfa.toString(self.literalNames, self.symbolicNames), end='')
seenOne = True
    def getSourceName(self):
        # Delegates to the token stream's notion of its source name.
        return self._input.sourceName
# During a parse is sometimes useful to listen in on the rule entry and exit
# events as well as token matches. self is for quick and dirty debugging.
#
def setTrace(self, trace:bool):
if not trace:
self.removeParseListener(self._tracer)
self._tracer = None
else:
if self._tracer is not None:
self.removeParseListener(self._tracer)
self._tracer = TraceListener(self)
self.addParseListener(self._tracer)
| |
"""DDNS without TSIG"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_control
import srv_msg
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.tsig
@pytest.mark.forward_reverse_remove
def test_ddns4_tsig_sha1_forw_and_rev_release():
    """Forward (A) and reverse (PTR) records added via DDNS with an HMAC-SHA1
    TSIG key on REQUEST must be removed again when the client RELEASEs."""
    # Server setup: single-address pool, DDNS enabled, SHA1 TSIG key for both zones.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
    srv_control.add_ddns_server('127.0.0.1', '53001')
    srv_control.add_ddns_server_options('enable-updates', True)
    srv_control.add_ddns_server_options('generated-prefix', 'four')
    srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
    srv_control.add_forward_ddns('four.example.com.', 'forge.sha1.key')
    srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'forge.sha1.key')
    srv_control.add_keys('forge.sha1.key', 'HMAC-SHA1', 'PN4xKZ/jDobCMlo4rpr70w==')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    srv_control.use_dns_set_number(21)
    srv_control.start_srv('DNS', 'started')
    # Sanity check: no A record exists before any lease is granted.
    misc.test_procedure()
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
    # DISCOVER/OFFER for the only pool address.
    misc.test_procedure()
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_content('yiaddr', '192.168.50.10')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    # REQUEST carrying the FQDN option with the S flag; ACK must echo option 81.
    misc.test_procedure()
    srv_msg.client_save_option_count(1, 'server_id')
    srv_msg.client_add_saved_option_count(1)
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
    srv_msg.client_requests_option(1)
    srv_msg.client_sets_value('Client', 'FQDN_domain_name', 'aa.four.example.com.')
    srv_msg.client_sets_value('Client', 'FQDN_flags', 'S')
    srv_msg.client_does_include('Client', 'fqdn')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ACK')
    srv_msg.response_check_content('yiaddr', '192.168.50.10')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_include_option(81)
    srv_msg.response_check_option_content(81, 'flags', 1)
    srv_msg.response_check_option_content(81, 'fqdn', 'aa.four.example.com.')
    # The forward (A) record must now resolve to the leased address.
    misc.test_procedure()
    srv_msg.client_save_option('server_id')
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
    srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
    # The reverse (PTR) record must point back at the FQDN.
    misc.test_procedure()
    srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
    srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
    # RELEASE the lease; no DHCP reply is expected for a RELEASE.
    misc.test_procedure()
    srv_msg.client_add_saved_option_count(1)
    srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.10')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # Both DNS records must be gone after the release.
    misc.test_procedure()
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
    misc.test_procedure()
    srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.tsig
@pytest.mark.forward_reverse_remove
def test_ddns4_tsig_forw_and_rev_release_notenabled():
    """Records added while DDNS updates were enabled must NOT be removed by a
    RELEASE processed after the server is reconfigured with updates disabled."""
    # Phase 1 setup: DDNS enabled, SHA1 TSIG key for both zones.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
    srv_control.add_ddns_server('127.0.0.1', '53001')
    srv_control.add_ddns_server_options('enable-updates', True)
    srv_control.add_ddns_server_options('generated-prefix', 'four')
    srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
    srv_control.add_forward_ddns('four.example.com.', 'forge.sha1.key')
    srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'forge.sha1.key')
    srv_control.add_keys('forge.sha1.key', 'HMAC-SHA1', 'PN4xKZ/jDobCMlo4rpr70w==')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    srv_control.use_dns_set_number(21)
    srv_control.start_srv('DNS', 'started')
    # Sanity check: no A record exists before any lease is granted.
    misc.test_procedure()
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
    # DISCOVER/OFFER for the only pool address.
    misc.test_procedure()
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_content('yiaddr', '192.168.50.10')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    # REQUEST carrying the FQDN option with the S flag; ACK must echo option 81.
    misc.test_procedure()
    srv_msg.client_save_option_count(1, 'server_id')
    srv_msg.client_add_saved_option_count(1)
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
    srv_msg.client_requests_option(1)
    srv_msg.client_sets_value('Client', 'FQDN_domain_name', 'aa.four.example.com.')
    srv_msg.client_sets_value('Client', 'FQDN_flags', 'S')
    srv_msg.client_does_include('Client', 'fqdn')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ACK')
    srv_msg.response_check_content('yiaddr', '192.168.50.10')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_include_option(81)
    srv_msg.response_check_option_content(81, 'flags', 1)
    srv_msg.response_check_option_content(81, 'fqdn', 'aa.four.example.com.')
    # Both forward and reverse records must now exist.
    misc.test_procedure()
    srv_msg.client_save_option('server_id')
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
    srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
    misc.test_procedure()
    srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
    srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
    # Phase 2: restart the DHCP server with DDNS updates disabled.
    misc.test_procedure()
    srv_control.start_srv('DHCP', 'stopped')
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
    srv_control.add_ddns_server('127.0.0.1', '53001')
    srv_control.add_ddns_server_options('enable-updates', False)
    srv_control.add_ddns_server_options('generated-prefix', 'four')
    srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
    srv_control.add_forward_ddns('four.example.com.', 'forge.sha1.key')
    srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'forge.sha1.key')
    srv_control.add_keys('forge.sha1.key', 'HMAC-SHA1', 'PN4xKZ/jDobCMlo4rpr70w==')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # RELEASE while updates are disabled; no DHCP reply is expected.
    misc.test_procedure()
    srv_msg.client_add_saved_option_count(1)
    srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.10')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # Both DNS records must still be present.
    misc.test_procedure()
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
    srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
    misc.test_procedure()
    srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
    srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.tsig
@pytest.mark.reverse_remove
def test_ddns4_tsig_sha1_rev_release():
    """Without the S flag in the client FQDN option only the reverse (PTR)
    record is added; it must be removed again on RELEASE."""
    # Server setup: single-address pool, DDNS enabled, SHA1 TSIG key for both zones.
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
    srv_control.add_ddns_server('127.0.0.1', '53001')
    srv_control.add_ddns_server_options('enable-updates', True)
    srv_control.add_ddns_server_options('generated-prefix', 'four')
    srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
    srv_control.add_forward_ddns('four.example.com.', 'forge.sha1.key')
    srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'forge.sha1.key')
    srv_control.add_keys('forge.sha1.key', 'HMAC-SHA1', 'PN4xKZ/jDobCMlo4rpr70w==')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    srv_control.use_dns_set_number(21)
    srv_control.start_srv('DNS', 'started')
    # Sanity check: no A record exists before any lease is granted.
    misc.test_procedure()
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
    # DISCOVER/OFFER for the only pool address.
    misc.test_procedure()
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_content('yiaddr', '192.168.50.10')
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    # REQUEST with the FQDN option but WITHOUT the S flag.
    misc.test_procedure()
    srv_msg.client_save_option_count(1, 'server_id')
    srv_msg.client_add_saved_option_count(1)
    srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
    srv_msg.client_requests_option(1)
    srv_msg.client_sets_value('Client', 'FQDN_domain_name', 'aa.four.example.com.')
    srv_msg.client_does_include('Client', 'fqdn')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ACK')
    srv_msg.response_check_content('yiaddr', '192.168.50.10')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    srv_msg.response_check_include_option(81)
    srv_msg.response_check_option_content(81, 'fqdn', 'aa.four.example.com.')
    # No forward (A) record may be created in this mode...
    misc.test_procedure()
    srv_msg.client_save_option('server_id')
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
    # ...but the reverse (PTR) record must exist.
    misc.test_procedure()
    srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER')
    srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
    srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
    # RELEASE the lease; no DHCP reply is expected for a RELEASE.
    misc.test_procedure()
    srv_msg.client_add_saved_option_count(1)
    srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.10')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()
    # After the release neither record may remain.
    misc.test_procedure()
    srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
    misc.test_procedure()
    srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
    srv_msg.client_send_dns_query()
    misc.pass_criteria()
    srv_msg.send_wait_for_query('MUST')
    srv_msg.dns_option('ANSWER', expect_include=False)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the Tensorboard audio plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import shutil
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.audio import summary
from tensorboard.plugins.audio import audio_plugin
class AudioPluginTest(tf.test.TestCase):
  """Integration tests for the TensorBoard audio plugin's HTTP routes.

  setUp() writes both old-style (tf.summary.audio) and new-style
  (tensorboard.plugins.audio.summary) audio summaries to a temp logdir and
  spins up an in-process WSGI client against the plugin.
  """

  def setUp(self):
    self.log_dir = tempfile.mkdtemp()

    # We use numpy.random to generate audio. We seed to avoid non-determinism
    # in this test.
    numpy.random.seed(42)

    # Create old-style audio summaries for run "foo".
    tf.reset_default_graph()
    sess = tf.Session()
    placeholder = tf.placeholder(tf.float32)
    tf.summary.audio(name="baz", tensor=placeholder, sample_rate=44100)
    merged_summary_op = tf.summary.merge_all()
    foo_directory = os.path.join(self.log_dir, "foo")
    writer = tf.summary.FileWriter(foo_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      # The floats (sample data) range from -1 to 1.
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          placeholder: numpy.random.rand(42, 22050) * 2 - 1
      }), global_step=step)
    writer.close()

    # Create new-style audio summaries for run "bar".
    tf.reset_default_graph()
    sess = tf.Session()
    audio_placeholder = tf.placeholder(tf.float32)
    labels_placeholder = tf.placeholder(tf.string)
    summary.op("quux", audio_placeholder, sample_rate=44100,
               labels=labels_placeholder,
               description="how do you pronounce that, anyway?")
    merged_summary_op = tf.summary.merge_all()
    bar_directory = os.path.join(self.log_dir, "bar")
    writer = tf.summary.FileWriter(bar_directory)
    writer.add_graph(sess.graph)
    for step in xrange(2):
      # The floats (sample data) range from -1 to 1.
      writer.add_summary(sess.run(merged_summary_op, feed_dict={
          audio_placeholder: numpy.random.rand(42, 11025, 1) * 2 - 1,
          labels_placeholder: [
              tf.compat.as_bytes('step **%s**, sample %s' % (step, sample))
              for sample in xrange(42)
          ],
      }), global_step=step)
    writer.close()

    # Start a server with the plugin.
    multiplexer = event_multiplexer.EventMultiplexer({
        "foo": foo_directory,
        "bar": bar_directory,
    })
    context = base_plugin.TBContext(
        logdir=self.log_dir, multiplexer=multiplexer)
    self.plugin = audio_plugin.AudioPlugin(context)
    wsgi_app = application.TensorBoardWSGIApp(
        self.log_dir, [self.plugin], multiplexer, reload_interval=0)
    self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)

  def tearDown(self):
    shutil.rmtree(self.log_dir, ignore_errors=True)

  def _DeserializeResponse(self, byte_content):
    """Deserializes byte content that is a JSON encoding.

    Args:
      byte_content: The byte content of a response.

    Returns:
      The deserialized python object decoded from JSON.
    """
    return json.loads(byte_content.decode("utf-8"))

  def testRoutesProvided(self):
    """Tests that the plugin offers the correct routes."""
    routes = self.plugin.get_plugin_apps()
    # FIX: `collections.Callable` was deprecated in Python 3.3 and removed in
    # Python 3.10; the builtin callable() performs the equivalent check.
    self.assertTrue(callable(routes["/audio"]))
    self.assertTrue(callable(routes["/individualAudio"]))
    self.assertTrue(callable(routes["/tags"]))

  def testOldStyleAudioRoute(self):
    """Tests that the /audio routes returns correct old-style data."""
    response = self.server.get(
        "/data/plugin/audio/audio?run=foo&tag=baz/audio/0&sample=0")
    self.assertEqual(200, response.status_code)

    # Verify that the correct entries are returned.
    entries = self._DeserializeResponse(response.get_data())
    self.assertEqual(2, len(entries))

    # Verify that the 1st entry is correct.
    entry = entries[0]
    self.assertEqual("audio/wav", entry["contentType"])
    self.assertEqual("", entry["label"])
    self.assertEqual(0, entry["step"])
    parsed_query = urllib.parse.parse_qs(entry["query"])
    self.assertListEqual(["foo"], parsed_query["run"])
    self.assertListEqual(["baz/audio/0"], parsed_query["tag"])
    self.assertListEqual(["0"], parsed_query["sample"])
    self.assertListEqual(["0"], parsed_query["index"])

    # Verify that the 2nd entry is correct.
    entry = entries[1]
    self.assertEqual("audio/wav", entry["contentType"])
    self.assertEqual("", entry["label"])
    self.assertEqual(1, entry["step"])
    parsed_query = urllib.parse.parse_qs(entry["query"])
    self.assertListEqual(["foo"], parsed_query["run"])
    self.assertListEqual(["baz/audio/0"], parsed_query["tag"])
    self.assertListEqual(["0"], parsed_query["sample"])
    self.assertListEqual(["1"], parsed_query["index"])

  def testNewStyleAudioRoute(self):
    """Tests that the /audio routes returns correct new-style data."""
    response = self.server.get(
        "/data/plugin/audio/audio?run=bar&tag=quux/audio_summary&sample=0")
    self.assertEqual(200, response.status_code)

    # Verify that the correct entries are returned.
    entries = self._DeserializeResponse(response.get_data())
    self.assertEqual(2, len(entries))

    # Verify that the 1st entry is correct. Labels are rendered from markdown
    # to HTML by the plugin.
    entry = entries[0]
    self.assertEqual("audio/wav", entry["contentType"])
    self.assertEqual(
        "<p>step <strong>%s</strong>, sample 0</p>" % entry["step"],
        entry["label"])
    self.assertEqual(0, entry["step"])
    parsed_query = urllib.parse.parse_qs(entry["query"])
    self.assertListEqual(["bar"], parsed_query["run"])
    self.assertListEqual(["quux/audio_summary"], parsed_query["tag"])
    self.assertListEqual(["0"], parsed_query["sample"])
    self.assertListEqual(["0"], parsed_query["index"])

    # Verify that the 2nd entry is correct.
    entry = entries[1]
    self.assertEqual("audio/wav", entry["contentType"])
    self.assertEqual(
        "<p>step <strong>%s</strong>, sample 0</p>" % entry["step"],
        entry["label"])
    self.assertEqual(1, entry["step"])
    parsed_query = urllib.parse.parse_qs(entry["query"])
    self.assertListEqual(["bar"], parsed_query["run"])
    self.assertListEqual(["quux/audio_summary"], parsed_query["tag"])
    self.assertListEqual(["0"], parsed_query["sample"])
    self.assertListEqual(["1"], parsed_query["index"])

  def testOldStyleIndividualAudioRoute(self):
    """Tests fetching an individual audio clip from an old-style summary."""
    response = self.server.get(
        "/data/plugin/audio/individualAudio"
        "?run=foo&tag=baz/audio/0&sample=0&index=0")
    self.assertEqual(200, response.status_code)
    self.assertEqual("audio/wav", response.headers.get("content-type"))

  def testNewStyleIndividualAudioRoute(self):
    """Tests fetching an individual audio clip from a new-style summary."""
    response = self.server.get(
        "/data/plugin/audio/individualAudio"
        "?run=bar&tag=quux/audio_summary&sample=0&index=0")
    self.assertEqual(200, response.status_code)
    self.assertEqual("audio/wav", response.headers.get("content-type"))

  def testTagsRoute(self):
    """Tests that the /tags route offers the correct run to tag mapping."""
    response = self.server.get("/data/plugin/audio/tags")
    self.assertEqual(200, response.status_code)
    self.assertDictEqual({
        "foo": {
            "baz/audio/0": {
                "displayName": "baz/audio/0",
                "description": "",
                "samples": 1,
            },
            "baz/audio/1": {
                "displayName": "baz/audio/1",
                "description": "",
                "samples": 1,
            },
            "baz/audio/2": {
                "displayName": "baz/audio/2",
                "description": "",
                "samples": 1,
            },
        },
        "bar": {
            "quux/audio_summary": {
                "displayName": "quux",
                "description": "<p>how do you pronounce that, anyway?</p>",
                "samples": 3,  # 42 inputs, but max_outputs=3
            },
        },
    }, self._DeserializeResponse(response.get_data()))
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  tf.test.main()
| |
from gym.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import TorchCategorical, \
TorchDeterministic
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, \
try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class ParameterNoise(Exploration):
    """An exploration that changes a Model's parameters.

    Implemented based on:
    [1] https://blog.openai.com/better-exploration-with-parameter-noise/
    [2] https://arxiv.org/pdf/1706.01905.pdf

    At the beginning of an episode, Gaussian noise is added to all weights
    of the model. At the end of the episode, the noise is undone and an action
    diff (pi-delta) is calculated, from which we determine the changes in the
    noise's stddev for the next episode.
    """

    def __init__(self,
                 action_space,
                 *,
                 framework: str,
                 policy_config: dict,
                 model: ModelV2,
                 initial_stddev: float = 1.0,
                 random_timesteps: int = 10000,
                 sub_exploration: Optional[dict] = None,
                 **kwargs):
        """Initializes a ParameterNoise Exploration object.

        Args:
            initial_stddev (float): The initial stddev to use for the noise.
            random_timesteps (int): The number of timesteps to act completely
                randomly (see [1]).
            sub_exploration (Optional[dict]): Optional sub-exploration config.
                None for auto-detection/setup.
        """
        assert framework is not None
        super().__init__(
            action_space,
            policy_config=policy_config,
            model=model,
            framework=framework,
            **kwargs)

        # `self.stddev` is the in-graph (tf) or plain-python (torch/eager)
        # noise scale; `self.stddev_val` mirrors it as a plain float so it can
        # be adjusted out-of-graph in `postprocess_trajectory`.
        self.stddev = get_variable(
            initial_stddev, framework=self.framework, tf_name="stddev")
        self.stddev_val = initial_stddev  # Out-of-graph tf value holder.

        # The weight variables of the Model where noise should be applied to.
        # This excludes any variable, whose name contains "LayerNorm" (those
        # are BatchNormalization layers, which should not be perturbed).
        self.model_variables = [
            v for k, v in self.model.trainable_variables(as_dict=True).items()
            if "LayerNorm" not in k
        ]
        # Our noise to be added to the weights. Each item in `self.noise`
        # corresponds to one Model variable and holding the Gaussian noise to
        # be added to that variable (weight).
        self.noise = []
        for var in self.model_variables:
            name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
            self.noise.append(
                get_variable(
                    np.zeros(var.shape, dtype=np.float32),
                    framework=self.framework,
                    tf_name=name_,
                    torch_tensor=True,
                    device=self.device))

        # tf-specific ops to sample, assign and remove noise.
        # (Only needed in graph mode; eager/torch paths call the helper
        # methods directly.)
        if self.framework == "tf" and not tf.executing_eagerly():
            self.tf_sample_new_noise_op = \
                self._tf_sample_new_noise_op()
            self.tf_add_stored_noise_op = \
                self._tf_add_stored_noise_op()
            self.tf_remove_noise_op = \
                self._tf_remove_noise_op()
            # Create convenience sample+add op for tf.
            with tf1.control_dependencies([self.tf_sample_new_noise_op]):
                add_op = self._tf_add_stored_noise_op()
            with tf1.control_dependencies([add_op]):
                self.tf_sample_new_noise_and_add_op = tf.no_op()

        # Whether the Model's weights currently have noise added or not.
        self.weights_are_currently_noisy = False

        # Auto-detection of underlying exploration functionality.
        if sub_exploration is None:
            # For discrete action spaces, use an underlying EpsilonGreedy with
            # a special schedule.
            if isinstance(self.action_space, Discrete):
                sub_exploration = {
                    "type": "EpsilonGreedy",
                    "epsilon_schedule": {
                        "type": "PiecewiseSchedule",
                        # Step function (see [2]).
                        "endpoints": [(0, 1.0), (random_timesteps + 1, 1.0),
                                      (random_timesteps + 2, 0.01)],
                        "outside_value": 0.01
                    }
                }
            elif isinstance(self.action_space, Box):
                sub_exploration = {
                    "type": "OrnsteinUhlenbeckNoise",
                    "random_timesteps": random_timesteps,
                }
            # TODO(sven): Implement for any action space.
            else:
                raise NotImplementedError

        self.sub_exploration = from_config(
            Exploration,
            sub_exploration,
            framework=self.framework,
            action_space=self.action_space,
            policy_config=self.policy_config,
            model=self.model,
            **kwargs)

        # Whether we need to call `self._delayed_on_episode_start` before
        # the forward pass.
        self.episode_started = False

    @override(Exploration)
    def before_compute_actions(self,
                               *,
                               timestep: Optional[int] = None,
                               explore: bool = None,
                               tf_sess: Optional["tf.Session"] = None):
        explore = explore if explore is not None else \
            self.policy_config["explore"]

        # Is this the first forward pass in the new episode? If yes, do the
        # noise re-sampling and add to weights.
        if self.episode_started:
            self._delayed_on_episode_start(explore, tf_sess)

        # Add noise if necessary.
        if explore and not self.weights_are_currently_noisy:
            self._add_stored_noise(tf_sess=tf_sess)
        # Remove noise if necessary.
        elif not explore and self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def get_exploration_action(self, *,
                               action_distribution: ActionDistribution,
                               timestep: Union[TensorType, int],
                               explore: Union[TensorType, bool]):
        # Use our sub-exploration object to handle the final exploration
        # action (depends on the algo-type/action-space/etc..).
        return self.sub_exploration.get_exploration_action(
            action_distribution=action_distribution,
            timestep=timestep,
            explore=explore)

    @override(Exploration)
    def on_episode_start(self,
                         policy: "Policy",
                         *,
                         environment: BaseEnv = None,
                         episode: int = None,
                         tf_sess: Optional["tf.Session"] = None):
        # We have to delay the noise-adding step by one forward call.
        # This is due to the fact that the optimizer does it's step right
        # after the episode was reset (and hence the noise was already added!).
        # We don't want to update into a noisy net.
        self.episode_started = True

    def _delayed_on_episode_start(self, explore, tf_sess):
        """Performs the deferred episode-start work (see on_episode_start)."""
        # Sample fresh noise and add to weights.
        if explore:
            self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
        # Only sample, don't apply anything to the weights.
        else:
            self._sample_new_noise(tf_sess=tf_sess)
        self.episode_started = False

    @override(Exploration)
    def on_episode_end(self,
                       policy,
                       *,
                       environment=None,
                       episode=None,
                       tf_sess=None):
        # Remove stored noise from weights (only if currently noisy).
        if self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def postprocess_trajectory(self,
                               policy: "Policy",
                               sample_batch: SampleBatch,
                               tf_sess: Optional["tf.Session"] = None):
        noisy_action_dist = noise_free_action_dist = None
        # Adjust the stddev depending on the action (pi)-distance.
        # Also see [1] for details.
        # TODO(sven): Find out whether this can be scrapped by simply using
        #  the `sample_batch` to get the noisy/noise-free action dist.
        _, _, fetches = policy.compute_actions(
            obs_batch=sample_batch[SampleBatch.CUR_OBS],
            # TODO(sven): What about state-ins and seq-lens?
            prev_action_batch=sample_batch.get(SampleBatch.PREV_ACTIONS),
            prev_reward_batch=sample_batch.get(SampleBatch.PREV_REWARDS),
            explore=self.weights_are_currently_noisy)

        # Categorical case (e.g. DQN).
        if policy.dist_class in (Categorical, TorchCategorical):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif policy.dist_class in [Deterministic, TorchDeterministic]:
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        else:
            raise NotImplementedError  # TODO(sven): Other action-dist cases.

        if self.weights_are_currently_noisy:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist

        # Second forward pass with the opposite noise setting, so we obtain
        # both the noisy and the noise-free distributions.
        _, _, fetches = policy.compute_actions(
            obs_batch=sample_batch[SampleBatch.CUR_OBS],
            prev_action_batch=sample_batch.get(SampleBatch.PREV_ACTIONS),
            prev_reward_batch=sample_batch.get(SampleBatch.PREV_REWARDS),
            explore=not self.weights_are_currently_noisy)

        # Categorical case (e.g. DQN).
        if policy.dist_class in (Categorical, TorchCategorical):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif policy.dist_class in [Deterministic, TorchDeterministic]:
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]

        if noisy_action_dist is None:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist

        delta = distance = None
        # Categorical case (e.g. DQN).
        if policy.dist_class in (Categorical, TorchCategorical):
            # Calculate KL-divergence (DKL(clean||noisy)) according to [2].
            # TODO(sven): Allow KL-divergence to be calculated by our
            #  Distribution classes (don't support off-graph/numpy yet).
            distance = np.nanmean(
                np.sum(
                    noise_free_action_dist *
                    np.log(noise_free_action_dist /
                           (noisy_action_dist + SMALL_NUMBER)), 1))
            current_epsilon = self.sub_exploration.get_state(
                sess=tf_sess)["cur_epsilon"]
            delta = -np.log(1 - current_epsilon +
                            current_epsilon / self.action_space.n)
        elif policy.dist_class in [Deterministic, TorchDeterministic]:
            # Calculate MSE between noisy and non-noisy output (see [2]).
            distance = np.sqrt(
                np.mean(np.square(noise_free_action_dist - noisy_action_dist)))
            current_scale = self.sub_exploration.get_state(
                sess=tf_sess)["cur_scale"]
            delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * \
                current_scale

        # Adjust stddev according to the calculated action-distance.
        if distance <= delta:
            self.stddev_val *= 1.01
        else:
            self.stddev_val /= 1.01

        # Update our state (self.stddev and self.stddev_val).
        self.set_state(self.get_state(), sess=tf_sess)

        return sample_batch

    def _sample_new_noise(self, *, tf_sess=None):
        """Samples new noise and stores it in `self.noise`."""
        if self.framework == "tf":
            tf_sess.run(self.tf_sample_new_noise_op)
        elif self.framework in ["tfe", "tf2"]:
            self._tf_sample_new_noise_op()
        else:
            for i in range(len(self.noise)):
                self.noise[i] = torch.normal(
                    mean=torch.zeros(self.noise[i].size()),
                    std=self.stddev).to(self.device)

    def _tf_sample_new_noise_op(self):
        """Returns a tf op that (re)samples Gaussian noise for all vars."""
        added_noises = []
        for noise in self.noise:
            added_noises.append(
                tf1.assign(
                    noise,
                    tf.random.normal(
                        shape=noise.shape,
                        stddev=self.stddev,
                        dtype=tf.float32)))
        return tf.group(*added_noises)

    def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
        """Samples new noise and applies it; optionally undoing old noise.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use (graph
                mode only).
            override (bool): If True and the weights are currently noisy,
                remove the old noise first before sampling/adding.
        """
        if self.framework == "tf":
            if override and self.weights_are_currently_noisy:
                tf_sess.run(self.tf_remove_noise_op)
            tf_sess.run(self.tf_sample_new_noise_and_add_op)
        else:
            if override and self.weights_are_currently_noisy:
                self._remove_noise()
            self._sample_new_noise()
            self._add_stored_noise()

        self.weights_are_currently_noisy = True

    def _add_stored_noise(self, *, tf_sess=None):
        """Adds the stored `self.noise` to the model's parameters.

        Note: No new sampling of noise here.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to add the
                stored noise to the (currently noise-free) weights.
        """
        # Make sure we only add noise to currently noise-free weights.
        assert self.weights_are_currently_noisy is False

        # Add stored noise to the model's parameters.
        if self.framework == "tf":
            tf_sess.run(self.tf_add_stored_noise_op)
        elif self.framework in ["tf2", "tfe"]:
            self._tf_add_stored_noise_op()
        else:
            for var, noise in zip(self.model_variables, self.noise):
                # Add noise to weights in-place.
                # requires_grad is toggled off so the in-place update is not
                # recorded by autograd.
                var.requires_grad = False
                var.add_(noise)
                var.requires_grad = True

        self.weights_are_currently_noisy = True

    def _tf_add_stored_noise_op(self):
        """Generates tf-op that assigns the stored noise to weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to apply the already stored noise to the NN.
        """
        add_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            add_noise_ops.append(tf1.assign_add(var, noise))
        ret = tf.group(*tuple(add_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    def _remove_noise(self, *, tf_sess=None):
        """Removes the current action noise from the model parameters.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to remove
                the noise from the (currently noisy) weights.
        """
        # Make sure we only remove noise iff currently noisy.
        assert self.weights_are_currently_noisy is True

        # Removes the stored noise from the model's parameters.
        if self.framework == "tf":
            tf_sess.run(self.tf_remove_noise_op)
        elif self.framework in ["tf2", "tfe"]:
            self._tf_remove_noise_op()
        else:
            for var, noise in zip(self.model_variables, self.noise):
                # Remove noise from weights in-place.
                var.requires_grad = False
                var.add_(-noise)
                var.requires_grad = True

        self.weights_are_currently_noisy = False

    def _tf_remove_noise_op(self):
        """Generates a tf-op for removing noise from the model's weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to remove the currently stored noise from the NN.
        """
        remove_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            remove_noise_ops.append(tf1.assign_add(var, -noise))
        ret = tf.group(*tuple(remove_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    @override(Exploration)
    def get_state(self, sess=None):
        return {"cur_stddev": self.stddev_val}

    @override(Exploration)
    def set_state(self, state: dict,
                  sess: Optional["tf.Session"] = None) -> None:
        self.stddev_val = state["cur_stddev"]
        # Set self.stddev to calculated value.
        if self.framework == "tf":
            self.stddev.load(self.stddev_val, session=sess)
        else:
            self.stddev = self.stddev_val
| |
"""Run MNIST-rot"""
import argparse
import os
import random
import sys
import time
import urllib2
import zipfile
sys.path.append('../')
import numpy as np
import tensorflow as tf
from mnist_model import deep_mnist
def download2FileAndExtract(url, folder, fileName):
    """Download a zip archive from `url` into `folder` and extract it there.

    Args:
        url: Source URL of the zip archive.
        folder: Destination directory (created if missing).
        fileName: Archive file name, expected to start with '/'.

    Exits the process with status 1 if the download is not a valid zip file.
    """
    print('Downloading rotated MNIST...')
    add_folder(folder)
    zipFileName = folder + fileName
    request = urllib2.urlopen(url)
    with open(zipFileName, "wb") as f:
        f.write(request.read())
    if not zipfile.is_zipfile(zipFileName):
        print('ERROR: ' + zipFileName + ' is not a valid zip file.')
        sys.exit(1)
    print('Extracting...')
    wd = os.getcwd()
    os.chdir(folder)
    try:
        # ZipFile is a context manager since Python 2.7; this also closes the
        # archive if extraction raises.
        with zipfile.ZipFile('.' + fileName, mode='r') as archive:
            archive.extractall()
    finally:
        # FIX: always restore the working directory, even on failure.
        os.chdir(wd)
    # FIX: message previously read "rotated rotated MNIST".
    print('Successfully retrieved rotated MNIST dataset.')
def settings(args):
    """Attach dataset and hyperparameter settings to `args`; load MNIST-rot.

    Downloads the rotated-MNIST archive on first use, loads the
    train/valid/test splits, and (when `args.default_settings` is set)
    overwrites `args` with the default hyperparameters.

    Args:
        args: argparse.Namespace with at least data_dir, default_settings
            and combine_train_val.

    Returns:
        (args, data): the updated namespace and a dict with 'train_x'/'train_y'
        (plus 'valid_x'/'valid_y' unless combined) and 'test_x'/'test_y'.
    """
    # Download MNIST if it doesn't exist
    args.dataset = 'rotated_mnist'
    if not os.path.exists(args.data_dir + '/mnist_rotation_new.zip'):
        download2FileAndExtract("https://www.dropbox.com/s/0fxwai3h84dczh0/mnist_rotation_new.zip?dl=1",
                                args.data_dir, "/mnist_rotation_new.zip")

    # Load dataset
    mnist_dir = args.data_dir + '/mnist_rotation_new'
    train = np.load(mnist_dir + '/rotated_train.npz')
    valid = np.load(mnist_dir + '/rotated_valid.npz')
    test = np.load(mnist_dir + '/rotated_test.npz')

    data = {}
    if args.combine_train_val:
        data['train_x'] = np.vstack((train['x'], valid['x']))
        data['train_y'] = np.hstack((train['y'], valid['y']))
    else:
        data['train_x'] = train['x']
        data['train_y'] = train['y']
        data['valid_x'] = valid['x']
        data['valid_y'] = valid['y']
    data['test_x'] = test['x']
    data['test_y'] = test['y']

    # Other options
    if args.default_settings:
        args.n_epochs = 200
        args.batch_size = 46
        args.learning_rate = 0.0076
        args.std_mult = 0.7
        args.delay = 12
        args.phase_preconditioner = 7.8
        args.filter_gain = 2
        args.filter_size = 5
        args.n_rings = 4
        args.n_filters = 8
    # FIX: use the actual batch size instead of the hard-coded 46, and floor
    # division so the result stays an int on both Python 2 and 3.
    args.display_step = len(data['train_x']) // args.batch_size
    args.is_classification = True
    args.dim = 28
    args.crop_shape = 0
    args.n_channels = 1
    args.n_classes = 10
    args.lr_div = 10.

    args.log_path = add_folder('./logs')
    args.checkpoint_path = add_folder('./checkpoints') + '/model.ckpt'
    return args, data
def add_folder(folder_name):
    """Ensure that directory `folder_name` exists and return its name."""
    if os.path.exists(folder_name):
        return folder_name
    os.mkdir(folder_name)
    print('Created {:s}'.format(folder_name))
    return folder_name
def minibatcher(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) minibatches of exactly `batchsize` items.

    A trailing partial batch is dropped. When `shuffle` is True, both arrays
    are traversed through one shared random index permutation.
    """
    assert len(inputs) == len(targets)
    n_samples = len(inputs)
    perm = None
    if shuffle:
        perm = np.arange(n_samples)
        np.random.shuffle(perm)
    for offset in range(0, n_samples - batchsize + 1, batchsize):
        if perm is not None:
            excerpt = perm[offset:offset + batchsize]
        else:
            excerpt = slice(offset, offset + batchsize)
        yield inputs[excerpt], targets[excerpt]
def get_learning_rate(args, current, best, counter, learning_rate):
    """If have not seen accuracy improvement in delay epochs, then divide
    learning rate by 10
    """
    # Improvement: record new best and restart the patience counter.
    if current > best:
        return (current, 0, learning_rate)
    # Patience exhausted: decay the learning rate and restart the counter.
    if counter > args.delay:
        return (best, 0, learning_rate / args.lr_div)
    # Otherwise just keep waiting.
    return (best, counter + 1, learning_rate)
def main(args):
    """The magic happens here"""
    tf.reset_default_graph()
    ##### SETUP AND LOAD DATA #####
    args, data = settings(args)

    ##### BUILD MODEL #####
    ## Placeholders
    x = tf.placeholder(tf.float32, [args.batch_size,784], name='x')
    y = tf.placeholder(tf.int64, [args.batch_size], name='y')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    train_phase = tf.placeholder(tf.bool, name='train_phase')

    # Construct model and optimizer
    pred = deep_mnist(args, x, train_phase)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y))

    # Evaluation criteria
    correct_pred = tf.equal(tf.argmax(pred, 1), y)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Optimizer
    optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
    grads_and_vars = optim.compute_gradients(loss)
    modified_gvs = []
    # We precondition the phases, for faster descent, in the same way as biases
    for g, v in grads_and_vars:
        if 'psi' in v.name:
            g = args.phase_preconditioner*g
        modified_gvs.append((g, v))
    train_op = optim.apply_gradients(modified_gvs)

    ##### TRAIN ####
    # Configure tensorflow session
    init_global = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.log_device_placement = False

    lr = args.learning_rate
    saver = tf.train.Saver()
    sess = tf.Session(config=config)
    sess.run([init_global, init_local], feed_dict={train_phase : True})

    start = time.time()
    epoch = 0
    step = 0.
    counter = 0
    best = 0.
    print('Starting training loop...')
    while epoch < args.n_epochs:
        # Training steps
        batcher = minibatcher(data['train_x'], data['train_y'], args.batch_size, shuffle=True)
        train_loss = 0.
        train_acc = 0.
        for i, (X, Y) in enumerate(batcher):
            feed_dict = {x: X, y: Y, learning_rate: lr, train_phase: True}
            __, loss_, accuracy_ = sess.run([train_op, loss, accuracy], feed_dict=feed_dict)
            train_loss += loss_
            train_acc += accuracy_
            # NOTE(review): '{:d}' requires an int; this relies on Python-2
            # integer division of shape[0]/batch_size — would break on Py3.
            sys.stdout.write('{:d}/{:d}\r'.format(i, data['train_x'].shape[0]/args.batch_size))
            sys.stdout.flush()
        # `i` is the index of the last minibatch, so (i+1.) is the batch count.
        train_loss /= (i+1.)
        train_acc /= (i+1.)

        if not args.combine_train_val:
            # Validation pass (no gradient updates; train_phase=False).
            batcher = minibatcher(data['valid_x'], data['valid_y'], args.batch_size)
            valid_acc = 0.
            for i, (X, Y) in enumerate(batcher):
                feed_dict = {x: X, y: Y, train_phase: False}
                accuracy_ = sess.run(accuracy, feed_dict=feed_dict)
                valid_acc += accuracy_
                sys.stdout.write('Validating\r')
                sys.stdout.flush()
            valid_acc /= (i+1.)
            print('[{:04d} | {:0.1f}] Loss: {:04f}, Train Acc.: {:04f}, Validation Acc.: {:04f}, Learning rate: {:.2e}'.format(epoch,
                time.time()-start, train_loss, train_acc, valid_acc, lr))
        else:
            print('[{:04d} | {:0.1f}] Loss: {:04f}, Train Acc.: {:04f}, Learning rate: {:.2e}'.format(epoch,
                time.time()-start, train_loss, train_acc, lr))

        # Save model
        if epoch % 10 == 0:
            saver.save(sess, args.checkpoint_path)
            print('Model saved')

        # Updates to the training scheme
        #best, counter, lr = get_learning_rate(args, valid_acc, best, counter, lr)
        # NOTE(review): with Python-2 integer division, epoch/50 yields a
        # step-wise decay (x0.1 every 50 epochs) — presumably intentional.
        lr = args.learning_rate * np.power(0.1, epoch / 50)
        epoch += 1

    # TEST
    batcher = minibatcher(data['test_x'], data['test_y'], args.batch_size)
    test_acc = 0.
    for i, (X, Y) in enumerate(batcher):
        feed_dict = {x: X, y: Y, train_phase: False}
        accuracy_ = sess.run(accuracy, feed_dict=feed_dict)
        test_acc += accuracy_
        sys.stdout.write('Testing\r')
        sys.stdout.flush()
    test_acc /= (i+1.)
    print('Test Acc.: {:04f}'.format(test_acc))
    sess.close()
if __name__ == '__main__':
    def _str2bool(value):
        """Parse a CLI boolean.

        FIX: argparse's `type=bool` treats ANY non-empty string (including
        'False') as True; this converter honors common false spellings.
        """
        return str(value).lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", help="data directory", default='./data')
    parser.add_argument("--default_settings", help="use default settings", type=_str2bool, default=True)
    parser.add_argument("--combine_train_val", help="combine the training and validation sets for testing", type=_str2bool, default=False)
    main(parser.parse_args())
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
#
# IDL Node
#
# IDL Node defines the IDLAttribute and IDLNode objects which are constructed
# by the parser as it processes the various 'productions'. The IDLAttribute
# objects are assigned to the IDLNode's property dictionary instead of being
# applied as children of The IDLNodes, so they do not exist in the final tree.
# The AST of IDLNodes is the output from the parsing state and will be used
# as the source data by the various generators.
#
#
# CopyToList
#
# Takes an input item, list, or None, and returns a new list of that set.
def CopyToList(item):
  """Return a fresh list built from |item|.

  None/empty values become []; a bare value becomes a one-element list; an
  existing list is shallow-copied so the caller may mutate the result.
  """
  if not item:
    return []
  if type(item) is not list:
    return [item]
  return list(item)
# IDLSearch
#
# A visitor base class used when traversing the AST. It tracks the current
# recursion depth and provides Enter/Exit hooks for subclasses to override.
#
class IDLSearch(object):
  """Visitor base class used by IDLNode.Traverse.

  Tracks the current recursion depth; subclasses override Enter/Exit to
  collect information as the tree is walked.
  """

  def __init__(self):
    self.depth = 0

  def Enter(self, node):
    """Hook invoked before a node's children are visited."""
    pass

  def Exit(self, node):
    """Hook invoked after a node's children are visited."""
    pass
# IDLAttribute
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLAttribute(object):
  """A name/value property produced during parsing.

  Instances are handed to IDLNode.AddChildren, which stores them in the
  node's property dictionary rather than as tree children.
  """

  def __init__(self, name, value):
    self._cls = 'Property'
    self.name = name
    self.value = value

  def __str__(self):
    return '{}={}'.format(self.name, self.value)

  def GetClass(self):
    return self._cls
#
# IDLNode
#
# This class implements the AST tree, providing the associations between
# parents and children. It also contains a property dictionary to
# allow for look-ups.
#
class IDLNode(object):
  """AST node produced by the IDL parser.

  Holds a class name, a property dictionary (errors, warnings, source
  location, plus any IDLAttributes attached as children) and the
  parent/children links that form the tree.
  """

  def __init__(self, cls, filename, lineno, pos, children=None):
    self._cls = cls
    self._properties = {
      'ERRORS' : [],
      'WARNINGS': [],
      'FILENAME': filename,
      'LINENO' : lineno,
      # NOTE: the misspelled 'POSSITION' key is kept as-is; existing
      # consumers may look it up by this exact name.
      'POSSITION' : pos,
    }
    self._children = []
    self._parent = None
    self.AddChildren(children)

  # Return a string representation of this node
  def __str__(self):
    name = self.GetProperty('NAME','')
    return '%s(%s)' % (self._cls, name)

  def GetLogLine(self, msg):
    """Return 'filename(lineno) : msg\\n' for error/warning output."""
    filename, lineno = self.GetFileAndLine()
    return '%s(%d) : %s\n' % (filename, lineno, msg)

  # Log an error for this object
  def Error(self, msg):
    self.GetProperty('ERRORS').append(msg)
    sys.stderr.write(self.GetLogLine('error: ' + msg))

  # Log a warning for this object
  def Warning(self, msg):
    self.GetProperty('WARNINGS').append(msg)
    sys.stdout.write(self.GetLogLine('warning:' + msg))

  # Return file and line number for where node was defined
  def GetFileAndLine(self):
    return self.GetProperty('FILENAME'), self.GetProperty('LINENO')

  def GetClass(self):
    return self._cls

  def GetName(self):
    return self.GetProperty('NAME')

  def GetParent(self):
    return self._parent

  def Traverse(self, search, filter_nodes):
    """Depth-first walk invoking search.Enter/Exit around each node.

    Subtrees whose class appears in |filter_nodes| are skipped entirely.
    """
    if self._cls in filter_nodes:
      return ''
    search.Enter(self)
    search.depth += 1
    for child in self._children:
      child.Traverse(search, filter_nodes)
    search.depth -= 1
    search.Exit(self)

  def Tree(self, filter_nodes=None, accept_props=None):
    """Return the subtree rendered as a list of indented strings."""
    class DumpTreeSearch(IDLSearch):
      def __init__(self, props):
        IDLSearch.__init__(self)
        self.out = []
        self.props = props

      def Enter(self, node):
        tab = ''.rjust(self.depth * 2)
        self.out.append(tab + str(node))
        if self.props:
          proplist = []
          # FIX: items() instead of the Python-2-only iteritems(), so this
          # also works under Python 3.
          for key, value in node.GetProperties().items():
            if key in self.props:
              proplist.append(tab + '  %s: %s' % (key, str(value)))
          if proplist:
            self.out.append(tab + '  PROPERTIES')
            self.out.extend(proplist)

    if filter_nodes is None:
      filter_nodes = ['Comment', 'Copyright']

    search = DumpTreeSearch(accept_props)
    self.Traverse(search, filter_nodes)
    return search.out

  #
  # Search related functions
  #
  # Check if node is of a given type
  def IsA(self, *typelist):
    return self._cls in typelist

  # Get a list of all children
  def GetChildren(self):
    return self._children

  def GetListOf(self, *keys):
    """Return all direct children whose class is in |keys|."""
    return [child for child in self.GetChildren()
            if child.GetClass() in keys]

  def GetOneOf(self, *keys):
    """Return the first direct child whose class is in |keys|, or None."""
    out = self.GetListOf(*keys)
    return out[0] if out else None

  def AddChildren(self, children):
    """Attach children: IDLAttributes become properties, IDLNodes children.

    Raises:
      RuntimeError: if a child is neither an IDLAttribute nor an IDLNode.
    """
    children = CopyToList(children)
    for child in children:
      if not child:
        continue
      if isinstance(child, IDLAttribute):
        self.SetProperty(child.name, child.value)
        continue
      if isinstance(child, IDLNode):
        child._parent = self
        self._children.append(child)
        continue
      # FIX: the original format string had no '%s' placeholder, so raising
      # this error crashed with a TypeError instead of the intended message.
      raise RuntimeError('Adding child of type %s.\n' % type(child).__name__)

  #
  # Property Functions
  #
  def SetProperty(self, name, val):
    self._properties[name] = val

  def GetProperty(self, name, default=None):
    return self._properties.get(name, default)

  def GetProperties(self):
    return self._properties
| |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
# @author: Seetharama Ayyadevara, Freescale Semiconductor, Inc.
# @author: Kyle Mestery, Cisco Systems, Inc.
import distutils.version as dist_version
import sys
import time
import eventlet
from oslo.config import cfg
from neutron.agent import l2population_rpc
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import polling
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as logging_config
from neutron.common import constants as q_const
from neutron.common import legacy
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.rpc import common as rpc_common
from neutron.openstack.common.rpc import dispatcher
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
# A placeholder for dead vlans.
# One past MAX_VLAN_TAG, i.e. an invalid VLAN id, so traffic tagged with it
# is never delivered to a real network.
DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1)
# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac'
# attributes set).
class LocalVLANMapping:
    """Bookkeeping for one locally allocated VLAN and the virtual network
    (type / physical net / segmentation id) it realizes."""
    def __init__(self, vlan, network_type, physical_network, segmentation_id,
                 vif_ports=None):
        self.vlan = vlan
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
        # None sentinel avoids the shared-mutable-default pitfall.
        self.vif_ports = {} if vif_ports is None else vif_ports
        # set of tunnel ports on which packets should be flooded
        self.tun_ofports = set()

    def __str__(self):
        return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
                (self.vlan, self.network_type, self.physical_network,
                 self.segmentation_id))
class Port(object):
    """ORM-free snapshot of a neutron port.

    Copies the interesting columns out of the DB row so the attributes
    remain readable even after the row has been deleted.
    """
    def __init__(self, p):
        for attr in ('id', 'network_id', 'device_id',
                     'admin_state_up', 'status'):
            setattr(self, attr, getattr(p, attr))

    def __eq__(self, other):
        '''Compare only fields that will cause us to re-wire.'''
        try:
            return (self and other
                    and self.id == other.id
                    and self.admin_state_up == other.admin_state_up)
        except Exception:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.id)
class OVSPluginApi(agent_rpc.PluginApi,
                   sg_rpc.SecurityGroupServerRpcApiMixin):
    """Agent-side plugin RPC API combined with the security-group server
    RPC calls; adds no behavior of its own."""
    pass
class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin):
    """Security-group agent for OVS; binds the RPC mixin to this agent's
    context, plugin RPC proxy and root helper."""
    def __init__(self, context, plugin_rpc, root_helper):
        self.context = context
        self.plugin_rpc = plugin_rpc
        self.root_helper = root_helper
        # init_firewall() is provided by SecurityGroupAgentRpcMixin and
        # reads the attributes assigned above, so it must run last.
        self.init_firewall()
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackMixin):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
    For virtual networks realized as GRE tunnels, a Logical Switch
    (LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
    For each virtual network realized as a VLAN or flat network, a
veth is used to connect the local VLAN on the integration bridge
with the physical network bridge, with flow rules adding,
modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
    def __init__(self, integ_br, tun_br, local_ip,
                 bridge_mappings, root_helper,
                 polling_interval, tunnel_types=None,
                 veth_mtu=None, l2_population=False,
                 minimize_polling=False,
                 ovsdb_monitor_respawn_interval=(
                     constants.DEFAULT_OVSDBMON_RESPAWN)):
        '''Constructor.
        :param integ_br: name of the integration bridge.
        :param tun_br: name of the tunnel bridge.
        :param local_ip: local IP address of this hypervisor.
        :param bridge_mappings: mappings from physical network name to bridge.
        :param root_helper: utility to use when running shell cmds.
        :param polling_interval: interval (secs) to poll DB.
        :param tunnel_types: A list of tunnel types to enable support for in
               the agent. If set, will automatically set enable_tunneling to
               True.
        :param veth_mtu: MTU size for veth interfaces.
        :param l2_population: Optional, whether L2 population is turned on.
        :param minimize_polling: Optional, whether to minimize polling by
               monitoring ovsdb for interface changes.
        :param ovsdb_monitor_respawn_interval: Optional, when using polling
               minimization, the number of seconds to wait before respawning
               the ovsdb monitor.
        '''
        self.veth_mtu = veth_mtu
        self.root_helper = root_helper
        # Pool of local VLAN ids still free for allocation; ids move back
        # and forth between this set and local_vlan_map.
        self.available_local_vlans = set(xrange(q_const.MIN_VLAN_TAG,
                                                q_const.MAX_VLAN_TAG))
        self.tunnel_types = tunnel_types or []
        self.l2_pop = l2_population
        # State dict periodically pushed to the plugin by _report_state().
        self.agent_state = {
            'binary': 'neutron-openvswitch-agent',
            'host': cfg.CONF.host,
            'topic': q_const.L2_AGENT_TOPIC,
            'configurations': {'bridge_mappings': bridge_mappings,
                               'tunnel_types': self.tunnel_types,
                               'tunneling_ip': local_ip,
                               'l2_population': self.l2_pop},
            'agent_type': q_const.AGENT_TYPE_OVS,
            'start_flag': True}
        # Keep track of int_br's device count for use by _report_state()
        self.int_br_device_count = 0
        self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper)
        self.setup_integration_br()
        self.setup_physical_bridges(bridge_mappings)
        self.local_vlan_map = {}
        # Per tunnel type: remote IP -> ofport of the tunnel port.
        self.tun_br_ofports = {constants.TYPE_GRE: {},
                               constants.TYPE_VXLAN: {}}
        self.polling_interval = polling_interval
        self.minimize_polling = minimize_polling
        self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
        if tunnel_types:
            self.enable_tunneling = True
        else:
            self.enable_tunneling = False
        self.local_ip = local_ip
        self.tunnel_count = 0
        self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port
        self._check_ovs_version()
        if self.enable_tunneling:
            self.setup_tunnel_br(tun_br)
        # Collect additional bridges to monitor
        self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
        # Initialize iteration counter
        self.iter_num = 0
        # Perform rpc initialization only once all other configuration
        # is complete
        self.setup_rpc()
    def _check_ovs_version(self):
        # Only VXLAN imposes a minimum OVS version; check_ovs_version is a
        # module-level helper defined elsewhere in this file.
        if constants.TYPE_VXLAN in self.tunnel_types:
            check_ovs_version(constants.MINIMUM_OVS_VXLAN_VERSION,
                              self.root_helper)
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
try:
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
    def setup_rpc(self):
        """Create RPC clients/consumers and start the periodic state report.

        Must run after bridge setup: the agent id is derived from br-int's
        local port MAC.
        """
        mac = self.int_br.get_local_port_mac()
        self.agent_id = '%s%s' % ('ovs', (mac.replace(":", "")))
        self.topic = topics.AGENT
        self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        self.context = context.get_admin_context_without_session()
        # prepare sg_agent for Security group support
        # before we enable RPC handler
        self.sg_agent = OVSSecurityGroupAgent(self.context,
                                              self.plugin_rpc,
                                              self.root_helper)
        # Handle updates from service
        self.dispatcher = self.create_rpc_dispatcher()
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [constants.TUNNEL, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        if self.l2_pop:
            # L2 population notifications are host-scoped.
            consumers.append([topics.L2POPULATION,
                              topics.UPDATE, cfg.CONF.host])
        self.connection = agent_rpc.create_consumers(self.dispatcher,
                                                     self.topic,
                                                     consumers)
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            # Heartbeat loop driving _report_state at the configured rate.
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in self.local_vlan_map.iteritems():
if vif_id in vlan_mapping.vif_ports:
return network_id
def network_delete(self, context, **kwargs):
LOG.debug(_("network_delete received"))
network_id = kwargs.get('network_id')
LOG.debug(_("Delete %s"), network_id)
# The network may not be defined on this agent
lvm = self.local_vlan_map.get(network_id)
if lvm:
self.reclaim_local_vlan(network_id)
else:
LOG.debug(_("Network %s not used on agent."), network_id)
def port_update(self, context, **kwargs):
LOG.debug(_("port_update received"))
port = kwargs.get('port')
# Validate that port is on OVS
vif_port = self.int_br.get_vif_port_by_id(port['id'])
if not vif_port:
return
if ext_sg.SECURITYGROUPS in port:
self.sg_agent.refresh_firewall()
network_type = kwargs.get('network_type')
segmentation_id = kwargs.get('segmentation_id')
physical_network = kwargs.get('physical_network')
self.treat_vif_port(vif_port, port['id'], port['network_id'],
network_type, physical_network,
segmentation_id, port['admin_state_up'])
try:
if port['admin_state_up']:
# update plugin about port status
self.plugin_rpc.update_device_up(self.context, port['id'],
self.agent_id,
cfg.CONF.host)
else:
# update plugin about port status
self.plugin_rpc.update_device_down(self.context, port['id'],
self.agent_id,
cfg.CONF.host)
except rpc_common.Timeout:
LOG.error(_("RPC timeout while updating port %s"), port['id'])
def tunnel_update(self, context, **kwargs):
LOG.debug(_("tunnel_update received"))
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_id = kwargs.get('tunnel_id', tunnel_ip)
if not tunnel_id:
tunnel_id = tunnel_ip
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_("No tunnel_type specified, cannot create tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type)
return
if tunnel_ip == self.local_ip:
return
tun_name = '%s-%s' % (tunnel_type, tunnel_id)
if not self.l2_pop:
self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type)
    def fdb_add(self, context, fdb_entries):
        """Handle an l2population fdb_add: install unicast/flood flows for
        remote ports, creating tunnel ports to their agents as needed.

        fdb_entries maps network_id -> {'ports': {agent_ip: [port_info]}}.
        """
        LOG.debug(_("fdb_add received"))
        for network_id, values in fdb_entries.items():
            lvm = self.local_vlan_map.get(network_id)
            if not lvm:
                # Agent doesn't manage any port in this network
                continue
            agent_ports = values.get('ports')
            # Never add entries pointing back at ourselves.
            agent_ports.pop(self.local_ip, None)
            if len(agent_ports):
                # Batch the flow changes; defer_apply_off() flushes them.
                self.tun_br.defer_apply_on()
                for agent_ip, ports in agent_ports.items():
                    # Ensure we have a tunnel port with this remote agent
                    ofport = self.tun_br_ofports[
                        lvm.network_type].get(agent_ip)
                    if not ofport:
                        port_name = '%s-%s' % (lvm.network_type, agent_ip)
                        ofport = self.setup_tunnel_port(port_name, agent_ip,
                                                        lvm.network_type)
                        # setup_tunnel_port() returns 0 on failure.
                        if ofport == 0:
                            continue
                    for port in ports:
                        self._add_fdb_flow(port, agent_ip, lvm, ofport)
                self.tun_br.defer_apply_off()
    def fdb_remove(self, context, fdb_entries):
        """Handle an l2population fdb_remove: delete the unicast/flood
        flows previously installed for the given remote ports.

        Mirrors fdb_add(); same fdb_entries structure.
        """
        LOG.debug(_("fdb_remove received"))
        for network_id, values in fdb_entries.items():
            lvm = self.local_vlan_map.get(network_id)
            if not lvm:
                # Agent doesn't manage any more ports in this network
                continue
            agent_ports = values.get('ports')
            # Entries for this host never had flows installed locally.
            agent_ports.pop(self.local_ip, None)
            if len(agent_ports):
                # Batch the flow deletions; defer_apply_off() flushes them.
                self.tun_br.defer_apply_on()
                for agent_ip, ports in agent_ports.items():
                    ofport = self.tun_br_ofports[
                        lvm.network_type].get(agent_ip)
                    if not ofport:
                        continue
                    for port in ports:
                        self._del_fdb_flow(port, agent_ip, lvm, ofport)
                self.tun_br.defer_apply_off()
    def _add_fdb_flow(self, port_info, agent_ip, lvm, ofport):
        """Install the tunnel flow for one fdb entry.

        A FLOODING_ENTRY extends the network's broadcast flow with *ofport*;
        any other entry adds a unicast flow keyed on the remote MAC
        (port_info[0]).
        """
        if port_info == q_const.FLOODING_ENTRY:
            lvm.tun_ofports.add(ofport)
            ofports = ','.join(lvm.tun_ofports)
            self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
                                 priority=1,
                                 dl_vlan=lvm.vlan,
                                 actions="strip_vlan,set_tunnel:%s,"
                                 "output:%s" % (lvm.segmentation_id, ofports))
        else:
            # TODO(feleouet): add ARP responder entry
            self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
                                 priority=2,
                                 dl_vlan=lvm.vlan,
                                 dl_dst=port_info[0],
                                 actions="strip_vlan,set_tunnel:%s,output:%s" %
                                 (lvm.segmentation_id, ofport))
    def _del_fdb_flow(self, port_info, agent_ip, lvm, ofport):
        """Remove the tunnel flow for one fdb entry (inverse of
        _add_fdb_flow); drops the flood flow entirely when no tunnel
        ofport remains for the network."""
        if port_info == q_const.FLOODING_ENTRY:
            lvm.tun_ofports.remove(ofport)
            if len(lvm.tun_ofports) > 0:
                ofports = ','.join(lvm.tun_ofports)
                self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
                                     priority=1,
                                     dl_vlan=lvm.vlan,
                                     actions="strip_vlan,"
                                     "set_tunnel:%s,output:%s" %
                                     (lvm.segmentation_id, ofports))
            else:
                # This local vlan doesn't require any more tunelling
                self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN,
                                         dl_vlan=lvm.vlan)
            # Check if this tunnel port is still used
            self.cleanup_tunnel_port(ofport, lvm.network_type)
        else:
            #TODO(feleouet): remove ARP responder entry
            self.tun_br.delete_flows(table=constants.UCAST_TO_TUN,
                                     dl_vlan=lvm.vlan,
                                     dl_dst=port_info[0])
def fdb_update(self, context, fdb_entries):
LOG.debug(_("fdb_update received"))
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return dispatcher.RpcDispatcher([self])
    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id):
        '''Provisions a local VLAN.
        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
                             'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        '''
        if not self.available_local_vlans:
            LOG.error(_("No local VLAN available for net-id=%s"), net_uuid)
            return
        # The id returns to the pool in reclaim_local_vlan().
        lvid = self.available_local_vlans.pop()
        LOG.info(_("Assigning %(vlan_id)s as local vlan for "
                   "net-id=%(net_uuid)s"),
                 {'vlan_id': lvid, 'net_uuid': net_uuid})
        self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type,
                                                         physical_network,
                                                         segmentation_id)
        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                # outbound broadcast/multicast
                ofports = ','.join(self.tun_br_ofports[network_type].values())
                if ofports:
                    self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
                                         priority=1,
                                         dl_vlan=lvid,
                                         actions="strip_vlan,"
                                         "set_tunnel:%s,output:%s" %
                                         (segmentation_id, ofports))
                # inbound from tunnels: set lvid in the right table
                # and resubmit to Table LEARN_FROM_TUN for mac learning
                self.tun_br.add_flow(table=constants.TUN_TABLE[network_type],
                                     priority=1,
                                     tun_id=segmentation_id,
                                     actions="mod_vlan_vid:%s,resubmit(,%s)" %
                                     (lvid, constants.LEARN_FROM_TUN))
            else:
                LOG.error(_("Cannot provision %(network_type)s network for "
                            "net-id=%(net_uuid)s - tunneling disabled"),
                          {'network_type': network_type,
                           'net_uuid': net_uuid})
        elif network_type == constants.TYPE_FLAT:
            if physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[physical_network]
                br.add_flow(priority=4,
                            in_port=self.phys_ofports[physical_network],
                            dl_vlan=lvid,
                            actions="strip_vlan,normal")
                # inbound
                # NOTE(review): dl_vlan=0xffff presumably matches untagged
                # frames (OFP_VLAN_NONE) -- confirm against the OVS docs.
                self.int_br.add_flow(
                    priority=3,
                    in_port=self.int_ofports[physical_network],
                    dl_vlan=0xffff,
                    actions="mod_vlan_vid:%s,normal" % lvid)
            else:
                LOG.error(_("Cannot provision flat network for "
                            "net-id=%(net_uuid)s - no bridge for "
                            "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == constants.TYPE_VLAN:
            if physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[physical_network]
                br.add_flow(priority=4,
                            in_port=self.phys_ofports[physical_network],
                            dl_vlan=lvid,
                            actions="mod_vlan_vid:%s,normal" % segmentation_id)
                # inbound
                self.int_br.add_flow(priority=3,
                                     in_port=self.
                                     int_ofports[physical_network],
                                     dl_vlan=segmentation_id,
                                     actions="mod_vlan_vid:%s,normal" % lvid)
            else:
                LOG.error(_("Cannot provision VLAN network for "
                            "net-id=%(net_uuid)s - no bridge for "
                            "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == constants.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_("Cannot provision unknown network type "
                        "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': network_type,
                       'net_uuid': net_uuid})
    def reclaim_local_vlan(self, net_uuid):
        '''Reclaim a local VLAN.

        Removes the network's flows and returns its local vlan id to the
        available pool.

        :param net_uuid: the network uuid associated with this vlan.
        '''
        lvm = self.local_vlan_map.pop(net_uuid, None)
        if lvm is None:
            LOG.debug(_("Network %s not used on agent."), net_uuid)
            return
        LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"),
                 {'vlan_id': lvm.vlan,
                  'net_uuid': net_uuid})
        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                self.tun_br.delete_flows(
                    table=constants.TUN_TABLE[lvm.network_type],
                    tun_id=lvm.segmentation_id)
                self.tun_br.delete_flows(dl_vlan=lvm.vlan)
                if self.l2_pop:
                    # Try to remove tunnel ports if not used by other networks
                    for ofport in lvm.tun_ofports:
                        self.cleanup_tunnel_port(ofport, lvm.network_type)
        elif lvm.network_type == constants.TYPE_FLAT:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.delete_flows(in_port=self.phys_ofports[lvm.
                                physical_network],
                                dl_vlan=lvm.vlan)
                # inbound
                br = self.int_br
                br.delete_flows(in_port=self.int_ofports[lvm.physical_network],
                                dl_vlan=0xffff)
        elif lvm.network_type == constants.TYPE_VLAN:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.delete_flows(in_port=self.phys_ofports[lvm.
                                physical_network],
                                dl_vlan=lvm.vlan)
                # inbound
                br = self.int_br
                br.delete_flows(in_port=self.int_ofports[lvm.physical_network],
                                dl_vlan=lvm.segmentation_id)
        elif lvm.network_type == constants.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_("Cannot reclaim unknown network type "
                        "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': lvm.network_type,
                       'net_uuid': net_uuid})
        # Return the vlan id to the allocation pool.
        self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network, segmentation_id):
'''Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: a ovslib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
if net_uuid not in self.local_vlan_map:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.vif_id] = port
self.int_br.set_db_attribute("Port", port.port_name, "tag",
str(lvm.vlan))
if int(port.ofport) != -1:
self.int_br.delete_flows(in_port=port.ofport)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
'''
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG)
self.int_br.add_flow(priority=2, in_port=port.ofport, actions="drop")
def setup_integration_br(self):
'''Setup the integration bridge.
Create patch ports and remove all existing flows.
:param bridge_name: the name of the integration bridge.
:returns: the integration bridge
'''
self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port)
self.int_br.remove_all_flows()
# switch all traffic using L2 learning
self.int_br.add_flow(priority=1, actions="normal")
def setup_ancillary_bridges(self, integ_br, tun_br):
'''Setup ancillary bridges - for example br-ex.'''
ovs_bridges = set(ovs_lib.get_bridges(self.root_helper))
# Remove all known bridges
ovs_bridges.remove(integ_br)
if self.enable_tunneling:
ovs_bridges.remove(tun_br)
br_names = [self.phys_brs[physical_network].br_name for
physical_network in self.phys_brs]
ovs_bridges.difference_update(br_names)
# Filter list of bridges to those that have external
# bridge-id's configured
br_names = []
for bridge in ovs_bridges:
id = ovs_lib.get_bridge_external_bridge_id(self.root_helper,
bridge)
if id != bridge:
br_names.append(bridge)
ovs_bridges.difference_update(br_names)
ancillary_bridges = []
for bridge in ovs_bridges:
br = ovs_lib.OVSBridge(bridge, self.root_helper)
LOG.info(_('Adding %s to list of bridges.'), bridge)
ancillary_bridges.append(br)
return ancillary_bridges
def setup_tunnel_br(self, tun_br):
'''Setup the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br: the name of the tunnel bridge.
'''
self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper)
self.tun_br.reset_bridge()
self.patch_tun_ofport = self.int_br.add_patch_port(
cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port)
self.patch_int_ofport = self.tun_br.add_patch_port(
cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port)
if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0:
LOG.error(_("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this version "
"of OVS does not support tunnels or patch ports. "
"Agent terminated!"))
exit(1)
self.tun_br.remove_all_flows()
# Table 0 (default) will sort incoming traffic depending on in_port
self.tun_br.add_flow(priority=1,
in_port=self.patch_int_ofport,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN)
self.tun_br.add_flow(priority=0, actions="drop")
# PATCH_LV_TO_TUN table will handle packets coming from patch_int
# unicasts go to table UCAST_TO_TUN where remote adresses are learnt
self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN,
dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
actions="resubmit(,%s)" % constants.UCAST_TO_TUN)
# Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding
self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN,
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
actions="resubmit(,%s)" % constants.FLOOD_TO_TUN)
# Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id
# for each tunnel type, and resubmit to table LEARN_FROM_TUN where
# remote mac adresses will be learnt
for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type],
priority=0,
actions="drop")
# LEARN_FROM_TUN table will have a single flow using a learn action to
# dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac
# adresses (assumes that lvid has already been set by a previous flow)
learned_flow = ("table=%s,"
"priority=1,"
"hard_timeout=300,"
"NXM_OF_VLAN_TCI[0..11],"
"NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
"load:0->NXM_OF_VLAN_TCI[],"
"load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
"output:NXM_OF_IN_PORT[]" %
constants.UCAST_TO_TUN)
# Once remote mac adresses are learnt, packet is outputed to patch_int
self.tun_br.add_flow(table=constants.LEARN_FROM_TUN,
priority=1,
actions="learn(%s),output:%s" %
(learned_flow, self.patch_int_ofport))
# Egress unicast will be handled in table UCAST_TO_TUN, where remote
# mac adresses will be learned. For now, just add a default flow that
# will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them
# as broadcasts/multicasts
self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN)
# FLOOD_TO_TUN will handle flooding in tunnels based on lvid,
# for now, add a default drop action
self.tun_br.add_flow(table=constants.FLOOD_TO_TUN,
priority=0,
actions="drop")
    def setup_physical_bridges(self, bridge_mappings):
        '''Setup the physical network bridges.
        Creates physical network bridges and links them to the
        integration bridge using veths.
        :param bridge_mappings: map physical network names to bridge names.
        '''
        self.phys_brs = {}
        self.int_ofports = {}
        self.phys_ofports = {}
        ip_wrapper = ip_lib.IPWrapper(self.root_helper)
        for physical_network, bridge in bridge_mappings.iteritems():
            LOG.info(_("Mapping physical network %(physical_network)s to "
                       "bridge %(bridge)s"),
                     {'physical_network': physical_network,
                      'bridge': bridge})
            # setup physical bridge
            # The operator must pre-create physical bridges; this agent
            # refuses to start without them.
            if not ip_lib.device_exists(bridge, self.root_helper):
                LOG.error(_("Bridge %(bridge)s for physical network "
                            "%(physical_network)s does not exist. Agent "
                            "terminated!"),
                          {'physical_network': physical_network,
                           'bridge': bridge})
                sys.exit(1)
            br = ovs_lib.OVSBridge(bridge, self.root_helper)
            br.remove_all_flows()
            br.add_flow(priority=1, actions="normal")
            self.phys_brs[physical_network] = br
            # create veth to patch physical bridge with integration bridge
            int_veth_name = constants.VETH_INTEGRATION_PREFIX + bridge
            self.int_br.delete_port(int_veth_name)
            phys_veth_name = constants.VETH_PHYSICAL_PREFIX + bridge
            br.delete_port(phys_veth_name)
            # Remove any leftover veth device before re-creating the pair.
            if ip_lib.device_exists(int_veth_name, self.root_helper):
                ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete()
                # Give udev a chance to process its rules here, to avoid
                # race conditions between commands launched by udev rules
                # and the subsequent call to ip_wrapper.add_veth
                utils.execute(['/sbin/udevadm', 'settle', '--timeout=10'])
            int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name,
                                                      phys_veth_name)
            self.int_ofports[physical_network] = self.int_br.add_port(int_veth)
            self.phys_ofports[physical_network] = br.add_port(phys_veth)
            # block all untranslated traffic over veth between bridges
            self.int_br.add_flow(priority=2,
                                 in_port=self.int_ofports[physical_network],
                                 actions="drop")
            br.add_flow(priority=2,
                        in_port=self.phys_ofports[physical_network],
                        actions="drop")
            # enable veth to pass traffic
            int_veth.link.set_up()
            phys_veth.link.set_up()
            if self.veth_mtu:
                # set up mtu size for veth interfaces
                int_veth.link.set_mtu(self.veth_mtu)
                phys_veth.link.set_mtu(self.veth_mtu)
def update_ports(self, registered_ports):
ports = self.int_br.get_vif_port_set()
if ports == registered_ports:
return
self.int_br_device_count = len(ports)
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def update_ancillary_ports(self, registered_ports):
ports = set()
for bridge in self.ancillary_brs:
ports |= bridge.get_vif_port_set()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up):
if vif_port:
if admin_state_up:
self.port_bound(vif_port, network_id, network_type,
physical_network, segmentation_id)
else:
self.port_dead(vif_port)
else:
LOG.debug(_("No VIF port for port %s defined on agent."), port_id)
    def setup_tunnel_port(self, port_name, remote_ip, tunnel_type):
        """Create a tunnel port to *remote_ip* on br-tun and wire it in.

        Registers the port in tun_br_ofports, installs its table-0 resubmit
        flow and (without l2_pop) extends the flood flows of every network
        of the same tunnel type.

        Returns the new port's ofport, or 0 when creation failed.
        """
        ofport = self.tun_br.add_tunnel_port(port_name,
                                             remote_ip,
                                             self.local_ip,
                                             tunnel_type,
                                             self.vxlan_udp_port)
        ofport_int = -1
        try:
            ofport_int = int(ofport)
        except (TypeError, ValueError):
            LOG.exception(_("ofport should have a value that can be "
                            "interpreted as an integer"))
        if ofport_int < 0:
            LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"),
                      {'type': tunnel_type, 'ip': remote_ip})
            return 0
        self.tun_br_ofports[tunnel_type][remote_ip] = ofport
        # Add flow in default table to resubmit to the right
        # tunelling table (lvid will be set in the latter)
        self.tun_br.add_flow(priority=1,
                             in_port=ofport,
                             actions="resubmit(,%s)" %
                             constants.TUN_TABLE[tunnel_type])
        ofports = ','.join(self.tun_br_ofports[tunnel_type].values())
        if ofports and not self.l2_pop:
            # Update flooding flows to include the new tunnel
            for network_id, vlan_mapping in self.local_vlan_map.iteritems():
                if vlan_mapping.network_type == tunnel_type:
                    self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
                                         priority=1,
                                         dl_vlan=vlan_mapping.vlan,
                                         actions="strip_vlan,"
                                         "set_tunnel:%s,output:%s" %
                                         (vlan_mapping.segmentation_id,
                                          ofports))
        return ofport
def cleanup_tunnel_port(self, tun_ofport, tunnel_type):
# Check if this tunnel port is still used
for lvm in self.local_vlan_map.values():
if tun_ofport in lvm.tun_ofports:
break
# If not, remove it
else:
for remote_ip, ofport in self.tun_br_ofports[tunnel_type].items():
if ofport == tun_ofport:
port_name = '%s-%s' % (tunnel_type, remote_ip)
self.tun_br.delete_port(port_name)
self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
    def treat_devices_added(self, devices):
        """Fetch details for newly seen *devices* and wire each one up.

        Returns True when a resync with the plugin is required (a details
        RPC failed).
        """
        resync = False
        self.sg_agent.prepare_devices_filter(devices)
        for device in devices:
            LOG.info(_("Port %s added"), device)
            try:
                details = self.plugin_rpc.get_device_details(self.context,
                                                             device,
                                                             self.agent_id)
            except Exception as e:
                LOG.debug(_("Unable to get port details for "
                            "%(device)s: %(e)s"),
                          {'device': device, 'e': e})
                resync = True
                continue
            port = self.int_br.get_vif_port_by_id(details['device'])
            if 'port_id' in details:
                LOG.info(_("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': details})
                self.treat_vif_port(port, details['port_id'],
                                    details['network_id'],
                                    details['network_type'],
                                    details['physical_network'],
                                    details['segmentation_id'],
                                    details['admin_state_up'])
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                LOG.debug(_("Device %s not defined on plugin"), device)
                # Plugin doesn't know the port: quarantine it on the
                # dead vlan rather than leaving it switched normally.
                if (port and int(port.ofport) != -1):
                    self.port_dead(port)
        return resync
def treat_ancillary_devices_added(self, devices):
resync = False
for device in devices:
LOG.info(_("Ancillary Port %s added"), device)
try:
self.plugin_rpc.get_device_details(self.context, device,
self.agent_id)
except Exception as e:
LOG.debug(_("Unable to get port details for "
"%(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
# update plugin about port status
self.plugin_rpc.update_device_up(self.context,
device,
self.agent_id,
cfg.CONF.host)
return resync
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
for device in devices:
LOG.info(_("Attachment %s removed"), device)
try:
self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug(_("port_removed failed for %(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
self.port_unbound(device)
return resync
def treat_ancillary_devices_removed(self, devices):
resync = False
for device in devices:
LOG.info(_("Attachment %s removed"), device)
try:
details = self.plugin_rpc.update_device_down(self.context,
device,
self.agent_id,
cfg.CONF.host)
except Exception as e:
LOG.debug(_("port_removed failed for %(device)s: %(e)s"),
{'device': device, 'e': e})
resync = True
continue
if details['exists']:
LOG.info(_("Port %s updated."), device)
# Nothing to do regarding local networking
else:
LOG.debug(_("Device %s not defined on plugin"), device)
return resync
    def process_network_ports(self, port_info):
        """Apply the added/removed port deltas reported by update_ports.

        :param port_info: dict that may contain 'added' and 'removed'
            device sets
        :returns: True when either treat step failed and a plugin resync
            is required
        """
        resync_a = False
        resync_b = False
        if 'added' in port_info:
            start = time.time()
            resync_a = self.treat_devices_added(port_info['added'])
            LOG.debug(_("process_network_ports - iteration:%(iter_num)d -"
                        "treat_devices_added completed in %(elapsed).3f"),
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        if 'removed' in port_info:
            start = time.time()
            resync_b = self.treat_devices_removed(port_info['removed'])
            LOG.debug(_("process_network_ports - iteration:%(iter_num)d -"
                        "treat_devices_removed completed in %(elapsed).3f"),
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
    def process_ancillary_network_ports(self, port_info):
        """Apply added/removed deltas for ports on ancillary bridges.

        :param port_info: dict that may contain 'added' and 'removed'
            device sets
        :returns: True when either treat step failed and a plugin resync
            is required
        """
        resync_a = False
        resync_b = False
        if 'added' in port_info:
            start = time.time()
            resync_a = self.treat_ancillary_devices_added(port_info['added'])
            LOG.debug(_("process_ancillary_network_ports - iteration: "
                        "%(iter_num)d - treat_ancillary_devices_added "
                        "completed in %(elapsed).3f"),
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        if 'removed' in port_info:
            start = time.time()
            resync_b = self.treat_ancillary_devices_removed(
                port_info['removed'])
            LOG.debug(_("process_ancillary_network_ports - iteration: "
                        "%(iter_num)d - treat_ancillary_devices_removed "
                        "completed in %(elapsed).3f"),
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
def tunnel_sync(self):
resync = False
try:
for tunnel_type in self.tunnel_types:
details = self.plugin_rpc.tunnel_sync(self.context,
self.local_ip,
tunnel_type)
if not self.l2_pop:
tunnels = details['tunnels']
for tunnel in tunnels:
if self.local_ip != tunnel['ip_address']:
tunnel_id = tunnel.get('id', tunnel['ip_address'])
tun_name = '%s-%s' % (tunnel_type, tunnel_id)
self.setup_tunnel_port(tun_name,
tunnel['ip_address'],
tunnel_type)
except Exception as e:
LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"),
{'local_ip': self.local_ip, 'e': e})
resync = True
return resync
    def rpc_loop(self, polling_manager=None):
        """Main agent loop: poll for port changes and process them forever.

        Each iteration (optionally gated by the polling manager) diffs
        the current set of ports against the previous one, treats the
        added/removed deltas, and sleeps out the remainder of the
        configured polling interval.  Any failure forces a full resync
        on the next iteration.

        :param polling_manager: object deciding when polling is needed;
            defaults to polling.AlwaysPoll() (poll every iteration)
        """
        if not polling_manager:
            polling_manager = polling.AlwaysPoll()
        # sync/tunnel_sync start True so the first iteration does a full
        # resync and tunnel advertisement.
        sync = True
        ports = set()
        ancillary_ports = set()
        tunnel_sync = True
        while True:
            try:
                start = time.time()
                port_stats = {'regular': {'added': 0, 'removed': 0},
                              'ancillary': {'added': 0, 'removed': 0}}
                LOG.debug(_("Agent rpc_loop - iteration:%d started"),
                          self.iter_num)
                if sync:
                    LOG.info(_("Agent out of sync with plugin!"))
                    # Clearing the known-port sets makes every current
                    # port show up as 'added' in the next diff.
                    ports.clear()
                    ancillary_ports.clear()
                    sync = False
                    polling_manager.force_polling()
                # Notify the plugin of tunnel IP
                if self.enable_tunneling and tunnel_sync:
                    LOG.info(_("Agent tunnel out of sync with plugin!"))
                    tunnel_sync = self.tunnel_sync()
                if polling_manager.is_polling_required:
                    LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - "
                                "starting polling. Elapsed:%(elapsed).3f"),
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    port_info = self.update_ports(ports)
                    LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - "
                                "port information retrieved. "
                                "Elapsed:%(elapsed).3f"),
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    # notify plugin about port deltas
                    if port_info:
                        LOG.debug(_("Agent loop has new devices!"))
                        # If treat devices fails - must resync with plugin
                        sync = self.process_network_ports(port_info)
                        LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -"
                                    "ports processed. Elapsed:%(elapsed).3f"),
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                        ports = port_info['current']
                        port_stats['regular']['added'] = (
                            len(port_info.get('added', [])))
                        port_stats['regular']['removed'] = (
                            len(port_info.get('removed', [])))
                    # Treat ancillary devices if they exist
                    if self.ancillary_brs:
                        port_info = self.update_ancillary_ports(
                            ancillary_ports)
                        LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -"
                                    "ancillary port info retrieved. "
                                    "Elapsed:%(elapsed).3f"),
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                        if port_info:
                            rc = self.process_ancillary_network_ports(
                                port_info)
                            LOG.debug(_("Agent rpc_loop - iteration:"
                                        "%(iter_num)d - ancillary ports "
                                        "processed. Elapsed:%(elapsed).3f"),
                                      {'iter_num': self.iter_num,
                                       'elapsed': time.time() - start})
                            ancillary_ports = port_info['current']
                            port_stats['ancillary']['added'] = (
                                len(port_info.get('added', [])))
                            port_stats['ancillary']['removed'] = (
                                len(port_info.get('removed', [])))
                            # Ancillary failures also force a resync.
                            sync = sync | rc
                    polling_manager.polling_completed()
            except Exception:
                # Never let the loop die; resync everything next pass.
                LOG.exception(_("Error in agent event loop"))
                sync = True
                tunnel_sync = True
            # sleep till end of polling interval
            elapsed = (time.time() - start)
            LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d "
                        "completed. Processed ports statistics: "
                        "%(port_stats)s. Elapsed:%(elapsed).3f"),
                      {'iter_num': self.iter_num,
                       'port_stats': port_stats,
                       'elapsed': elapsed})
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug(_("Loop iteration exceeded interval "
                            "(%(polling_interval)s vs. %(elapsed)s)!"),
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
            self.iter_num = self.iter_num + 1
def daemon_loop(self):
with polling.get_polling_manager(
self.minimize_polling,
self.root_helper,
self.ovsdb_monitor_respawn_interval) as pm:
self.rpc_loop(polling_manager=pm)
def check_ovs_version(min_required_version, root_helper):
    """Verify the installed Open vSwitch supports VXLAN tunnels.

    Checks the userspace tools first and, when they pass, the kernel
    module.  An undetectable kernel module only logs a warning; an
    undetectable userspace version or a failed version check aborts.

    :param min_required_version: minimum acceptable OVS version string
    :param root_helper: root wrapper used to query the userspace version
    :raises SystemExit: when a version check fails or the userspace
        version cannot be determined
    """
    LOG.debug(_("Checking OVS version for VXLAN support"))
    installed_klm_version = ovs_lib.get_installed_ovs_klm_version()
    installed_usr_version = ovs_lib.get_installed_ovs_usr_version(root_helper)
    # First check the userspace version
    if installed_usr_version:
        if dist_version.StrictVersion(
                installed_usr_version) < dist_version.StrictVersion(
                min_required_version):
            LOG.error(_('Failed userspace version check for Open '
                        'vSwitch with VXLAN support. To use '
                        'VXLAN tunnels with OVS, please ensure '
                        'the OVS version is %s '
                        'or newer!'), min_required_version)
            # Consistent exit mechanism (original mixed sys.exit and
            # raise SystemExit; they are equivalent).
            raise SystemExit(1)
        # Now check the kernel version
        if installed_klm_version:
            if dist_version.StrictVersion(
                    installed_klm_version) < dist_version.StrictVersion(
                    min_required_version):
                LOG.error(_('Failed kernel version check for Open '
                            'vSwitch with VXLAN support. To use '
                            'VXLAN tunnels with OVS, please ensure '
                            'the OVS version is %s or newer!'),
                          min_required_version)
                raise SystemExit(1)
        else:
            LOG.warning(_('Cannot determine kernel Open vSwitch version, '
                          'please ensure your Open vSwitch kernel module '
                          'is at least version %s to support VXLAN '
                          'tunnels.'), min_required_version)
    else:
        LOG.warning(_('Unable to determine Open vSwitch version. Please '
                      'ensure that its version is %s or newer to use VXLAN '
                      'tunnels with OVS.'), min_required_version)
        raise SystemExit(1)
def create_agent_config_map(config):
    """Create a map of agent config parameters.

    :param config: an instance of cfg.CONF
    :returns: a map of agent configuration parameters
    :raises ValueError: when bridge_mappings cannot be parsed, a tunnel
        type is invalid, or tunneling is requested without a local_ip
    """
    try:
        bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings)
    except ValueError as e:
        raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
    kwargs = dict(
        integ_br=config.OVS.integration_bridge,
        tun_br=config.OVS.tunnel_bridge,
        local_ip=config.OVS.local_ip,
        bridge_mappings=bridge_mappings,
        root_helper=config.AGENT.root_helper,
        polling_interval=config.AGENT.polling_interval,
        minimize_polling=config.AGENT.minimize_polling,
        tunnel_types=config.AGENT.tunnel_types,
        veth_mtu=config.AGENT.veth_mtu,
        l2_population=config.AGENT.l2_population,
    )
    # If enable_tunneling is TRUE, set tunnel_type to default to GRE
    if config.OVS.enable_tunneling and not kwargs['tunnel_types']:
        kwargs['tunnel_types'] = [constants.TYPE_GRE]
    # Verify the tunnel_types specified are valid
    for tun in kwargs['tunnel_types']:
        if tun not in constants.TUNNEL_NETWORK_TYPES:
            # BUG FIX: the original assigned a (message, value) tuple
            # instead of %-formatting the message into a string.
            msg = _('Invalid tunnel type specified: %s') % tun
            raise ValueError(msg)
        if not kwargs['local_ip']:
            msg = _('Tunneling cannot be enabled without a valid local_ip.')
            raise ValueError(msg)
    return kwargs
def main():
    """Entry point: parse config, build the OVS agent, run it forever."""
    eventlet.monkey_patch()
    cfg.CONF.register_opts(ip_lib.OPTS)
    cfg.CONF(project='neutron')
    logging_config.setup_logging(cfg.CONF)
    # Translate any legacy Quantum-era option names into Neutron ones.
    legacy.modernize_quantum_config(cfg.CONF)
    try:
        agent_config = create_agent_config_map(cfg.CONF)
    except ValueError as e:
        LOG.error(_('%s Agent terminated!'), e)
        sys.exit(1)
    is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper']
    if is_xen_compute_host:
        # Force ip_lib to always use the root helper to ensure that ip
        # commands target xen dom0 rather than domU.
        cfg.CONF.set_default('ip_lib_force_root', True)
    agent = OVSNeutronAgent(**agent_config)
    # Start everything.
    LOG.info(_("Agent initialized successfully, now running... "))
    agent.daemon_loop()
    sys.exit(0)
# Allow running the agent directly as a script.
if __name__ == "__main__":
    main()
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after an RPC failure before retrying.
ERR_SLEEP = 15
# Initial upper bound of the nonce range scanned per getwork request;
# later rescaled by each Miner from the measured hash rate.
MAX_NONCE = 1000000L
# Global configuration parsed from the key=value config file.
settings = {}
# Shared pretty-printer (kept for debugging output).
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2).

    Uses HTTP Basic auth over a persistent httplib connection.
    """
    # Monotonically increasing JSON-RPC request id (shadowed onto the
    # instance by the first += in rpc()).
    OBJID = 1
    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30 second timeout; strict mode disabled.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result field.

        Returns None on transport/decode problems, the error object when
        the server reports an error, otherwise the decoded result.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        """Fetch work (data=None) or submit a solved block (data set)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to its low-order 32 bits."""
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word (mask inlined from uint32)."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Reverse the byte order within every 4-byte word of *in_buf*."""
    out = []
    pos = 0
    while pos < len(in_buf):
        value, = struct.unpack('@I', in_buf[pos:pos + 4])
        out.append(struct.pack('@I', bytereverse(value)))
        pos += 4
    return ''.join(out)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf*."""
    words = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Brute-force getwork miner scanning a nonce range per work unit."""
    def __init__(self, id):
        # Worker id, used only in hash-meter output.
        self.id = id
        # Nonces to scan per work unit; rescaled after each iteration.
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan nonces over the given block header; return progress.

        :param datastr: hex-encoded getwork data (128 bytes binary)
        :param targetstr: hex-encoded 256-bit target
        :returns: (hashes_done, nonce_bin) where nonce_bin is the packed
            winning nonce or None when no solution was found
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the solved nonce into the work data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars 152-160 of the original data string.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """One mining round: fetch work, scan, adapt rate, maybe submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Rescale the nonce range so a scan takes ~'scantime' seconds,
        # capped just below the 32-bit nonce limit.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run a single Miner instance forever."""
    Miner(id).loop()
if __name__ == '__main__':
    # Usage: pyminer.py CONFIG-FILE
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the config file into the global settings dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for any missing optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8442
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Normalize numeric settings (config values are parsed as strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Spawn one miner process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
from common_fixtures import * # NOQA
def create_environment_with_dns_services(testname, super_client, client,
                                         service_scale,
                                         consumed_service_scale,
                                         port, cross_linking=False,
                                         isnetworkModeHost_svc=False,
                                         isnetworkModeHost_consumed_svc=False):
    """Build and activate a stack of one client service, two consumed
    services and a DNS service linking them, then verify all links.

    Returns (env, service, consumed_service, consumed_service1, dns).
    """
    if isnetworkModeHost_svc or isnetworkModeHost_consumed_svc:
        env, service, consumed_service, consumed_service1, dns = \
            create_env_with_2_svc_dns_hostnetwork(
                testname, client, service_scale, consumed_service_scale, port,
                cross_linking, isnetworkModeHost_svc,
                isnetworkModeHost_consumed_svc)
    else:
        env, service, consumed_service, consumed_service1, dns = \
            create_env_with_2_svc_dns(
                testname, client, service_scale, consumed_service_scale, port,
                cross_linking)
    for svc in (service, consumed_service, consumed_service1, dns):
        svc.activate()
    # service -> dns -> {consumed_service, consumed_service1}
    service.addservicelink(serviceLink={"serviceId": dns.id})
    dns.addservicelink(serviceLink={"serviceId": consumed_service.id})
    dns.addservicelink(serviceLink={"serviceId": consumed_service1.id})
    service = client.wait_success(service, 120)
    consumed_service = client.wait_success(consumed_service, 120)
    consumed_service1 = client.wait_success(consumed_service1, 120)
    dns = client.wait_success(dns, 120)
    for svc in (service, consumed_service, consumed_service1, dns):
        assert svc.state == "active"
    validate_add_service_link(super_client, service, dns)
    validate_add_service_link(super_client, dns, consumed_service)
    validate_add_service_link(super_client, dns, consumed_service1)
    return env, service, consumed_service, consumed_service1, dns
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsSvcActivateDnsConsumedSvcLink:
    """Activate all services, then add the DNS links, then validate."""
    # Parameters consumed by the shared fixture helpers; each test class
    # uses a distinct host port to avoid collisions.
    testname = "TestDnsSvcActivateDnsConsumedSvcLink"
    port = "31100"
    service_scale = 1
    consumed_service_scale = 2
    @pytest.mark.create
    def test_dns_activate_svc_dns_consumed_svc_link_create(self, super_client,
                                                           client):
        """Create the stack and persist ids for the validate phase."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale, self.port)
        logger.info("env is : %s", env)
        logger.info("DNS service is: %s", dns)
        logger.info("DNS name is: %s", dns.name)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.name]
        logger.info("data to save: %s", data)
        # NOTE(review): save()/load() come from common_fixtures and are
        # assumed to persist data between incremental phases — confirm.
        save(data, self)
    @pytest.mark.validate
    def test_dns_activate_svc_dns_consumed_svc_link_validate(self,
                                                             super_client,
                                                             client):
        """Reload entities by uuid, validate DNS resolution, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dnsname = data[4]
        logger.info("dns name is: %s", dnsname)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dnsname)
        delete_all(client, [env])
@pytest.mark.skipif(True, reason='Needs QA debugging')
class TestDnsCrossLink:
    """DNS linking services that live in different environments
    (cross_linking=True). Currently skipped pending QA debugging."""
    testname = "TestDnsCrossLink"
    port = "31101"
    service_scale = 1
    consumed_service_scale = 2
    def test_dns_cross_link_create(self, super_client, client):
        """Create a cross-linked stack and persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale,
                self.port, True)
        logger.info("env is : %s", env)
        logger.info("DNS service is: %s", dns)
        logger.info("DNS name is: %s", dns.name)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.name, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    def test_dns_cross_link_validate(self, super_client, client):
        """Validate cross-environment DNS and delete every environment."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dnsname = data[4]
        logger.info("dns name is: %s", dnsname)
        dns = client.list_service(uuid=data[5])[0]
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dnsname)
        # Consumed services live in their own environments when
        # cross-linked, so those environments are deleted too.
        delete_all(client, [env, get_env(super_client, consumed_service),
                            get_env(super_client, consumed_service1), dns])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsActivateConsumedSvcLinkActivateSvc:
    """Add the links first, then activate services and DNS."""
    testname = "TestDnsActivateConsumedSvcLinkActivateSvc"
    port = "31102"
    service_scale = 1
    consumed_service_scale = 2
    @pytest.mark.create
    def test_dns_activate_consumed_svc_link_activate_svc_create(
            self, super_client, client):
        """Link before activation, then activate, then persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_env_with_2_svc_dns(
                self.testname, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        link_svc(super_client, service, [dns])
        link_svc(super_client, dns, [consumed_service, consumed_service1])
        service = activate_svc(client, service)
        consumed_service = activate_svc(client, consumed_service)
        consumed_service1 = activate_svc(client, consumed_service1)
        dns = activate_svc(client, dns)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.name]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_activate_consumed_svc_link_activate_svc_validate(
            self, super_client, client):
        """Reload entities by uuid, validate DNS resolution, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dnsname = data[4]
        logger.info("dns name is: %s", dnsname)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dnsname)
        delete_all(client, [env])
@pytest.mark.P1
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsActivateSvcLinkActivateConsumedSvc:
    """Activate the services first, then link, then activate DNS last."""
    testname = "TestDnsActivateSvcLinkActivateConsumedSvc"
    port = "31103"
    service_scale = 1
    consumed_service_scale = 2
    @pytest.mark.create
    def test_dns_activate_svc_link_activate_consumed_svc_create(self,
                                                                super_client,
                                                                client):
        """Activate services, add links, activate DNS, persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_env_with_2_svc_dns(
                self.testname, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        service = activate_svc(client, service)
        consumed_service = activate_svc(client, consumed_service)
        consumed_service1 = activate_svc(client, consumed_service1)
        link_svc(super_client, service, [dns])
        link_svc(super_client, dns, [consumed_service, consumed_service1])
        dns = activate_svc(client, dns)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_activate_svc_link_activate_consumed_svc_validate(self,
                                                                  super_client,
                                                                  client):
        """Reload entities by uuid, validate DNS resolution, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsLinkActivateConsumedSvcActivateSvc:
    """Activate DNS first, then link, then activate the services."""
    testname = "TestDnsLinkActivateConsumedSvcActivateSvc"
    port = "31104"
    service_scale = 1
    consumed_service_scale = 2
    @pytest.mark.create
    def test_dns_link_activate_consumed_svc_activate_svc_create(self,
                                                                super_client,
                                                                client):
        """Activate DNS, add links, activate services, persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_env_with_2_svc_dns(
                self.testname, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        dns = activate_svc(client, dns)
        link_svc(super_client, service, [dns])
        link_svc(super_client, dns, [consumed_service, consumed_service1])
        service = activate_svc(client, service)
        consumed_service = activate_svc(client, consumed_service)
        consumed_service1 = activate_svc(client, consumed_service1)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_link_activate_consumed_svc_activate_svc_validate(self,
                                                                  super_client,
                                                                  client):
        """Reload entities by uuid, validate DNS resolution, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        validate_dns_service(
            super_client, service,
            [consumed_service, consumed_service1],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsLinkWhenServicesStillActivating:
    """Add DNS links while the services are still activating."""
    testname = "TestDnsLinkWhenServicesStillActivating"
    port = "31106"
    service_scale = 1
    consumed_service_scale = 2
    @pytest.mark.create
    def test_dns_link_when_services_still_activating_create(self, super_client,
                                                            client):
        """Fire activations, link immediately (without waiting), then
        wait for everything to settle and persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_env_with_2_svc_dns(
                self.testname, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        service.activate()
        consumed_service.activate()
        consumed_service1.activate()
        dns.activate()
        # Links are added before the activations have completed.
        service.addservicelink(serviceLink={"serviceId": dns.id})
        dns.addservicelink(serviceLink={"serviceId": consumed_service.id})
        dns.addservicelink(serviceLink={"serviceId": consumed_service1.id})
        service = client.wait_success(service, 120)
        consumed_service = client.wait_success(consumed_service, 120)
        consumed_service1 = client.wait_success(consumed_service1, 120)
        dns = client.wait_success(dns, 120)
        # NOTE(review): unlike sibling tests, dns.state is not asserted
        # here — confirm whether that omission is intentional.
        assert service.state == "active"
        assert consumed_service.state == "active"
        assert consumed_service1.state == "active"
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_link_when_services_still_activating_validate(self,
                                                              super_client,
                                                              client):
        """Reload entities, validate links and DNS resolution, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        validate_add_service_link(super_client, service, dns)
        validate_add_service_link(super_client, dns, consumed_service)
        validate_add_service_link(super_client, dns, consumed_service1)
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServiceScaleUp:
    """Scale the client service up and verify DNS keeps working."""
    testname = "TestDnsServiceScaleUp"
    port = "31107"
    service_scale = 1
    consumed_service_scale = 2
    # Scale the client service 1 -> 3.
    final_service_scale = 3
    @pytest.mark.create
    def test_dns_service_scale_up_create(self, super_client, client):
        """Create the stack, scale the client service up, persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        service = client.update(service, scale=self.final_service_scale,
                                name=service.name)
        service = client.wait_success(service, 120)
        assert service.state == "active"
        assert service.scale == self.final_service_scale
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_service_scale_up_validate(self, super_client, client):
        """Reload entities, validate DNS after the scale-up, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServicesScaleDown:
    """Scale the client service down and verify DNS keeps working."""
    testname = "TestDnsServicesScaleDown"
    port = "31108"
    service_scale = 3
    consumed_service_scale = 2
    # Scale the client service 3 -> 1.
    final_service_scale = 1
    @pytest.mark.create
    def test_dns_services_scale_down_create(self, super_client, client):
        """Create the stack, scale the client service down, persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        service = client.update(service, scale=self.final_service_scale,
                                name=service.name)
        service = client.wait_success(service, 120)
        assert service.state == "active"
        assert service.scale == self.final_service_scale
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_services_scale_down_validate(self, super_client, client):
        """Reload entities, validate DNS after the scale-down, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsConsumedServicesScaleUp:
    """Scale a consumed service up and verify DNS keeps working."""
    testname = "TestDnsConsumedServicesScaleUp"
    port = "31109"
    service_scale = 1
    consumed_service_scale = 2
    # Scale the first consumed service 2 -> 4.
    final_consumed_service_scale = 4
    @pytest.mark.create
    def test_dns_consumed_services_scale_up_create(self, super_client, client):
        """Create the stack, scale one consumed service up, persist ids."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        consumed_service = client.update(
            consumed_service,
            scale=self.final_consumed_service_scale,
            name=consumed_service.name)
        consumed_service = client.wait_success(consumed_service, 120)
        assert consumed_service.state == "active"
        assert consumed_service.scale == self.final_consumed_service_scale
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)
    @pytest.mark.validate
    def test_dns_consumed_services_scale_up_validate(self, super_client,
                                                     client):
        """Reload entities, validate DNS after the scale-up, clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsConsumedServicesScaleDown:
    """Scale a consumed service down and verify DNS resolution survives."""

    testname = "TestDnsConsumedServicesScaleDown"
    port = "3110"
    service_scale = 2
    consumed_service_scale = 3
    final_consumed_svc_scale = 1

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_consumed_services_scale_down_create(self, super_client,
                                                     client):
        """Create DNS-linked services, then scale one consumed service down."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        consumed_service = client.update(consumed_service,
                                         scale=self.final_consumed_svc_scale,
                                         name=consumed_service.name)
        consumed_service = client.wait_success(consumed_service, 120)
        assert consumed_service.state == "active"
        assert consumed_service.scale == self.final_consumed_svc_scale
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_consumed_services_scale_down_validate(self, super_client,
                                                       client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsConsumedServicesStopStartInstance:
    """Stop a consumed-service instance and verify the service recovers."""

    testname = "TestDnsConsumedServicesStopStartInstance"
    port = "3111"
    service_scale = 1
    consumed_service_scale = 3

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_consumed_services_stop_start_instance_create(self,
                                                              super_client,
                                                              client):
        """Stop one consumed-service container and wait for scale recovery."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        container_name = env.name + "_" + consumed_service.name + "_2"
        containers = client.list_container(name=container_name)
        assert len(containers) == 1
        container = containers[0]
        # Stop instance
        container = client.wait_success(container.stop(), 120)
        consumed_service = client.wait_success(consumed_service)
        wait_for_scale_to_adjust(super_client, consumed_service)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_consumed_services_stop_start_instance_validate(
            self, super_client, client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsConsumedServicesRestartInstance:
    """Restart a consumed-service instance and verify DNS resolution."""

    testname = "TestDnsConsumedServicesRestartInstance"
    port = "3112"
    service_scale = 1
    consumed_service_scale = 3

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_consumed_services_restart_instance_create(self, super_client,
                                                           client):
        """Restart one consumed-service container and confirm it runs."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale, self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        container_name = env.name + "_" + consumed_service.name + "_2"
        containers = client.list_container(name=container_name)
        assert len(containers) == 1
        container = containers[0]
        # Restart instance
        container = client.wait_success(container.restart(), 120)
        assert container.state == 'running'
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_consumed_services_restart_instance_validate(self,
                                                             super_client,
                                                             client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsConsumedServicesDeleteInstance:
    """Delete a consumed-service instance and verify the service recovers."""

    testname = "TestDnsConsumedServicesDeleteInstance"
    port = "3113"
    service_scale = 1
    consumed_service_scale = 3

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_consumed_services_delete_instance_create(self, super_client,
                                                          client):
        """Delete one consumed-service container and wait for recovery."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        container_name = env.name + "_" + consumed_service.name + "_1"
        containers = client.list_container(name=container_name)
        assert len(containers) == 1
        container = containers[0]
        # Delete instance
        container = client.wait_success(client.delete(container))
        assert container.state == 'removed'
        wait_for_scale_to_adjust(super_client, consumed_service)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_consumed_services_delete_instance_validate(self, super_client,
                                                            client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsConsumedServicesDeactivateActivate:
    """Deactivate/activate a consumed service and verify DNS resolution."""

    testname = "TestDnsConsumedServicesDeactivateActivate"
    port = "3114"
    service_scale = 1
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_consumed_services_deactivate_activate_create(self,
                                                              super_client,
                                                              client):
        """Deactivate then reactivate a consumed service."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        consumed_service = consumed_service.deactivate()
        consumed_service = client.wait_success(consumed_service, 120)
        assert consumed_service.state == "inactive"
        wait_until_instances_get_stopped(super_client, consumed_service)
        consumed_service = consumed_service.activate()
        consumed_service = client.wait_success(consumed_service, 120)
        assert consumed_service.state == "active"
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_consumed_services_deactivate_activate_validate(self,
                                                                super_client,
                                                                client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServiceDeactivateActivate:
    """Deactivate/activate the client service and verify DNS resolution."""

    testname = "TestDnsServiceDeactivateActivate"
    port = "3115"
    service_scale = 1
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_service_deactivate_activate_create(self, super_client,
                                                    client):
        """Deactivate then reactivate the client service."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1], self.port,
                             dns.name)
        service = service.deactivate()
        service = client.wait_success(service, 120)
        assert service.state == "inactive"
        wait_until_instances_get_stopped(super_client, service)
        service = service.activate()
        service = client.wait_success(service, 120)
        assert service.state == "active"
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_service_deactivate_activate_validate(self, super_client,
                                                      client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsDeactivateActivateEnvironment:
    """Deactivate/activate all environment services; verify DNS recovery."""

    testname = "TestDnsDeactivateActivateEnvironment"
    port = "3116"
    service_scale = 1
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_deactivate_activate_environment_create(self, super_client,
                                                        client):
        """Deactivate then reactivate all services in the environment."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client, self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        env = env.deactivateservices()
        service = client.wait_success(service, 120)
        assert service.state == "inactive"
        consumed_service = client.wait_success(consumed_service, 120)
        assert consumed_service.state == "inactive"
        wait_until_instances_get_stopped(super_client, service)
        wait_until_instances_get_stopped(super_client, consumed_service)
        env = env.activateservices()
        service = client.wait_success(service, 120)
        assert service.state == "active"
        consumed_service = client.wait_success(consumed_service, 120)
        assert consumed_service.state == "active"
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_deactivate_activate_environment_validate(self, super_client,
                                                          client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsAddRemoceServicelinks:
    """Add and remove dns service links one at a time and verify resolution.

    NOTE(review): class name keeps the original "Remoce" spelling because
    the testname string keys the saved create/validate state.
    NOTE(review): port "3117" is also used by
    TestDnsAddRemoveServiceLinksUsingSet — confirm this cannot collide.
    """

    testname = "TestDnsAddRemoceServicelinks"
    port = "3117"
    service_scale = 1
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_add_remove_servicelinks_create(self, super_client, client):
        """Create DNS-linked services plus one extra unlinked service."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale, self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port, dns.name)
        # Add another service to environment
        launch_config = {"imageUuid": WEB_IMAGE_UUID}
        random_name = random_str()
        consumed_service_name = random_name.replace("-", "")
        consumed_service2 = client.create_service(name=consumed_service_name,
                                                  environmentId=env.id,
                                                  launchConfig=launch_config,
                                                  scale=2)
        consumed_service2 = client.wait_success(consumed_service2)
        assert consumed_service2.state == "inactive"
        consumed_service2 = consumed_service2.activate()
        consumed_service2 = client.wait_success(consumed_service2, 120)
        assert consumed_service2.state == "active"
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, consumed_service2.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_add_remove_servicelinks_validate(self, super_client, client):
        """Add a link to the new service, then drop one existing link."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        # Fixes the original log line that mislabeled this lookup as
        # "consumed service1".
        consumed_service2 = self._get_service(client, data[4],
                                              "consumed service2")
        dns = self._get_service(client, data[5], "dns")
        # Add another service link
        dns.addservicelink(serviceLink={"serviceId": consumed_service2.id})
        validate_add_service_link(super_client, dns, consumed_service2)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1,
                                    consumed_service2], self.port, dns.name)
        # Remove existing service link to the service
        dns.removeservicelink(serviceLink={"serviceId": consumed_service.id})
        validate_remove_service_link(super_client, dns, consumed_service)
        validate_dns_service(
            super_client, service, [consumed_service1, consumed_service2],
            self.port, dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServicesDeleteServiceAddServive:
    """Delete the client service, add a replacement, verify DNS still works.

    NOTE(review): class name keeps the original "Servive" spelling because
    the testname string keys the saved create/validate state.
    """

    testname = "TestDnsServicesDeleteServiceAddServive"
    port = "3118"
    service_scale = 2
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_services_delete_service_add_service_create(self, super_client,
                                                            client):
        """Delete the client service, then create a new one linked to dns."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        # Delete Service
        service = client.wait_success(client.delete(service))
        assert service.state == "removed"
        validate_remove_service_link(super_client, service, dns)
        port1 = "31180"
        # Add another service and link to dns service
        launch_config = {"imageUuid": SSH_IMAGE_UUID,
                         "ports": [port1+":22/tcp"]}
        random_name = random_str()
        service_name = random_name.replace("-", "")
        service1 = client.create_service(name=service_name,
                                         environmentId=env.id,
                                         launchConfig=launch_config,
                                         scale=1)
        service1 = client.wait_success(service1)
        assert service1.state == "inactive"
        service1 = service1.activate()
        service1 = client.wait_success(service1, 120)
        assert service1.state == "active"
        service1.addservicelink(serviceLink={"serviceId": dns.id})
        validate_add_service_link(super_client, service1, dns)
        data = [env.uuid, service1.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_services_delete_service_add_service_validate(self,
                                                              super_client,
                                                              client):
        """Re-validate DNS resolution through the replacement service."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service1 = self._get_service(client, data[1], "service1")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        # NOTE(review): service1 was created with port "31180" in the create
        # phase, but validation here uses self.port ("3118") — confirm how
        # validate_dns_service consumes the port argument.
        validate_dns_service(
            super_client, service1, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServicesDeleteAndAddConsumedService:
    """Delete a consumed service, link a replacement, verify DNS resolution."""

    testname = "TestDnsServicesDeleteAndAddConsumedService"
    port = "3119"
    service_scale = 2
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire; it was missing entirely for dns)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_services_delete_and_add_consumed_svc_create(self,
                                                             super_client,
                                                             client):
        """Delete one consumed service and link a newly created one."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        # Delete consume service
        consumed_service = client.wait_success(client.delete(consumed_service))
        assert consumed_service.state == "removed"
        validate_remove_service_link(super_client, dns, consumed_service)
        validate_dns_service(super_client, service, [consumed_service1],
                             self.port,
                             dns.name)
        # Add another consume service and link the service to this newly
        # created service
        launch_config = {"imageUuid": WEB_IMAGE_UUID}
        random_name = random_str()
        service_name = random_name.replace("-", "")
        consumed_service2 = client.create_service(name=service_name,
                                                  environmentId=env.id,
                                                  launchConfig=launch_config,
                                                  scale=1)
        consumed_service2 = client.wait_success(consumed_service2)
        assert consumed_service2.state == "inactive"
        consumed_service2 = consumed_service2.activate()
        consumed_service2 = client.wait_success(consumed_service2, 120)
        assert consumed_service2.state == "active"
        service_link = {"serviceId": consumed_service2.id}
        dns.addservicelink(serviceLink=service_link)
        validate_add_service_link(super_client, dns, consumed_service2)
        data = [env.uuid, service.uuid, consumed_service2.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_services_delete_and_add_consumed_svc_validate(self,
                                                               super_client,
                                                               client):
        """Re-validate DNS against the replacement consumed service."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        # Fixes the original log line that mislabeled this lookup as
        # "consumed service1".
        consumed_service2 = self._get_service(client, data[2],
                                              "consumed service2")
        dns = self._get_service(client, data[3], "dns")
        validate_dns_service(super_client, service,
                             [consumed_service2],
                             self.port,
                             dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServicesStopStartInstance:
    """Stop a client-service instance and verify the service recovers."""

    testname = "TestDnsServicesStopStartInstance"
    port = "3120"
    service_scale = 2
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_services_stop_start_instance_create(self, super_client,
                                                     client):
        """Stop one client-service container and wait for scale recovery."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(self.testname, super_client,
                                                 client,
                                                 self.service_scale,
                                                 self.consumed_service_scale,
                                                 self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        container_name = env.name + "_" + service.name + "_2"
        containers = client.list_container(name=container_name)
        assert len(containers) == 1
        service_instance = containers[0]
        # Stop service instance
        service_instance = client.wait_success(service_instance.stop(), 120)
        service = client.wait_success(service)
        wait_for_scale_to_adjust(super_client, service)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_services_stop_start_instance_validate(self, super_client,
                                                       client):
        """Reload saved services, re-validate DNS resolution and clean up.

        Renamed from the original ``..._delete`` suffix to match the
        ``@pytest.mark.validate`` phase naming used by every other class.
        """
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServicesRestartInstance:
    """Restart a client-service instance and verify DNS resolution."""

    testname = "TestDnsServicesRestartInstance"
    port = "3121"
    service_scale = 2
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_services_restart_instance_create(self, super_client, client):
        """Restart one client-service container and confirm it runs."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        container_name = env.name + "_" + service.name + "_2"
        containers = client.list_container(name=container_name)
        assert len(containers) == 1
        service_instance = containers[0]
        # Restart service instance
        service_instance = client.wait_success(service_instance.restart(), 120)
        assert service_instance.state == 'running'
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_services_restart_instance_validate(self, super_client,
                                                    client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsServiceRestoreInstance:
    """Delete a client-service instance and verify the service recovers."""

    testname = "TestDnsServiceRestoreInstance"
    port = "3122"
    service_scale = 2
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_service_restore_instance_create(self, super_client, client):
        """Delete one client-service container and wait for recovery."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale, self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        container_name = env.name + "_" + service.name + "_2"
        containers = client.list_container(name=container_name)
        assert len(containers) == 1
        service_instance = containers[0]
        # delete service instance
        service_instance = client.wait_success(client.delete(service_instance))
        assert service_instance.state == 'removed'
        wait_for_scale_to_adjust(super_client, service)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_service_restore_instance_validate(self, super_client, client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsDeactivateActivate:
    """Deactivate/activate the dns service itself and verify resolution.

    NOTE(review): port "3114" is also used by
    TestDnsConsumedServicesDeactivateActivate — confirm this cannot collide.
    """

    testname = "TestDnsDeactivateActivate"
    port = "3114"
    service_scale = 1
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_dns_deactivate_activate_create(self, super_client, client):
        """Deactivate then reactivate the dns service."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        dns = dns.deactivate()
        dns = client.wait_success(dns, 120)
        assert dns.state == "inactive"
        dns = dns.activate()
        dns = client.wait_success(dns, 120)
        assert dns.state == "active"
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_dns_deactivate_activate_validate(self, super_client, client):
        """Reload saved services, re-validate DNS resolution and clean up."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsAddRemoveServiceLinksUsingSet:
    """Replace dns service links via setservicelinks and verify resolution.

    NOTE(review): port "3117" is also used by TestDnsAddRemoceServicelinks —
    confirm this cannot collide.
    """

    testname = "TestDnsAddRemoveServiceLinksUsingSet"
    port = "3117"
    service_scale = 1
    consumed_service_scale = 2

    @staticmethod
    def _get_service(client, uuid, label):
        """Fetch one service by uuid, guarding against an empty result
        before indexing (the original asserted after indexing, so the
        guard could never fire)."""
        services = client.list_service(uuid=uuid)
        assert len(services) > 0
        service = services[0]
        logger.info("%s is: %s", label, format(service))
        return service

    @pytest.mark.create
    def test_dns_add_remove_servicelinks_using_set_create(self, super_client,
                                                          client):
        """Create a new service and set dns links with setservicelinks."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale,
                self.port)
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        # Add another service to environment
        launch_config = {"imageUuid": WEB_IMAGE_UUID}
        random_name = random_str()
        consumed_service_name = random_name.replace("-", "")
        # NOTE(review): this rebinds consumed_service1 to the new service, so
        # the setservicelinks call below drops the dns link to the original
        # consumed_service1 — presumably intentional; confirm.
        consumed_service1 = client.create_service(name=consumed_service_name,
                                                  environmentId=env.id,
                                                  launchConfig=launch_config,
                                                  scale=2)
        consumed_service1 = client.wait_success(consumed_service1)
        assert consumed_service1.state == "inactive"
        consumed_service1 = consumed_service1.activate()
        consumed_service1 = client.wait_success(consumed_service1, 120)
        assert consumed_service1.state == "active"
        # Add another service link using setservicelinks
        service_link1 = {"serviceId": consumed_service.id}
        service_link2 = {"serviceId": consumed_service1.id}
        dns.setservicelinks(serviceLinks=[service_link1, service_link2])
        validate_add_service_link(super_client, dns, consumed_service1)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_add_remove_servicelinks_using_set_validate(self, super_client,
                                                            client):
        """Re-validate DNS, then drop one link via setservicelinks."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = self._get_service(client, data[1], "service")
        consumed_service = self._get_service(client, data[2],
                                             "consumed service")
        consumed_service1 = self._get_service(client, data[3],
                                              "consumed service1")
        dns = self._get_service(client, data[4], "dns")
        validate_dns_service(super_client, service,
                             [consumed_service, consumed_service1],
                             self.port, dns.name)
        service_link2 = {"serviceId": consumed_service1.id}
        # Remove existing service link to the service using setservicelinks
        dns.setservicelinks(serviceLinks=[service_link2])
        validate_remove_service_link(super_client, dns, consumed_service)
        validate_dns_service(super_client, service, [consumed_service1],
                             self.port,
                             dns.name)
        delete_all(client, [env])
@pytest.mark.P0
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsSvcConsumedServiceHostnetwork:
    """Incremental DNS round-robin test with default networking on both
    the client service and the consumed services.
    """
    testname = "TestDnsSvcConsumedServiceHostnetwork"
    port = "3118"
    service_scale = 1
    consumed_service_scale = 2

    @pytest.mark.create
    def test_dns_svc_consumed_service_hostnetwork_create(self, super_client,
                                                         client):
        """Create env/service/consumed services/dns and persist uuids."""
        entities = create_environment_with_dns_services(
            self.testname, super_client, client, self.service_scale,
            self.consumed_service_scale, self.port)
        # Order: env, service, consumed_service, consumed_service1, dns.
        data = [entity.uuid for entity in entities]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_svc_consumed_service_hostnetwork_validate(self, super_client,
                                                           client):
        """Reload saved entities, validate DNS resolution, tear down."""
        saved = load(self)
        env = client.list_environment(uuid=saved[0])[0]
        logger.info("env is: %s", format(env))

        looked_up = []
        for message, entity_uuid in (
                ("service is: %s", saved[1]),
                ("consumed service is: %s", saved[2]),
                ("consumed service1 is: %s", saved[3]),
                ("dns is: %s", saved[4])):
            entity = client.list_service(uuid=entity_uuid)[0]
            assert len(entity) > 0
            logger.info(message, format(entity))
            looked_up.append(entity)
        service, consumed_service, consumed_service1, dns = looked_up

        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.P1
@pytest.mark.DNS
@pytest.mark.incremental
class TestDnsSvcManagedConsumedServiceHostnetwork:
    """Incremental DNS test: managed networking on the client service,
    host networking on the consumed services.
    """
    testname = "TestDnsSvcManagedConsumedServiceHostnetwork"
    port = "3118"
    service_scale = 1
    consumed_service_scale = 1

    @pytest.mark.create
    def test_dns_svc_managed_cosumed_service_hostnetwork_create(
            self, super_client, client):
        """Create the stack (consumed services on host network) and save
        the entity uuids for the validate step."""
        entities = create_environment_with_dns_services(
            self.testname, super_client, client, self.service_scale,
            self.consumed_service_scale, self.port,
            isnetworkModeHost_svc=False,
            isnetworkModeHost_consumed_svc=True)
        # Order: env, service, consumed_service, consumed_service1, dns.
        data = [entity.uuid for entity in entities]
        logger.info("data to save: %s", data)
        save(data, self)

    @pytest.mark.validate
    def test_dns_svc_managed_cosumed_service_hostnetwork_validate(
            self, super_client, client):
        """Reload saved entities, validate DNS resolution, tear down."""
        saved = load(self)
        env = client.list_environment(uuid=saved[0])[0]
        logger.info("env is: %s", format(env))

        looked_up = []
        for message, entity_uuid in (
                ("service is: %s", saved[1]),
                ("consumed service is: %s", saved[2]),
                ("consumed service1 is: %s", saved[3]),
                ("dns is: %s", saved[4])):
            entity = client.list_service(uuid=entity_uuid)[0]
            assert len(entity) > 0
            logger.info(message, format(entity))
            looked_up.append(entity)
        service, consumed_service, consumed_service1, dns = looked_up

        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.skipif(True, reason='Needs QA debugging')
class TestDnsSvcHostnetworkConsumedServiceHostnetwork:
    """DNS test with host networking on both the client service and the
    consumed services. Currently skipped pending QA debugging.
    """
    testname = "TestDnsSvcHostnetworkConsumedServiceHostnetwork"
    port = "3119"
    service_scale = 1
    consumed_service_scale = 1

    def test_dns_svc_hostnetwork_consumed_service_hostnetwork_create(
            self, super_client, client):
        """Create the stack (all services on host network) and save the
        entity uuids for the validate step."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale,
                self.port, isnetworkModeHost_svc=True,
                isnetworkModeHost_consumed_svc=True)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    def test_dns_svc_hostnetwork_consumed_service_hostnetwork_validate(
            self, super_client, client):
        """Reload saved entities, validate DNS resolution, tear down."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        # FIX: previously passed the hard-coded string "33" as the port;
        # every sibling test class passes self.port here and this class's
        # services are created on self.port ("3119").
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
@pytest.mark.skipif(True, reason='Needs QA debugging')
class TestDnsSvcHostnetworkConsumedServiceManagedNetwork:
    """DNS test with host networking on the client service and managed
    networking on the consumed services. Skipped pending QA debugging.
    """
    testname = "TestDnsSvcHostnetworkConsumedServiceManagedNetwork"
    port = "3119"
    service_scale = 1
    consumed_service_scale = 1

    def test_dns_svc_hostnetwork_consumed_service_managednetwork_create(
            self, super_client, client):
        """Create the stack (client service on host network) and save the
        entity uuids for the validate step."""
        env, service, consumed_service, consumed_service1, dns = \
            create_environment_with_dns_services(
                self.testname, super_client, client, self.service_scale,
                self.consumed_service_scale, self.port,
                isnetworkModeHost_svc=True,
                isnetworkModeHost_consumed_svc=False)
        data = [env.uuid, service.uuid, consumed_service.uuid,
                consumed_service1.uuid, dns.uuid]
        logger.info("data to save: %s", data)
        save(data, self)

    def test_dns_svc_hostnetwork_consumed_service_managednetwork_validate(
            self, super_client, client):
        """Reload saved entities, validate DNS resolution, tear down."""
        data = load(self)
        env = client.list_environment(uuid=data[0])[0]
        logger.info("env is: %s", format(env))
        service = client.list_service(uuid=data[1])[0]
        assert len(service) > 0
        logger.info("service is: %s", format(service))
        consumed_service = client.list_service(uuid=data[2])[0]
        assert len(consumed_service) > 0
        logger.info("consumed service is: %s", format(consumed_service))
        consumed_service1 = client.list_service(uuid=data[3])[0]
        assert len(consumed_service1) > 0
        logger.info("consumed service1 is: %s", format(consumed_service1))
        dns = client.list_service(uuid=data[4])[0]
        assert len(dns) > 0
        logger.info("dns is: %s", format(dns))
        # FIX: previously passed the hard-coded string "33" as the port;
        # every sibling test class passes self.port here and this class's
        # services are created on self.port ("3119").
        validate_dns_service(
            super_client, service, [consumed_service, consumed_service1],
            self.port,
            dns.name)
        delete_all(client, [env])
| |
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import fixtures
from lxml import etree
import six
from nova.compute import arch
from nova.virt.libvirt import config as vconfig
# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
allow_default_uri_connection = True

# Has libvirt connection been used at least once
connection_used = False


def _reset():
    """Restore module-level connection configuration to its defaults.

    NOTE(review): only ``allow_default_uri_connection`` is restored;
    ``connection_used`` is left untouched — confirm that is intentional.
    """
    global allow_default_uri_connection
    allow_default_uri_connection = True
# Fake numeric constants mirroring libvirt's C enums. Values below the
# "libvirtError enums" note match libvirt; the error enums deliberately
# do not (see that note).

# virDomainState
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6

# virDomainXMLFlags
# NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h
VIR_DOMAIN_XML_SECURE = 1
VIR_DOMAIN_XML_INACTIVE = 2
VIR_DOMAIN_XML_UPDATE_CPU = 4
VIR_DOMAIN_XML_MIGRATABLE = 8

# virDomainBlockRebase flags
VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
VIR_DOMAIN_BLOCK_REBASE_COPY = 8

VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2

# virDomainEventID / lifecycle event types
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0

VIR_DOMAIN_EVENT_DEFINED = 0
VIR_DOMAIN_EVENT_UNDEFINED = 1
VIR_DOMAIN_EVENT_STARTED = 2
VIR_DOMAIN_EVENT_SUSPENDED = 3
VIR_DOMAIN_EVENT_RESUMED = 4
VIR_DOMAIN_EVENT_STOPPED = 5
VIR_DOMAIN_EVENT_SHUTDOWN = 6
VIR_DOMAIN_EVENT_PMSUSPENDED = 7

VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1

# virDomainModificationImpact
VIR_DOMAIN_AFFECT_CURRENT = 0
VIR_DOMAIN_AFFECT_LIVE = 1
VIR_DOMAIN_AFFECT_CONFIG = 2

# virCPUCompareResult
VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2

# virConnectCredentialType
VIR_CRED_USERNAME = 1
VIR_CRED_AUTHNAME = 2
VIR_CRED_LANGUAGE = 3
VIR_CRED_CNONCE = 4
VIR_CRED_PASSPHRASE = 5
VIR_CRED_ECHOPROMPT = 6
VIR_CRED_NOECHOPROMPT = 7
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9

# virDomainMigrateFlags
VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_TUNNELLED = 4
VIR_MIGRATE_UNDEFINE_SOURCE = 16
VIR_MIGRATE_NON_SHARED_INC = 128

VIR_NODE_CPU_STATS_ALL_CPUS = -1

VIR_DOMAIN_START_PAUSED = 1

# libvirtError enums
# (Intentionally different from what's in libvirt. We do this to check,
# that consumers of the library are using the symbolic names rather than
# hardcoding the numerical values)
VIR_FROM_QEMU = 100
VIR_FROM_DOMAIN = 200
VIR_FROM_NWFILTER = 330
VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_FROM_NODEDEV = 666
VIR_ERR_NO_SUPPORT = 3
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
VIR_ERR_OPERATION_FAILED = 510
VIR_ERR_OPERATION_INVALID = 55
VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950
VIR_ERR_CONFIG_UNSUPPORTED = 951
VIR_ERR_NO_NODE_DEVICE = 667
VIR_ERR_NO_SECRET = 66

# Readonly
VIR_CONNECT_RO = 1

# virConnectBaselineCPU flags
VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1

# snapshotCreateXML flags
VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

# blockCommit flags
VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
# blockRebase flags
VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 8

# listAllDomains flags
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2

# secret type
VIR_SECRET_USAGE_TYPE_NONE = 0
VIR_SECRET_USAGE_TYPE_VOLUME = 1
VIR_SECRET_USAGE_TYPE_CEPH = 2
VIR_SECRET_USAGE_TYPE_ISCSI = 3

# Libvirt version
FAKE_LIBVIRT_VERSION = 9011
class HostInfo(object):
    """Description of a fake host, used to back the fake Connection's
    getInfo()/getCapabilities() answers."""

    def __init__(self, arch=arch.X86_64, kB_mem=4096,
                 cpus=2, cpu_mhz=800, cpu_nodes=1,
                 cpu_sockets=1, cpu_cores=2,
                 cpu_threads=1, cpu_model="Penryn",
                 cpu_vendor="Intel", numa_topology='',
                 cpu_disabled=None):
        """Create a new Host Info object

        :param arch: (string) indicating the CPU arch
                     (eg 'i686' or whatever else uname -m might return)
        :param kB_mem: (int) memory size in KBytes
        :param cpus: (int) the number of active CPUs
        :param cpu_mhz: (int) expected CPU frequency
        :param cpu_nodes: (int) the number of NUMA cell, 1 for unusual
                          NUMA topologies or uniform
        :param cpu_sockets: (int) number of CPU sockets per node if nodes > 1,
                            total number of CPU sockets otherwise
        :param cpu_cores: (int) number of cores per socket
        :param cpu_threads: (int) number of threads per core
        :param cpu_model: CPU model
        :param cpu_vendor: CPU vendor
        :param numa_topology: Numa topology
        :param cpu_disabled: List of disabled cpus
        """
        self.arch = arch
        self.kB_mem = kB_mem
        self.cpus = cpus
        self.cpu_mhz = cpu_mhz
        self.cpu_nodes = cpu_nodes
        self.cpu_cores = cpu_cores
        self.cpu_threads = cpu_threads
        self.cpu_sockets = cpu_sockets
        self.cpu_model = cpu_model
        self.cpu_vendor = cpu_vendor
        self.numa_topology = numa_topology
        self.disabled_cpus_list = cpu_disabled or []

    @classmethod
    def _gen_numa_topology(cls, cpu_nodes, cpu_sockets, cpu_cores,
                           cpu_threads, kb_mem, numa_mempages_list=None):
        """Build a LibvirtConfigCapsNUMATopology for the given layout.

        FIX: first parameter renamed from ``self`` to ``cls`` to match
        the @classmethod decorator (the first argument is the class).
        """
        topology = vconfig.LibvirtConfigCapsNUMATopology()

        cpu_count = 0
        for cell_count in range(cpu_nodes):
            cell = vconfig.LibvirtConfigCapsNUMACell()
            cell.id = cell_count
            # NOTE(review): '/' floors on py2 but yields a float on py3
            # (this file still imports six) — confirm intended semantics
            # before switching to '//'.
            cell.memory = kb_mem / cpu_nodes
            for socket_count in range(cpu_sockets):
                for cpu_num in range(cpu_cores * cpu_threads):
                    cpu = vconfig.LibvirtConfigCapsNUMACPU()
                    cpu.id = cpu_count
                    cpu.socket_id = cell_count
                    cpu.core_id = cpu_num // cpu_threads
                    # Hyperthread siblings: all thread ids sharing this
                    # physical core.
                    cpu.siblings = set([cpu_threads *
                                        (cpu_count // cpu_threads) + thread
                                        for thread in range(cpu_threads)])
                    cell.cpus.append(cpu)

                    cpu_count += 1
            # Set mempages per numa cell. if numa_mempages_list is empty
            # we will set only the default 4K pages.
            if numa_mempages_list:
                mempages = numa_mempages_list[cell_count]
            else:
                mempages = vconfig.LibvirtConfigCapsNUMAPages()
                mempages.size = 4
                mempages.total = cell.memory / mempages.size
                mempages = [mempages]
            cell.mempages = mempages
            topology.cells.append(cell)

        return topology

    def get_numa_topology(self):
        """Return the topology supplied at construction time."""
        return self.numa_topology
# virDomainJobType, as returned by jobInfo()/jobStats()
VIR_DOMAIN_JOB_NONE = 0
VIR_DOMAIN_JOB_BOUNDED = 1
VIR_DOMAIN_JOB_UNBOUNDED = 2
VIR_DOMAIN_JOB_COMPLETED = 3
VIR_DOMAIN_JOB_FAILED = 4
VIR_DOMAIN_JOB_CANCELLED = 5
def _parse_disk_info(element):
disk_info = {}
disk_info['type'] = element.get('type', 'file')
disk_info['device'] = element.get('device', 'disk')
driver = element.find('./driver')
if driver is not None:
disk_info['driver_name'] = driver.get('name')
disk_info['driver_type'] = driver.get('type')
source = element.find('./source')
if source is not None:
disk_info['source'] = source.get('file')
if not disk_info['source']:
disk_info['source'] = source.get('dev')
if not disk_info['source']:
disk_info['source'] = source.get('path')
target = element.find('./target')
if target is not None:
disk_info['target_dev'] = target.get('dev')
disk_info['target_bus'] = target.get('bus')
return disk_info
def disable_event_thread(self):
    """Disable nova libvirt driver event thread.

    The Nova libvirt driver includes a native thread which monitors
    the libvirt event channel. In a testing environment this becomes
    problematic because it means we've got a floating thread calling
    sleep(1) over the life of the unit test. Seems harmless? It's not,
    because we sometimes want to test things like retry loops that
    should have specific sleep paterns. An unlucky firing of the
    libvirt thread will cause a test failure.

    NOTE(review): ``self`` is expected to provide ``useFixture`` (i.e. a
    fixtures-enabled TestCase) — this is a helper, not a method.
    """
    # because we are patching a method in a class MonkeyPatch doesn't
    # auto import correctly. Import explicitly otherwise the patching
    # may silently fail.
    import nova.virt.libvirt.host  # noqa

    def evloop(*args, **kwargs):
        # No-op replacement for Host._init_events.
        pass

    self.useFixture(fixtures.MonkeyPatch(
        'nova.virt.libvirt.host.Host._init_events',
        evloop))
class libvirtError(Exception):
    """This class was copied and slightly modified from
    `libvirt-python:libvirt-override.py`.

    Since a test environment will use the real `libvirt-python` version of
    `libvirtError` if it's installed and not this fake, we need to maintain
    strict compatibility with the original class, including `__init__` args
    and instance-attributes.

    To create a libvirtError instance you should:

        # Create an unsupported error exception
        exc = libvirtError('my message')
        exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)

    self.err is a tuple of form:
        (error_code, error_domain, error_message, error_level, str1, str2,
         str3, int1, int2)

    Alternatively, you can use the `make_libvirtError` convenience function
    to allow you to specify these attributes in one shot.
    """

    def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
                 vol=None):
        Exception.__init__(self, defmsg)
        self.err = None

    def _err_slot(self, index):
        # Every accessor shares one shape: None until `err` has been
        # assigned, otherwise the requested slot of the 9-tuple.
        return None if self.err is None else self.err[index]

    def get_error_code(self):
        return self._err_slot(0)

    def get_error_domain(self):
        return self._err_slot(1)

    def get_error_message(self):
        return self._err_slot(2)

    def get_error_level(self):
        return self._err_slot(3)

    def get_str1(self):
        return self._err_slot(4)

    def get_str2(self):
        return self._err_slot(5)

    def get_str3(self):
        return self._err_slot(6)

    def get_int1(self):
        return self._err_slot(7)

    def get_int2(self):
        return self._err_slot(8)
class NWFilter(object):
    """Fake network filter: remembers its connection, raw XML and name."""

    def __init__(self, connection, xml):
        self._connection = connection
        self._xml = xml
        self._parse_xml(xml)

    def _parse_xml(self, xml):
        # Only the filter's name attribute matters to the fake.
        self._name = etree.fromstring(xml).find('.').get('name')

    def undefine(self):
        # Deregister from the owning fake connection.
        self._connection._remove_filter(self)
class NodeDevice(object):
    """Fake host device; attach/dettach/reset are deliberate no-ops."""

    def __init__(self, connection, xml=None):
        self._connection = connection
        self._xml = xml
        if xml is not None:
            self._parse_xml(xml)

    def _parse_xml(self, xml):
        # Only the device name is interesting to the fake.
        self._name = etree.fromstring(xml).find('.').get('name')

    def attach(self):
        pass

    def dettach(self):
        # (sic) spelling mirrors libvirt's own API.
        pass

    def reset(self):
        pass
class Domain(object):
    """Fake libvirt virDomain.

    The domain XML is parsed once into ``self._def`` and re-rendered by
    :meth:`XMLDesc`; lifecycle state is tracked via the VIR_DOMAIN_*
    constants and mirrored into the owning fake Connection.
    """

    def __init__(self, connection, xml, running=False, transient=False):
        self._connection = connection
        if running:
            connection._mark_running(self)

        # py2-era idiom for: RUNNING if running else SHUTOFF
        self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF
        self._transient = transient
        self._def = self._parse_definition(xml)
        self._has_saved_state = False
        self._snapshots = {}
        # _mark_running (above) advances the connection's id counter, so a
        # running domain picks up the post-increment value here.
        self._id = self._connection._id_counter

    def _parse_definition(self, xml):
        """Parse domain XML into a nested dict (name, uuid, vcpu, memory,
        os, features, devices); raises libvirtError on malformed XML.
        """
        try:
            tree = etree.fromstring(xml)
        except etree.ParseError:
            raise make_libvirtError(
                libvirtError, "Invalid XML.",
                error_code=VIR_ERR_XML_DETAIL,
                error_domain=VIR_FROM_DOMAIN)

        definition = {}

        name = tree.find('./name')
        if name is not None:
            definition['name'] = name.text

        uuid_elem = tree.find('./uuid')
        if uuid_elem is not None:
            definition['uuid'] = uuid_elem.text
        else:
            # Domains defined without an explicit <uuid> get a random one.
            definition['uuid'] = str(uuid.uuid4())

        vcpu = tree.find('./vcpu')
        if vcpu is not None:
            definition['vcpu'] = int(vcpu.text)

        memory = tree.find('./memory')
        if memory is not None:
            definition['memory'] = int(memory.text)

        os = {}
        os_type = tree.find('./os/type')
        if os_type is not None:
            os['type'] = os_type.text
            # Fall back to the fake host's arch when not given in the XML.
            os['arch'] = os_type.get('arch', self._connection.host_info.arch)

        os_kernel = tree.find('./os/kernel')
        if os_kernel is not None:
            os['kernel'] = os_kernel.text

        os_initrd = tree.find('./os/initrd')
        if os_initrd is not None:
            os['initrd'] = os_initrd.text

        os_cmdline = tree.find('./os/cmdline')
        if os_cmdline is not None:
            os['cmdline'] = os_cmdline.text

        os_boot = tree.find('./os/boot')
        if os_boot is not None:
            os['boot_dev'] = os_boot.get('dev')

        definition['os'] = os

        features = {}

        acpi = tree.find('./features/acpi')
        if acpi is not None:
            features['acpi'] = True

        definition['features'] = features

        devices = {}

        device_nodes = tree.find('./devices')
        if device_nodes is not None:
            disks_info = []
            disks = device_nodes.findall('./disk')
            for disk in disks:
                disks_info += [_parse_disk_info(disk)]
            devices['disks'] = disks_info

            nics_info = []
            nics = device_nodes.findall('./interface')
            for nic in nics:
                nic_info = {}
                nic_info['type'] = nic.get('type')

                mac = nic.find('./mac')
                if mac is not None:
                    nic_info['mac'] = mac.get('address')

                source = nic.find('./source')
                if source is not None:
                    # Source attribute name depends on the interface type.
                    if nic_info['type'] == 'network':
                        nic_info['source'] = source.get('network')
                    elif nic_info['type'] == 'bridge':
                        nic_info['source'] = source.get('bridge')

                nics_info += [nic_info]

            devices['nics'] = nics_info

        definition['devices'] = devices

        return definition

    def create(self):
        self.createWithFlags(0)

    def createWithFlags(self, flags):
        # FIXME: Not handling flags at the moment
        self._state = VIR_DOMAIN_RUNNING
        self._connection._mark_running(self)
        self._has_saved_state = False

    def isActive(self):
        # libvirt returns an int, not a bool.
        return int(self._state == VIR_DOMAIN_RUNNING)

    def undefine(self):
        self._connection._undefine(self)

    def undefineFlags(self, flags):
        self.undefine()
        if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
            if self.hasManagedSaveImage(0):
                self.managedSaveRemove()

    def destroy(self):
        self._state = VIR_DOMAIN_SHUTOFF
        self._connection._mark_not_running(self)

    def ID(self):
        return self._id

    def name(self):
        return self._def['name']

    def UUIDString(self):
        return self._def['uuid']

    def interfaceStats(self, device):
        # Canned (rx_bytes, rx_packets, rx_errs, rx_drop, tx_bytes,
        # tx_packets, tx_errs, tx_drop)-style counters.
        return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]

    def blockStats(self, device):
        # Canned block I/O counters.
        return [2, 10000242400, 234, 2343424234, 34]

    def suspend(self):
        self._state = VIR_DOMAIN_PAUSED

    def shutdown(self):
        self._state = VIR_DOMAIN_SHUTDOWN
        self._connection._mark_not_running(self)

    def reset(self, flags):
        # FIXME: Not handling flags at the moment
        self._state = VIR_DOMAIN_RUNNING
        self._connection._mark_running(self)

    def info(self):
        # NOTE(review): `long` is a Python 2 builtin and raises NameError
        # on Python 3 — confirm whether py3 support is required here.
        return [self._state,
                long(self._def['memory']),
                long(self._def['memory']),
                self._def['vcpu'],
                123456789]

    def migrateToURI(self, desturi, flags, dname, bandwidth):
        raise make_libvirtError(
            libvirtError,
            "Migration always fails for fake libvirt!",
            error_code=VIR_ERR_INTERNAL_ERROR,
            error_domain=VIR_FROM_QEMU)

    def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
        raise make_libvirtError(
            libvirtError,
            "Migration always fails for fake libvirt!",
            error_code=VIR_ERR_INTERNAL_ERROR,
            error_domain=VIR_FROM_QEMU)

    def attachDevice(self, xml):
        disk_info = _parse_disk_info(etree.fromstring(xml))
        disk_info['_attached'] = True
        self._def['devices']['disks'] += [disk_info]
        return True

    def attachDeviceFlags(self, xml, flags):
        if (flags & VIR_DOMAIN_AFFECT_LIVE and
                self._state != VIR_DOMAIN_RUNNING):
            raise make_libvirtError(
                libvirtError,
                "AFFECT_LIVE only allowed for running domains!",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)
        self.attachDevice(xml)

    def detachDevice(self, xml):
        disk_info = _parse_disk_info(etree.fromstring(xml))
        # attachDevice stored the disk with '_attached' set; mirror that
        # here so the membership test below can match.
        disk_info['_attached'] = True
        return disk_info in self._def['devices']['disks']

    def detachDeviceFlags(self, xml, flags):
        self.detachDevice(xml)

    def XMLDesc(self, flags):
        """Re-render the parsed definition as libvirt-style domain XML."""
        disks = ''
        for disk in self._def['devices']['disks']:
            disks += '''<disk type='%(type)s' device='%(device)s'>
      <driver name='%(driver_name)s' type='%(driver_type)s'/>
      <source file='%(source)s'/>
      <target dev='%(target_dev)s' bus='%(target_bus)s'/>
      <address type='drive' controller='0' bus='0' unit='0'/>
    </disk>''' % disk

        nics = ''
        for nic in self._def['devices']['nics']:
            nics += '''<interface type='%(type)s'>
      <mac address='%(mac)s'/>
      <source %(type)s='%(source)s'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
               function='0x0'/>
    </interface>''' % nic

        return '''<domain type='kvm'>
  <name>%(name)s</name>
  <uuid>%(uuid)s</uuid>
  <memory>%(memory)s</memory>
  <currentMemory>%(memory)s</currentMemory>
  <vcpu>%(vcpu)s</vcpu>
  <os>
    <type arch='%(arch)s' machine='pc-0.12'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    %(disks)s
    <controller type='ide' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
               function='0x1'/>
    </controller>
    %(nics)s
    <serial type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </serial>
    <serial type='pty'>
      <source pty='/dev/pts/27'/>
      <target port='1'/>
    </serial>
    <serial type='tcp'>
      <source host="-1" service="-1" mode="bind"/>
    </serial>
    <console type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </console>
    <input type='tablet' bus='usb'/>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <graphics type='spice' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02'
               function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
               function='0x0'/>
    </memballoon>
  </devices>
</domain>''' % {'name': self._def['name'],
                'uuid': self._def['uuid'],
                'memory': self._def['memory'],
                'vcpu': self._def['vcpu'],
                'arch': self._def['os']['arch'],
                'disks': disks,
                'nics': nics}

    def managedSave(self, flags):
        self._connection._mark_not_running(self)
        self._has_saved_state = True

    def managedSaveRemove(self, flags):
        self._has_saved_state = False

    def hasManagedSaveImage(self, flags):
        # libvirt returns an int, not a bool.
        return int(self._has_saved_state)

    def resume(self):
        self._state = VIR_DOMAIN_RUNNING

    def snapshotCreateXML(self, xml, flags):
        tree = etree.fromstring(xml)
        name = tree.find('./name').text
        snapshot = DomainSnapshot(name, self)
        self._snapshots[name] = snapshot
        return snapshot

    def vcpus(self):
        # (per-vcpu info tuples, per-vcpu cpumap tuples)
        vcpus = ([], [])
        for i in range(0, self._def['vcpu']):
            vcpus[0].append((i, 1, 120405, i))
            vcpus[1].append((True, True, True, True))
        return vcpus

    def memoryStats(self):
        return {}

    def maxMemory(self):
        return self._def['memory']

    def blockJobInfo(self, disk, flags):
        return {}

    def jobInfo(self):
        return []

    def jobStats(self, flags=0):
        return {}
class DomainSnapshot(object):
    """Fake snapshot: deleting it unregisters it from its owning domain."""

    def __init__(self, name, domain):
        self._name = name
        self._domain = domain

    def delete(self, flags):
        # Remove this snapshot from the domain's registry.
        self._domain._snapshots.pop(self._name)
class Connection(object):
def __init__(self, uri=None, readonly=False, version=9011,
hv_version=1001000, host_info=None):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
else:
raise ValueError("URI was None, but fake libvirt is "
"configured to not accept this.")
uri_whitelist = ['qemu:///system',
'qemu:///session',
'lxc:///', # from LibvirtDriver._uri()
'xen:///', # from LibvirtDriver._uri()
'uml:///system',
'test:///default',
'parallels:///system']
if uri not in uri_whitelist:
raise make_libvirtError(
libvirtError,
"libvirt error: no connection driver "
"available for No connection for URI %s" % uri,
error_code=5, error_domain=0)
self.readonly = readonly
self._uri = uri
self._vms = {}
self._running_vms = {}
self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
self._nwfilters = {}
self._nodedevs = {}
self._event_callbacks = {}
self.fakeLibVersion = version
self.fakeVersion = hv_version
self.host_info = host_info or HostInfo()
def _add_filter(self, nwfilter):
self._nwfilters[nwfilter._name] = nwfilter
def _remove_filter(self, nwfilter):
del self._nwfilters[nwfilter._name]
def _add_nodedev(self, nodedev):
self._nodedevs[nodedev._name] = nodedev
def _remove_nodedev(self, nodedev):
del self._nodedevs[nodedev._name]
def _mark_running(self, dom):
self._running_vms[self._id_counter] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
self._id_counter += 1
def _mark_not_running(self, dom):
if dom._transient:
self._undefine(dom)
dom._id = -1
for (k, v) in six.iteritems(self._running_vms):
if v == dom:
del self._running_vms[k]
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
return
def _undefine(self, dom):
del self._vms[dom.name()]
if not dom._transient:
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)
def getInfo(self):
return [self.host_info.arch,
self.host_info.kB_mem,
self.host_info.cpus,
self.host_info.cpu_mhz,
self.host_info.cpu_nodes,
self.host_info.cpu_sockets,
self.host_info.cpu_cores,
self.host_info.cpu_threads]
def numOfDomains(self):
return len(self._running_vms)
def listDomainsID(self):
return self._running_vms.keys()
def lookupByID(self, id):
if id in self._running_vms:
return self._running_vms[id]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching id %d' % id,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def lookupByName(self, name):
if name in self._vms:
return self._vms[name]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching name "%s"' % name,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def listAllDomains(self, flags):
vms = []
for vm in self._vms:
if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
if vm.state != VIR_DOMAIN_SHUTOFF:
vms.append(vm)
if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
if vm.state == VIR_DOMAIN_SHUTOFF:
vms.append(vm)
return vms
def _emit_lifecycle(self, dom, event, detail):
if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
return
cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
callback = cbinfo[0]
opaque = cbinfo[1]
callback(self, dom, event, detail, opaque)
def defineXML(self, xml):
dom = Domain(connection=self, running=False, transient=False, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
return dom
def createXML(self, xml, flags):
dom = Domain(connection=self, running=True, transient=True, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
return dom
def getType(self):
if self._uri == 'qemu:///system':
return 'QEMU'
def getLibVersion(self):
return self.fakeLibVersion
def getVersion(self):
return self.fakeVersion
def getHostname(self):
return 'compute1'
def domainEventRegisterAny(self, dom, eventid, callback, opaque):
self._event_callbacks[eventid] = [callback, opaque]
def registerCloseCallback(self, cb, opaque):
pass
def getCPUMap(self):
"""Return calculated CPU map from HostInfo, by default showing 2
online CPUs.
"""
active_cpus = self.host_info.cpus
total_cpus = active_cpus + len(self.host_info.disabled_cpus_list)
cpu_map = [True if cpu_num not in self.host_info.disabled_cpus_list
else False for cpu_num in range(total_cpus)]
return (total_cpus, cpu_map, active_cpus)
def getCapabilities(self):
    """Return a static host-capabilities XML document.

    CPU socket/core/thread counts are taken from self.host_info, and the
    host NUMA topology section is spliced in when host_info supplies one
    (LibvirtConfigCapsNUMATopology objects are serialised to XML first).
    """
    numa_topology = self.host_info.get_numa_topology()
    if isinstance(numa_topology, vconfig.LibvirtConfigCapsNUMATopology):
        numa_topology = numa_topology.to_xml()
    # The guest arch list below is a fixed snapshot of a QEMU/KVM host.
    return '''<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='%(sockets)s' cores='%(cores)s' threads='%(threads)s'/>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
</cpu>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
</uri_transports>
</migration_features>
%(topology)s
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<pae/>
<nonpae/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='armv7l'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-arm</emulator>
<machine>integratorcp</machine>
<machine>vexpress-a9</machine>
<machine>syborg</machine>
<machine>musicpal</machine>
<machine>mainstone</machine>
<machine>n800</machine>
<machine>n810</machine>
<machine>n900</machine>
<machine>cheetah</machine>
<machine>sx1</machine>
<machine>sx1-v1</machine>
<machine>beagle</machine>
<machine>beaglexm</machine>
<machine>tosa</machine>
<machine>akita</machine>
<machine>spitz</machine>
<machine>borzoi</machine>
<machine>terrier</machine>
<machine>connex</machine>
<machine>verdex</machine>
<machine>lm3s811evb</machine>
<machine>lm3s6965evb</machine>
<machine>realview-eb</machine>
<machine>realview-eb-mpcore</machine>
<machine>realview-pb-a8</machine>
<machine>realview-pbx-a9</machine>
<machine>versatilepb</machine>
<machine>versatileab</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mips'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mips</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mipsel'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mipsel</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='sparc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-sparc</emulator>
<machine>SS-5</machine>
<machine>leon3_generic</machine>
<machine>SS-10</machine>
<machine>SS-600MP</machine>
<machine>SS-20</machine>
<machine>Voyager</machine>
<machine>LX</machine>
<machine>SS-4</machine>
<machine>SPARCClassic</machine>
<machine>SPARCbook</machine>
<machine>SS-1000</machine>
<machine>SS-2000</machine>
<machine>SS-2</machine>
<domain type='qemu'>
</domain>
</arch>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='ppc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-ppc</emulator>
<machine>g3beige</machine>
<machine>virtex-ml507</machine>
<machine>mpc8544ds</machine>
<machine canonical='bamboo-0.13'>bamboo</machine>
<machine>bamboo-0.13</machine>
<machine>bamboo-0.12</machine>
<machine>ref405ep</machine>
<machine>taihu</machine>
<machine>mac99</machine>
<machine>prep</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
</capabilities>''' % {'sockets': self.host_info.cpu_sockets,
                      'cores': self.host_info.cpu_cores,
                      'threads': self.host_info.cpu_threads,
                      'topology': numa_topology}
def compareCPU(self, xml, flags):
    """Compare a guest CPU definition against this fake host's CPU.

    Only arch, model and vendor are inspected; everything else is assumed
    compatible. The rest of the stuff libvirt implements is rather
    complicated and I don't think it adds much value to replicate it here.
    """
    tree = etree.fromstring(xml)
    checks = (
        ('./arch', lambda text: text in (arch.X86_64, arch.I686)),
        ('./model', lambda text: text == self.host_info.cpu_model),
        ('./vendor', lambda text: text == self.host_info.cpu_vendor),
    )
    for xpath, compatible in checks:
        node = tree.find(xpath)
        if node is not None and not compatible(node.text):
            return VIR_CPU_COMPARE_INCOMPATIBLE
    return VIR_CPU_COMPARE_IDENTICAL
def getCPUStats(self, cpuNum, flag):
    """Return canned per-CPU time counters (nanoseconds) for CPUs 0 and 1.

    Any other CPU number raises libvirtError, like real libvirt does.
    """
    if cpuNum >= 2:
        raise make_libvirtError(
            libvirtError,
            "invalid argument: Invalid cpu number",
            error_code=VIR_ERR_INTERNAL_ERROR,
            error_domain=VIR_FROM_QEMU)
    return {'kernel': 5664160000000,
            'idle': 1592705190000000,
            'user': 26728850000000,
            'iowait': 6121490000000}
def nwfilterLookupByName(self, name):
    """Return the network filter registered as ``name``.

    Raises libvirtError (VIR_ERR_NO_NWFILTER) when it is not defined.
    """
    if name in self._nwfilters:
        return self._nwfilters[name]
    raise make_libvirtError(
        libvirtError,
        "no nwfilter with matching name %s" % name,
        error_code=VIR_ERR_NO_NWFILTER,
        error_domain=VIR_FROM_NWFILTER)
def nwfilterDefineXML(self, xml):
    # Build a fake NWFilter from the XML and register it on this
    # connection. Unlike real libvirt, nothing is returned to the caller.
    nwfilter = NWFilter(self, xml)
    self._add_filter(nwfilter)
def nodeDeviceLookupByName(self, name):
    """Return the node device registered as ``name``.

    Raises libvirtError (VIR_ERR_NO_NODE_DEVICE) when it does not exist.
    """
    if name in self._nodedevs:
        return self._nodedevs[name]
    raise make_libvirtError(
        libvirtError,
        "no nodedev with matching name %s" % name,
        error_code=VIR_ERR_NO_NODE_DEVICE,
        error_domain=VIR_FROM_NODEDEV)
def listDefinedDomains(self):
    # Names of defined-but-inactive domains; the fake driver reports none.
    return []
def listDevices(self, cap, flags):
    # Node devices matching capability ``cap``; the fake driver reports
    # none through this API.
    return []
def baselineCPU(self, cpu, flag):
    """Fake of virConnectBaselineCPU: return a static baseline CPU XML.

    The input CPU definitions and flags are ignored; the canned Penryn
    definition below matches the host CPU advertised by getCapabilities.
    """
    return """<cpu mode='custom' match='exact'>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
<feature policy='require' name='aes'/>
</cpu>"""
def secretLookupByUsage(self, usage_type_obj, usage_id):
    # Secrets are not modelled by the fake driver; implicitly returns None.
    pass
def secretDefineXML(self, xml):
    # Secret definitions are accepted and discarded; implicitly returns None.
    pass
def openAuth(uri, auth, flags=0):
    """Fake of libvirt.openAuth: validate ``auth`` and open a Connection.

    ``auth`` must be [credential_type_list, callback_function, ...], as in
    real libvirt. Fixes the auth[0] error message, which wrongly said
    "Expected a function" while checking for a list, and uses isinstance
    instead of exact type comparison.
    """
    if not isinstance(auth, list):
        raise Exception("Expected a list for 'auth' parameter")
    if not isinstance(auth[0], list):
        raise Exception("Expected a list in 'auth[0]' parameter")
    if not callable(auth[1]):
        raise Exception("Expected a function in 'auth[1]' parameter")
    # Read-only connections are requested via the VIR_CONNECT_RO flag.
    return Connection(uri, (flags == VIR_CONNECT_RO))
def virEventRunDefaultImpl():
    # One iteration of the fake default event loop: just idle briefly so
    # the caller's event thread does not spin.
    time.sleep(1)
def virEventRegisterDefaultImpl():
    # Mirrors libvirt's requirement that the event implementation must be
    # registered before any connection is used; ``connection_used`` is a
    # module-level flag (set elsewhere in this module).
    if connection_used:
        raise Exception("virEventRegisterDefaultImpl() must be "
                        "called before connection is used.")
def registerErrorHandler(handler, ctxt):
    # Error reports are simply dropped by the fake driver; the handler is
    # accepted and never invoked.
    pass
def make_libvirtError(error_class, msg, error_code=None,
                      error_domain=None, error_message=None,
                      error_level=None, str1=None, str2=None, str3=None,
                      int1=None, int2=None):
    """Convenience function for creating `libvirtError` exceptions which
    allow you to specify arguments in constructor without having to manipulate
    the `err` tuple directly.
    We need to pass in `error_class` to this function because it may be
    `libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether
    `libvirt-python` is installed.
    """
    error = error_class(msg)
    # Populate the 9-tuple that libvirtError.get_error_code() et al. read.
    error.err = (
        error_code,
        error_domain,
        error_message,
        error_level,
        str1,
        str2,
        str3,
        int1,
        int2,
    )
    return error
# Public aliases matching the names exported by the real libvirt module.
virDomain = Domain
virNodeDevice = NodeDevice
virConnect = Connection
class FakeLibvirtFixture(fixtures.Fixture):
    """Performs global setup/stubbing for all libvirt tests.
    """
    def setUp(self):
        super(FakeLibvirtFixture, self).setUp()
        # Stop the module-level libvirt event thread so that tests control
        # event dispatch deterministically.
        disable_event_thread(self)
| |
import sys
if sys.version_info < (3, 7):
from ..graph_objs import Waterfall
from ..graph_objs import Volume
from ..graph_objs import Violin
from ..graph_objs import Treemap
from ..graph_objs import Table
from ..graph_objs import Surface
from ..graph_objs import Sunburst
from ..graph_objs import Streamtube
from ..graph_objs import Splom
from ..graph_objs import Scatterternary
from ..graph_objs import Scattersmith
from ..graph_objs import Scatterpolargl
from ..graph_objs import Scatterpolar
from ..graph_objs import Scattermapbox
from ..graph_objs import Scattergl
from ..graph_objs import Scattergeo
from ..graph_objs import Scattercarpet
from ..graph_objs import Scatter3d
from ..graph_objs import Scatter
from ..graph_objs import Sankey
from ..graph_objs import Pointcloud
from ..graph_objs import Pie
from ..graph_objs import Parcoords
from ..graph_objs import Parcats
from ..graph_objs import Ohlc
from ..graph_objs import Mesh3d
from ..graph_objs import Isosurface
from ..graph_objs import Indicator
from ..graph_objs import Image
from ..graph_objs import Icicle
from ..graph_objs import Histogram2dContour
from ..graph_objs import Histogram2d
from ..graph_objs import Histogram
from ..graph_objs import Heatmapgl
from ..graph_objs import Heatmap
from ..graph_objs import Funnelarea
from ..graph_objs import Funnel
from ..graph_objs import Densitymapbox
from ..graph_objs import Contourcarpet
from ..graph_objs import Contour
from ..graph_objs import Cone
from ..graph_objs import Choroplethmapbox
from ..graph_objs import Choropleth
from ..graph_objs import Carpet
from ..graph_objs import Candlestick
from ..graph_objs import Box
from ..graph_objs import Barpolar
from ..graph_objs import Bar
from ..graph_objs import Layout
from ..graph_objs import Frame
from ..graph_objs import Figure
from ..graph_objs import Data
from ..graph_objs import Annotations
from ..graph_objs import Frames
from ..graph_objs import AngularAxis
from ..graph_objs import Annotation
from ..graph_objs import ColorBar
from ..graph_objs import Contours
from ..graph_objs import ErrorX
from ..graph_objs import ErrorY
from ..graph_objs import ErrorZ
from ..graph_objs import Font
from ..graph_objs import Legend
from ..graph_objs import Line
from ..graph_objs import Margin
from ..graph_objs import Marker
from ..graph_objs import RadialAxis
from ..graph_objs import Scene
from ..graph_objs import Stream
from ..graph_objs import XAxis
from ..graph_objs import YAxis
from ..graph_objs import ZAxis
from ..graph_objs import XBins
from ..graph_objs import YBins
from ..graph_objs import Trace
from ..graph_objs import Histogram2dcontour
from ..graph_objs import waterfall
from ..graph_objs import volume
from ..graph_objs import violin
from ..graph_objs import treemap
from ..graph_objs import table
from ..graph_objs import surface
from ..graph_objs import sunburst
from ..graph_objs import streamtube
from ..graph_objs import splom
from ..graph_objs import scatterternary
from ..graph_objs import scattersmith
from ..graph_objs import scatterpolargl
from ..graph_objs import scatterpolar
from ..graph_objs import scattermapbox
from ..graph_objs import scattergl
from ..graph_objs import scattergeo
from ..graph_objs import scattercarpet
from ..graph_objs import scatter3d
from ..graph_objs import scatter
from ..graph_objs import sankey
from ..graph_objs import pointcloud
from ..graph_objs import pie
from ..graph_objs import parcoords
from ..graph_objs import parcats
from ..graph_objs import ohlc
from ..graph_objs import mesh3d
from ..graph_objs import isosurface
from ..graph_objs import indicator
from ..graph_objs import image
from ..graph_objs import icicle
from ..graph_objs import histogram2dcontour
from ..graph_objs import histogram2d
from ..graph_objs import histogram
from ..graph_objs import heatmapgl
from ..graph_objs import heatmap
from ..graph_objs import funnelarea
from ..graph_objs import funnel
from ..graph_objs import densitymapbox
from ..graph_objs import contourcarpet
from ..graph_objs import contour
from ..graph_objs import cone
from ..graph_objs import choroplethmapbox
from ..graph_objs import choropleth
from ..graph_objs import carpet
from ..graph_objs import candlestick
from ..graph_objs import box
from ..graph_objs import barpolar
from ..graph_objs import bar
from ..graph_objs import layout
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[
"..graph_objs.waterfall",
"..graph_objs.volume",
"..graph_objs.violin",
"..graph_objs.treemap",
"..graph_objs.table",
"..graph_objs.surface",
"..graph_objs.sunburst",
"..graph_objs.streamtube",
"..graph_objs.splom",
"..graph_objs.scatterternary",
"..graph_objs.scattersmith",
"..graph_objs.scatterpolargl",
"..graph_objs.scatterpolar",
"..graph_objs.scattermapbox",
"..graph_objs.scattergl",
"..graph_objs.scattergeo",
"..graph_objs.scattercarpet",
"..graph_objs.scatter3d",
"..graph_objs.scatter",
"..graph_objs.sankey",
"..graph_objs.pointcloud",
"..graph_objs.pie",
"..graph_objs.parcoords",
"..graph_objs.parcats",
"..graph_objs.ohlc",
"..graph_objs.mesh3d",
"..graph_objs.isosurface",
"..graph_objs.indicator",
"..graph_objs.image",
"..graph_objs.icicle",
"..graph_objs.histogram2dcontour",
"..graph_objs.histogram2d",
"..graph_objs.histogram",
"..graph_objs.heatmapgl",
"..graph_objs.heatmap",
"..graph_objs.funnelarea",
"..graph_objs.funnel",
"..graph_objs.densitymapbox",
"..graph_objs.contourcarpet",
"..graph_objs.contour",
"..graph_objs.cone",
"..graph_objs.choroplethmapbox",
"..graph_objs.choropleth",
"..graph_objs.carpet",
"..graph_objs.candlestick",
"..graph_objs.box",
"..graph_objs.barpolar",
"..graph_objs.bar",
"..graph_objs.layout",
],
[
"..graph_objs.Waterfall",
"..graph_objs.Volume",
"..graph_objs.Violin",
"..graph_objs.Treemap",
"..graph_objs.Table",
"..graph_objs.Surface",
"..graph_objs.Sunburst",
"..graph_objs.Streamtube",
"..graph_objs.Splom",
"..graph_objs.Scatterternary",
"..graph_objs.Scattersmith",
"..graph_objs.Scatterpolargl",
"..graph_objs.Scatterpolar",
"..graph_objs.Scattermapbox",
"..graph_objs.Scattergl",
"..graph_objs.Scattergeo",
"..graph_objs.Scattercarpet",
"..graph_objs.Scatter3d",
"..graph_objs.Scatter",
"..graph_objs.Sankey",
"..graph_objs.Pointcloud",
"..graph_objs.Pie",
"..graph_objs.Parcoords",
"..graph_objs.Parcats",
"..graph_objs.Ohlc",
"..graph_objs.Mesh3d",
"..graph_objs.Isosurface",
"..graph_objs.Indicator",
"..graph_objs.Image",
"..graph_objs.Icicle",
"..graph_objs.Histogram2dContour",
"..graph_objs.Histogram2d",
"..graph_objs.Histogram",
"..graph_objs.Heatmapgl",
"..graph_objs.Heatmap",
"..graph_objs.Funnelarea",
"..graph_objs.Funnel",
"..graph_objs.Densitymapbox",
"..graph_objs.Contourcarpet",
"..graph_objs.Contour",
"..graph_objs.Cone",
"..graph_objs.Choroplethmapbox",
"..graph_objs.Choropleth",
"..graph_objs.Carpet",
"..graph_objs.Candlestick",
"..graph_objs.Box",
"..graph_objs.Barpolar",
"..graph_objs.Bar",
"..graph_objs.Layout",
"..graph_objs.Frame",
"..graph_objs.Figure",
"..graph_objs.Data",
"..graph_objs.Annotations",
"..graph_objs.Frames",
"..graph_objs.AngularAxis",
"..graph_objs.Annotation",
"..graph_objs.ColorBar",
"..graph_objs.Contours",
"..graph_objs.ErrorX",
"..graph_objs.ErrorY",
"..graph_objs.ErrorZ",
"..graph_objs.Font",
"..graph_objs.Legend",
"..graph_objs.Line",
"..graph_objs.Margin",
"..graph_objs.Marker",
"..graph_objs.RadialAxis",
"..graph_objs.Scene",
"..graph_objs.Stream",
"..graph_objs.XAxis",
"..graph_objs.YAxis",
"..graph_objs.ZAxis",
"..graph_objs.XBins",
"..graph_objs.YBins",
"..graph_objs.Trace",
"..graph_objs.Histogram2dcontour",
],
)
if sys.version_info < (3, 7):
try:
import ipywidgets as _ipywidgets
from distutils.version import LooseVersion as _LooseVersion
if _LooseVersion(_ipywidgets.__version__) >= _LooseVersion("7.0.0"):
from ..graph_objs._figurewidget import FigureWidget
else:
raise ImportError()
except Exception:
from ..missing_ipywidgets import FigureWidget
else:
__all__.append("FigureWidget")
orig_getattr = __getattr__
def __getattr__(import_name):
    # Module-level attribute hook (PEP 562, Python 3.7+): resolve
    # "FigureWidget" lazily so ipywidgets is only imported when the
    # attribute is actually accessed.
    if import_name == "FigureWidget":
        try:
            import ipywidgets
            from distutils.version import LooseVersion
            # FigureWidget requires ipywidgets >= 7.0.0.
            if LooseVersion(ipywidgets.__version__) >= LooseVersion("7.0.0"):
                from ..graph_objs._figurewidget import FigureWidget
                return FigureWidget
            else:
                # Too old: fall through to the stub implementation below.
                raise ImportError()
        except Exception:
            # ipywidgets missing or too old: the stub class raises a
            # helpful error message on instantiation.
            from ..missing_ipywidgets import FigureWidget
            return FigureWidget
    # Everything else is handled by the relative_import-generated resolver.
    return orig_getattr(import_name)
| |
#!/usr/bin/env python
#
# The MIT License ( MIT )
#
# Copyright ( c ) 2016 Davit Samvelyan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files ( the "Software" ), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from .interval_set import IntervalSet
import copy
class Interval( object ):
    """A closed interval [begin, end] of 1-based line numbers.

    An interval with begin < 1 or end < begin is invalid/empty and is
    falsy (see __bool__); Interval.Empty() constructs such a sentinel.
    """
    def __init__( self, b, e ):
        # Inclusive bounds.
        self.begin = b
        self.end = e
    def __repr__(self):
        return '[{0}, {1}]'.format( self.begin, self.end )
    def __eq__( self, other ):
        # Equal when both endpoints coincide.
        return ( self.TopAligns( other ) and
            self.BottomAligns( other ) )
    def __ne__( self, other ):
        return not ( self == other )
    def __lt__( self, other ):
        # Against an int: the whole interval lies before that line.
        # Against an Interval: this one ends strictly before it starts.
        if isinstance( other, int ):
            return self.end < other
        return self.end < other.begin
    def __gt__( self, other ):
        # Mirror of __lt__: entirely after a line / another interval.
        if isinstance( other, int ):
            return self.begin > other
        return self.begin > other.end
    def __nonzero__( self ):
        # Python 2 truthiness delegates to the Python 3 hook.
        return self.__bool__()
    def __bool__( self ):
        # Valid iff begin is a real (1-based) line and does not exceed end.
        return self.begin > 0 and self.begin <= self.end
    def __len__( self ):
        # Number of lines covered (bounds are inclusive).
        return self.end - self.begin + 1
    def __iter__( self ):
        # A valid interval iterates as a single element, an empty one as
        # nothing -- lets Interval be used interchangeably with IntervalSet.
        if self:
            yield self
    def __contains__( self, other ):
        # Interval containment, or line membership for ints.
        if isinstance( other, Interval ):
            return other.begin >= self.begin and other.end <= self.end
        return other >= self.begin and other <= self.end
    def Overlaps( self, other ):
        # Overlap == neither strictly before nor strictly after.
        return not ( self < other ) and not ( self > other )
    def _SubtractInterval( self, other ):
        # Remove ``other`` from this interval. The result may be empty,
        # a single Interval, or an IntervalSet of two when split in half.
        if self in other:
            # return invalid interval
            return Interval.Empty()
        i1 = None
        i2 = None
        if other.begin in self:
            i1 = Interval(self.begin, other.begin - 1)
        if other.end in self:
            i2 = Interval(other.end + 1, self.end)
        if i1 and i2:
            return IntervalSet(i1, i2)
        elif i1:
            return i1
        elif i2:
            return i2
        else:
            return self
    def __sub__( self, other ):
        # Subtract a single Interval, or each member of an iterable
        # (e.g. IntervalSet) in turn.
        if isinstance( other, Interval ):
            return self._SubtractInterval( other )
        else:
            result = copy.copy( self )
            for i in other:
                result -= i
            return result
    def _union( self, other ):
        # Adjacent or overlapping intervals merge into one Interval;
        # disjoint ones form an IntervalSet.
        if ( self.Precedes( other ) or
            self.Follows( other ) or
            self.Overlaps( other ) ):
            new_begin = min( self.begin, other.begin )
            new_end = max( self.end, other.end )
            return Interval(new_begin, new_end)
        return IntervalSet(self, other)
    __add__ = __or__ = _union
    def __and__( self, other ):
        # Intersection with an Interval or an IntervalSet. For sets the
        # result collapses to a plain Interval (or Empty) when possible.
        if isinstance( other, IntervalSet ):
            result = other & self
            l = len( result )
            if l > 1:
                return result
            elif l == 1:
                return result._intervals[ 0 ]
            else:
                return Interval.Empty()
        else:
            if self.Overlaps( other ):
                new_begin = max( self.begin, other.begin )
                new_end = min( self.end, other.end )
                return Interval(new_begin, new_end)
            return Interval.Empty()
    def TopAligns( self, other ):
        # Same starting line.
        return self.begin == other.begin
    def BottomAligns( self, other ):
        # Same ending line.
        return self.end == other.end
    def Follows( self, other ):
        # Starts immediately after ``other`` ends (adjacent below).
        return self.begin == other.end + 1
    def Precedes( self, other ):
        # Ends immediately before ``other`` starts (adjacent above).
        return self.end + 1 == other.begin
    def SingleLine( self ):
        return self.begin == self.end
    def EnlargeTopTo( self, size ):
        # Grow upwards (in place) until len(self) == size, clamped at line 1.
        l = len( self )
        if l < size:
            self.begin -= size - l
            if self.begin <= 0:
                self.begin = 1
        return self
    def EnlargeBottomTo( self, size ):
        # Grow downwards (in place) until len(self) == size.
        l = len( self )
        if l < size:
            self.end += size - l
        return self
    def LimitBottomBy( self, limit ):
        # Clamp the end to ``limit`` (in place).
        if self.end > limit:
            self.end = limit
        return self
    def MoveUpBy( self, count ):
        # Shift both bounds up, never moving begin above line 1.
        if count >= self.begin:
            count = self.begin - 1
        self.begin -= count
        self.end -= count
        return self
    def MoveDownBy( self, count ):
        # Shift both bounds down (no upper clamp).
        self.begin += count
        self.end += count
        return self
    @staticmethod
    def Empty():
        # Sentinel invalid interval (falsy).
        return Interval(0, -1)
| |
# -*- coding: utf-8 -*-
# Copyright 2011 Tomo Krajina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import logging as mod_logging
import datetime as mod_datetime
import xml.dom.minidom as mod_minidom
import gpx as mod_gpx
import utils as mod_utils
import re as mod_re
def parse_time(string):
if not string:
return None
try:
return mod_datetime.datetime.strptime(string, mod_gpx.DATE_FORMAT)
except Exception, e:
if mod_re.match('^.*\.\d+Z$', string):
string = mod_re.sub('\.\d+Z', 'Z', string)
try:
return mod_datetime.datetime.strptime(string, mod_gpx.DATE_FORMAT)
except Exception, e:
mod_logging.error('Invalid timestemp %s' % string)
return None
class AbstractXMLParser:
    """ Common methods used in GPXParser and KMLParser """
    gpx = None    # resulting mod_gpx.GPX instance, created by init()
    xml = None    # raw XML source being parsed
    valid = None  # True once a parse completed successfully
    error = None  # message from the last failed parse, if any
    def init(self, xml_or_file):
        # Accept either a file-like object (anything with .read()) or a
        # string; unicode input is encoded to UTF-8 bytes before parsing.
        if hasattr(xml_or_file, 'read'):
            self.xml = xml_or_file.read()
        else:
            if isinstance(xml_or_file, unicode):
                self.xml = xml_or_file.encode('utf-8')
            else:
                self.xml = str(xml_or_file)
        self.valid = False
        self.error = None
        self.gpx = mod_gpx.GPX()
    def is_valid(self):
        return self.valid
    def get_error(self):
        return self.error
    def get_gpx(self):
        return self.gpx
    def get_node_data(self, node):
        # Return the text content of the node's first child, or None when
        # the node is missing or has no children.
        if not node:
            return None
        child_nodes = node.childNodes
        if not child_nodes or len(child_nodes) == 0:
            return None
        return child_nodes[0].data
class GPXParser(AbstractXMLParser):
def __init__(self, xml_or_file=None):
self.init(xml_or_file)
self.gpx = mod_gpx.GPX()
def parse(self):
try:
dom = mod_minidom.parseString(self.xml)
self.__parse_dom(dom)
return self.gpx
except Exception, e:
mod_logging.debug('Error in:\n%s\n-----------\n' % self.xml)
mod_logging.exception(e)
self.error = str(e)
return None
def __parse_dom(self, dom):
root_nodes = dom.childNodes
root_node = None
for node in root_nodes:
if not root_node:
node_name = node.nodeName
if node_name == 'gpx':
root_node = node
for node in root_node.childNodes:
node_name = node.nodeName
if node_name == 'time':
time_str = self.get_node_data(node)
self.gpx.time = parse_time(time_str)
elif node_name == 'name':
self.gpx.name = self.get_node_data(node)
elif node_name == 'desc':
self.gpx.description = self.get_node_data(node)
elif node_name == 'author':
self.gpx.author = self.get_node_data(node)
elif node_name == 'email':
self.gpx.email = self.get_node_data(node)
elif node_name == 'url':
self.gpx.url = self.get_node_data(node)
elif node_name == 'urlname':
self.gpx.urlname = self.get_node_data(node)
elif node_name == 'keywords':
self.gpx.keywords = self.get_node_data(node)
elif node_name == 'bounds':
self._parse_bounds(node)
elif node_name == 'wpt':
self.gpx.waypoints.append(self._parse_waypoint(node))
elif node_name == 'rte':
self.gpx.routes.append(self._parse_route(node))
elif node_name == 'trk':
self.gpx.tracks.append(self.__parse_track(node))
else:
#print 'unknown %s' % node
pass
self.valid = True
def _parse_bounds(self, node):
if node.attributes.has_key('minlat'):
self.gpx.min_latitude = mod_utils.to_number(node.attributes['minlat'].nodeValue)
if node.attributes.has_key('maxlat'):
self.gpx.min_latitude = mod_utils.to_number(node.attributes['maxlat'].nodeValue)
if node.attributes.has_key('minlon'):
self.gpx.min_longitude = mod_utils.to_number(node.attributes['minlon'].nodeValue)
if node.attributes.has_key('maxlon'):
self.gpx.min_longitude = mod_utils.to_number(node.attributes['maxlon'].nodeValue)
def _parse_waypoint(self, node):
if not node.attributes.has_key('lat'):
raise mod_gpx.GPXException('Waypoint without latitude')
if not node.attributes.has_key('lon'):
raise mod_gpx.GPXException('Waypoint without longitude')
lat = mod_utils.to_number(node.attributes['lat'].nodeValue)
lon = mod_utils.to_number(node.attributes['lon'].nodeValue)
elevation_node = mod_utils.find_first_node(node, 'ele')
elevation = mod_utils.to_number(self.get_node_data(elevation_node), 0)
time_node = mod_utils.find_first_node(node, 'time')
time_str = self.get_node_data(time_node)
time = parse_time(time_str)
name_node = mod_utils.find_first_node(node, 'name')
name = self.get_node_data(name_node)
desc_node = mod_utils.find_first_node(node, 'desc')
desc = self.get_node_data(desc_node)
sym_node = mod_utils.find_first_node(node, 'sym')
sym = self.get_node_data(sym_node)
type_node = mod_utils.find_first_node(node, 'type')
type = self.get_node_data(type_node)
comment_node = mod_utils.find_first_node(node, 'cmt')
comment = self.get_node_data(comment_node)
hdop_node = mod_utils.find_first_node(node, 'hdop')
hdop = mod_utils.to_number(self.get_node_data(hdop_node))
vdop_node = mod_utils.find_first_node(node, 'vdop')
vdop = mod_utils.to_number(self.get_node_data(vdop_node))
pdop_node = mod_utils.find_first_node(node, 'pdop')
pdop = mod_utils.to_number(self.get_node_data(pdop_node))
return mod_gpx.GPXWaypoint(latitude=lat, longitude=lon, elevation=elevation,
time=time, name=name, description=desc, symbol=sym,
type=type, comment=comment, horizontal_dilution=hdop,
vertical_dilution=vdop, position_dilution=pdop)
def _parse_route(self, node):
name_node = mod_utils.find_first_node(node, 'name')
name = self.get_node_data(name_node)
description_node = mod_utils.find_first_node(node, 'desc')
description = self.get_node_data(description_node)
number_node = mod_utils.find_first_node(node, 'number')
number = mod_utils.to_number(self.get_node_data(number_node))
route = mod_gpx.GPXRoute(name, description, number)
child_nodes = node.childNodes
for child_node in child_nodes:
node_name = child_node.nodeName
if node_name == 'rtept':
route_point = self._parse_route_point(child_node)
route.points.append(route_point)
return route
def _parse_route_point(self, node):
if not node.attributes.has_key('lat'):
raise mod_gpx.GPXException('Waypoint without latitude')
if not node.attributes.has_key('lon'):
raise mod_gpx.GPXException('Waypoint without longitude')
lat = mod_utils.to_number(node.attributes['lat'].nodeValue)
lon = mod_utils.to_number(node.attributes['lon'].nodeValue)
elevation_node = mod_utils.find_first_node(node, 'ele')
elevation = mod_utils.to_number(self.get_node_data(elevation_node), 0)
time_node = mod_utils.find_first_node(node, 'time')
time_str = self.get_node_data(time_node)
time = parse_time(time_str)
name_node = mod_utils.find_first_node(node, 'name')
name = self.get_node_data(name_node)
desc_node = mod_utils.find_first_node(node, 'desc')
desc = self.get_node_data(desc_node)
sym_node = mod_utils.find_first_node(node, 'sym')
sym = self.get_node_data(sym_node)
type_node = mod_utils.find_first_node(node, 'type')
type = self.get_node_data(type_node)
comment_node = mod_utils.find_first_node(node, 'cmt')
comment = self.get_node_data(comment_node)
hdop_node = mod_utils.find_first_node(node, 'hdop')
hdop = mod_utils.to_number(self.get_node_data(hdop_node))
vdop_node = mod_utils.find_first_node(node, 'vdop')
vdop = mod_utils.to_number(self.get_node_data(vdop_node))
pdop_node = mod_utils.find_first_node(node, 'pdop')
pdop = mod_utils.to_number(self.get_node_data(pdop_node))
return mod_gpx.GPXRoutePoint(lat, lon, elevation, time, name, desc, sym, type, comment,
horizontal_dilution = hdop, vertical_dilution = vdop, position_dilution = pdop)
def __parse_track(self, node):
name_node = mod_utils.find_first_node(node, 'name')
name = self.get_node_data(name_node)
description_node = mod_utils.find_first_node(node, 'desc')
description = self.get_node_data(description_node)
number_node = mod_utils.find_first_node(node, 'number')
number = mod_utils.to_number(self.get_node_data(number_node))
track = mod_gpx.GPXTrack(name, description, number)
child_nodes = node.childNodes
for child_node in child_nodes:
if child_node.nodeName == 'trkseg':
track_segment = self.__parse_track_segment(child_node)
track.segments.append(track_segment)
return track
def __parse_track_segment(self, node):
track_segment = mod_gpx.GPXTrackSegment()
child_nodes = node.childNodes
n = 0
for child_node in child_nodes:
if child_node.nodeName == 'trkpt':
track_point = self.__parse_track_point(child_node)
track_segment.points.append(track_point)
n += 1
return track_segment
def __parse_track_point(self, node):
latitude = None
if node.attributes.has_key('lat'):
latitude = mod_utils.to_number(node.attributes['lat'].nodeValue)
longitude = None
if node.attributes.has_key('lon'):
longitude = mod_utils.to_number(node.attributes['lon'].nodeValue)
time_node = mod_utils.find_first_node(node, 'time')
time = parse_time(self.get_node_data(time_node))
elevation_node = mod_utils.find_first_node(node, 'ele')
elevation = mod_utils.to_number(self.get_node_data(elevation_node))
symbol_node = mod_utils.find_first_node(node, 'sym')
symbol = self.get_node_data(symbol_node)
comment_node = mod_utils.find_first_node(node, 'cmt')
comment = self.get_node_data(comment_node)
hdop_node = mod_utils.find_first_node(node, 'hdop')
hdop = mod_utils.to_number(self.get_node_data(hdop_node))
vdop_node = mod_utils.find_first_node(node, 'vdop')
vdop = mod_utils.to_number(self.get_node_data(vdop_node))
pdop_node = mod_utils.find_first_node(node, 'pdop')
pdop = mod_utils.to_number(self.get_node_data(pdop_node))
speed_node = mod_utils.find_first_node(node, 'speed')
speed = mod_utils.to_number(self.get_node_data(speed_node))
return mod_gpx.GPXTrackPoint(latitude=latitude, longitude=longitude, elevation=elevation, time=time,
symbol=symbol, comment=comment, horizontal_dilution=hdop, vertical_dilution=vdop,
position_dilution=pdop, speed=speed)
class KMLParser(AbstractXMLParser):
    """
    Generic KML parser. Note that KML is a very generic format with much more than simple GPS tracks.
    Since this library is meant for GPS tracks, this parser will try to parse only tracks and waypoints
    from the KML file. Note, also, that KML doesn't know about routes.
    The result is a GPX object.
    NOTE THAT THIS IS AN EXPERIMENTAL FEATURE.
    See http://code.google.com/apis/kml/documentation/kmlreference.html for more details.
    """
    gpx = None  # resulting mod_gpx.GPX instance, created by init()
    def __init__(self, xml_or_file=None):
        self.init(xml_or_file)
    def parse(self):
        # Same contract as GPXParser.parse(): returns the GPX object (here
        # still empty -- see __parse_dom), or None after storing the error.
        try:
            dom = mod_minidom.parseString(self.xml)
            self.__parse_dom(dom)
            return self.gpx
        except Exception, e:
            mod_logging.debug('Error in:\n%s\n-----------\n' % self.xml)
            mod_logging.exception(e)
            self.error = str(e)
            return None
    def __parse_dom(self, xml):
        # TODO: KML parsing is not implemented yet; the result stays empty.
        pass
if __name__ == '__main__':
file_name = 'test_files/aaa.gpx'
#file_name = 'test_files/blue_hills.gpx'
#file_name = 'test_files/test.gpx'
file = open(file_name, 'r')
gpx_xml = file.read()
file.close()
parser = mod_gpx.GPXParser(gpx_xml)
gpx = parser.parse()
print gpx.to_xml()
if parser.is_valid():
print 'TRACKS:'
for track in gpx.tracks:
print 'name%s, 2d:%s, 3d:%s' % (track.name, track.length_2d(), track.length_3d())
print '\tTRACK SEGMENTS:'
for track_segment in track.segments:
print '\t2d:%s, 3d:%s' % (track_segment.length_2d(), track_segment.length_3d())
print 'ROUTES:'
for route in gpx.routes:
print route.name
else:
print 'error: %s' % parser.get_error()
| |
#!/usr/bin/python
# Copyright 2017 John Bailey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import varints
import sys
class TestStringMethods(unittest.TestCase):
    """Unit tests for the signed LEB128 ('leb128s') varint codec.

    Each test round-trips a value through varints.leb128s.encode/decode and
    compares the encoded form against the expected storage bytes built with
    varints.varint_storage().

    Fix: the per-test descriptions were floating string-literal statements
    placed in the class body *before* each ``def``, where they documented
    nothing; they are now real method docstrings.
    """

    def test_leb128s_none(self):
        """Test the leb128s varint format, using passing None"""
        test_data = None
        expected_result = None
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_single_number_zero(self):
        """Test the leb128s varint format, using a value of zero"""
        test_data = 0
        expected_result = varints.varint_storage(0)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_single_number_non_zero(self):
        """Test the leb128s varint format, using minimum positive non-zero value"""
        test_data = 1
        expected_result = varints.varint_storage(1)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_single_number_small_neg(self):
        """Test the leb128s varint format, using smallest negative non-zero value"""
        test_data = -1
        # -1 in single-byte signed LEB128 is 0x7F (sign-extended 7-bit payload).
        expected_result = varints.varint_storage(0x7F)
        ascii_result = varints.leb128s.encode(test_data)
        self.assertEqual(ascii_result, expected_result)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_single_number_pos_max(self):
        """Test the leb128s varint format, using max positive value which can be
        stored in a single byte"""
        test_data = 63
        expected_result = varints.varint_storage(63)
        ascii_result = varints.leb128s.encode(test_data)
        self.assertEqual(ascii_result, expected_result)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_single_number_neg_max(self):
        """Test the leb128s varint format, using max negative value which can be
        stored in a single byte"""
        test_data = -64
        expected_result = varints.varint_storage(64)
        ascii_result = varints.leb128s.encode(test_data)
        self.assertEqual(ascii_result, expected_result)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_two_number_pos_min(self):
        """Test the leb128s varint format, using minimum positive value necessary
        for 2 byte storage"""
        test_data = 64
        expected_result = varints.varint_storage(0xC0) + \
            varints.varint_storage(0)
        ascii_result = varints.leb128s.encode(test_data)
        self.assertEqual(ascii_result, expected_result)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_two_number_neg_min(self):
        """Test the leb128s varint format, using minimum negative value necessary
        for 2 byte storage"""
        test_data = -65
        expected_result = varints.varint_storage(0xBF) + \
            varints.varint_storage(0x7F)
        ascii_result = varints.leb128s.encode(test_data)
        self.assertEqual(ascii_result, expected_result)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_two_number_pos_max(self):
        """Test the leb128s varint format, using maximum positive value necessary
        for 2 byte storage"""
        test_data = 8191
        expected_result = varints.varint_storage(255) + \
            varints.varint_storage(63)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_two_number_neg_max(self):
        """Test the leb128s varint format, using maximum negative value necessary
        for 2 byte storage"""
        test_data = -8192
        expected_result = varints.varint_storage(0x80) + \
            varints.varint_storage(0x40)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_three_number_min(self):
        """Test the leb128s varint format, using minimum value necessary for 3 byte
        storage"""
        # NOTE(review): disabled via early return. The expected bytes below look
        # like *unsigned* LEB128 encodings -- confirm against the signed codec
        # and re-enable (e.g. with self.skipTest). TODO
        return
        test_data = 16384
        expected_result = varints.varint_storage(128) + \
            varints.varint_storage(128) + \
            varints.varint_storage(1)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_three_number_max(self):
        """Test the leb128s varint format, using maximum value necessary for 3 byte
        storage"""
        # NOTE(review): disabled via early return -- see test_leb128s_three_number_min.
        return
        test_data = 2097151
        expected_result = varints.varint_storage(255) + \
            varints.varint_storage(255) + \
            varints.varint_storage(127)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_four_number_min(self):
        """Test the leb128s varint format, using minimum value necessary for 4 byte
        storage"""
        # NOTE(review): disabled via early return -- see test_leb128s_three_number_min.
        return
        test_data = 2097152
        expected_result = varints.varint_storage(128) + \
            varints.varint_storage(128) + \
            varints.varint_storage(128) + \
            varints.varint_storage(1)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_four_number_max(self):
        """Test the leb128s varint format, using maximum value necessary for 4 byte
        storage"""
        # NOTE(review): disabled via early return -- see test_leb128s_three_number_min.
        return
        test_data = 268435455
        expected_result = varints.varint_storage(255) + \
            varints.varint_storage(255) + \
            varints.varint_storage(255) + \
            varints.varint_storage(127)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_five_number_min(self):
        """Test the leb128s varint format, using minimum value necessary for 5 byte
        storage"""
        # NOTE(review): disabled via early return -- see test_leb128s_three_number_min.
        return
        test_data = 268435456
        expected_result = varints.varint_storage(128) + \
            varints.varint_storage(128) + \
            varints.varint_storage(128) + \
            varints.varint_storage(128) + \
            varints.varint_storage(1)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)

    def test_leb128s_five_number_max(self):
        """Test the leb128s varint format, using maximum value necessary for 5 byte
        storage"""
        # NOTE(review): disabled via early return -- see test_leb128s_three_number_min.
        return
        test_data = 34359738367
        expected_result = varints.varint_storage(255) + \
            varints.varint_storage(255) + \
            varints.varint_storage(255) + \
            varints.varint_storage(255) + \
            varints.varint_storage(127)
        ascii_result = varints.leb128s.encode(test_data)
        num_result = varints.leb128s.decode(ascii_result)
        self.assertEqual(ascii_result, expected_result)
        self.assertEqual(num_result, test_data)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| |
import os
import tempfile
import shutil
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.core.files.uploadedfile import UploadedFile
from django.core.exceptions import ValidationError
from rest_framework.exceptions import ValidationError as DRF_ValidationError
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
from hs_core.hydroshare.utils import resource_post_create_actions
from hs_core.views.utils import remove_folder, move_or_rename_file_or_folder
from hs_app_timeseries.models import Site, Variable, Method, ProcessingLevel, TimeSeriesResult
from hs_file_types.models import TimeSeriesLogicalFile, GenericLogicalFile, TimeSeriesFileMetaData
from hs_file_types.models.timeseries import CVVariableType, CVVariableName, CVSpeciation, \
CVSiteType, CVElevationDatum, CVMethodType, CVMedium, CVUnitsType, CVStatus, \
CVAggregationStatistic
from utils import assert_time_series_file_type_metadata
class TimeSeriesFileTypeMetaDataTest(MockIRODSTestCaseMixin, TransactionTestCase):
def setUp(self):
    """Create a test user, an empty composite resource, and temp-dir copies
    of the fixture files (valid sqlite, invalid sqlite, ODM2 CSV) used by
    the tests below.
    """
    super(TimeSeriesFileTypeMetaDataTest, self).setUp()
    self.group, _ = Group.objects.get_or_create(name='Resource Author')
    self.user = hydroshare.create_account(
        'user1@nowhere.com',
        username='user1',
        first_name='Creator_FirstName',
        last_name='Creator_LastName',
        superuser=False,
        groups=[self.group]
    )
    self.composite_resource = hydroshare.create_resource(
        resource_type='CompositeResource',
        owner=self.user,
        title='Test Time series File Type Metadata'
    )
    # Scratch directory; removed in tearDown.
    self.temp_dir = tempfile.mkdtemp()
    # Valid ODM2 sqlite fixture, copied into the temp dir before opening.
    self.sqlite_file_name = 'ODM2_Multi_Site_One_Variable.sqlite'
    self.sqlite_file = 'hs_file_types/tests/data/{}'.format(self.sqlite_file_name)
    target_temp_sqlite_file = os.path.join(self.temp_dir, self.sqlite_file_name)
    shutil.copy(self.sqlite_file, target_temp_sqlite_file)
    # NOTE(review): sqlite files are binary; text mode 'r' presumably only
    # works because tests run on POSIX -- consider 'rb'. TODO confirm
    self.sqlite_file_obj = open(target_temp_sqlite_file, 'r')
    # Invalid sqlite fixture (copied but not opened here).
    self.sqlite_invalid_file_name = 'ODM2_invalid.sqlite'
    self.sqlite_invalid_file = 'hs_file_types/tests/data/{}'.format(
        self.sqlite_invalid_file_name)
    target_temp_sqlite_invalid_file = os.path.join(self.temp_dir, self.sqlite_invalid_file_name)
    shutil.copy(self.sqlite_invalid_file, target_temp_sqlite_invalid_file)
    # ODM2 CSV fixture.
    self.odm2_csv_file_name = 'ODM2_Multi_Site_One_Variable_Test.csv'
    self.odm2_csv_file = 'hs_app_timeseries/tests/{}'.format(self.odm2_csv_file_name)
    target_temp_csv_file = os.path.join(self.temp_dir, self.odm2_csv_file_name)
    shutil.copy(self.odm2_csv_file, target_temp_csv_file)
    self.odm2_csv_file_obj = open(target_temp_csv_file, 'r')
def tearDown(self):
    """Remove the scratch directory created in setUp."""
    super(TimeSeriesFileTypeMetaDataTest, self).tearDown()
    if not os.path.exists(self.temp_dir):
        return
    shutil.rmtree(self.temp_dir)
def test_sqlite_set_file_type_to_timeseries(self):
    """Setting a valid ODM2 sqlite file to TimeSeries file type should
    replace its GenericLogicalFile with a TimeSeriesLogicalFile and extract
    the file level metadata from the sqlite database.
    """
    # here we are using a valid sqlite file for setting it
    # to TimeSeries file type which includes metadata extraction
    self.sqlite_file_obj = open(self.sqlite_file, 'r')
    self._create_composite_resource(title='Untitled Resource')
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # check that there is one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    # check that there is no TimeSeriesLogicalFile object
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    # set the sqlite file to TimeSeries file type
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test extracted metadata (shared assertions live in the test utils module)
    assert_time_series_file_type_metadata(self)
    # test file level keywords
    # res_file = self.composite_resource.files.first()
    # logical_file = res_file.logical_file
    # self.assertEqual(len(logical_file.metadata.keywords), 1)
    # self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
    self.composite_resource.delete()
def test_CSV_set_file_type_to_timeseries(self):
    """Setting a valid ODM2 CSV file to TimeSeries file type should add a
    blank sqlite file, move both files into a folder named after the CSV
    file, and populate only series-level metadata (no site/variable/method/
    processing-level/result elements at the file level).
    """
    # here we are using a valid CSV file for setting it
    # to TimeSeries file type which includes metadata extraction
    self.odm2_csv_file_obj = open(self.odm2_csv_file, 'r')
    file_to_upload = UploadedFile(file=self.odm2_csv_file_obj,
                                  name=os.path.basename(self.odm2_csv_file_obj.name))
    self._create_composite_resource(title='Untitled Resource', file_to_upload=file_to_upload)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # check that there is one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    # check that there is no TimeSeriesLogicalFile object
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    # set the CSV file to TimeSeries file type
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that the ODM2.sqlite blank file got added to the resource
    self.assertEqual(self.composite_resource.files.all().count(), 2)
    csv_res_file = None
    sqlite_res_file = None
    for res_file in self.composite_resource.files.all():
        if res_file.extension == '.sqlite':
            sqlite_res_file = res_file
        elif res_file.extension == '.csv':
            csv_res_file = res_file
    self.assertNotEqual(csv_res_file, None)
    self.assertNotEqual(sqlite_res_file, None)
    self.assertEqual(csv_res_file.logical_file_type_name, "TimeSeriesLogicalFile")
    self.assertEqual(sqlite_res_file.logical_file_type_name, "TimeSeriesLogicalFile")
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 1)
    logical_file = csv_res_file.logical_file
    # test that both csv and sqlite files of the logical file are in a folder
    csv_file_name = os.path.basename(self.odm2_csv_file_obj.name)
    for res_file in logical_file.files.all():
        # folder name is the csv file name without the '.csv' extension
        self.assertEqual(res_file.file_folder, csv_file_name[:-4])
    # since the uploaded csv file has 2 data columns, the metadata should have 2 series names
    self.assertEqual(len(logical_file.metadata.series_names), 2)
    csv_data_column_names = set(['Temp_DegC_Mendon', 'Temp_DegC_Paradise'])
    self.assertEqual(set(logical_file.metadata.series_names), csv_data_column_names)
    # since the uploaded csv file has 2 data columns, the metadata should have
    # the attribute value_counts (dict) with 2 elements
    self.assertEqual(len(logical_file.metadata.value_counts), 2)
    self.assertEqual(set(logical_file.metadata.value_counts.keys()), csv_data_column_names)
    # there should be 20 data values for each series (note: counts stored as strings)
    self.assertEqual(logical_file.metadata.value_counts['Temp_DegC_Mendon'], '20')
    self.assertEqual(logical_file.metadata.value_counts['Temp_DegC_Paradise'], '20')
    # the dataset name (title) must be set to the name of the CSV file
    self.assertEqual(logical_file.dataset_name, csv_file_name[:-4])
    # there should not be any file level abstract
    self.assertEqual(logical_file.metadata.abstract, None)
    # there should not be any file level keywords
    self.assertEqual(logical_file.metadata.keywords, [])
    # there should be 1 coverage element of type period at the file level
    self.assertEqual(logical_file.metadata.coverages.all().count(), 1)
    self.assertEqual(logical_file.metadata.coverages.filter(type='period').count(), 1)
    self.assertEqual(logical_file.has_csv_file, True)
    # at file level there should not be any site element
    self.assertEqual(logical_file.metadata.sites.all().count(), 0)
    # at file level there should not be any method element
    self.assertEqual(logical_file.metadata.methods.all().count(), 0)
    # at file level there should not be any variable element
    self.assertEqual(logical_file.metadata.variables.all().count(), 0)
    # at file level there should not be any processing level element
    self.assertEqual(logical_file.metadata.processing_levels.all().count(), 0)
    # at file level there should not be any result element
    self.assertEqual(logical_file.metadata.time_series_results.all().count(), 0)
    # resource title does not get updated when csv is set to file type
    self.assertEqual(self.composite_resource.metadata.title.value, 'Untitled Resource')
    # self._test_no_change_in_metadata()
    # there should be 2 format elements - since the resource has a csv file and a sqlite file
    self.assertEqual(self.composite_resource.metadata.formats.all().count(), 2)
    # there should be 1 coverage element of type period
    self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 1)
    self.assertEqual(self.composite_resource.metadata.coverages.filter(
        type='period').count(), 1)
    self.composite_resource.delete()
def test_set_file_type_to_sqlite_invalid_file(self):
    """Setting an invalid sqlite file to TimeSeries file type must fail
    (failure assertions are in the shared _test_invalid_file helper).
    """
    # here we are using an invalid sqlite file for setting it
    # to TimeSeries file type which should fail
    self.sqlite_file_obj = open(self.sqlite_invalid_file, 'r')
    self._create_composite_resource()
    self._test_invalid_file()
    self.composite_resource.delete()
def test_invalid_csv_file(self):
    """Each fixture CSV below is malformed in a different way; setting any
    of them to TimeSeries file type must fail (checked by the shared
    _test_invalid_csv_file helper).
    """
    # (file name, what is wrong with it)
    invalid_csv_files = [
        ('Invalid_Headings_Test_1.csv', 'invalid number of data column headings'),
        ('Invalid_Headings_Test_2.csv', 'missing a data column heading'),
        ('Invalid_Headings_Test_3.csv', 'an additional data column heading'),
        ('Invalid_Headings_Test_4.csv', 'a duplicate data column heading'),
        ('Invalid_Headings_Test_5.csv', 'no data column heading'),
        ('Invalid_format_Test.csv', 'not a CSV file at all'),
        ('Invalid_Data_Test_1.csv', 'a bad datetime value'),
        ('Invalid_Data_Test_2.csv', 'a non-numeric data value'),
        ('Invalid_Data_Test_3.csv', 'a missing data value'),
        ('Invalid_Data_Test_4.csv', 'an additional data value'),
        ('Invalid_Data_Test_5.csv', 'no data values'),
    ]
    for invalid_csv_file_name, _defect in invalid_csv_files:
        self._test_invalid_csv_file(invalid_csv_file_name)
def test_sqlite_metadata_update(self):
    """After extracting metadata from a valid sqlite file, updating each
    file-level element (Site, Variable, Method, ProcessingLevel,
    TimeSeriesResult) should persist the change, flip metadata.is_dirty to
    True, and - for site lat/long - propagate to both the file level and the
    resource level spatial coverage.
    """
    # here we are using a valid sqlite file for setting it
    # to TimeSeries file type which includes metadata extraction
    # then we are testing update of the file level metadata elements
    self.sqlite_file_obj = open(self.sqlite_file, 'r')
    self._create_composite_resource(title='Untitled Resource')
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # check that there is one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    # check that there is no TimeSeriesLogicalFile object
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    # set the sqlite file to TimeSeries file type
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # test updating site element
    site = logical_file.metadata.sites.filter(site_code='USU-LBR-Paradise').first()
    self.assertNotEqual(site, None)
    site_name = 'Little Bear River at McMurdy Hollow near Paradise, Utah'
    self.assertEqual(site.site_name, site_name)
    self.assertEqual(site.elevation_m, 1445)
    self.assertEqual(site.elevation_datum, 'NGVD29')
    self.assertEqual(site.site_type, 'Stream')
    # freshly extracted metadata is not dirty
    self.assertFalse(logical_file.metadata.is_dirty)
    site_name = 'Little Bear River at Logan, Utah'
    site_data = {'site_name': site_name, 'elevation_m': site.elevation_m,
                 'elevation_datum': site.elevation_datum, 'site_type': site.site_type}
    logical_file.metadata.update_element('Site', site.id, **site_data)
    site = logical_file.metadata.sites.filter(site_code='USU-LBR-Paradise').first()
    self.assertEqual(site.site_name, site_name)
    # any element update must mark the metadata dirty
    self.assertTrue(logical_file.metadata.is_dirty)
    # updating site lat/long should update the resource coverage as well as file level coverage
    box_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
    self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
    self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
    self.assertEqual(box_coverage.value['northlimit'], 41.718473)
    self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
    self.assertEqual(box_coverage.value['southlimit'], 41.495409)
    self.assertEqual(box_coverage.value['westlimit'], -111.946402)
    box_coverage = logical_file.metadata.spatial_coverage
    self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
    self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
    self.assertEqual(box_coverage.value['northlimit'], 41.718473)
    self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
    self.assertEqual(box_coverage.value['southlimit'], 41.495409)
    self.assertEqual(box_coverage.value['westlimit'], -111.946402)
    site_data['latitude'] = 40.7896
    logical_file.metadata.update_element('Site', site.id, **site_data)
    site = logical_file.metadata.sites.filter(site_code='USU-LBR-Paradise').first()
    self.assertEqual(site.latitude, 40.7896)
    # test that resource level coverage got updated
    box_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first()
    self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
    self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
    self.assertEqual(box_coverage.value['northlimit'], 41.718473)
    self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
    # this is the changed value for the southlimit as a result of changing the site latitude
    self.assertEqual(box_coverage.value['southlimit'], 40.7896)
    self.assertEqual(box_coverage.value['westlimit'], -111.946402)
    # test that file level coverage got updated
    box_coverage = logical_file.metadata.spatial_coverage
    self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
    self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
    self.assertEqual(box_coverage.value['northlimit'], 41.718473)
    self.assertEqual(box_coverage.value['eastlimit'], -111.799324)
    # this is the changed value for the southlimit as a result of changing the site latitude
    self.assertEqual(box_coverage.value['southlimit'], 40.7896)
    self.assertEqual(box_coverage.value['westlimit'], -111.946402)
    # reset the dirty flag before the next element update
    logical_file.metadata.is_dirty = False
    logical_file.metadata.save()
    # test updating variable element
    variable = logical_file.metadata.variables.filter(variable_code='USU36').first()
    self.assertNotEqual(variable, None)
    self.assertEqual(variable.variable_name, 'Temperature')
    self.assertEqual(variable.variable_type, 'Water Quality')
    self.assertEqual(variable.no_data_value, -9999)
    self.assertEqual(variable.speciation, 'Not Applicable')
    self.assertEqual(variable.variable_definition, None)
    var_def = 'Concentration of oxygen dissolved in water.'
    variable_data = {'variable_definition': var_def}
    logical_file.metadata.update_element('Variable', variable.id, **variable_data)
    variable = logical_file.metadata.variables.filter(variable_code='USU36').first()
    self.assertEqual(variable.variable_definition, var_def)
    # fields not included in the update payload must be unchanged
    self.assertEqual(variable.variable_name, 'Temperature')
    self.assertEqual(variable.variable_type, 'Water Quality')
    self.assertEqual(variable.no_data_value, -9999)
    self.assertEqual(variable.speciation, 'Not Applicable')
    self.assertTrue(logical_file.metadata.is_dirty)
    logical_file.metadata.is_dirty = False
    logical_file.metadata.save()
    # test updating method element
    method = logical_file.metadata.methods.filter(method_code=28).first()
    self.assertNotEqual(method, None)
    self.assertEqual(method.method_name, 'Quality Control Level 1 Data Series created from raw '
                                         'QC Level 0 data using ODM Tools.')
    self.assertEqual(method.method_type, 'Instrument deployment')
    self.assertEqual(method.method_description, 'Quality Control Level 1 Data Series created '
                                                'from raw QC Level 0 data using ODM Tools.')
    self.assertEqual(method.method_link, None)
    method_link = "http://somesite.com"
    method_data = {'method_link': method_link}
    logical_file.metadata.update_element('Method', method.id, **method_data)
    method = logical_file.metadata.methods.filter(method_code=28).first()
    self.assertNotEqual(method, None)
    self.assertEqual(method.method_name, 'Quality Control Level 1 Data Series created from raw '
                                         'QC Level 0 data using ODM Tools.')
    self.assertEqual(method.method_type, 'Instrument deployment')
    self.assertEqual(method.method_description, 'Quality Control Level 1 Data Series created '
                                                'from raw QC Level 0 data using ODM Tools.')
    self.assertEqual(method.method_link, method_link)
    self.assertTrue(logical_file.metadata.is_dirty)
    logical_file.metadata.is_dirty = False
    logical_file.metadata.save()
    # test updating processing level element
    pro_level = logical_file.metadata.processing_levels.filter(processing_level_code=1).first()
    self.assertNotEqual(pro_level, None)
    self.assertEqual(pro_level.definition, 'Quality controlled data')
    explanation = 'Quality controlled data that have passed quality assurance procedures ' \
                  'such as routine estimation of timing and sensor calibration or visual ' \
                  'inspection and removal of obvious errors. An example is USGS published ' \
                  'streamflow records following parsing through USGS quality ' \
                  'control procedures.'
    self.assertEqual(pro_level.explanation, explanation)
    definition = "Uncontrolled data"
    pro_level_data = {'definition': definition}
    logical_file.metadata.update_element('ProcessingLevel', pro_level.id, **pro_level_data)
    pro_level = logical_file.metadata.processing_levels.filter(processing_level_code=1).first()
    self.assertNotEqual(pro_level, None)
    self.assertEqual(pro_level.definition, definition)
    # explanation must be unchanged by the definition-only update
    explanation = 'Quality controlled data that have passed quality assurance procedures ' \
                  'such as routine estimation of timing and sensor calibration or visual ' \
                  'inspection and removal of obvious errors. An example is USGS published ' \
                  'streamflow records following parsing through USGS quality ' \
                  'control procedures.'
    self.assertEqual(pro_level.explanation, explanation)
    self.assertTrue(logical_file.metadata.is_dirty)
    logical_file.metadata.is_dirty = False
    logical_file.metadata.save()
    # test updating time series result element
    ts_result = logical_file.metadata.time_series_results.all().first()
    self.assertNotEqual(ts_result, None)
    self.assertEqual(ts_result.units_type, 'Temperature')
    self.assertEqual(ts_result.units_name, 'degree celsius')
    self.assertEqual(ts_result.units_abbreviation, 'degC')
    self.assertEqual(ts_result.status, 'Unknown')
    self.assertEqual(ts_result.sample_medium, 'Surface Water')
    self.assertEqual(ts_result.value_count, 1441)
    self.assertEqual(ts_result.aggregation_statistics, 'Average')
    ts_data = {'status': 'Complete'}
    logical_file.metadata.update_element('timeseriesresult', ts_result.id, **ts_data)
    ts_result = logical_file.metadata.time_series_results.all().first()
    self.assertNotEqual(ts_result, None)
    self.assertEqual(ts_result.units_type, 'Temperature')
    self.assertEqual(ts_result.units_name, 'degree celsius')
    self.assertEqual(ts_result.units_abbreviation, 'degC')
    self.assertEqual(ts_result.status, 'Complete')
    self.assertEqual(ts_result.sample_medium, 'Surface Water')
    self.assertEqual(ts_result.value_count, 1441)
    self.assertEqual(ts_result.aggregation_statistics, 'Average')
    self.assertTrue(logical_file.metadata.is_dirty)
    self.composite_resource.delete()
def test_file_metadata_on_logical_file_delete(self):
    """Deleting a TimeSeriesLogicalFile must also delete all of its
    file-level metadata elements and all CV (controlled vocabulary) lookup
    records that were loaded from the sqlite file.
    """
    # test that when the TimeSeriesLogicalFile instance is deleted
    # all metadata associated with it also get deleted
    self.sqlite_file_obj = open(self.sqlite_file, 'r')
    self._create_composite_resource(title='Untitled Resource')
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # set the sqlite file to TimeSeries file type
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    logical_file = res_file.logical_file
    # file level metadata
    # there should be Site metadata objects
    self.assertTrue(Site.objects.count() > 0)
    # there should be Variable metadata objects
    self.assertTrue(Variable.objects.count() > 0)
    # there should be Method metadata objects
    self.assertTrue(Method.objects.count() > 0)
    # there should be ProcessingLevel metadata objects
    self.assertTrue(ProcessingLevel.objects.count() > 0)
    # there should be TimeSeriesResult metadata objects
    self.assertTrue(TimeSeriesResult.objects.count() > 0)
    # CV lookup data (counts are the fixed sizes of the ODM2 vocabularies)
    self.assertEqual(logical_file.metadata.cv_variable_types.all().count(), 23)
    self.assertEqual(CVVariableType.objects.all().count(), 23)
    self.assertEqual(logical_file.metadata.cv_variable_names.all().count(), 805)
    self.assertEqual(CVVariableName.objects.all().count(), 805)
    self.assertEqual(logical_file.metadata.cv_speciations.all().count(), 145)
    self.assertEqual(CVSpeciation.objects.all().count(), 145)
    self.assertEqual(logical_file.metadata.cv_elevation_datums.all().count(), 5)
    self.assertEqual(CVElevationDatum.objects.all().count(), 5)
    self.assertEqual(logical_file.metadata.cv_site_types.all().count(), 51)
    self.assertEqual(CVSiteType.objects.all().count(), 51)
    self.assertEqual(logical_file.metadata.cv_method_types.all().count(), 25)
    self.assertEqual(CVMethodType.objects.all().count(), 25)
    self.assertEqual(logical_file.metadata.cv_units_types.all().count(), 179)
    self.assertEqual(CVUnitsType.objects.all().count(), 179)
    self.assertEqual(logical_file.metadata.cv_statuses.all().count(), 4)
    self.assertEqual(CVStatus.objects.all().count(), 4)
    self.assertEqual(logical_file.metadata.cv_mediums.all().count(), 18)
    self.assertEqual(CVMedium.objects.all().count(), 18)
    self.assertEqual(logical_file.metadata.cv_aggregation_statistics.all().count(), 17)
    self.assertEqual(CVAggregationStatistic.objects.all().count(), 17)
    # delete the logical file
    logical_file.logical_delete(self.user)
    # test that we have no logical file of type TimeSeries
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    self.assertEqual(TimeSeriesFileMetaData.objects.count(), 0)
    # test that all file level metadata deleted
    # there should be no Site metadata objects
    self.assertTrue(Site.objects.count() == 0)
    # there should be no Variable metadata objects
    self.assertTrue(Variable.objects.count() == 0)
    # there should be no Method metadata objects
    self.assertTrue(Method.objects.count() == 0)
    # there should be no ProcessingLevel metadata objects
    self.assertTrue(ProcessingLevel.objects.count() == 0)
    # there should be no TimeSeriesResult metadata objects
    self.assertTrue(TimeSeriesResult.objects.count() == 0)
    # there should not be any CV type records
    self.assertEqual(CVVariableType.objects.all().count(), 0)
    self.assertEqual(CVVariableName.objects.all().count(), 0)
    self.assertEqual(CVSpeciation.objects.all().count(), 0)
    self.assertEqual(CVElevationDatum.objects.all().count(), 0)
    self.assertEqual(CVSiteType.objects.all().count(), 0)
    self.assertEqual(CVMethodType.objects.all().count(), 0)
    self.assertEqual(CVUnitsType.objects.all().count(), 0)
    self.assertEqual(CVStatus.objects.all().count(), 0)
    self.assertEqual(CVMedium.objects.all().count(), 0)
    self.assertEqual(CVAggregationStatistic.objects.all().count(), 0)
    self.composite_resource.delete()
def test_timeseries_file_type_folder_delete(self):
    """Deleting the aggregation folder must cascade-delete everything.

    When a file is set to TimeSeriesLogicalFile type, the system
    automatically creates a folder named after that file.  Removing the
    folder must delete all files in it, the logical file object, and all
    of the associated metadata objects.
    """
    # sqlite databases are binary; open in binary mode so the upload
    # works under Python 3 as well as Python 2
    self.sqlite_file_obj = open(self.sqlite_file, 'rb')
    self._create_composite_resource(title='Untitled Resource')
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # set the sqlite file to TimeSeries file type
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    res_file = self.composite_resource.files.first()
    # test that we have one logical file of type TimeSeries
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 1)
    self.assertEqual(TimeSeriesFileMetaData.objects.count(), 1)
    # delete the folder for the logical file
    folder_path = "data/contents/ODM2_Multi_Site_One_Variable"
    remove_folder(self.user, self.composite_resource.short_id, folder_path)
    # there should be no content files
    self.assertEqual(self.composite_resource.files.count(), 0)
    # there should not be any timeseries logical file or metadata file
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    self.assertEqual(TimeSeriesFileMetaData.objects.count(), 0)
    # all file-level metadata element objects must be gone
    for metadata_cls in (Site, Variable, Method, ProcessingLevel, TimeSeriesResult):
        self.assertEqual(metadata_cls.objects.count(), 0)
    # there should not be any CV term records left either
    for cv_cls in (CVVariableType, CVVariableName, CVSpeciation, CVElevationDatum,
                   CVSiteType, CVMethodType, CVUnitsType, CVStatus, CVMedium,
                   CVAggregationStatistic):
        self.assertEqual(cv_cls.objects.all().count(), 0)
    self.composite_resource.delete()
def test_file_metadata_on_file_delete(self):
    """Deleting any file of a TimeSeries aggregation must delete all of
    the metadata associated with the TimeSeriesLogicalFile.
    """
    # exercise the delete path with the .sqlite file
    self._test_file_metadata_on_file_delete(ext='.sqlite')
    # TODO: test with deleting of 'csv' file - uncomment the following when we implement
    # csv file
    # self._test_file_metadata_on_file_delete(ext='.csv')
def test_file_rename_or_move(self):
    """Files belonging to a TimeSeries logical file may be neither renamed
    nor moved; both operations must raise DRF_ValidationError.
    """
    self.sqlite_file_obj = open(self.sqlite_file, 'r')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # extract metadata from the sqlite file
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    self.assertEqual(self.composite_resource.files.count(), 1)
    folder = "data/contents/ODM2_Multi_Site_One_Variable/{}"
    source = folder.format('ODM2_Multi_Site_One_Variable.sqlite')
    # renaming a file of the aggregation must fail
    target = folder.format('ODM2_Multi_Site_One_Variable_1.sqlite')
    with self.assertRaises(DRF_ValidationError):
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      source, target)
    # TODO: test for renaming csv file when we implement csv file
    # moving a file of the aggregation must fail as well
    target = 'data/contents/new_folder/ODM2_Multi_Site_One_Variable.sqlite'
    with self.assertRaises(DRF_ValidationError):
        move_or_rename_file_or_folder(self.user, self.composite_resource.short_id,
                                      source, target)
    # TODO: test for moving csv file when we implement csv file
    self.composite_resource.delete()
def _test_file_metadata_on_file_delete(self, ext):
    """Delete the aggregation file with extension *ext* and verify that the
    TimeSeriesLogicalFile object and all of its metadata are deleted too.

    :param ext: file extension (e.g. '.sqlite') selecting the file to delete
    """
    # sqlite databases are binary; open in binary mode so the upload
    # works under Python 3 as well as Python 2
    self.sqlite_file_obj = open(self.sqlite_file, 'rb')
    self._create_composite_resource()
    res_file = self.composite_resource.files.first()
    # set the sqlite file to TimeSeries file type
    TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that we have one logical file of type TimeSeries
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 1)
    self.assertEqual(TimeSeriesFileMetaData.objects.count(), 1)
    # delete the content file specified by extension (ext parameter)
    res_file = hydroshare.utils.get_resource_files_by_extension(
        self.composite_resource, ext)[0]
    hydroshare.delete_resource_file(self.composite_resource.short_id,
                                    res_file.id,
                                    self.user)
    # test that we don't have any logical file of type TimeSeries
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    self.assertEqual(TimeSeriesFileMetaData.objects.count(), 0)
    # all file-level metadata element objects must be gone
    for metadata_cls in (Site, Variable, Method, ProcessingLevel, TimeSeriesResult):
        self.assertEqual(metadata_cls.objects.count(), 0)
    # there should not be any CV term records left either
    for cv_cls in (CVVariableType, CVVariableName, CVSpeciation, CVElevationDatum,
                   CVSiteType, CVMethodType, CVUnitsType, CVStatus, CVMedium,
                   CVAggregationStatistic):
        self.assertEqual(cv_cls.objects.all().count(), 0)
    self.composite_resource.delete()
def _create_composite_resource(self, title='Test Time series File Type Metadata',
                               file_to_upload=None):
    """Create self.composite_resource owned by self.user with one uploaded file.

    Uploads self.sqlite_file_obj when no file object is supplied.
    """
    if file_to_upload is None:
        upload_name = os.path.basename(self.sqlite_file_obj.name)
        file_to_upload = UploadedFile(file=self.sqlite_file_obj, name=upload_name)
    self.composite_resource = hydroshare.create_resource(
        resource_type='CompositeResource',
        owner=self.user,
        title=title,
        files=(file_to_upload,)
    )
    # run the post-create signal handler so the generic logical file gets set
    resource_post_create_actions(resource=self.composite_resource, user=self.user,
                                 metadata=self.composite_resource.metadata)
def _test_invalid_file(self):
    """Attempt to set an invalid sqlite file to TimeSeries file type and
    verify nothing changes: the file is kept and stays a GenericLogicalFile.
    """
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # check that the resource file is associated with the generic logical file
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # trying to set this invalid sqlite file to timeseries file type should raise
    # ValidationError
    with self.assertRaises(ValidationError):
        TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # test that the invalid file did not get deleted
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    # check that the resource file is STILL associated with the generic logical file
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
def _test_invalid_csv_file(self, invalid_csv_file_name):
    """Upload the named invalid csv file and verify that setting it to
    TimeSeries file type fails, leaving the file and its GenericLogicalFile
    association untouched.
    """
    csv_file_obj = self._get_invalid_csv_file_obj(invalid_csv_file_name)
    upload = UploadedFile(file=csv_file_obj,
                          name=os.path.basename(csv_file_obj.name))
    self._create_composite_resource(title='Untitled Resource', file_to_upload=upload)
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # the uploaded file starts out associated with GenericLogicalFile
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    # there is exactly one GenericLogicalFile object
    self.assertEqual(GenericLogicalFile.objects.count(), 1)
    # and no TimeSeriesLogicalFile object yet
    self.assertEqual(TimeSeriesLogicalFile.objects.count(), 0)
    # setting the invalid csv file to timeseries file type must raise
    # ValidationError
    with self.assertRaises(ValidationError):
        TimeSeriesLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
    # the invalid file must not have been deleted
    self.assertEqual(self.composite_resource.files.all().count(), 1)
    res_file = self.composite_resource.files.first()
    # and it must still be associated with the generic logical file
    self.assertEqual(res_file.has_logical_file, True)
    self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
    self.composite_resource.delete()
def _get_invalid_csv_file_obj(self, invalid_csv_file_name):
    """Copy the named invalid csv fixture into self.temp_dir and return it
    opened for reading.
    """
    source = 'hs_app_timeseries/tests/{}'.format(invalid_csv_file_name)
    destination = os.path.join(self.temp_dir, invalid_csv_file_name)
    shutil.copy(source, destination)
    return open(destination, 'r')
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains container classes to represent different protocol buffer types.
This file defines container classes which represent categories of protocol
buffer field types which need extra maintenance. Currently these categories
are:
- Repeated scalar fields - These are all repeated fields which aren't
composite (e.g. they are of simple types like int32, string, etc).
- Repeated composite fields - Repeated fields which are composite. This
includes groups and nested messages.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import collections
import sys
if sys.version_info[0] < 3:
  # We would use collections.MutableMapping all the time, but in Python 2 it
  # doesn't define __slots__.  This causes two significant problems:
  #
  # 1. we can't disallow arbitrary attribute assignment, even if our derived
  #    classes *do* define __slots__.
  #
  # 2. we can't safely derive a C type from it without __slots__ defined (the
  #    interpreter expects to find a dict at tp_dictoffset, which we can't
  #    robustly provide; and we don't want an instance dict anyway).
  #
  # So this is the Python 2.7 definition of Mapping/MutableMapping functions
  # verbatim, except that:
  # 1. We declare __slots__.
  # 2. We don't declare this as a virtual base class.  The classes defined
  #    in collections are the interesting base classes, not us.
  #
  # Note: deriving from object is critical.  It is the only thing that makes
  # this a true type, allowing us to derive from it in C++ cleanly and making
  # __slots__ properly disallow arbitrary element assignment.

  class Mapping(object):
    """Read-only mapping interface (Python 2.7 collections.Mapping, + slots).

    Subclasses must provide __getitem__, __len__ and __iter__; every method
    below is derived from those three.
    """
    __slots__ = ()

    def get(self, key, default=None):
      """Returns self[key], or *default* when the key is absent."""
      try:
        return self[key]
      except KeyError:
        return default

    def __contains__(self, key):
      # Membership is defined by whether lookup raises KeyError.
      try:
        self[key]
      except KeyError:
        return False
      else:
        return True

    def iterkeys(self):
      return iter(self)

    def itervalues(self):
      for key in self:
        yield self[key]

    def iteritems(self):
      for key in self:
        yield (key, self[key])

    def keys(self):
      return list(self)

    def items(self):
      return [(key, self[key]) for key in self]

    def values(self):
      return [self[key] for key in self]

    # Mappings are not hashable by default, but subclasses can change this
    __hash__ = None

    def __eq__(self, other):
      # Equal iff the other object is also a mapping with the same
      # (key, value) pairs; iteration order is irrelevant.
      if not isinstance(other, collections.Mapping):
        return NotImplemented
      return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
      return not (self == other)

  class MutableMapping(Mapping):
    """Mutable mapping interface (Python 2.7 collections.MutableMapping).

    Subclasses must additionally provide __setitem__ and __delitem__.
    """
    __slots__ = ()

    # Private sentinel distinguishing "no default given" from default=None.
    __marker = object()

    def pop(self, key, default=__marker):
      """Removes *key* and returns its value, with dict.pop() semantics."""
      try:
        value = self[key]
      except KeyError:
        if default is self.__marker:
          raise
        return default
      else:
        del self[key]
        return value

    def popitem(self):
      """Removes and returns an arbitrary (key, value) pair."""
      try:
        key = next(iter(self))
      except StopIteration:
        raise KeyError
      value = self[key]
      del self[key]
      return key, value

    def clear(self):
      # Pop entries until popitem() signals exhaustion with KeyError.
      try:
        while True:
          self.popitem()
      except KeyError:
        pass

    def update(*args, **kwds):
      # Written with *args (no explicit self) so 'self' and 'other' may also
      # be used as ordinary keyword keys in **kwds, as with dict.update().
      if len(args) > 2:
        raise TypeError("update() takes at most 2 positional "
                        "arguments ({} given)".format(len(args)))
      elif not args:
        raise TypeError("update() takes at least 1 argument (0 given)")
      self = args[0]
      other = args[1] if len(args) >= 2 else ()

      if isinstance(other, Mapping):
        for key in other:
          self[key] = other[key]
      elif hasattr(other, "keys"):
        for key in other.keys():
          self[key] = other[key]
      else:
        for key, value in other:
          self[key] = value
      for key, value in kwds.items():
        self[key] = value

    def setdefault(self, key, default=None):
      """Returns self[key], first inserting *default* when key is absent."""
      try:
        return self[key]
      except KeyError:
        self[key] = default
      return default

  # Register as virtual subclasses so isinstance() checks against the
  # collections ABCs still succeed for these local classes.
  collections.Mapping.register(Mapping)
  collections.MutableMapping.register(MutableMapping)

else:
  # In Python 3 we can just use MutableMapping directly, because it defines
  # __slots__.
  MutableMapping = collections.MutableMapping
class BaseContainer(object):
  """Shared plumbing for the repeated-field containers.

  Keeps the payload in the private list ``_values`` and holds the message
  listener that concrete subclasses notify when they mutate the payload.
  """

  # Minimizes memory usage and disallows assignment to other attributes.
  __slots__ = ['_message_listener', '_values']

  def __init__(self, message_listener):
    """
    Args:
      message_listener: A MessageListener implementation.
        The RepeatedScalarFieldContainer will call this object's
        Modified() method when it is modified.
    """
    self._message_listener = message_listener
    self._values = []

  def __len__(self):
    """Number of stored elements."""
    return len(self._values)

  def __getitem__(self, key):
    """Element (or slice) at *key*, exactly as a list would return it."""
    return self._values[key]

  def __repr__(self):
    return repr(self._values)

  def __ne__(self, other):
    """Negation of __eq__, which concrete subclasses define."""
    return not self == other

  # Containers are mutable, hence deliberately unhashable.
  def __hash__(self):
    raise TypeError('unhashable object')

  def sort(self, *args, **kwargs):
    # The legacy 'sort_function' keyword is still honored by forwarding it
    # as the Python 2 'cmp' argument of list.sort().
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    self._values.sort(*args, **kwargs)
class RepeatedScalarFieldContainer(BaseContainer):
  """Simple, type-checked, list-like container for holding repeated scalars."""

  # Disallows assignment to other attributes.
  __slots__ = ['_type_checker']

  def __init__(self, message_listener, type_checker):
    """
    Args:
      message_listener: A MessageListener implementation.
        The RepeatedScalarFieldContainer will call this object's
        Modified() method when it is modified.
      type_checker: A type_checkers.ValueChecker instance to run on elements
        inserted into this container.
    """
    super(RepeatedScalarFieldContainer, self).__init__(message_listener)
    self._type_checker = type_checker

  def append(self, value):
    """Appends an item to the list. Similar to list.append()."""
    # CheckValue() may coerce the value; store whatever it returns.
    self._values.append(self._type_checker.CheckValue(value))
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def insert(self, key, value):
    """Inserts the item at the specified position. Similar to list.insert()."""
    self._values.insert(key, self._type_checker.CheckValue(value))
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def extend(self, elem_seq):
    """Extends by appending the given iterable. Similar to list.extend()."""

    if elem_seq is None:
      return
    try:
      elem_seq_iter = iter(elem_seq)
    except TypeError:
      if not elem_seq:
        # silently ignore falsy inputs :-/.
        # TODO(user): Deprecate this behavior. b/18413862
        return
      raise

    # Check (and possibly coerce) every element before mutating, so a bad
    # element leaves the container unchanged.
    new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter]
    if new_values:
      self._values.extend(new_values)
      self._message_listener.Modified()

  def MergeFrom(self, other):
    """Appends the contents of another repeated field of the same type to this
    one. We do not check the types of the individual fields.
    """
    self._values.extend(other._values)
    self._message_listener.Modified()

  def remove(self, elem):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def pop(self, key=-1):
    """Removes and returns an item at a given index. Similar to list.pop()."""
    value = self._values[key]
    self.__delitem__(key)
    return value

  def __setitem__(self, key, value):
    """Sets the item on the specified position."""
    if isinstance(key, slice):  # PY3
      # Python 3 routes slice assignment through __setitem__; delegate to the
      # Python 2 style __setslice__ below.  Stepped slices are rejected.
      if key.step is not None:
        raise ValueError('Extended slices not supported')
      self.__setslice__(key.start, key.stop, value)
    else:
      self._values[key] = self._type_checker.CheckValue(value)
      self._message_listener.Modified()

  def __getslice__(self, start, stop):
    """Retrieves the subset of items from between the specified indices."""
    return self._values[start:stop]

  def __setslice__(self, start, stop, values):
    """Sets the subset of items from between the specified indices."""
    # Type-check each incoming value; None bounds behave as in list slicing.
    new_values = []
    for value in values:
      new_values.append(self._type_checker.CheckValue(value))
    self._values[start:stop] = new_values
    self._message_listener.Modified()

  def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __delslice__(self, start, stop):
    """Deletes the subset of items from between the specified indices."""
    del self._values[start:stop]
    self._message_listener.Modified()

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    # Special case for the same type which should be common and fast.
    if isinstance(other, self.__class__):
      return other._values == self._values
    # We are presumably comparing against some other sequence type.
    return other == self._values
# Register BaseContainer as a virtual subclass of MutableSequence so that
# isinstance checks against the collections ABC succeed for all containers.
collections.MutableSequence.register(BaseContainer)
class RepeatedCompositeFieldContainer(BaseContainer):
  """Simple, list-like container for holding repeated composite fields."""

  # Restrict instances to the slots declared here and in BaseContainer.
  __slots__ = ['_message_descriptor']

  def __init__(self, message_listener, message_descriptor):
    """
    We are handed a descriptor rather than the generated class itself,
    because when this container is constructed the concrete type it will
    hold may not have been initialized yet.

    Args:
      message_listener: A MessageListener implementation.
        The RepeatedCompositeFieldContainer will call this object's
        Modified() method when it is modified.
      message_descriptor: A Descriptor instance describing the protocol type
        that should be present in this container.  Its _concrete_class field
        is used when the client calls add().
    """
    super(RepeatedCompositeFieldContainer, self).__init__(message_listener)
    self._message_descriptor = message_descriptor

  def add(self, **kwargs):
    """Adds a new element at the end of the list and returns it. Keyword
    arguments may be used to initialize the element.
    """
    element = self._message_descriptor._concrete_class(**kwargs)
    element._SetListener(self._message_listener)
    self._values.append(element)
    if not self._message_listener.dirty:
      self._message_listener.Modified()
    return element

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    factory = self._message_descriptor._concrete_class
    listener = self._message_listener
    for source in elem_seq:
      duplicate = factory()
      duplicate._SetListener(listener)
      duplicate.MergeFrom(source)
      self._values.append(duplicate)
    listener.Modified()

  def MergeFrom(self, other):
    """Appends the contents of another repeated field of the same type to this
    one, copying each individual message.
    """
    self.extend(other._values)

  def remove(self, elem):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def pop(self, key=-1):
    """Removes and returns an item at a given index. Similar to list.pop()."""
    popped = self[key]
    del self[key]
    return popped

  def __getslice__(self, start, stop):
    """Retrieves the subset of items from between the specified indices."""
    return self._values[start:stop]

  def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __delslice__(self, start, stop):
    """Deletes the subset of items from between the specified indices."""
    del self._values[start:stop]
    self._message_listener.Modified()

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    return self._values == other._values
class ScalarMap(MutableMapping):
  """Type-checked, dict-like container for protobuf scalar map fields."""

  # No per-instance __dict__: only these attributes may exist.
  __slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener',
               '_entry_descriptor']

  def __init__(self, message_listener, key_checker, value_checker,
               entry_descriptor):
    """
    Args:
      message_listener: A MessageListener implementation.
        The ScalarMap will call this object's Modified() method when it
        is modified.
      key_checker: A type_checkers.ValueChecker instance to run on keys
        inserted into this container.
      value_checker: A type_checkers.ValueChecker instance to run on values
        inserted into this container.
      entry_descriptor: The MessageDescriptor of a map entry: key and value.
    """
    self._message_listener = message_listener
    self._key_checker = key_checker
    self._value_checker = value_checker
    self._entry_descriptor = entry_descriptor
    self._values = {}

  def __getitem__(self, key):
    # defaultdict-style access: a missing key is validated and inserted
    # with the value checker's default instead of raising KeyError.
    try:
      return self._values[key]
    except KeyError:
      checked = self._key_checker.CheckValue(key)
      default = self._value_checker.DefaultValue()
      self._values[checked] = default
      return default

  def __contains__(self, item):
    # Validate the key's type first, mirroring the strongly-typed flavor of
    # the API and the behavior of the C++ implementation.
    self._key_checker.CheckValue(item)
    return item in self._values

  def get(self, key, default=None):
    # Overridden explicitly: the defaultdict-like __getitem__ above would
    # make the inherited implementation insert the key as a side effect.
    if key in self:
      return self[key]
    return default

  def __setitem__(self, key, value):
    checked_key = self._key_checker.CheckValue(key)
    self._values[checked_key] = self._value_checker.CheckValue(value)
    self._message_listener.Modified()

  def __delitem__(self, key):
    del self._values[key]
    self._message_listener.Modified()

  def __len__(self):
    return len(self._values)

  def __iter__(self):
    return iter(self._values)

  def __repr__(self):
    return repr(self._values)

  def MergeFrom(self, other):
    """Copies every entry of *other* into this map, overwriting duplicates."""
    self._values.update(other._values)
    self._message_listener.Modified()

  def InvalidateIterators(self):
    # The only reliable way to invalidate live iterators over self._values
    # is to change the size of the dict they point at: swap in a copy and
    # grow the abandoned original.
    stale = self._values
    self._values = stale.copy()
    stale[None] = None

  def clear(self):
    # Defined in the abstract base, but this direct form is much cheaper
    # than the base's popitem() loop.
    self._values.clear()
    self._message_listener.Modified()

  def GetEntryClass(self):
    return self._entry_descriptor._concrete_class
class MessageMap(MutableMapping):
  """Simple, type-checked, dict-like container for holding submessage values."""

  # Disallows assignment to other attributes.
  __slots__ = ['_key_checker', '_values', '_message_listener',
               '_message_descriptor', '_entry_descriptor']

  def __init__(self, message_listener, message_descriptor, key_checker,
               entry_descriptor):
    """
    Args:
      message_listener: A MessageListener implementation.
        The MessageMap will call this object's Modified() method when it
        is modified.
      message_descriptor: The MessageDescriptor of the submessage type held
        as this map's values; its _concrete_class is instantiated on demand.
      key_checker: A type_checkers.ValueChecker instance to run on keys
        inserted into this container.
      entry_descriptor: The MessageDescriptor of a map entry: key and value.
    """
    self._message_listener = message_listener
    self._message_descriptor = message_descriptor
    self._key_checker = key_checker
    self._entry_descriptor = entry_descriptor
    self._values = {}

  def __getitem__(self, key):
    # defaultdict-like: a missing key is created with a default submessage.
    try:
      return self._values[key]
    except KeyError:
      key = self._key_checker.CheckValue(key)
      new_element = self._message_descriptor._concrete_class()
      new_element._SetListener(self._message_listener)
      self._values[key] = new_element
      self._message_listener.Modified()

      return new_element

  def get_or_create(self, key):
    """get_or_create() is an alias for getitem (ie. map[key]).

    Args:
      key: The key to get or create in the map.

    This is useful in cases where you want to be explicit that the call is
    mutating the map.  This can avoid lint errors for statements like this
    that otherwise would appear to be pointless statements:

      msg.my_map[key]
    """
    return self[key]

  # We need to override this explicitly, because our defaultdict-like behavior
  # will make the default implementation (from our base class) always insert
  # the key.
  def get(self, key, default=None):
    if key in self:
      return self[key]
    else:
      return default

  def __contains__(self, item):
    return item in self._values

  def __setitem__(self, key, value):
    # Submessages cannot be replaced wholesale; mutate them in place instead.
    raise ValueError('May not set values directly, call my_map[key].foo = 5')

  def __delitem__(self, key):
    del self._values[key]
    self._message_listener.Modified()

  def __len__(self):
    return len(self._values)

  def __iter__(self):
    return iter(self._values)

  def __repr__(self):
    return repr(self._values)

  def MergeFrom(self, other):
    for key in other:
      # According to documentation: "When parsing from the wire or when merging,
      # if there are duplicate map keys the last key seen is used".
      if key in self:
        del self[key]
      self[key].CopyFrom(other[key])
    # self._message_listener.Modified() not required here, because
    # mutations to submessages already propagate.

  def InvalidateIterators(self):
    # It appears that the only way to reliably invalidate iterators to
    # self._values is to ensure that its size changes.
    original = self._values
    self._values = original.copy()
    original[None] = None

  # This is defined in the abstract base, but we can do it much more cheaply.
  def clear(self):
    self._values.clear()
    self._message_listener.Modified()

  def GetEntryClass(self):
    return self._entry_descriptor._concrete_class
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
# F5 modules are not supported below Python 2.7; skip the whole module there.
if sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_vcmp_guest import (
ModuleParameters, ApiParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
# Directory holding the fixture files for these tests, plus a module-level
# cache so each fixture is read from disk at most once.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load (and cache) the fixture file *name* from the fixtures directory.

    Returns the parsed object when the file contains valid JSON, otherwise
    the raw file contents as a string.  Results are cached in fixture_data.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON — keep the raw text.  Catch only the parse error so
        # unrelated failures are not silently swallowed.
        pass
    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Unit tests for the ModuleParameters/ApiParameters adapter classes."""

    def test_module_parameters(self):
        params = ModuleParameters(params={
            'initial_image': 'BIGIP-12.1.0.1.0.1447-HF1.iso',
            'mgmt_network': 'bridged',
            'mgmt_address': '1.2.3.4/24',
            'vlans': ['vlan1', 'vlan2'],
        })

        assert params.initial_image == 'BIGIP-12.1.0.1.0.1447-HF1.iso'
        assert params.mgmt_network == 'bridged'

    def test_module_parameters_mgmt_bridged_without_subnet(self):
        # a bare address should be normalized to a /32 host address
        params = ModuleParameters(params={
            'mgmt_network': 'bridged',
            'mgmt_address': '1.2.3.4',
        })

        assert params.mgmt_network == 'bridged'
        assert params.mgmt_address == '1.2.3.4/32'

    def test_module_parameters_mgmt_address_cidr(self):
        params = ModuleParameters(params={
            'mgmt_network': 'bridged',
            'mgmt_address': '1.2.3.4/24',
        })

        assert params.mgmt_network == 'bridged'
        assert params.mgmt_address == '1.2.3.4/24'

    def test_module_parameters_mgmt_address_subnet(self):
        # a netmask-style suffix should be converted to CIDR notation
        params = ModuleParameters(params={
            'mgmt_network': 'bridged',
            'mgmt_address': '1.2.3.4/255.255.255.0',
        })

        assert params.mgmt_network == 'bridged'
        assert params.mgmt_address == '1.2.3.4/24'

    def test_module_parameters_mgmt_route(self):
        params = ModuleParameters(params={
            'mgmt_route': '1.2.3.4',
        })

        assert params.mgmt_route == '1.2.3.4'

    def test_module_parameters_vcmp_software_image_facts(self):
        # vCMP images may include a forward slash in their names. This is probably
        # related to the slots on the system, but it is not a valid value to specify
        # that slot when providing an initial image
        params = ModuleParameters(params={
            'initial_image': 'BIGIP-12.1.0.1.0.1447-HF1.iso/1',
        })

        assert params.initial_image == 'BIGIP-12.1.0.1.0.1447-HF1.iso/1'

    def test_api_parameters(self):
        params = ApiParameters(params={
            'initialImage': "BIGIP-tmos-tier2-13.1.0.0.0.931.iso",
            'managementGw': "2.2.2.2",
            'managementIp': "1.1.1.1/24",
            'managementNetwork': "bridged",
            'state': "deployed",
            'vlans': ["/Common/vlan1", "/Common/vlan2"],
        })

        assert params.initial_image == 'BIGIP-tmos-tier2-13.1.0.0.0.931.iso'
        assert params.mgmt_route == '2.2.2.2'
        assert params.mgmt_address == '1.1.1.1/24'
        assert '/Common/vlan1' in params.vlans
        assert '/Common/vlan2' in params.vlans

    def test_api_parameters_with_hotfix(self):
        params = ApiParameters(params={
            'initialImage': "BIGIP-14.1.0.3-0.0.6.iso",
            'initialHotfix': "Hotfix-BIGIP-14.1.0.3.0.5.6-ENG.iso",
            'managementGw': "2.2.2.2",
            'managementIp': "1.1.1.1/24",
            'managementNetwork': "bridged",
            'state': "deployed",
            'vlans': ["/Common/vlan1", "/Common/vlan2"],
        })

        assert params.initial_image == 'BIGIP-14.1.0.3-0.0.6.iso'
        assert params.initial_hotfix == 'Hotfix-BIGIP-14.1.0.3.0.5.6-ENG.iso'
        assert params.mgmt_route == '2.2.2.2'
        assert params.mgmt_address == '1.1.1.1/24'
        assert '/Common/vlan1' in params.vlans
        assert '/Common/vlan2' in params.vlans
class TestManager(unittest.TestCase):
    """Exercises ModuleManager end-to-end with all device I/O mocked out."""

    def setUp(self):
        """Patch every external touch point so exec_module() runs offline."""
        self.spec = ArgumentSpec()
        # patch time.sleep so the module's deploy polling loop runs instantly
        self.patcher1 = patch('time.sleep')
        self.patcher1.start()
        # pretend the requested base image already exists on the device
        self.p1 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_vcmp_guest.ModuleParameters.initial_image_exists')
        self.m1 = self.p1.start()
        self.m1.return_value = True
        # pretend the requested hotfix image exists as well
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_vcmp_guest.ModuleParameters.initial_hotfix_exists')
        self.m2 = self.p2.start()
        self.m2.return_value = True
        # stub the device version lookup and telemetry call
        self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_vcmp_guest.tmos_version')
        self.p4 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_vcmp_guest.send_teem')
        self.m3 = self.p3.start()
        self.m3.return_value = '14.1.0'
        self.m4 = self.p4.start()
        self.m4.return_value = True

    def tearDown(self):
        """Undo every patch started in setUp."""
        self.patcher1.stop()
        self.p1.stop()
        self.p2.stop()
        self.p3.stop()
        self.p4.stop()

    def test_create_vcmpguest(self, *args):
        """Creating a new guest reports changed=True and echoes its name."""
        set_module_args(dict(
            name="guest1",
            mgmt_network="bridged",
            mgmt_address="10.10.10.10/24",
            initial_image="BIGIP-13.1.0.0.0.931.iso",
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)
        # first poll says "not deployed yet", subsequent polls succeed
        mm.is_deployed = Mock(side_effect=[False, True, True, True, True])
        mm.deploy_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['name'] == 'guest1'

    def test_create_vcmpguest_with_hotfix(self, *args):
        """Creating a guest with base image plus hotfix also succeeds."""
        set_module_args(dict(
            name="guest2",
            mgmt_network="bridged",
            mgmt_address="10.10.10.10/24",
            initial_image="BIGIP-14.1.0.3-0.0.6.iso",
            initial_hotfix="Hotfix-BIGIP-14.1.0.3.0.5.6-ENG.iso",
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(return_value=False)
        mm.is_deployed = Mock(side_effect=[False, True, True, True, True])
        mm.deploy_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['name'] == 'guest2'
| |
# This file was edited from the Py_Shell.py found at
# http://www.daa.com.au/pipermail/pygtk/2003-October/006046.html
# Original author: Pier Carteri
# Modified by: Thiago Teixeira
#
# pyshell.py : inserts the python prompt in a gtk interface
#
import sys, code, os
import __builtin__
import gtk, gobject, pango
# these two must be same length
PS1 = ">>> "  # primary prompt shown at the start of each input line
PS2 = "... "  # continuation prompt; get_line() strips a fixed 4-char prefix
class Completer:
    """
    Taken from rlcompleter, with readline references stripped, and a local
    dictionary to use instead of the __main__ namespace.
    """

    def __init__(self, locals):
        # namespace that is searched (and eval'd against) for completions
        self.locals = locals

    def complete(self, text, state):
        """
        Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None. The completion should begin with 'text'.
        """
        if state == 0:
            # first call for this token: (re)compute the candidate list
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """
        Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names
        currently defined in self.locals that match.
        """
        import keyword
        matches = []
        n = len(text)
        # search keywords, builtins and the local namespace, in that order
        # (renamed loop variable: the original shadowed the builtin 'list')
        for words in [keyword.kwlist, __builtin__.__dict__.keys(), self.locals.keys()]:
            for word in words:
                if word[:n] == text and word != "__builtins__":
                    matches.append(word)
        return matches

    def attr_matches(self, text):
        """
        Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable against self.locals, it will be evaluated and its
        attributes (as revealed by dir()) are used as possible completions.
        (For class instances, class members are also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.  The eval below is acceptable
        only because this shell already executes arbitrary user code.
        """
        import re
        m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
        if not m:
            # bug fix: return an empty list (not None) so that complete()
            # can safely index self.matches without raising TypeError
            return []
        expr, attr = m.group(1, 3)
        obj = eval(expr, self.locals, self.locals)
        words = dir(obj)
        if hasattr(obj, '__class__'):
            words.append('__class__')
            words = words + get_class_members(obj.__class__)
        matches = []
        n = len(attr)
        for word in words:
            if word[:n] == attr and word != "__builtins__":
                matches.append("%s.%s" % (expr, word))
        return matches
def get_class_members(klass):
    """Recursively collect attribute names of *klass* and all its bases.

    The result may contain duplicates; callers treat it as a candidate pool.
    """
    members = list(dir(klass))
    for base in getattr(klass, '__bases__', ()):
        members += get_class_members(base)
    return members
class Dummy_File:
    """File-like object that redirects a stream into a gtk.TextBuffer,
    tagging everything it writes with a fixed text tag."""

    def __init__(self, buffer, tag):
        """Implements a file-like object to redirect the stream to the buffer"""
        self.buffer = buffer  # gtk.TextBuffer that receives the text
        self.tag = tag        # gtk.TextTag applied to every write

    def write(self, text):
        """Write text into the buffer and apply self.tag"""
        iter = self.buffer.get_end_iter()
        self.buffer.insert_with_tags(iter, text, self.tag)

    def writelines(self, l):
        """Write each element of *l* in order."""
        # explicit loop instead of map(): idiomatic for side effects and
        # guarantees the writes actually happen (map() is lazy on Python 3)
        for text in l:
            self.write(text)

    def flush(self):
        """No-op: writes go straight to the buffer."""
        pass

    def isatty(self):
        """Claim to be a terminal so interactive code keeps prompting."""
        return 1
class PopUp:
    """Borderless completion popup: a TreeView of candidate strings placed
    just under the text cursor of the owning TextView."""

    def __init__(self, text_view, token, list, position, n_chars):
        """Build and show the popup.

        text_view -- the gtk.TextView being completed
        token     -- the partial word the user typed (restored on cancel)
        list      -- candidate completions (sorted in place here)
        position  -- root-window origin of the shell widget
        n_chars   -- length of the longest candidate, used to size the popup
        """
        self.text_view = text_view
        self.token = token
        list.sort()
        self.list = list
        self.position = position
        self.popup = gtk.Window(gtk.WINDOW_POPUP)
        model = gtk.ListStore(gobject.TYPE_STRING)
        frame = gtk.Frame()
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        # fill the list model with one row per candidate
        for item in self.list:
            iter = model.append()
            model.set(iter, 0, item)
        self.list_view = gtk.TreeView(model)
        self.list_view.connect("row-activated", self.hide)
        self.list_view.set_property("headers-visible", False)
        selection = self.list_view.get_selection()
        #selection.connect("changed",self.select_row)
        selection.select_path((0,))
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn("",renderer,text = 0)
        self.list_view.append_column(column)
        sw.add(self.list_view)
        frame.add(sw)
        self.popup.add(frame)
        # set the width of the popup according with the length of the strings
        contest = self.popup.get_pango_context()
        desc = contest.get_font_description()
        lang = contest.get_language()
        metrics = contest.get_metrics(desc, lang)
        width = pango.PIXELS(metrics.get_approximate_char_width()*n_chars)
        if width > 80 : self.popup.set_size_request(width+80, 90)
        else : self.popup.set_size_request(160 , 90)
        self.show_popup()

    def hide(self, *arg):
        """Hide the popup window (also used as a GTK signal handler)."""
        self.popup.hide()

    def show_popup(self):
        """Position the popup just below the insertion cursor and show it."""
        buffer = self.text_view.get_buffer()
        iter = buffer.get_iter_at_mark(buffer.get_insert())
        rectangle = self.text_view.get_iter_location(iter)
        absX, absY = self.text_view.buffer_to_window_coords(gtk.TEXT_WINDOW_TEXT,
                                                            rectangle.x, #+rectangle.width+20 ,
                                                            rectangle.y+rectangle.height) #+20)
        parent = self.text_view.get_parent()
        self.popup.move(self.position[0]+absX, self.position[1]+absY)
        self.popup.show_all()

    def prev(self):
        """Move the selection one row up, stopping at the first row."""
        sel = self.list_view.get_selection()
        model, iter = sel.get_selected()
        newIter = model.get_path(iter)
        if newIter != None and newIter[0]>0:
            path = (newIter[0]-1,)
            self.list_view.set_cursor(path)

    def next(self):
        """Move the selection one row down, stopping at the last row."""
        sel = self.list_view.get_selection()
        model, iter = sel.get_selected()
        newIter = model.iter_next(iter)
        if newIter != None:
            path = model.get_path(newIter)
            self.list_view.set_cursor(path)

    def sel_confirmed(self):
        """Insert the currently selected candidate and close the popup."""
        sel = self.list_view.get_selection()
        self.select_row(sel)
        self.hide()

    def sel_canceled(self):
        """Restore the original partial token and close the popup."""
        self.set_text(self.token)
        self.hide()

    def select_row(self, selection):
        """Replace the partial token in the buffer with the selected row."""
        model, iter = selection.get_selected()
        name = model.get_value(iter, 0)
        self.set_text(name)

    def set_text(self, text):
        """Replace the word immediately before the cursor with *text*."""
        buffer = self.text_view.get_buffer()
        end = buffer.get_iter_at_mark(buffer.get_insert())
        start = end.copy()
        start.backward_char()
        # walk back to the start of the current word (delimiters below)
        while start.get_char() not in "\t ,()[]=": start.backward_char()
        start.forward_char()
        buffer.delete(start, end)
        iter = buffer.get_iter_at_mark(buffer.get_insert())
        buffer.insert(iter, text)
class Shell_Gui:
    """Embeds a Python interactive console in a gtk.TextView.

    Keystrokes are intercepted in key_press(); commands are executed by a
    code.InteractiveConsole, with stdout/stderr temporarily redirected into
    the text buffer via Dummy_File objects.
    """

    def __init__(self, with_window = 1, localscope = {}):
        # NOTE(review): mutable default argument -- every Shell_Gui built
        # without an explicit localscope shares the same dict; confirm intent.
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        sw.set_shadow_type(gtk.SHADOW_IN)
        t_table = gtk.TextTagTable()
        # create two tags: one for std_err and one for std_out
        tag_err = gtk.TextTag("error")
        tag_err.set_property("foreground","red")
        tag_err.set_property("font", "monospace")
        t_table.add(tag_err)
        tag_out = gtk.TextTag("output")
        tag_out.set_property("foreground","dark green")
        tag_out.set_property("font", "monospace")
        t_table.add(tag_out)
        # anything tagged no_edit (prompts, past output) cannot be modified
        tag_no_edit = gtk.TextTag("no_edit")
        tag_no_edit.set_property("editable", False)
        t_table.add(tag_no_edit)
        self.buffer = gtk.TextBuffer(t_table)
        self.buffer.set_text(PS1)
        start, end = self.buffer.get_bounds()
        self.buffer.apply_tag_by_name("output" , start, end)
        self.buffer.apply_tag_by_name("no_edit", start, end)
        self.view = gtk.TextView()
        self.view.set_buffer(self.buffer)
        self.view.connect("key_press_event", self.key_press)
        self.view.connect("drag_data_received",self.drag_data_received)
        self.view.set_wrap_mode(gtk.WRAP_CHAR)
        fontdesc = pango.FontDescription("monospace")
        self.view.modify_font(fontdesc)
        sw.add(self.view)
        # creates two dummy files
        self.dummy_err = Dummy_File(self.buffer,tag_err)
        self.dummy_out = Dummy_File(self.buffer,tag_out)
        # creates the console
        self.core = code.InteractiveConsole(locals = localscope)
        self.localscope = localscope
        # autocompletation capabilities
        self.completer = Completer(self.core.locals)
        self.popup = None
        # creates history capabilities
        self.history = [""]
        self.history_pos = 0
        if with_window:
            # add buttons
            b_box = gtk.Toolbar()
            b_box.set_orientation(gtk.ORIENTATION_HORIZONTAL)
            b_box.set_style(gtk.TOOLBAR_ICONS)
            b_box.insert_stock(gtk.STOCK_CLEAR,"Clear the output", None, self.clear_or_reset, None,-1)
            b_box.insert_stock(gtk.STOCK_SAVE,"Save the output", None, self.save_text, None,-1)
            box = gtk.VBox()
            box.pack_start(sw, expand = True, fill = True)
            box.pack_start(b_box, expand = False, fill = True)
            # frame = gtk.Frame(label_text)
            # frame.show_all()
            # frame.add(box)
            self.gui = gtk.Window()
            self.gui.add(box)
            #self.gui.add(sw)
            self.gui.connect("destroy",self.quit)
            self.gui.set_default_size(520,200)
            self.gui.show_all()
        else:
            # embedded mode: expose just the scrolled text area
            self.gui = sw
            self.gui.show_all()

    def key_press(self, view, event):
        """Central keyboard handler: drives the completion popup when it is
        visible, otherwise implements history, indent, autocomplete and
        command execution.  Returns True to stop GTK's default handling."""
        # if autocomplete popup is showing
        if self.popup != None:
            # key: up
            # action: next suggestion
            if event.keyval == gtk.keysyms.Up:
                self.popup.prev()
                return True
            # key: down
            # action: previous suggestion
            elif event.keyval == gtk.keysyms.Down:
                self.popup.next()
                return True
            # TODO: add pgup/pgdown/home/end
            # key: return
            # action: accept suggestion
            elif event.keyval == gtk.keysyms.Return:
                self.popup.sel_confirmed()
                self.popup = None
                return True
            # key: escape
            # action: cancel autocomplete
            elif event.keyval == gtk.keysyms.Escape:
                self.popup.sel_canceled()
                self.popup = None
                return True
            # key: any legal variable naming character
            # action: update suggestions
            elif ord('A') <= event.keyval <= ord('Z') or\
                 ord('a') <= event.keyval <= ord('z') or\
                 ord('0') <= event.keyval <= ord('9') or\
                 ord('_') == event.keyval:
                self.popup.hide()
                self.popup = None
                self.buffer.insert_at_cursor(unichr(event.keyval))
                self.complete_text(forcepopup=True)
                return True
            # key: anithing else
            # action: hide suggestions
            else:
                self.popup.hide()
                self.popup = None
                # printable characters
                if ord(' ') <= event.keyval <= ord('~'):
                    self.buffer.insert_at_cursor(unichr(event.keyval))
                return True
        # if autocomplete popup is not being shown
        else:
            # key: Up
            # action: move up or history
            if event.keyval == gtk.keysyms.Up:
                # last_line = self.buffer.get_end_iter().get_line()
                # cur_line = self.buffer.get_iter_at_mark(self.buffer.get_insert()).get_line()
                #
                # if last_line == cur_line:
                if self.history_pos >= 0:
                    # remove text into the line...
                    end = self.buffer.get_end_iter()
                    start = self.buffer.get_iter_at_line(end.get_line())
                    start.forward_chars(len(PS1))
                    self.buffer.delete(start,end)
                    # insert the new text
                    pos = self.buffer.get_end_iter()
                    self.buffer.insert(pos, self.history[self.history_pos])
                    self.history_pos -= 1
                else:
                    gtk.gdk.beep()
                self.view.emit_stop_by_name("key-press-event")
                return True
            # key: Down
            # action: move down or history
            elif event.keyval == gtk.keysyms.Down:
                # last_line = self.buffer.get_end_iter().get_line()
                # cur_line = self.buffer.get_iter_at_mark(self.buffer.get_insert()).get_line()
                #
                # if last_line == cur_line:
                # NOTE(review): when history_pos == len(history)-1 the
                # increment below indexes one past the end -- IndexError?
                if self.history_pos <= len(self.history)-1:
                    # remove text into the line...
                    end = self.buffer.get_end_iter()
                    start = self.buffer.get_iter_at_line(end.get_line())
                    start.forward_chars(len(PS1))
                    self.buffer.delete(start,end)
                    # insert the new text
                    pos = self.buffer.get_end_iter()
                    self.history_pos += 1
                    self.buffer.insert(pos, self.history[self.history_pos])
                else:
                    gtk.gdk.beep()
                self.view.emit_stop_by_name("key-press-event")
                return True
            # key: Tab
            # action: indent or autocomplete
            elif event.keyval == gtk.keysyms.Tab:
                iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
                iter.backward_char()
                # Tab after whitespace indents; after a word it completes
                just_add_tab = iter.get_char() in '\t\n '
                iter.forward_char()
                if just_add_tab: self.buffer.insert(iter, '\t')
                else: self.complete_text()
                return True
            # key: Shift+Tab
            # action: remove one level of indent
            elif event.keyval == gtk.keysyms.ISO_Left_Tab:
                start = self.buffer.get_iter_at_mark(self.buffer.get_insert())
                if start.get_line_offset() <= len(PS1): return True
                start.set_line_offset(len(PS1))
                end = start.copy()
                end.forward_char()
                self.buffer.delete(start, end)
                return True
            # key: Return
            # action: execute
            elif event.keyval == gtk.keysyms.Return:
                command = self.get_line()
                # leading or trailing '?' is shorthand for help(...)
                if len(command) > 0 and command[0] == '?':
                    command = 'help(%s)' % command[1:]
                elif len(command) > 0 and command[-1] == '?':
                    command = 'help(%s)' % command[:-1]
                self.exec_code(command)
                start,end = self.buffer.get_bounds()
                self.buffer.apply_tag_by_name("no_edit",start,end)
                self.buffer.place_cursor(end)
                return True
            # key: Ctrl+space
            # action: autocomplete
            elif event.keyval == gtk.keysyms.space and event.state & gtk.gdk.CONTROL_MASK:
                self.complete_text()
                return True
            # key: Home
            # action: go to beginning of line, but stop at '>>>'
            elif event.keyval == gtk.keysyms.Home and not (event.state & gtk.gdk.SHIFT_MASK):
                last_line = self.buffer.get_end_iter().get_line()
                cur_line = self.buffer.get_iter_at_mark(self.buffer.get_insert()).get_line()
                if last_line == cur_line:
                    iter = self.buffer.get_iter_at_line(cur_line)
                    iter.forward_chars(4)
                    self.buffer.place_cursor(iter)
                    self.view.emit_stop_by_name("key-press-event")
                    return True
            # key: Left
            # action: move left, but stop at '>>>'
            elif event.keyval == gtk.keysyms.Left:
                last_line = self.buffer.get_end_iter().get_line()
                cur_pos = self.buffer.get_iter_at_mark(self.buffer.get_insert())
                cur_line = cur_pos.get_line()
                if last_line == cur_line:
                    if cur_pos.get_line_offset() == 4:
                        self.view.emit_stop_by_name("key-press-event")
                        return True
            # # key: Ctrl + D
            # # action: restart the python console
            # elif (event.keyval == gtk.keysyms.d and event.state & gtk.gdk.CONTROL_MASK):
            #
            #     self.buffer.set_text(PS1)
            #     start,end = self.buffer.get_bounds()
            #     self.buffer.apply_tag_by_name("output",start,end)
            #     self.buffer.apply_tag_by_name("no_edit",start,end)
            #
            #     # creates the console
            #     self.core = code.InteractiveConsole(locals = self.localscope)
            #
            #     # reset history
            #     self.history = [""]
            #     self.history_pos = 0

    def clear_or_reset(self,*widget):
        """Toolbar handler: ask whether to clear the output or reset the shell."""
        dlg = gtk.Dialog("Clear")
        dlg.add_button("Clear",1)
        dlg.add_button("Reset",2)
        dlg.add_button(gtk.STOCK_CLOSE,gtk.RESPONSE_CLOSE)
        dlg.set_default_size(250,150)
        hbox = gtk.HBox()
        # add an image
        img = gtk.Image()
        img.set_from_stock(gtk.STOCK_CLEAR, gtk.ICON_SIZE_DIALOG)
        hbox.pack_start(img)
        # add text
        text = "You have two options:\n"
        text += " - clear only the output window\n"
        text += " - reset the shell\n"
        text += "\n What do you want to do?"
        label = gtk.Label(text)
        hbox.pack_start(label)
        hbox.show_all()
        dlg.vbox.pack_start(hbox)
        ans = dlg.run()
        dlg.hide()
        if ans == 1 : self.clear_text()
        elif ans == 2 : self.reset_shell()

    def clear_text(self):
        """Blank the buffer back to a fresh prompt; keep interpreter state."""
        self.buffer.set_text(PS1)
        start,end = self.buffer.get_bounds()
        self.buffer.apply_tag_by_name("output",start,end)
        self.buffer.apply_tag_by_name("no_edit",start,end)
        self.view.grab_focus()

    def reset_shell(self):
        """Blank the buffer AND restart the interpreter and history."""
        self.buffer.set_text(PS1)
        start,end = self.buffer.get_bounds()
        self.buffer.apply_tag_by_name("output",start,end)
        self.buffer.apply_tag_by_name("no_edit",start,end)
        # creates the console
        self.core = code.InteractiveConsole(locals = self.localscope)
        # reset history
        self.history = [""]
        self.history_pos = 0

    def save_text(self, *widget):
        """Toolbar handler: save either the command history or the whole
        buffer to a file chosen via a gtk.FileSelection dialog."""
        dlg = gtk.Dialog("Save to file")
        dlg.add_button("Commands",1)
        dlg.add_button("All",2)
        dlg.add_button(gtk.STOCK_CLOSE,gtk.RESPONSE_CLOSE)
        dlg.set_default_size(250,150)
        hbox = gtk.HBox()
        #add an image
        img = gtk.Image()
        img.set_from_stock(gtk.STOCK_SAVE, gtk.ICON_SIZE_DIALOG)
        hbox.pack_start(img)
        #add text
        text = "You have two options:\n"
        text += " -save only commands\n"
        text += " -save all\n"
        text += "\n What do you want to save?"
        label = gtk.Label(text)
        hbox.pack_start(label)
        hbox.show_all()
        dlg.vbox.pack_start(hbox)
        ans = dlg.run()
        dlg.hide()
        if ans == 1 :
            # save only the command history, one command per line
            def ok_save(button, data = None):
                win =button.get_toplevel()
                win.hide()
                name = win.get_filename()
                if os.path.isfile(name):
                    # confirm before overwriting an existing file
                    box = gtk.MessageDialog(dlg,
                                            gtk.DIALOG_DESTROY_WITH_PARENT,
                                            gtk.MESSAGE_QUESTION,gtk.BUTTONS_YES_NO,
                                            name+" already exists; do you want to overwrite it?"
                                            )
                    ans = box.run()
                    box.hide()
                    if ans == gtk.RESPONSE_NO: return
                try:
                    file = open(name,'w')
                    for i in self.history:
                        file.write(i)
                        file.write("\n")
                    file.close()
                except Exception, x:
                    box = gtk.MessageDialog(dlg,
                                            gtk.DIALOG_DESTROY_WITH_PARENT,
                                            gtk.MESSAGE_ERROR,gtk.BUTTONS_CLOSE,
                                            "Unable to write \n"+
                                            name+"\n on disk \n\n%s"%(x)
                                            )
                    box.run()
                    box.hide()
            def cancel_button(button):
                # NOTE(review): get_toplevel() result is discarded; relies on
                # the closed-over 'win' from the enclosing scope -- confirm.
                win.get_toplevel()
                win.hide()
            win = gtk.FileSelection("Save Commands...")
            win.ok_button.connect_object("clicked", ok_save,win.ok_button)
            win.cancel_button.connect_object("clicked", cancel_button,win.cancel_button)
            win.show()
        elif ans == 2:
            # save the entire buffer contents (prompts, commands and output)
            def ok_save(button, data = None):
                win =button.get_toplevel()
                win.hide()
                name = win.get_filename()
                if os.path.isfile(name):
                    box = gtk.MessageDialog(dlg,
                                            gtk.DIALOG_DESTROY_WITH_PARENT,
                                            gtk.MESSAGE_QUESTION,gtk.BUTTONS_YES_NO,
                                            name+" already exists; do you want to overwrite it?"
                                            )
                    ans = box.run()
                    box.hide()
                    if ans == gtk.RESPONSE_NO:
                        return
                try:
                    start,end = self.buffer.get_bounds()
                    text = self.buffer.get_text(start,end,0)
                    file = open(name,'w')
                    file.write(text)
                    file.close()
                except Exception, x:
                    box = gtk.MessageDialog(dlg,
                                            gtk.DIALOG_DESTROY_WITH_PARENT,
                                            gtk.MESSAGE_ERROR,gtk.BUTTONS_CLOSE,
                                            "Unable to write \n"+
                                            name+"\n on disk \n\n%s"%(x)
                                            )
                    box.run()
                    box.hide()
            def cancel_button(button):
                win.get_toplevel()
                win.hide()
            win = gtk.FileSelection("Save Log...")
            win.ok_button.connect_object("clicked", ok_save,win.ok_button)
            win.cancel_button.connect_object("clicked", cancel_button,win.cancel_button)
            win.show()
        dlg.destroy()
        self.view.grab_focus()

    def get_line(self):
        """Return the text of the line under the cursor, minus the prompt."""
        iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
        line = iter.get_line()
        start = self.buffer.get_iter_at_line(line)
        end = start.copy()
        end.forward_line()
        command = self.buffer.get_text(start,end,0)
        # strip the 4-character prompt (PS1 and PS2 are the same length)
        if (command[:4] == PS1 or command[:4] == PS2):
            command = command[4:]
        return command

    def complete_text(self, forcepopup=False):
        """Complete the token before the cursor: insert it directly when
        unique, otherwise (or when forcepopup) show the PopUp chooser."""
        end = self.buffer.get_iter_at_mark(self.buffer.get_insert())
        start = end.copy()
        start.backward_char()
        # walk back to the start of the current token
        while start.get_char() not in "\t ,()[]=": start.backward_char()
        start.forward_char()
        token = self.buffer.get_text(start,end,0).strip()
        completions = []
        # NOTE(review): bare except silently aborts completion on any error
        try:
            p = self.completer.complete(token,len(completions))
            while p != None:
                completions.append(p)
                p = self.completer.complete(token, len(completions))
        except: return
        # avoid duplicate items in 'completions'
        tmp = {}
        n_chars = 0
        for item in completions:
            dim = len(item)
            if dim>n_chars: n_chars = dim
            tmp[item] = None
        completions = tmp.keys()
        if len(completions) > 1 or forcepopup:
            # show a popup
            if isinstance(self.gui, gtk.ScrolledWindow):
                rect = self.gui.get_allocation()
                app = self.gui.window.get_root_origin()
                position = (app[0]+rect.x,app[1]+rect.y)
            else:
                position = self.gui.window.get_root_origin()
            self.popup = PopUp(self.view, token, completions, position, n_chars)
        elif len(completions) == 1:
            # single match: insert the completion tail in place
            self.buffer.delete(start,end)
            iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
            self.buffer.insert(iter,completions[0])

    def replace_line(self, text):
        """Replace everything after the prompt on the cursor's line with *text*."""
        iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
        line = iter.get_line()
        start = self.buffer.get_iter_at_line(line)
        start.forward_chars(4)
        end = start.copy()
        end.forward_line()
        self.buffer.delete(start,end)
        iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
        self.buffer.insert(iter, text)

    def redirectstd(self):
        """switch stdin stdout stderr to my dummy files"""
        self.std_out_saved = sys.stdout
        self.std_err_saved = sys.stderr
        sys.stdout = self.dummy_out
        sys.stderr = self.dummy_err

    def restorestd(self):
        """switch my dummy files to stdin stdout stderr """
        sys.stdout = self.std_out_saved
        sys.stderr = self.std_err_saved

    def drag_data_received(self, source, drag_context, n1, n2, selection_data, long1, long2):
        """Debug hook for DnD: dump the dropped payload to stdout."""
        print selection_data.data

    def exec_code(self, text):
        """Execute text into the console and display the output into TextView"""
        # update history
        self.history.append(text)
        self.history_pos = len(self.history)-1
        self.redirectstd()
        sys.stdout.write("\n")
        # push() returns true while more input is needed (continuation)
        action = self.core.push(text)
        if action == 0:
            sys.stdout.write(PS1)
        elif action == 1:
            sys.stdout.write(PS2)
        self.restorestd()
        self.view.scroll_mark_onscreen(self.buffer.get_insert())

    def quit(self,*args):
        """Window-destroy handler: quit GTK when standalone, else just hide."""
        if __name__ == '__main__':
            gtk.main_quit()
        else:
            if self.popup != None:
                self.popup.hide()
            self.gui.hide()
if __name__ == '__main__':
    # run standalone: open the shell in its own top-level window
    shell = Shell_Gui(with_window = 1)
    gtk.main()
| |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from pathlib import Path
import neurom.geom.transform as gtr
import numpy as np
from neurom import load_neuron
from neurom.features import neuritefunc as _nf
from nose import tools as nt
# arbitrary unit vector and angle reused as rotation fixtures below
TEST_UVEC = np.array([0.01856633, 0.37132666, 0.92831665])
TEST_ANGLE = np.pi / 3.
# sample morphology files shipped with the repository's test data
DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data'
H5_NRN_PATH = DATA_PATH / 'h5/v1/Neuron.h5'
SWC_NRN_PATH = DATA_PATH / 'swc/Neuron.swc'
def _Rx(angle):
sn = np.sin(angle)
cs = np.cos(angle)
return np.array([[1., 0., 0.],
[0., cs, -sn],
[0., sn, cs]])
def _Ry(angle):
sn = np.sin(angle)
cs = np.cos(angle)
return np.array([[cs, 0., sn],
[0., 1., 0.],
[-sn, 0., cs]])
def _Rz(angle):
sn = np.sin(angle)
cs = np.cos(angle)
return np.array([[cs, -sn, 0.],
[sn, cs, 0.],
[0., 0., 1.]])
@nt.raises(NotImplementedError)
def test_not_implemented_transform_call_raises():
    """Calling a Transform3D subclass that lacks __call__ must raise."""
    class Incomplete(gtr.Transform3D):
        pass
    Incomplete()([1, 2, 3])
@nt.raises(NotImplementedError)
def test_translate_bad_type_raises():
    """translate() rejects objects it does not know how to move."""
    not_translatable = "hello"
    gtr.translate(not_translatable, [1, 2, 3])
@nt.raises(NotImplementedError)
def test_rotate_bad_type_raises():
    """rotate() rejects objects it does not know how to rotate."""
    not_rotatable = "hello"
    gtr.rotate(not_rotatable, [1, 0, 0], math.pi)
def test_translate_point():
    """A Translation offsets each coordinate of a single 3-vector."""
    shift = gtr.Translation([100, -100, 100])
    nt.assert_equal(shift([1, 2, 3]).tolist(), [101, -98, 103])
def test_translate_points():
    """A Translation offsets every row of a point array."""
    shift = gtr.Translation([100, -100, 100])
    pts = np.array([[1, 2, 3], [11, 22, 33], [111, 222, 333]])
    expected = np.array([[101, -98, 103],
                         [111, -78, 133],
                         [211, 122, 433]])
    nt.assert_true(np.all(shift(pts) == expected))
# rotation matrices about the z axis by 90, 180 and 270 degrees
ROT_90 = np.array([[0, -1, 0],
                   [1, 0, 0],
                   [0, 0, 1]])
ROT_180 = np.array([[-1, 0, 0],
                    [0, -1, 0],
                    [0, 0, 1]])
ROT_270 = np.array([[0, 1, 0],
                    [-1, 0, 0],
                    [0, 0, 1]])
def test_rotate_point():
    """Single points map correctly under 90/180/270-degree z rotations."""
    cases = [
        (ROT_90, [0, 2, 0], [-2, 0, 0]),
        (ROT_180, [-2, 0, 0], [0, -2, 0]),
        (ROT_270, [0, -2, 0], [2, 0, 0]),
    ]
    for matrix, expect_x, expect_y in cases:
        rot = gtr.Rotation(matrix)
        nt.assert_equal(rot([2, 0, 0]).tolist(), expect_x)
        nt.assert_equal(rot([0, 2, 0]).tolist(), expect_y)
        # the z axis is invariant under every z rotation
        nt.assert_equal(rot([0, 0, 2]).tolist(), [0, 0, 2])
def test_rotate_points():
    """Point arrays map correctly under 90/180/270-degree z rotations."""
    pts = np.array([[2, 0, 0],
                    [0, 2, 0],
                    [0, 0, 2],
                    [3, 0, 3]])
    cases = [
        (ROT_90, [[0, 2, 0], [-2, 0, 0], [0, 0, 2], [0, 3, 3]]),
        (ROT_180, [[-2, 0, 0], [0, -2, 0], [0, 0, 2], [-3, 0, 3]]),
        (ROT_270, [[0, -2, 0], [2, 0, 0], [0, 0, 2], [0, -3, 3]]),
    ]
    for matrix, expected in cases:
        nt.assert_true(np.all(gtr.Rotation(matrix)(pts) == np.array(expected)))
def test_pivot_rotate_point():
    """PivotRotation == translate-to-pivot, rotate, translate-back (point)."""
    point = [1, 2, 3]
    pivot = np.array([10., 45., 50.])
    dcm = gtr._rodrigues_to_dcm(TEST_UVEC, np.pi)
    # composed transform: rotate 180 degrees about an axis through 'pivot'
    composed = gtr.PivotRotation(dcm, pivot)(point)
    # same thing spelled out step by step
    manual = gtr.Translation(pivot * -1)(point)
    manual = gtr.Rotation(dcm)(manual)
    manual = gtr.Translation(pivot)(manual)
    nt.assert_equal(composed.tolist(), manual.tolist())
def test_pivot_rotate_points():
    """PivotRotation == translate-to-pivot, rotate, translate-back (array)."""
    pts = np.array([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9],
                    [10, 11, 12]])
    pivot = np.array([10., 45., 50.])
    dcm = gtr._rodrigues_to_dcm(TEST_UVEC, np.pi)
    # composed transform: rotate 180 degrees about an axis through 'pivot'
    composed = gtr.PivotRotation(dcm, pivot)(pts)
    # same thing spelled out step by step
    manual = gtr.Translation(pivot * -1)(pts)
    manual = gtr.Rotation(dcm)(manual)
    manual = gtr.Translation(pivot)(manual)
    nt.assert_true(np.all(composed == manual))
def _check_fst_nrn_translate(nrn_a, nrn_b, t):
    """Assert nrn_b equals nrn_a translated by offset t (soma and neurites)."""
    soma_delta = nrn_b.soma.points[:, 0:3] - nrn_a.soma.points[:, 0:3]
    nt.assert_true(np.allclose(soma_delta, t))
    _check_fst_neurite_translate(nrn_a.neurites, nrn_b.neurites, t)
def _check_fst_neurite_translate(nrts_a, nrts_b, t):
    """Assert every section of nrts_b is the matching nrts_a section + t."""
    pairs = zip(_nf.iter_sections(nrts_a), _nf.iter_sections(nrts_b))
    for sec_a, sec_b in pairs:
        delta = sec_b.points[:, 0:3] - sec_a.points[:, 0:3]
        nt.assert_true(np.allclose(delta, t))
def test_translate_fst_neuron_swc():
    """gtr.translate() shifts every point of an SWC neuron."""
    offset = np.array([100., 100., 100.])
    neuron = load_neuron(SWC_NRN_PATH)
    _check_fst_nrn_translate(neuron, gtr.translate(neuron, offset), offset)
def test_translate_fst_neurite_swc():
    """gtr.translate() shifts every point of a single SWC neurite."""
    offset = np.array([100., 100., 100.])
    neurite = load_neuron(SWC_NRN_PATH).neurites[0]
    shifted = gtr.translate(neurite, offset)
    _check_fst_neurite_translate(neurite, shifted, offset)
def test_transform_translate_neuron_swc():
    """Neuron.transform(Translation) shifts every point (SWC)."""
    offset = np.array([100., 100., 100.])
    neuron = load_neuron(SWC_NRN_PATH)
    shifted = neuron.transform(gtr.Translation(offset))
    _check_fst_nrn_translate(neuron, shifted, offset)
def test_translate_fst_neuron_h5():
    """gtr.translate() shifts every point of an H5 neuron."""
    offset = np.array([100., 100., 100.])
    neuron = load_neuron(H5_NRN_PATH)
    _check_fst_nrn_translate(neuron, gtr.translate(neuron, offset), offset)
def test_translate_fst_neurite_h5():
    """gtr.translate() shifts every point of a single H5 neurite."""
    offset = np.array([100., 100., 100.])
    neurite = load_neuron(H5_NRN_PATH).neurites[0]
    shifted = gtr.translate(neurite, offset)
    _check_fst_neurite_translate(neurite, shifted, offset)
def test_transform_translate_neuron_h5():
    """Neuron.transform(Translation) shifts every point (H5)."""
    offset = np.array([100., 100., 100.])
    neuron = load_neuron(H5_NRN_PATH)
    shifted = neuron.transform(gtr.Translation(offset))
    _check_fst_nrn_translate(neuron, shifted, offset)
def _apply_rot(points, rot_mat):
return np.dot(rot_mat, np.array(points).T).T
def _check_fst_nrn_rotate(nrn_a, nrn_b, rot_mat):
    """Assert nrn_b equals nrn_a rotated by rot_mat (soma and neurites)."""
    rotated_soma = _apply_rot(nrn_a.soma.points[:, 0:3], rot_mat)
    nt.assert_true(np.allclose(rotated_soma, nrn_b.soma.points[:, 0:3]))
    # neurite sections
    _check_fst_neurite_rotate(nrn_a.neurites, nrn_b.neurites, rot_mat)
def _check_fst_neurite_rotate(nrt_a, nrt_b, rot_mat):
    """Assert every section of nrt_b is the matching nrt_a section rotated."""
    pairs = zip(_nf.iter_sections(nrt_a), _nf.iter_sections(nrt_b))
    for sec_a, sec_b in pairs:
        rotated = _apply_rot(sec_a.points[:, 0:3], rot_mat)
        nt.assert_true(np.allclose(sec_b.points[:, 0:3], rotated))
def test_rotate_neuron_swc():
    """gtr.rotate() rotates a whole SWC neuron by 90 degrees about z."""
    neuron = load_neuron(SWC_NRN_PATH)
    rotated = gtr.rotate(neuron, [0, 0, 1], math.pi/2.0)
    dcm = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
    _check_fst_nrn_rotate(neuron, rotated, dcm)
def test_rotate_neurite_swc():
    """gtr.rotate() rotates a single SWC neurite by 90 degrees about z."""
    neurite = load_neuron(SWC_NRN_PATH).neurites[0]
    rotated = gtr.rotate(neurite, [0, 0, 1], math.pi/2.0)
    dcm = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
    _check_fst_neurite_rotate(neurite, rotated, dcm)
def test_transform_rotate_neuron_swc():
    """Neuron.transform(Rotation) rotates every point (SWC)."""
    neuron = load_neuron(SWC_NRN_PATH)
    rotated = neuron.transform(gtr.Rotation(ROT_90))
    _check_fst_nrn_rotate(neuron, rotated, ROT_90)
def test_rotate_neuron_h5():
    """gtr.rotate() rotates a whole H5 neuron by 90 degrees about z."""
    neuron = load_neuron(H5_NRN_PATH)
    rotated = gtr.rotate(neuron, [0, 0, 1], math.pi/2.0)
    dcm = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
    _check_fst_nrn_rotate(neuron, rotated, dcm)
def test_rotate_neurite_h5():
    """gtr.rotate() rotates a single H5 neurite by 90 degrees about z."""
    neurite = load_neuron(H5_NRN_PATH).neurites[0]
    rotated = gtr.rotate(neurite, [0, 0, 1], math.pi/2.0)
    dcm = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)
    _check_fst_neurite_rotate(neurite, rotated, dcm)
def test_transform_rotate_neuron_h5():
    """Neuron.transform(Rotation) rotates every point (H5)."""
    neuron = load_neuron(H5_NRN_PATH)
    rotated = neuron.transform(gtr.Rotation(ROT_90))
    _check_fst_nrn_rotate(neuron, rotated, ROT_90)
def test_rodrigues_to_dcm():
    """_rodrigues_to_dcm builds proper rotation matrices from axis/angle."""
    expected = np.array([[0.50017235, -0.80049871, 0.33019604],
                         [0.80739289, 0.56894174, 0.15627544],
                         [-0.3129606, 0.18843328, 0.9308859]])
    dcm = gtr._rodrigues_to_dcm(TEST_UVEC, TEST_ANGLE)
    # proper rotation matrix: unit determinant and R^-1 == R^T
    nt.assert_almost_equal(np.linalg.det(dcm), 1.)
    nt.assert_true(np.allclose(np.linalg.inv(dcm), dcm.transpose()))
    # matches the precomputed reference matrix
    nt.assert_true(np.allclose(dcm, expected))
    # negating the angle must yield the inverse rotation
    inverse = gtr._rodrigues_to_dcm(TEST_UVEC, -TEST_ANGLE)
    nt.assert_true(np.allclose(np.dot(inverse, dcm), np.identity(3)))
    # axis-aligned rotations agree with the closed-form Rx/Ry/Rz matrices
    axis_cases = [(np.array([1., 0., 0.]), _Rx),
                  (np.array([0., 1., 0.]), _Ry),
                  (np.array([0., 0., 1.]), _Rz)]
    for angle in np.linspace(0., 2. * np.pi, 10):
        for axis, reference in axis_cases:
            nt.assert_true(np.allclose(gtr._rodrigues_to_dcm(axis, angle),
                                       reference(angle)))
| |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.directtools.DirectGeometry import LineNodePath
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.fishing import FishGlobals
from toontown.shtiker import FishPage
from toontown.toonbase import TTLocalizer
from toontown.quest import Quests
from direct.actor import Actor
from direct.showutil import Rope
import math
from direct.task.Task import Task
import random
import random
from toontown.fishing import FishingTargetGlobals
from toontown.fishing import FishBase
from toontown.fishing import FishPanel
from toontown.effects import Ripples
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownTimer
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.hood import ZoneUtil
from toontown.toontowngui import TeaserPanel
class DistributedFishingSpot(DistributedObject.DistributedObject):
    # Client-side distributed object for a single fishing spot at a pond.
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedFishingSpot')
    # Scale factors for the cast: moveBobTask multiplies these by self.power
    # to get the bob's launch speed and launch angle.
    vZeroMax = 25.0
    angleMax = 30.0
    def __init__(self, cr):
        """Initialize client state and build the fishing FSM.

        The `fishInit` guard makes construction idempotent in case the
        object is constructed more than once.
        """
        if hasattr(self, 'fishInit'):
            return
        self.fishInit = 1
        DistributedObject.DistributedObject.__init__(self, cr)
        # Last avatar that occupied the spot, used to debounce sphere entry.
        self.lastAvId = 0
        self.lastFrame = 0
        self.avId = 0
        self.av = None
        self.placedAvatar = 0
        # Nonzero while the local toon is the one fishing at this spot.
        self.localToonFishing = 0
        self.nodePath = None
        self.collSphere = None
        self.collNode = None
        self.collNodePath = None
        self.castTrack = None
        self.pond = None
        self.guiTrack = None
        self.madeGui = 0
        self.castGui = None
        self.itemGui = None
        # Fishing pole actor, line rope, and bob model (lazily loaded).
        self.pole = None
        self.line = None
        self.poleNode = []
        self.ptop = None
        self.bob = None
        self.bobBobTask = None
        self.splashSounds = None
        self.ripples = None
        self.line = None
        self.lineSphere = None
        # Cast strength in [0, 1], driven by mouse drag distance.
        self.power = 0.0
        self.startAngleNP = 0
        self.firstCast = 1
        self.fishPanel = None
        # State machine covering the full spot lifecycle, from waiting
        # through (local or remote) casting, fishing, reward and exit.
        self.fsm = ClassicFSM.ClassicFSM('DistributedFishingSpot', [State.State('off', self.enterOff, self.exitOff, ['waiting',
          'distCasting',
          'fishing',
          'reward',
          'leaving']),
         State.State('waiting', self.enterWaiting, self.exitWaiting, ['localAdjusting',
          'distCasting',
          'leaving',
          'sellFish']),
         State.State('localAdjusting', self.enterLocalAdjusting, self.exitLocalAdjusting, ['localCasting', 'leaving']),
         State.State('localCasting', self.enterLocalCasting, self.exitLocalCasting, ['localAdjusting', 'fishing', 'leaving']),
         State.State('distCasting', self.enterDistCasting, self.exitDistCasting, ['fishing', 'leaving', 'reward']),
         State.State('fishing', self.enterFishing, self.exitFishing, ['localAdjusting',
          'distCasting',
          'waitForAI',
          'reward',
          'leaving']),
         State.State('sellFish', self.enterSellFish, self.exitSellFish, ['waiting', 'leaving']),
         State.State('waitForAI', self.enterWaitForAI, self.exitWaitForAI, ['reward', 'leaving']),
         State.State('reward', self.enterReward, self.exitReward, ['localAdjusting',
          'distCasting',
          'leaving',
          'sellFish']),
         State.State('leaving', self.enterLeaving, self.exitLeaving, [])], 'off', 'off')
        self.fsm.enterInitialState()
        return
    def disable(self):
        """Tear down active state when the object leaves the client's interest.

        Finishes any running intervals, vacates the spot, detaches scene
        graph nodes and drops pending 'generate-' listeners.
        """
        self.ignore(self.uniqueName('enterFishingSpotSphere'))
        self.setOccupied(0)
        self.avId = 0
        if self.castTrack != None:
            if self.castTrack.isPlaying():
                # finish() jumps to the end so cleanup Funcs still run.
                self.castTrack.finish()
            self.castTrack = None
        if self.guiTrack != None:
            if self.guiTrack.isPlaying():
                self.guiTrack.finish()
            self.guiTrack = None
        self.__hideBob()
        self.nodePath.detachNode()
        self.__unmakeGui()
        self.pond.stopCheckingTargets()
        self.pond = None
        # Drop any outstanding acceptOnce('generate-<doId>') hooks.
        for event in self.getAllAccepting():
            if event.startswith('generate-'):
                self.ignore(event)
        DistributedObject.DistributedObject.disable(self)
        return
    def delete(self):
        """Permanently destroy this object; guarded so it runs only once."""
        if hasattr(self, 'fishDeleted'):
            return
        self.fishDeleted = 1
        del self.pond
        del self.fsm
        if self.nodePath:
            self.nodePath.removeNode()
            del self.nodePath
        DistributedObject.DistributedObject.delete(self)
        if self.ripples:
            self.ripples.destroy()
    def generateInit(self):
        """Build the spot's scene graph root and its (intangible) entry sphere.

        angleNP is the pivot whose heading tracks the cast direction.
        """
        DistributedObject.DistributedObject.generateInit(self)
        self.nodePath = NodePath(self.uniqueName('FishingSpot'))
        self.angleNP = self.nodePath.attachNewNode(self.uniqueName('FishingSpotAngleNP'))
        self.collSphere = CollisionSphere(0, 0, 0, self.getSphereRadius())
        # Intangible until occupied: toons may walk through an empty spot.
        self.collSphere.setTangible(0)
        self.collNode = CollisionNode(self.uniqueName('FishingSpotSphere'))
        self.collNode.setCollideMask(ToontownGlobals.WallBitmask)
        self.collNode.addSolid(self.collSphere)
        self.collNodePath = self.nodePath.attachNewNode(self.collNode)
        # Where the bob rests relative to the spot before a cast.
        self.bobStartPos = Point3(0.0, 3.0, 8.5)
    def generate(self):
        """Standard DistributedObject generate hook; no extra work needed."""
        DistributedObject.DistributedObject.generate(self)
    def announceGenerate(self):
        """Place the spot in the world and start listening for toon entry."""
        DistributedObject.DistributedObject.announceGenerate(self)
        self.nodePath.reparentTo(self.getParentNodePath())
        self.accept(self.uniqueName('enterFishingSpotSphere'), self.__handleEnterSphere)
def setPondDoId(self, pondDoId):
self.pondDoId = pondDoId
if pondDoId in self.cr.doId2do:
self.setPond(self.cr.doId2do[pondDoId])
else:
self.acceptOnce('generate-%d' % pondDoId, self.setPond)
def setPond(self, pond):
self.pond = pond
self.area = self.pond.getArea()
self.waterLevel = FishingTargetGlobals.getWaterLevel(self.area)
def allowedToEnter(self):
if hasattr(base, 'ttAccess') and base.ttAccess and base.ttAccess.canAccess():
return True
return False
def handleOkTeaser(self):
self.dialog.destroy()
del self.dialog
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
    def __handleEnterSphere(self, collEntry):
        """Collision handler for a toon walking into the spot's sphere.

        Debounces immediate re-entry by the avatar that just left, then asks
        the AI for permission to occupy the spot. Players without access get
        a teaser dialog instead.
        """
        if self.allowedToEnter():
            # Ignore a re-trigger by the same avatar within one frame of leaving.
            if base.localAvatar.doId == self.lastAvId and globalClock.getFrameCount() <= self.lastFrame + 1:
                self.notify.debug('Ignoring duplicate entry for avatar.')
                return
            if base.localAvatar.hp > 0 and base.cr.playGame.getPlace().fsm.getCurrentState().getName() != 'fishing':
                self.cr.playGame.getPlace().detectedFishingCollision()
                self.d_requestEnter()
        else:
            place = base.cr.playGame.getPlace()
            if place:
                place.fsm.request('stopped')
            self.dialog = TeaserPanel.TeaserPanel(pageName='fishing', doneFunc=self.handleOkTeaser)
    def d_requestEnter(self):
        """Ask the AI to let the local toon occupy this spot."""
        self.sendUpdate('requestEnter', [])
    def rejectEnter(self):
        """AI denied entry: put the local toon back into walk mode."""
        self.cr.playGame.getPlace().setState('walk')
    def d_requestExit(self):
        """Tell the AI the local toon is leaving the spot."""
        self.sendUpdate('requestExit', [])
    def d_doCast(self, power, heading):
        """Broadcast the local cast (power in [0,1], heading in degrees) to the AI."""
        self.sendUpdate('doCast', [power, heading])
    def getSphereRadius(self):
        """Radius of the entry-trigger collision sphere."""
        return 1.5
    def getParentNodePath(self):
        """Scene graph node this spot attaches under (the global render root)."""
        return render
    def setPosHpr(self, x, y, z, h, p, r):
        """Required broadcast field: position/orientation of the spot."""
        self.nodePath.setPosHpr(x, y, z, h, p, r)
        # Keep the cast-direction pivot aligned with the spot's new heading.
        self.angleNP.setH(render, self.nodePath.getH(render))
    def setOccupied(self, avId):
        """Required broadcast field: which avatar (0 = none) occupies the spot.

        Cleans up the previous occupant (drops the pole, restores smoothing),
        then installs the new one. If the avatar hasn't been generated on
        this client yet, retry shortly after its generate event.
        """
        if avId and avId not in self.cr.doId2do:
            def tryAgain(av):
                def reposition(task):
                    self.setOccupied(avId)
                    return task.done

                # Small delay so the freshly generated avatar settles first.
                taskMgr.doMethodLater(0.1, reposition, self.uniqueName('reposition'))

            self.acceptOnce('generate-%d' % avId, tryAgain)
            return
        if self.av != None:
            # Release the previous occupant.
            if not self.av.isEmpty():
                self.__dropPole()
                self.av.loop('neutral')
                self.av.setParent(ToontownGlobals.SPRender)
                self.av.startSmooth()
            self.ignore(self.av.uniqueName('disable'))
            self.__hideBob()
            self.fsm.requestFinalState()
            self.__removePole()
            self.av = None
            self.placedAvatar = 0
            self.angleNP.setH(render, self.nodePath.getH(render))
            self.__hideLine()
        wasLocalToon = self.localToonFishing
        # Remember who just vacated for the entry-sphere debounce.
        self.lastAvId = self.avId
        self.lastFrame = globalClock.getFrameCount()
        self.avId = avId
        self.localToonFishing = 0
        if self.avId == 0:
            # Spot is free again: let toons walk through it.
            self.collSphere.setTangible(0)
        else:
            self.collSphere.setTangible(1)
            if self.avId == base.localAvatar.doId:
                base.setCellsActive(base.bottomCells, 0)
                self.localToonFishing = 1
                if base.wantBingo:
                    self.pond.setLocalToonSpot(self)
            self.av = self.cr.doId2do.get(self.avId)
            self.__loadStuff()
            self.placedAvatar = 0
            self.firstCast = 1
            # If the avatar disappears mid-fishing, vacate the spot.
            self.acceptOnce(self.av.uniqueName('disable'), self.__avatarGone)
            self.av.stopSmooth()
            self.av.wrtReparentTo(self.angleNP)
            self.av.setAnimState('neutral', 1.0)
            self.createCastTrack()
        if wasLocalToon and not self.localToonFishing:
            # Local toon just left: restore the normal HUD and walk state.
            self.__hideCastGui()
            if base.wantBingo:
                self.pond.setLocalToonSpot()
                base.setCellsActive([base.bottomCells[1], base.bottomCells[2]], 1)
                base.setCellsActive(base.rightCells, 1)
            place = base.cr.playGame.getPlace()
            if place:
                place.setState('walk')
        return
    def __avatarGone(self):
        """Occupying avatar was disabled; free the spot."""
        self.setOccupied(0)
    def setMovie(self, mode, code, itemDesc1, itemDesc2, itemDesc3, power, h):
        """Required broadcast field: drive the FSM from AI-issued movie modes.

        itemDesc1..3 describe the reward (meaning depends on *code*);
        power/h replay a remote toon's cast.
        """
        if self.av == None:
            return
        if mode == FishGlobals.NoMovie:
            pass
        elif mode == FishGlobals.EnterMovie:
            self.fsm.request('waiting')
        elif mode == FishGlobals.ExitMovie:
            self.fsm.request('leaving')
        elif mode == FishGlobals.CastMovie:
            # The local toon already animated its own cast; only replay remote ones.
            if not self.localToonFishing:
                self.fsm.request('distCasting', [power, h])
        elif mode == FishGlobals.PullInMovie:
            self.fsm.request('reward', [code,
             itemDesc1,
             itemDesc2,
             itemDesc3])
        return
    def getStareAtNodeAndOffset(self):
        """Node (and zero offset) other toons look at for this spot."""
        return (self.nodePath, Point3())
def __loadStuff(self):
rodId = self.av.getFishingRod()
rodPath = FishGlobals.RodFileDict.get(rodId)
if not rodPath:
self.notify.warning('Rod id: %s model not found' % rodId)
rodPath = RodFileDict[0]
self.pole = Actor.Actor()
self.pole.loadModel(rodPath)
self.pole.loadAnims({'cast': 'phase_4/models/props/fishing-pole-chan'})
self.pole.pose('cast', 0)
self.ptop = self.pole.find('**/joint_attachBill')
if self.line == None:
self.line = Rope.Rope(self.uniqueName('Line'))
self.line.setColor(1, 1, 1, 0.4)
self.line.setTransparency(1)
self.lineSphere = BoundingSphere(Point3(-0.6, -2, -5), 5.5)
if self.bob == None:
self.bob = loader.loadModel('phase_4/models/props/fishing_bob')
self.bob.setScale(1.5)
self.ripples = Ripples.Ripples(self.nodePath)
self.ripples.setScale(0.4)
self.ripples.hide()
if self.splashSounds == None:
self.splashSounds = (base.loadSfx('phase_4/audio/sfx/TT_splash1.ogg'), base.loadSfx('phase_4/audio/sfx/TT_splash2.ogg'))
return
    def __placeAvatar(self):
        """Snap the avatar to the spot's origin (once) and hand it the pole."""
        if not self.placedAvatar:
            self.placedAvatar = 1
            self.__holdPole()
            self.av.setPosHpr(0, 0, 0, 0, 0, 0)
    def __holdPole(self):
        """Instance the pole into each of the avatar's right hands.

        Toons can have multiple right-hand nodes (LODs); the pole model is
        parented to the first instance so it renders in all of them.
        """
        if self.poleNode != []:
            self.__dropPole()
        np = NodePath('pole-holder')
        hands = self.av.getRightHands()
        for h in hands:
            self.poleNode.append(np.instanceTo(h))

        self.pole.reparentTo(self.poleNode[0])
    def __dropPole(self):
        """Detach the pole (and line/bob) from the avatar's hands."""
        self.__hideBob()
        self.__hideLine()
        if self.pole != None:
            # clearMat undoes any transform accumulated while held.
            self.pole.clearMat()
            self.pole.detachNode()
        for pn in self.poleNode:
            pn.removeNode()

        self.poleNode = []
        return
    def __removePole(self):
        """Fully destroy the pole actor (vs. __dropPole, which only detaches)."""
        self.pole.cleanup()
        self.pole.removeNode()
        self.poleNode = []
        self.ptop.removeNode()
        self.pole = None
        self.ptop = None
        return
    def __showLineWaiting(self):
        """Show the slack 4-point line from rod tip down to the floating bob."""
        # Points with node None are relative to the rope's parent (the rod tip).
        self.line.setup(4, ((None, (0, 0, 0)),
         (None, (0, -2, -4)),
         (self.bob, (0, -1, 0)),
         (self.bob, (0, 0, 0))))
        self.line.ropeNode.setBounds(self.lineSphere)
        self.line.reparentTo(self.ptop)
        return
    def __showLineCasting(self):
        """Show a taut 2-point line from the rod tip to the flying bob."""
        self.line.setup(2, ((None, (0, 0, 0)), (self.bob, (0, 0, 0))))
        self.line.ropeNode.setBounds(self.lineSphere)
        self.line.reparentTo(self.ptop)
        return
    def __showLineReeling(self):
        """Show a taut 2-point line while reeling in (same shape as casting)."""
        self.line.setup(2, ((None, (0, 0, 0)), (self.bob, (0, 0, 0))))
        self.line.ropeNode.setBounds(self.lineSphere)
        self.line.reparentTo(self.ptop)
        return
    def __hideLine(self):
        """Detach the fishing line from the scene graph, if it exists."""
        if self.line:
            self.line.detachNode()
    def __showBobFloat(self):
        """Land the bob on the water: ripples, splash sound, and bobbing task."""
        self.__hideBob()
        self.bob.reparentTo(self.angleNP)
        self.ripples.reparentTo(self.angleNP)
        self.ripples.setPos(self.bob.getPos())
        # Slightly above the surface so the ripple decal doesn't z-fight.
        self.ripples.setZ(self.waterLevel + 0.025)
        self.ripples.play()
        splashSound = random.choice(self.splashSounds)
        base.playSfx(splashSound, volume=0.8, node=self.bob)
        self.bobBobTask = taskMgr.add(self.__doBobBob, self.taskName('bob'))
    def __hideBob(self):
        """Detach the bob, stop the bobbing task and halt the ripple effect."""
        if self.bob:
            self.bob.detachNode()
        if self.bobBobTask:
            taskMgr.remove(self.bobBobTask)
            self.bobBobTask = None
        if self.ripples:
            self.ripples.stop()
            self.ripples.detachNode()
        return
def __doBobBob(self, task):
z = math.sin(task.time * 1.8) * 0.08
self.bob.setZ(self.waterLevel + z)
return Task.cont
def __userExit(self, event = None):
if self.localToonFishing:
self.fsm.request('leaving')
self.d_requestExit()
    def __sellFish(self, result = None):
        """Handle the sell-fish dialog: OK sells the tank, anything else leaves."""
        if self.localToonFishing:
            if result == DGG.DIALOG_OK:
                self.sendUpdate('sellFish', [])
                # Disable the buttons while the AI round trip is in flight.
                for button in self.sellFishDialog.buttonList:
                    button['state'] = DGG.DISABLED

            else:
                self.fsm.request('leaving')
                self.d_requestExit()
def __sellFishConfirm(self, result = None):
if self.localToonFishing:
self.fsm.request('waiting', [False])
    def __showCastGui(self):
        """Bring up the casting HUD and wire the cast button's mouse bindings.

        Also hides the pond's target GUI while fishing, and shows first-time
        help (or bingo help) when appropriate.
        """
        self.__hideCastGui()
        self.__makeGui()
        self.castButton.show()
        self.arrow.hide()
        self.exitButton.show()
        self.timer.show()
        self.__updateFishTankGui()
        self.castGui.reparentTo(aspect2d)
        self.castButton['state'] = DGG.NORMAL
        self.jar['text'] = str(self.av.getMoney())
        self.accept(localAvatar.uniqueName('moneyChange'), self.__moneyChange)
        self.accept(localAvatar.uniqueName('fishTankChange'), self.__updateFishTankGui)
        target = base.cr.doFind('DistributedTarget')
        if target:
            target.hideGui()
        if base.wantBingo:
            self.__setBingoCastGui()

        # Press starts aiming; release fires the cast. A full tank routes
        # to the sell-fish flow instead.
        def requestLocalAdjusting(mouseEvent):
            if self.av.isFishTankFull() and self.__allowSellFish():
                self.fsm.request('sellFish')
            else:
                self.fsm.request('localAdjusting')

        def requestLocalCasting(mouseEvent):
            if not (self.av.isFishTankFull() and self.__allowSellFish()):
                self.fsm.request('localCasting')

        self.castButton.bind(DGG.B1PRESS, requestLocalAdjusting)
        self.castButton.bind(DGG.B3PRESS, requestLocalAdjusting)
        self.castButton.bind(DGG.B1RELEASE, requestLocalCasting)
        self.castButton.bind(DGG.B3RELEASE, requestLocalCasting)
        if self.firstCast and len(self.av.fishCollection) == 0 and len(self.av.fishTank) == 0:
            self.__showHowTo(TTLocalizer.FishingHowToFirstTime)
        elif base.wantBingo and self.pond.hasPondBingoManager() and not self.av.bFishBingoTutorialDone:
            if self.pond.getPondBingoManager().state != 'Off':
                self.__showHowTo(TTLocalizer.FishBingoHelpMain)
                self.av.sendUpdate('setFishBingoTutorialDone', [True])
                self.av.bFishBingoTutorialDone = True
def __moneyChange(self, money):
self.jar['text'] = str(money)
    def __initCastGui(self):
        """Start the idle-timeout countdown on the casting timer."""
        self.timer.countdown(FishGlobals.CastTimeout)
    def __showQuestItem(self, itemId):
        """Show the reward panel with a quest item's name and package icon."""
        self.__makeGui()
        itemName = Quests.getItemName(itemId)
        self.itemLabel['text'] = itemName
        self.itemGui.reparentTo(aspect2d)
        self.itemPackage.show()
        self.itemJellybean.hide()
        self.itemBoot.hide()
    def __showBootItem(self):
        """Show the reward panel for the consolation old-boot catch."""
        self.__makeGui()
        itemName = TTLocalizer.FishingBootItem
        self.itemLabel['text'] = itemName
        self.itemGui.reparentTo(aspect2d)
        self.itemBoot.show()
        self.itemJellybean.hide()
        self.itemPackage.hide()
def __setItemLabel(self):
if self.pond.hasPondBingoManager():
self.itemLabel['text'] = str(itemName + '\n\n' + 'BINGO WILDCARD')
else:
self.itemLabel['text'] = itemName
    def __showJellybeanItem(self, amount):
        """Show the reward panel for a jellybean catch and refresh the jar."""
        self.__makeGui()
        itemName = TTLocalizer.FishingJellybeanItem % amount
        self.itemLabel['text'] = itemName
        self.itemGui.reparentTo(aspect2d)
        self.jar['text'] = str(self.av.getMoney())
        self.itemJellybean.show()
        self.itemBoot.hide()
        self.itemPackage.hide()
    def __showFishItem(self, code, fish):
        """Show the animated fish panel for a caught fish.

        *code* distinguishes plain catch / new species / new record displays.
        """
        self.fishPanel = FishPanel.FishPanel(fish)
        self.__setFishItemPos()
        self.fishPanel.setSwimBounds(-0.3, 0.3, -0.235, 0.25)
        self.fishPanel.setSwimColor(1.0, 1.0, 0.74901, 1.0)
        self.fishPanel.load()
        self.fishPanel.show(code)
        self.__updateFishTankGui()
def __setFishItemPos(self):
if base.wantBingo:
if self.pond.hasPondBingoManager():
self.fishPanel.setPos(0.65, 0, 0.4)
else:
self.fishPanel.setPos(0, 0, 0.5)
else:
self.fishPanel.setPos(0, 0, 0.5)
def __updateFishTankGui(self):
fishTank = self.av.getFishTank()
lenFishTank = len(fishTank)
maxFishTank = self.av.getMaxFishTank()
self.bucket['text'] = '%s/%s' % (lenFishTank, maxFishTank)
    def __showFailureReason(self, code):
        """Show the failure dialog for *code* (only the full-tank case has text)."""
        self.__makeGui()
        reason = ''
        if code == FishGlobals.OverTankLimit:
            reason = TTLocalizer.FishingOverTankLimit
        self.failureDialog.setMessage(reason)
        self.failureDialog.show()
    def __showSellFishDialog(self):
        """Show the yes/no offer-to-sell-fish dialog."""
        self.__makeGui()
        self.sellFishDialog.show()
    def __hideSellFishDialog(self):
        """Hide the offer-to-sell-fish dialog."""
        self.__makeGui()
        self.sellFishDialog.hide()
    def __showSellFishConfirmDialog(self, numFishCaught):
        """Show the sale confirmation with the player's species-count progress."""
        self.__makeGui()
        msg = TTLocalizer.STOREOWNER_TROPHY % (numFishCaught, FishGlobals.getTotalNumFish())
        self.sellFishConfirmDialog.setMessage(msg)
        self.sellFishConfirmDialog.show()
    def __hideSellFishConfirmDialog(self):
        """Hide the sale confirmation dialog."""
        self.__makeGui()
        self.sellFishConfirmDialog.hide()
    def __showBroke(self):
        """Tell the player they can't afford a cast and disable the button."""
        self.__makeGui()
        self.brokeDialog.show()
        self.castButton['state'] = DGG.DISABLED
    def __showHowTo(self, message):
        """Show the how-to-fish help dialog with *message*."""
        self.__makeGui()
        self.howToDialog.setMessage(message)
        self.howToDialog.show()
    def __hideHowTo(self, event = None):
        """Hide the how-to-fish help dialog (also the dialog's own command)."""
        self.__makeGui()
        self.howToDialog.hide()
    def __showFishTankFull(self):
        """Show the tank-full failure and disable further casting."""
        self.__makeGui()
        self.__showFailureReason(FishGlobals.OverTankLimit)
        self.castButton['state'] = DGG.DISABLED
    def __hideCastGui(self):
        """Take down the casting HUD and restore the pond target GUI.

        Safe to call before the GUI was built (madeGui guards the teardown).
        """
        target = base.cr.doFind('DistributedTarget')
        if target:
            target.showGui()
        if self.madeGui:
            self.timer.hide()
            self.castGui.detachNode()
            self.itemGui.detachNode()
            self.failureDialog.hide()
            self.sellFishDialog.hide()
            self.sellFishConfirmDialog.hide()
            self.brokeDialog.hide()
            self.howToDialog.hide()
            # Unbind the press/release handlers installed by __showCastGui.
            self.castButton.unbind(DGG.B1PRESS)
            self.castButton.unbind(DGG.B3PRESS)
            self.castButton.unbind(DGG.B1RELEASE)
            self.castButton.unbind(DGG.B3RELEASE)
            self.ignore(localAvatar.uniqueName('moneyChange'))
            self.ignore(localAvatar.uniqueName('fishTankChange'))
    def __itemGuiClose(self):
        """Dismiss the reward-item panel."""
        self.itemGui.detachNode()
    def __makeGui(self):
        """Lazily build every GUI element used while fishing (idempotent).

        Creates the timer, cast/exit buttons, power arrow, jar and bucket
        labels, the reward-item panel and all modal dialogs. Runs once; the
        madeGui flag short-circuits later calls.
        """
        if base.config.GetBool('want-qa-regression', 0):
            self.notify.info('QA-REGRESSION: FISHING: ZoneId: %s' % self.pond.getArea())
        if self.madeGui:
            return
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posInTopRightCorner()
        self.timer.hide()
        self.castGui = loader.loadModel('phase_4/models/gui/fishingGui')
        self.castGui.setBin('background', 10)
        self.castGui.setScale(0.67)
        self.castGui.setPos(0, 1, 0)
        # Flatten the pieces we manipulate directly under the GUI root.
        for nodeName in ('bucket', 'jar', 'display_bucket', 'display_jar'):
            self.castGui.find('**/' + nodeName).reparentTo(self.castGui)

        self.exitButton = DirectButton(parent=self.castGui, relief=None, text=('', TTLocalizer.FishingExit, TTLocalizer.FishingExit), text_align=TextNode.ACenter, text_scale=0.1, text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0.0, -0.12), pos=(1.75*(4./3.), 0, -1.33), textMayChange=0, image=(self.castGui.find('**/exit_buttonUp'), self.castGui.find('**/exit_buttonDown'), self.castGui.find('**/exit_buttonRollover')), command=self.__userExit)
        self.castGui.find('**/exitButton').removeNode()
        self.castButton = DirectButton(parent=self.castGui, relief=None, text=TTLocalizer.FishingCast, text_align=TextNode.ACenter, text_scale=(3, 3 * 0.75, 3 * 0.75), text_fg=Vec4(1, 1, 1, 1), text_shadow=Vec4(0, 0, 0, 1), text_pos=(0, -4), image=self.castGui.find('**/castButton'), image0_color=(1, 0, 0, 1), image1_color=(0, 1, 0, 1), image2_color=(1, 1, 0, 1), image3_color=(0.8, 0.5, 0.5, 1), pos=(0, -0.05, -0.666), scale=(0.036, 1, 0.048))
        self.castGui.find('**/castButton').removeNode()
        # Aim/power indicator drawn while the player drags to cast.
        self.arrow = self.castGui.find('**/arrow')
        self.arrowTip = self.arrow.find('**/arrowTip')
        self.arrowTail = self.arrow.find('**/arrowTail')
        self.arrow.reparentTo(self.castGui)
        self.arrow.setColorScale(0.9, 0.9, 0.1, 0.7)
        self.arrow.hide()
        self.jar = DirectLabel(parent=self.castGui, relief=None, text=str(self.av.getMoney()), text_scale=0.16, text_fg=(0.95, 0.95, 0, 1), text_font=ToontownGlobals.getSignFont(), pos=(-1.12, 0, -1.3))
        self.bucket = DirectLabel(parent=self.castGui, relief=None, text='', text_scale=0.09, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), pos=(1.14, 0, -1.33))
        self.__updateFishTankGui()
        # Reward-item popup (quest item / boot / jellybeans).
        self.itemGui = NodePath('itemGui')
        self.itemFrame = DirectFrame(parent=self.itemGui, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1, 1, 0.6), text=TTLocalizer.FishingItemFound, text_pos=(0, 0.2), text_scale=0.08, pos=(0, 0, 0.587))
        self.itemLabel = DirectLabel(parent=self.itemFrame, text='', text_scale=0.06, pos=(0, 0, -0.25))
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        self.itemGuiCloseButton = DirectButton(parent=self.itemFrame, pos=(0.44, 0, -0.24), relief=None, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), image_scale=(0.7, 1, 0.7), command=self.__itemGuiClose)
        buttons.removeNode()
        jarGui = loader.loadModel('phase_3.5/models/gui/jar_gui')
        bootGui = loader.loadModel('phase_4/models/gui/fishing_boot')
        packageGui = loader.loadModel('phase_3.5/models/gui/stickerbook_gui').find('**/package')
        self.itemJellybean = DirectFrame(parent=self.itemFrame, relief=None, image=jarGui, scale=0.5)
        self.itemBoot = DirectFrame(parent=self.itemFrame, relief=None, image=bootGui, scale=0.2)
        self.itemPackage = DirectFrame(parent=self.itemFrame, relief=None, image=packageGui, scale=0.25)
        self.itemJellybean.hide()
        self.itemBoot.hide()
        self.itemPackage.hide()
        # Modal dialogs; all start hidden and are shown on demand.
        self.failureDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('failureDialog'), doneEvent=self.uniqueName('failureDialog'), command=self.__userExit, message=TTLocalizer.FishingFailure, style=TTDialog.CancelOnly, cancelButtonText=TTLocalizer.FishingExit)
        self.failureDialog.hide()
        self.sellFishDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('sellFishDialog'), doneEvent=self.uniqueName('sellFishDialog'), command=self.__sellFish, message=TTLocalizer.FishBingoOfferToSellFish, style=TTDialog.YesNo)
        self.sellFishDialog.hide()
        self.sellFishConfirmDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('sellFishConfirmDialog'), doneEvent=self.uniqueName('sellFishConfirmDialog'), command=self.__sellFishConfirm, message=TTLocalizer.STOREOWNER_TROPHY, style=TTDialog.Acknowledge)
        self.sellFishConfirmDialog.hide()
        self.brokeDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('brokeDialog'), doneEvent=self.uniqueName('brokeDialog'), command=self.__userExit, message=TTLocalizer.FishingBroke, style=TTDialog.CancelOnly, cancelButtonText=TTLocalizer.FishingExit)
        self.brokeDialog.hide()
        self.howToDialog = TTDialog.TTGlobalDialog(dialogName=self.uniqueName('howToDialog'), doneEvent=self.uniqueName('howToDialog'), fadeScreen=0, message=TTLocalizer.FishingHowToFailed, style=TTDialog.Acknowledge)
        self.howToDialog['command'] = self.__hideHowTo
        self.howToDialog.setPos(-0.3, 0, 0.5)
        self.howToDialog.hide()
        self.madeGui = 1
        return
    def __setBingoCastGui(self):
        """Rearrange bucket/jar for bingo-night layout, or reset to defaults.

        With a pond bingo manager active the counters shrink and move left to
        make room for the bingo card; otherwise they return to normal.
        """
        if self.pond.hasPondBingoManager():
            self.notify.debug('__setBingoCastGui: Has PondBing Manager %s' % self.pond.getPondBingoManager().getDoId())
            bucket = self.castGui.find('**/bucket')
            self.castGui.find('**/display_bucket').reparentTo(bucket)
            self.bucket.reparentTo(bucket)
            jar = self.castGui.find('**/jar')
            self.castGui.find('**/display_jar').reparentTo(jar)
            self.jar.reparentTo(jar)
            base.setCellsActive(base.rightCells, 0)
            bucket.setScale(0.9)
            bucket.setX(-1.9)
            bucket.setZ(-.11)
            jar.setScale(0.9)
            jar.setX(-.375)
            jar.setZ(-.135)
        else:
            self.notify.debug('__setItemFramePos: Has No Pond Bingo Manager')
            bucket = self.castGui.find('**/bucket')
            bucket.setScale(1)
            bucket.setPos(0, 0, 0)
            jar = self.castGui.find('**/jar')
            jar.setScale(1)
            jar.setPos(0, 0, 0)
    def resetCastGui(self):
        """Animate bucket/jar back to the normal layout when bingo night ends."""
        self.notify.debug('resetCastGui: Bingo Night Ends - resetting Gui')
        bucket = self.castGui.find('**/bucket')
        jar = self.castGui.find('**/jar')
        # Ease both counters back to origin/full scale over 5 seconds.
        bucketPosInt = bucket.posInterval(5.0, Point3(0, 0, 0), startPos=bucket.getPos(), blendType='easeInOut')
        bucketScaleInt = bucket.scaleInterval(5.0, VBase3(1.0, 1.0, 1.0), startScale=bucket.getScale(), blendType='easeInOut')
        bucketTrack = Parallel(bucketPosInt, bucketScaleInt)
        jarPosInt = jar.posInterval(5.0, Point3(0, 0, 0), startPos=jar.getPos(), blendType='easeInOut')
        jarScaleInt = jar.scaleInterval(5.0, VBase3(1.0, 1.0, 1.0), startScale=jar.getScale(), blendType='easeInOut')
        jarTrack = Parallel(jarPosInt, jarScaleInt)
        self.guiTrack = Parallel(bucketTrack, jarTrack)
        self.guiTrack.start()
    def setCastGui(self):
        """Animate bucket/jar into the bingo layout when bingo night starts."""
        self.notify.debug('setCastGui: Bingo Night Starts - setting Gui')
        bucket = self.castGui.find('**/bucket')
        self.castGui.find('**/display_bucket').reparentTo(bucket)
        self.bucket.reparentTo(bucket)
        jar = self.castGui.find('**/jar')
        self.castGui.find('**/display_jar').reparentTo(jar)
        self.jar.reparentTo(jar)
        # Slide/shrink to the same positions __setBingoCastGui sets statically.
        bucketPosInt = bucket.posInterval(3.0, Point3(-1.9, 0, -.11), startPos=bucket.getPos(), blendType='easeInOut')
        bucketScaleInt = bucket.scaleInterval(3.0, VBase3(0.9, 0.9, 0.9), startScale=bucket.getScale(), blendType='easeInOut')
        bucketTrack = Parallel(bucketPosInt, bucketScaleInt)
        jarPosInt = jar.posInterval(3.0, Point3(-.375, 0, -.135), startPos=jar.getPos(), blendType='easeInOut')
        jarScaleInt = jar.scaleInterval(3.0, VBase3(0.9, 0.9, 0.9), startScale=jar.getScale(), blendType='easeInOut')
        jarTrack = Parallel(jarPosInt, jarScaleInt)
        self.guiTrack = Parallel(bucketTrack, jarTrack)
        self.guiTrack.start()
def setJarAmount(self, amount):
if self.madeGui:
money = int(self.jar['text']) + amount
pocketMoney = min(money, self.av.getMaxMoney())
self.jar.setProp('text', str(pocketMoney))
    def __unmakeGui(self):
        """Destroy everything __makeGui built; no-op if it never ran."""
        if not self.madeGui:
            return
        self.timer.destroy()
        del self.timer
        self.exitButton.destroy()
        self.castButton.destroy()
        self.jar.destroy()
        self.bucket.destroy()
        self.itemFrame.destroy()
        self.itemGui.removeNode()
        self.failureDialog.cleanup()
        self.sellFishDialog.cleanup()
        self.sellFishConfirmDialog.cleanup()
        self.brokeDialog.cleanup()
        self.howToDialog.cleanup()
        self.castGui.removeNode()
        self.madeGui = 0
    def localAdjustingCastTask(self, state):
        """Per-frame aiming task: map mouse drag to cast power and heading.

        Dragging down and sideways from the press point sets power (drag
        distance, saturated at 1.0) and angle (clamped to +-FishingAngleMax);
        the toon's wind-up animation is scrubbed to match the power.
        """
        self.getMouse()
        deltaX = self.mouseX - self.initMouseX
        deltaY = self.mouseY - self.initMouseY
        if deltaY >= 0:
            # Mouse at or above the press point: no pull yet. Reset the
            # indicator unless some power was already accumulated.
            if self.power == 0:
                self.arrowTail.setScale(0.075, 0.075, 0)
                self.arrow.setR(0)
                self.castTrack.pause()
            return Task.cont
        dist = math.sqrt(deltaX * deltaX + deltaY * deltaY)
        delta = dist / 0.5
        self.power = max(min(abs(delta), 1.0), 0.0)
        # Scrub the cast animation between t=0.2 (no power) and t=0.9 (full).
        self.castTrack.setT(0.2 + self.power * 0.7)
        angle = rad2Deg(math.atan(deltaX / deltaY))
        if self.power < 0.25:
            # Damp angle jitter at very low power.
            angle = angle * math.pow(self.power * 4, 3)
        # NOTE(review): delta = dist / 0.5 is never negative, so this branch
        # looks unreachable — possibly a decompilation artifact; confirm.
        if delta < 0:
            angle += 180
        minAngle = -FishGlobals.FishingAngleMax
        maxAngle = FishGlobals.FishingAngleMax
        if angle < minAngle:
            self.arrow.setColorScale(1, 0, 0, 1)
            angle = minAngle
        elif angle > maxAngle:
            self.arrow.setColorScale(1, 0, 0, 1)
            angle = maxAngle
        else:
            self.arrow.setColorScale(1, 1 - math.pow(self.power, 3), 0.1, 0.7)
        self.arrowTail.setScale(0.075, 0.075, self.power * 0.2)
        self.arrow.setR(angle)
        self.angleNP.setH(-angle)
        return Task.cont
    def localAdjustingCastTaskIndAxes(self, state):
        """Alternate aiming task (config 'fishing-independent-axes').

        Vertical drag controls power, horizontal drag controls heading,
        independently of each other.
        """
        self.getMouse()
        deltaX = self.mouseX - self.initMouseX
        deltaY = self.mouseY - self.initMouseY
        self.power = max(min(abs(deltaY) * 1.5, 1.0), 0.0)
        self.castTrack.setT(0.4 + self.power * 0.5)
        angle = deltaX * -180.0
        self.angleNP.setH(self.startAngleNP - angle)
        return Task.cont
def getMouse(self):
if base.mouseWatcherNode.hasMouse():
self.mouseX = base.mouseWatcherNode.getMouseX()
self.mouseY = base.mouseWatcherNode.getMouseY()
else:
self.mouseX = 0
self.mouseY = 0
    def createCastTrack(self):
        """Build the avatar's wind-up + cast animation sequence."""
        self.castTrack = Sequence(ActorInterval(self.av, 'castlong', playRate=4), ActorInterval(self.av, 'cast', startFrame=20), Func(self.av.loop, 'fish-neutral'))
    def startMoveBobTask(self):
        """Launch the bob and start its projectile-motion task."""
        self.__showBob()
        taskMgr.add(self.moveBobTask, self.taskName('moveBobTask'))
    def moveBobTask(self, task):
        """Per-frame projectile flight of the bob until it hits the water.

        Standard ballistic motion: launch speed/angle scale with cast power
        (vZeroMax / angleMax), gravity g pulls it down; once the bob drops
        below the water level the FSM moves to 'fishing'.
        """
        g = 32.2
        t = task.time
        vZero = self.power * self.vZeroMax
        angle = deg2Rad(self.power * self.angleMax)
        deltaY = vZero * math.cos(angle) * t
        deltaZ = vZero * math.sin(angle) * t - g * t * t / 2.0
        deltaPos = Point3(0, deltaY, deltaZ)
        # Re-assert the launch point each frame (same constant as generateInit).
        self.bobStartPos = Point3(0.0, 3.0, 8.5)
        pos = self.bobStartPos + deltaPos
        self.bob.setPos(pos)
        if pos[2] < self.waterLevel:
            self.fsm.request('fishing')
            return Task.done
        else:
            return Task.cont
    def __showBob(self):
        """Attach the bob at the rod tip, ready for launch."""
        self.__hideBob()
        self.bob.reparentTo(self.angleNP)
        # Force an avatar update so the rod tip's transform is current.
        self.bob.setPos(self.ptop, 0, 0, 0)
        self.av.update(0)
    def hitTarget(self):
        """Bob landed on a fishing target: wait for the AI to pick the reward."""
        self.fsm.request('waitForAI')
    def enterOff(self):
        # Idle FSM state; nothing to set up.
        pass
    def exitOff(self):
        # Idle FSM state; nothing to tear down.
        pass
    def enterWaiting(self, doAnimation = True):
        """FSM: avatar settles at the spot, pulls out the pole, GUI comes up.

        *doAnimation* is False when returning from the sell-fish flow, in
        which case only the GUI is (re)shown.
        """
        self.av.stopLookAround()
        self.__hideLine()
        self.track = Parallel()
        if doAnimation:
            toonTrack = Sequence(Func(self.av.setPlayRate, 1.0, 'run'), Func(self.av.loop, 'run'), LerpPosHprInterval(self.av, 1.0, Point3(0, 0, 0), Point3(0, 0, 0)), Func(self.__placeAvatar), Parallel(ActorInterval(self.av, 'pole'), Func(self.pole.pose, 'cast', 0), LerpScaleInterval(self.pole, duration=0.5, scale=1.0, startScale=0.01)), Func(self.av.loop, 'pole-neutral'))
            if self.localToonFishing:
                # Move the camera behind/above the local toon while fishing.
                base.camera.wrtReparentTo(render)
                self.track.append(LerpPosHprInterval(nodePath=camera, other=self.av, duration=1.5, pos=Point3(0, -12, 15), hpr=VBase3(0, -38, 0), blendType='easeInOut'))
                toonTrack.append(Func(self.__showCastGui))
                toonTrack.append(Func(self.__initCastGui))
                if base.wantBingo:
                    self.__appendBingoMethod(toonTrack, self.pond.showBingoGui)
            self.track.append(toonTrack)
        else:
            self.__showCastGui()
        self.track.start()
    def __appendBingoMethod(self, interval, callback):
        """Append *callback* (wrapped in a Func) to an interval track."""
        interval.append(Func(callback))
    def exitWaiting(self):
        """FSM: finish the settle-in animation track."""
        self.track.finish()
        self.track = None
        return
    def enterLocalAdjusting(self, guiEvent = None):
        """FSM: local player pressed the cast button and is aiming.

        Verifies the player can afford a cast and has tank space, then
        starts the per-frame aiming task (standard or independent-axes).
        """
        if self.track:
            self.track.pause()
        if self.castTrack:
            self.castTrack.pause()
        self.power = 0.0
        self.firstCast = 0
        # Button turns green while held down.
        self.castButton['image0_color'] = Vec4(0, 1, 0, 1)
        self.castButton['text'] = ''
        self.av.stopLookAround()
        self.__hideLine()
        self.__hideBob()
        self.howToDialog.hide()
        castCost = FishGlobals.getCastCost(self.av.getFishingRod())
        if self.av.getMoney() < castCost:
            self.__hideCastGui()
            self.__showBroke()
            self.av.loop('pole-neutral')
            return
        if self.av.isFishTankFull():
            self.__hideCastGui()
            self.__showFishTankFull()
            self.av.loop('pole-neutral')
            return
        self.arrow.show()
        self.arrow.setColorScale(1, 1, 0, 0.7)
        # Remember the press point; drag deltas are measured from here.
        self.startAngleNP = self.angleNP.getH()
        self.getMouse()
        self.initMouseX = self.mouseX
        self.initMouseY = self.mouseY
        self.__hideBob()
        if config.GetBool('fishing-independent-axes', 0):
            taskMgr.add(self.localAdjustingCastTaskIndAxes, self.taskName('adjustCastTask'))
        else:
            taskMgr.add(self.localAdjustingCastTask, self.taskName('adjustCastTask'))
        if base.wantBingo:
            bingoMgr = self.pond.getPondBingoManager()
            if bingoMgr:
                bingoMgr.castingStarted()
    def exitLocalAdjusting(self):
        """FSM: stop aiming; restore the cast button and hide the arrow."""
        taskMgr.remove(self.taskName('adjustCastTask'))
        self.castButton['image0_color'] = Vec4(1, 0, 0, 1)
        self.castButton['text'] = TTLocalizer.FishingCast
        self.arrow.hide()
    def enterLocalCasting(self):
        """FSM: local player released the button; actually perform the cast.

        A zero-power release by a brand-new player shows the how-to dialog
        instead of casting. Otherwise the cast animation finishes from the
        scrubbed position and the cast is sent to the AI.
        """
        if self.power == 0.0 and len(self.av.fishCollection) == 0:
            self.__showHowTo(TTLocalizer.FishingHowToFailed)
            if self.castTrack:
                self.castTrack.pause()
            self.av.loop('pole-neutral')
            self.track = None
            return
        castCost = FishGlobals.getCastCost(self.av.getFishingRod())
        # Optimistically deduct the cast cost from the displayed total.
        self.jar['text'] = str(max(self.av.getMoney() - castCost, 0))
        if not self.castTrack:
            self.createCastTrack()
        self.castTrack.pause()
        # Higher power = further into the wind-up, so less left to play.
        startT = 0.7 + (1 - self.power) * 0.3
        self.castTrack.start(startT)
        self.track = Sequence(Wait(1.2 - startT), Func(self.startMoveBobTask), Func(self.__showLineCasting))
        self.track.start()
        heading = self.angleNP.getH()
        self.d_doCast(self.power, heading)
        self.timer.countdown(FishGlobals.CastTimeout)
        return
    def exitLocalCasting(self):
        """FSM: halt the launch task/intervals and hide line and bob."""
        taskMgr.remove(self.taskName('moveBobTask'))
        if self.track:
            self.track.pause()
        self.track = None
        if self.castTrack:
            self.castTrack.pause()
        self.__hideLine()
        self.__hideBob()
        return
    def enterDistCasting(self, power, h):
        """FSM: replay a remote toon's cast using its broadcast power/heading."""
        self.av.stopLookAround()
        self.__placeAvatar()
        self.__hideLine()
        self.__hideBob()
        self.angleNP.setH(h)
        self.power = power
        self.track = Parallel(Sequence(ActorInterval(self.av, 'cast'), Func(self.pole.pose, 'cast', 0), Func(self.av.loop, 'fish-neutral')), Sequence(Wait(1.0), Func(self.startMoveBobTask), Func(self.__showLineCasting)))
        self.track.start()
    def exitDistCasting(self):
        """FSM: finish the remote cast replay and hide line and bob."""
        self.track.finish()
        self.track = None
        taskMgr.remove(self.taskName('moveBobTask'))
        self.__hideLine()
        self.__hideBob()
        return
    def enterFishing(self):
        """FSM: bob is in the water; idle animation plus target checking.

        Only the local client polls the pond's fishing targets.
        """
        if self.localToonFishing:
            # Continue from wherever the cast animation left off.
            self.track = Sequence(ActorInterval(self.av, 'cast'), Func(self.pole.pose, 'cast', 0), Func(self.av.loop, 'fish-neutral'))
            self.track.start(self.castTrack.getT())
        else:
            self.track = None
            self.av.loop('fish-neutral')
        self.__showBobFloat()
        self.__showLineWaiting()
        if self.localToonFishing:
            self.pond.startCheckingTargets(self, self.bob.getPos(render))
        return
    def exitFishing(self):
        """FSM: stop target polling and finish the idle animation track."""
        if self.localToonFishing:
            self.pond.stopCheckingTargets()
        if self.track:
            self.track.finish()
        self.track = None
        return
    def enterWaitForAI(self):
        """FSM: waiting on the AI's catch result; block further casting."""
        self.castButton['state'] = DGG.DISABLED
    def exitWaitForAI(self):
        """FSM: AI responded; re-enable the cast button."""
        self.castButton['state'] = DGG.NORMAL
def enterReward(self, code, itemDesc1, itemDesc2, itemDesc3):
    """Show the result of a catch and play the reel-in animation.

    code: a FishGlobals result code describing what was caught.
    itemDesc1..3: code-dependent payload (e.g. genus/species/weight for
        a fish, bean amount for a jellybean, quest item id, ...).
    """
    self.__placeAvatar()
    # Re-attach the bob at water level in front of the avatar for the
    # reel-in animation.
    self.bob.reparentTo(self.angleNP)
    self.waterLevel = FishingTargetGlobals.getWaterLevel(self.area)
    self.bob.setZ(self.waterLevel)
    self.__showLineReeling()
    self.castTrack.pause()
    if self.localToonFishing:
        # Only the local client shows the reward GUI.
        self.__showCastGui()
        if code == FishGlobals.QuestItem:
            self.__showQuestItem(itemDesc1)
        elif code in (FishGlobals.FishItem, FishGlobals.FishItemNewEntry, FishGlobals.FishItemNewRecord):
            genus, species, weight = itemDesc1, itemDesc2, itemDesc3
            fish = FishBase.FishBase(genus, species, weight)
            self.__showFishItem(code, fish)
            if base.wantBingo:
                self.pond.handleBingoCatch((genus, species))
        elif code == FishGlobals.BootItem:
            self.__showBootItem()
            if base.wantBingo:
                # Boots count as a special bingo catch.
                self.pond.handleBingoCatch(FishGlobals.BingoBoot)
        elif code == FishGlobals.JellybeanItem:
            amount = itemDesc1
            self.__showJellybeanItem(amount)
        elif code == FishGlobals.OverTankLimit:
            # Bucket is full: no reward panel, just hide the GUI.
            self.__hideCastGui()
        else:
            self.__showFailureReason(code)
    # The reel-in animation plays for all clients watching this spot.
    self.track = Sequence(Parallel(ActorInterval(self.av, 'reel'), ActorInterval(self.pole, 'cast', startFrame=63, endFrame=127)), ActorInterval(self.av, 'reel-neutral'), Func(self.__hideLine), Func(self.__hideBob), ActorInterval(self.av, 'fish-again'), Func(self.av.loop, 'pole-neutral'))
    self.track.start()
def cleanupFishPanel(self):
    """Hide and destroy the caught-fish info panel, if one is showing."""
    panel = self.fishPanel
    if not panel:
        return
    panel.hide()
    panel.destroy()
    self.fishPanel = None
def hideBootPanel(self):
    """Close the item GUI when it was built and is showing the boot item."""
    if not (self.madeGui and self.itemBoot):
        return
    self.__itemGuiClose()
def exitReward(self):
    """Leave the reward state.

    Drops the item GUI and fish panel (local toon only) and finishes the
    reel-in animation track.
    """
    if self.localToonFishing:
        self.itemGui.detachNode()
        self.cleanupFishPanel()
    self.track.finish()
    self.track = None
    return
def enterLeaving(self):
    """Play the leave-the-fishing-spot animation and shrink the pole away."""
    if self.localToonFishing:
        self.__hideCastGui()
        if base.wantBingo:
            self.pond.cleanupBingoMgr()
    # Restart idle head movement now that fishing is over.
    self.av.stopLookAround()
    self.av.startLookAround()
    self.__placeAvatar()
    self.__hideLine()
    self.__hideBob()
    self.track = Sequence(Parallel(ActorInterval(self.av, 'fish-end'), Func(self.pole.pose, 'cast', 0), LerpScaleInterval(self.pole, duration=0.5, scale=0.01, startScale=1.0)), Func(self.__dropPole), Func(self.av.loop, 'neutral'))
    if self.localToonFishing:
        # Only the local client drives its own FSM to the final state.
        self.track.append(Func(self.fsm.requestFinalState))
    self.track.start()
def exitLeaving(self):
    """Stop the departure animation track and drop the reference to it."""
    departing_track = self.track
    departing_track.pause()
    self.track = None
def enterSellFish(self):
    """Show the sell-fish dialog, locking out casting while it is up."""
    self.castButton['state'] = DGG.DISABLED
    self.__showSellFishDialog()
    self.__hideHowTo()
def exitSellFish(self):
    """Dismiss the sell-fish dialogs and re-enable casting."""
    self.castButton['state'] = DGG.NORMAL
    self.__hideSellFishDialog()
    self.__hideSellFishConfirmDialog()
def sellFishComplete(self, trophyResult, numFishCaught):
    """Callback from the AI after a sell-fish request completes.

    trophyResult: truthy when the sale earned a trophy, in which case a
        confirmation dialog is shown; otherwise return to waiting.
    numFishCaught: total catch count, displayed in the confirmation.
    """
    # Re-enable the dialog buttons that were locked during the request.
    for button in self.sellFishDialog.buttonList:
        button['state'] = DGG.NORMAL
    if self.localToonFishing:
        if trophyResult:
            self.__hideSellFishDialog()
            self.__showSellFishConfirmDialog(numFishCaught)
        else:
            self.fsm.request('waiting', [False])
def __allowSellFish(self):
    """Return True only when bingo is enabled, this pond has a bingo
    manager, and the toon is fishing at their own estate.
    """
    return bool(base.wantBingo and
                self.pond.hasPondBingoManager() and
                base.cr.playGame.getPlaceId() == ToontownGlobals.MyEstate)
| |
'''
Created on Oct 30, 2015
@author: kashefy
'''
from nose.tools import assert_equal, assert_false, \
assert_list_equal, assert_true
from mock import patch, PropertyMock
import os
import tempfile
import shutil
import numpy as np
from numpy.testing import assert_array_equal
import h5py
import nideep.eval.inference as infr
import sys
CURRENT_MODULE_PATH = os.path.abspath(sys.modules[__name__].__file__)
ROOT_PKG_PATH = os.path.dirname(os.path.dirname(CURRENT_MODULE_PATH))
TEST_DATA_DIRNAME = 'test_data'
TEST_NET_FILENAME = 'n1.prototxt'
TEST_NET_HDF5DATA_FILENAME = 'n1h.prototxt'
class Bunch:
    """Minimal attribute container: each keyword argument becomes an
    instance attribute of the same name.
    """

    def __init__(self, **attributes):
        for name, value in attributes.items():
            setattr(self, name, value)
class TestInference:
    """Tests for nideep.eval.inference.forward() using a mocked caffe.Net."""

    @patch('nideep.eval.inference.caffe.Net')
    def test_forward(self, mock_net):
        """forward() returns only the requested blobs with their data."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        assert_false(net.forward.called, "Problem with mocked forward()")
        out = infr.forward(net, ['x', 'z'])
        assert_true(net.forward.called, "Problem with mocked forward()")
        # NOTE(review): comparing dict.keys() to a list assumes Python 2
        # (py3 returns a view) -- confirm the targeted Python version.
        assert_list_equal(out.keys(), ['x', 'z'])
        for k in ['x', 'z']:
            assert_equal(out[k].shape, (3, 2),
                         msg="unexpected shape for blob %s" % k)
            assert_array_equal(b[k].data, out[k])
        # repeat with smaller set of keys
        out = infr.forward(net, ['z'])
        assert_list_equal(out.keys(), ['z'])
        assert_equal(out['z'].shape, (3, 2), msg="unexpected shape for blob z")
        assert_array_equal(b['z'].data, out['z'])
class TestInferenceEstNumFwdPasses():
    """Tests for est_min_num_fwd_passes() with mocked data sources."""

    @patch('nideep.iow.dataSource.DataSourceLMDB')
    def test_est_num_fwd_passes_caffe_lmdb(self, mock_ds):
        """LMDB-backed net: passes = num_entries / batch size per phase."""
        # we know the batch sizes from the prototxt file
        fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_FILENAME)
        mock_ds.return_value.num_entries.return_value = 77 * 64  # train batch size 64, taken from the prototxt
        assert_equal(77, infr.est_min_num_fwd_passes(fpath_net, 'train'))
        mock_ds.return_value.num_entries.return_value = 33 * 100  # test batch size 100, taken from the prototxt
        fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_FILENAME)
        assert_equal(33, infr.est_min_num_fwd_passes(fpath_net, 'test'))

    @patch('nideep.iow.dataSource.DataSourceH5List')
    def test_est_num_fwd_passes_caffe_h5list(self, mock_ds):
        """HDF5-list-backed net: same computation with the HDF5 prototxt."""
        # we know the batch sizes from the prototxt file
        fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_HDF5DATA_FILENAME)
        mock_ds.return_value.num_entries.return_value = 44 * 64  # train batch size 64, taken from the prototxt
        assert_equal(44, infr.est_min_num_fwd_passes(fpath_net, 'train'))
        mock_ds.return_value.num_entries.return_value = 11 * 128  # test batch size 128, taken from the prototxt
        fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_HDF5DATA_FILENAME)
        assert_equal(11, infr.est_min_num_fwd_passes(fpath_net, 'test'))
class TestInferenceHDF5:
    """Tests for infer_to_h5_fixed_dims(): writing mocked net outputs to HDF5."""

    @classmethod
    def setup_class(self):
        # NOTE: nose-style class fixture; 'self' here is really the class.
        self.dir_tmp = tempfile.mkdtemp()

    @classmethod
    def teardown_class(self):
        shutil.rmtree(self.dir_tmp)

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_h5_fixed_dims(self, mock_net):
        """Single pass: only the requested blobs are written, shapes kept."""
        # fake minimal test data (each blob gets a distinct shape)
        b = {k : Bunch(data=np.random.rand(1, 1 + idx, 3, 2 * (idx + 1))) for idx, k in enumerate(['x', 'y', 'z'])}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims.h5')
        assert_false(os.path.isfile(fpath))
        out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], 1, fpath)
        assert_equal(net.forward.call_count, 1)
        assert_true(os.path.isfile(fpath))
        assert_list_equal(out, [1, 1])
        # check db content
        with h5py.File(fpath, "r") as f:
            assert_list_equal([str(k) for k in f.keys()], ['x', 'z'])
            for idx, k in enumerate(['x', 'y', 'z']):
                if k == 'y':
                    # 'y' was not requested, so it must not be written.
                    assert_false(k in f, "Unexpected key found (%s)" % k)
                else:
                    assert_equal(f[k].shape, (1, 1 + idx, 3, 2 * (idx + 1)),
                                 msg="unexpected shape for blob %s" % k)
                    assert_array_equal(b[k].data, f[k])

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_h5_fixed_dims_n(self, mock_net):
        """forward() is invoked exactly n times and n rows are reported."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(1, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        for n in range(1, 10):
            net = mock_net()
            # reset call counters so each iteration counts from zero
            net.reset_mock()
            fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims_n.h5')
            out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], n, fpath)
            assert_equal(net.forward.call_count, n)
            assert_list_equal(out, [n, n])

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_h5_fixed_dims_preserve_batch_no(self, mock_net):
        """preserve_batch=False flattens batches: n passes x batch of 4 rows."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims_preserve_batch_no.h5')
        assert_false(os.path.isfile(fpath))
        n = 3
        out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], n, fpath,
                                          preserve_batch=False)
        assert_equal(net.forward.call_count, n)
        assert_true(os.path.isfile(fpath))
        assert_list_equal(out, [n * 4] * 2)

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_h5_fixed_dims_preserve_batch_yes(self, mock_net):
        """preserve_batch=True keeps one entry per forward pass."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims_preserve_batch_yes.h5')
        assert_false(os.path.isfile(fpath))
        n = 3
        out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], n, fpath,
                                          preserve_batch=True)
        assert_equal(net.forward.call_count, n)
        assert_true(os.path.isfile(fpath))
        assert_list_equal(out, [n] * 2)
class TestInferenceLMDB:
    """Tests for the LMDB writers (infer_to_lmdb, infer_to_lmdb_cur,
    response_to_lmdb) using a mocked caffe.Net.
    """

    @classmethod
    def setup_class(self):
        # NOTE: nose-style class fixture; 'self' here is really the class.
        self.dir_tmp = tempfile.mkdtemp()

    @classmethod
    def teardown_class(self):
        shutil.rmtree(self.dir_tmp)

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_lmdb_fixed_dims(self, mock_net):
        """One LMDB directory is created per requested blob only."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(1, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        # '%s' in the prefix is filled with the blob name per database
        dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_fixed_dims_%s_lmdb')
        for k in b.keys():
            assert_false(os.path.isdir(dst_prefix % k))
        out = infr.infer_to_lmdb(net, ['x', 'z'], 1, dst_prefix)
        assert_equal(net.forward.call_count, 1)
        assert_list_equal(out, [1, 1])
        for k in b.keys():
            if k in ['x', 'z']:
                assert_true(os.path.isdir(dst_prefix % k))
            else:
                assert_false(os.path.isdir(dst_prefix % k))

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_lmdb_fixed_dims_n(self, mock_net):
        """forward() is invoked exactly n times and n rows are reported."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(1, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        for n in range(1, 10):
            net = mock_net()
            # reset call counters so each iteration counts from zero
            net.reset_mock()
            dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_fixed_dims_n_%s_lmdb')
            out = infr.infer_to_lmdb(net, ['x', 'z'], n, dst_prefix)
            assert_equal(net.forward.call_count, n)
            assert_list_equal(out, [n, n])

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_lmdb_fixed_dims_preserve_batch_no(self, mock_net):
        """infer_to_lmdb flattens batches: n passes x batch of 4 rows."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_fixed_dims_preserve_batch_no_%s_lmdb')
        for k in b.keys():
            assert_false(os.path.isdir(dst_prefix % k))
        n = 3
        out = infr.infer_to_lmdb(net, ['x', 'z'], n, dst_prefix)
        assert_equal(net.forward.call_count, n)
        assert_list_equal(out, [n * 4] * 2)
        for k in b.keys():
            if k in ['x', 'z']:
                assert_true(os.path.isdir(dst_prefix % k))
            else:
                assert_false(os.path.isdir(dst_prefix % k))

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_lmdb_cur_multi_key(self, mock_net):
        """Cursor-based writer with several keys behaves like infer_to_lmdb."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_cur_multi_key_%s_lmdb')
        for k in b.keys():
            assert_false(os.path.isdir(dst_prefix % k))
        n = 3
        out = infr.infer_to_lmdb_cur(net, ['x', 'z'], n, dst_prefix)
        assert_equal(net.forward.call_count, n)
        assert_list_equal(out, [n * 4] * 2)
        for k in b.keys():
            if k in ['x', 'z']:
                assert_true(os.path.isdir(dst_prefix % k))
            else:
                assert_false(os.path.isdir(dst_prefix % k))

    @patch('nideep.eval.inference.caffe.Net')
    def test_infer_to_lmdb_cur_single_key(self, mock_net):
        """Cursor-based writer with a single key creates a single database."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_cur_single_key_%s_lmdb')
        for k in b.keys():
            assert_false(os.path.isdir(dst_prefix % k))
        n = 3
        out = infr.infer_to_lmdb_cur(net, ['z'], n, dst_prefix)
        assert_equal(net.forward.call_count, n)
        assert_list_equal(out, [n * 4])
        for k in b.keys():
            if k in ['z']:
                assert_true(os.path.isdir(dst_prefix % k))
            else:
                assert_false(os.path.isdir(dst_prefix % k))

    @patch('nideep.eval.inference.est_min_num_fwd_passes')
    @patch('nideep.eval.inference.caffe.Net')
    def test_response_to_lmdb(self, mock_net, mock_num):
        """response_to_lmdb runs both TRAIN and TEST phases into LMDBs."""
        # fake minimal test data
        b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
        # mock methods and properties of Net objects
        mock_num.return_value = 3
        mock_net.return_value.forward.return_value = np.zeros(1)
        type(mock_net.return_value).blobs = PropertyMock(return_value=b)
        net = mock_net()
        dst_prefix = os.path.join(self.dir_tmp, 'test_response_to_lmdb_')
        for m in ['train', 'test']:
            for k in b.keys():
                assert_false(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
        import nideep
        out = nideep.eval.inference.response_to_lmdb("net.prototxt",
                                                     "w.caffemodel",
                                                     ['x', 'z'],
                                                     dst_prefix)
        assert_equal(net.forward.call_count, 3 * 2)  # double for both modes
        from caffe import TRAIN, TEST
        # NOTE(review): dict.keys() vs list comparison assumes Python 2.
        assert_list_equal(out.keys(), [TRAIN, TEST])
        assert_list_equal(out[TRAIN], [3 * 4] * 2)
        assert_list_equal(out[TEST], [3 * 4] * 2)
        for m in ['train', 'test']:
            for k in b.keys():
                if k in ['x', 'z']:
                    assert_true(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
                else:
                    assert_false(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This auth module is intended to allow OpenStack client-tools to select from a
variety of authentication strategies, including NoAuth (the default), and
Keystone (an identity management system).
> auth_plugin = AuthPlugin(creds)
> auth_plugin.authenticate()
> auth_plugin.auth_token
abcdefg
> auth_plugin.management_url
http://service_endpoint/
"""
import httplib2
import logging
from oslo_serialization import jsonutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from six.moves import urllib
from glance_store import exceptions
LOG = logging.getLogger(__name__)
class BaseStrategy(object):
    """Common interface for authentication strategies.

    Subclasses populate ``auth_token`` and ``management_url`` during a
    successful ``authenticate()`` call.
    """

    def __init__(self):
        # No token until authenticate() succeeds.
        self.auth_token = None
        # TODO(sirp): Should expose selecting public/internal/admin URL.
        self.management_url = None

    def authenticate(self):
        """Obtain credentials from the auth service; subclass responsibility."""
        raise NotImplementedError

    @property
    def is_authenticated(self):
        """Whether a usable token is held; subclass responsibility."""
        raise NotImplementedError

    @property
    def strategy(self):
        """Short name identifying this strategy; subclass responsibility."""
        raise NotImplementedError
class NoAuthStrategy(BaseStrategy):
    """Strategy used when the service requires no authentication."""

    def authenticate(self):
        """Nothing to do: the service trusts the caller."""

    @property
    def is_authenticated(self):
        # Trivially authenticated; there is no token to acquire.
        return True

    @property
    def strategy(self):
        return 'noauth'
class KeystoneStrategy(BaseStrategy):
    """Authenticate against Keystone, supporting the header-based v1
    protocol and the JSON-body v2.0 protocol.
    """
    # Upper bound on 305-redirect / v1->v2 retry attempts.
    MAX_REDIRECTS = 10

    def __init__(self, creds, insecure=False, configure_via_auth=True):
        # creds: dict with 'username', 'password', 'auth_url', 'strategy'
        # (plus 'tenant' when the auth_url targets v2.0).
        self.creds = creds
        # insecure: disable SSL certificate validation when True.
        self.insecure = insecure
        # configure_via_auth: also extract the management/endpoint URL
        # from the auth response when True.
        self.configure_via_auth = configure_via_auth
        super(KeystoneStrategy, self).__init__()

    def check_auth_params(self):
        """Validate required credential fields.

        Raises MissingCredentialError for absent fields and
        BadAuthStrategy when creds were built for a different strategy.
        """
        # Ensure that supplied credential parameters are as required
        for required in ('username', 'password', 'auth_url',
                         'strategy'):
            if self.creds.get(required) is None:
                raise exceptions.MissingCredentialError(required=required)
        if self.creds['strategy'] != 'keystone':
            raise exceptions.BadAuthStrategy(expected='keystone',
                                             received=self.creds['strategy'])
        # For v2.0 also check tenant is present
        if self.creds['auth_url'].rstrip('/').endswith('v2.0'):
            if self.creds.get("tenant") is None:
                raise exceptions.MissingCredentialError(required='tenant')

    def authenticate(self):
        """Authenticate with the Keystone service.

        There are a few scenarios to consider here:

        1. Which version of Keystone are we using? v1 which uses headers to
        pass the credentials, or v2 which uses a JSON encoded request body?
        2. Keystone may respond back with a redirection using a 305 status
        code.
        3. We may attempt a v1 auth when v2 is what's called for. In this
        case, we rewrite the url to contain /v2.0/ and retry using the v2
        protocol.

        Raises MaxRedirectsExceeded if no attempt succeeds within
        MAX_REDIRECTS tries.
        """
        def _authenticate(auth_url):
            # If OS_AUTH_URL is missing a trailing slash add one
            if not auth_url.endswith('/'):
                auth_url += '/'
            token_url = urllib.parse.urljoin(auth_url, "tokens")
            # 1. Check Keystone version
            is_v2 = auth_url.rstrip('/').endswith('v2.0')
            if is_v2:
                self._v2_auth(token_url)
            else:
                self._v1_auth(token_url)

        self.check_auth_params()
        auth_url = self.creds['auth_url']
        for _ in range(self.MAX_REDIRECTS):
            try:
                _authenticate(auth_url)
            except exceptions.AuthorizationRedirect as e:
                # 2. Keystone may redirect us
                auth_url = e.url
            except exceptions.AuthorizationFailure:
                # 3. In some configurations nova makes redirection to
                # v2.0 keystone endpoint. Also, new location does not
                # contain real endpoint, only hostname and port.
                if 'v2.0' not in auth_url:
                    auth_url = urllib.parse.urljoin(auth_url, 'v2.0/')
            else:
                # If we successfully auth'd, then memorize the correct auth_url
                # for future use.
                self.creds['auth_url'] = auth_url
                break
        else:
            # Guard against a redirection loop
            raise exceptions.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS)

    def _v1_auth(self, token_url):
        """Authenticate using the v1 protocol: credentials in headers."""
        creds = self.creds
        headers = {}
        headers['X-Auth-User'] = creds['username']
        headers['X-Auth-Key'] = creds['password']
        tenant = creds.get('tenant')
        if tenant:
            headers['X-Auth-Tenant'] = tenant
        resp, resp_body = self._do_request(token_url, 'GET', headers=headers)

        def _management_url(self, resp):
            # Return the first management-URL header found; if none is
            # present, re-raise the last KeyError.
            for url_header in ('x-image-management-url',
                               'x-server-management-url',
                               'x-glance'):
                try:
                    return resp[url_header]
                except KeyError as e:
                    not_found = e
            raise not_found

        if resp.status in (200, 204):
            try:
                if self.configure_via_auth:
                    self.management_url = _management_url(self, resp)
                self.auth_token = resp['x-auth-token']
            except KeyError:
                raise exceptions.AuthorizationFailure()
        elif resp.status == 305:
            # Proxy redirect: retry against the advertised location.
            raise exceptions.AuthorizationRedirect(uri=resp['location'])
        elif resp.status == 400:
            raise exceptions.AuthBadRequest(url=token_url)
        elif resp.status == 401:
            raise exceptions.NotAuthenticated()
        elif resp.status == 404:
            raise exceptions.AuthUrlNotFound(url=token_url)
        else:
            # NOTE(review): '_' (i18n) is not imported in the visible module
            # header -- confirm a gettext helper is in scope.
            raise Exception(_('Unexpected response: %s') % resp.status)

    def _v2_auth(self, token_url):
        """Authenticate using the v2.0 protocol: JSON-encoded credentials."""
        creds = self.creds
        creds = {
            "auth": {
                "tenantName": creds['tenant'],
                "passwordCredentials": {
                    "username": creds['username'],
                    "password": creds['password']
                }
            }
        }
        headers = {}
        headers['Content-Type'] = 'application/json'
        req_body = jsonutils.dumps(creds)
        resp, resp_body = self._do_request(
            token_url, 'POST', headers=headers, body=req_body)
        if resp.status == 200:
            resp_auth = jsonutils.loads(resp_body)['access']
            creds_region = self.creds.get('region')
            if self.configure_via_auth:
                # Pick the service endpoint out of the returned catalog.
                endpoint = get_endpoint(resp_auth['serviceCatalog'],
                                        endpoint_region=creds_region)
                self.management_url = endpoint
            self.auth_token = resp_auth['token']['id']
        elif resp.status == 305:
            raise exceptions.RedirectException(resp['location'])
        elif resp.status == 400:
            raise exceptions.AuthBadRequest(url=token_url)
        elif resp.status == 401:
            raise exceptions.NotAuthenticated()
        elif resp.status == 404:
            raise exceptions.AuthUrlNotFound(url=token_url)
        else:
            # NOTE(review): '_' (i18n) is not imported in the visible module
            # header -- confirm a gettext helper is in scope.
            raise Exception(_('Unexpected response: %s') % resp.status)

    @property
    def is_authenticated(self):
        # A token is only set after a successful authenticate().
        return self.auth_token is not None

    @property
    def strategy(self):
        return 'keystone'

    def _do_request(self, url, method, headers=None, body=None):
        """Issue a single HTTP request, returning (response, body)."""
        headers = headers or {}
        conn = httplib2.Http()
        # Report HTTP errors as status codes rather than raising.
        conn.force_exception_to_status_code = True
        conn.disable_ssl_certificate_validation = self.insecure
        headers['User-Agent'] = 'glance-client'
        resp, resp_body = conn.request(url, method, headers=headers, body=body)
        return resp, resp_body
def get_plugin_from_strategy(strategy, creds=None, insecure=False,
                             configure_via_auth=True):
    """Instantiate the auth plugin matching *strategy*.

    Supported strategies are 'noauth' and 'keystone'; anything else
    raises an Exception.
    """
    if strategy == 'noauth':
        return NoAuthStrategy()
    if strategy == 'keystone':
        return KeystoneStrategy(creds, insecure,
                                configure_via_auth=configure_via_auth)
    raise Exception(_("Unknown auth strategy '%s'") % strategy)
def get_endpoint(service_catalog, service_type='image', endpoint_region=None,
                 endpoint_type='publicURL'):
    """
    Select an endpoint from the service catalog

    We search the full service catalog for services
    matching both type and region. If the client
    supplied no region then any 'image' endpoint
    is considered a match. There must be one -- and
    only one -- successful match in the catalog,
    otherwise we will raise an exception.

    :param service_catalog: list of service dicts, each with a 'type'
        and an 'endpoints' list.
    :param service_type: catalog entry type to match (default 'image').
    :param endpoint_region: region to match; None matches any region.
    :param endpoint_type: which URL to return from the matched endpoint.
    :raises exceptions.RegionAmbiguity: more than one endpoint matched.
    :raises exceptions.NoServiceEndpoint: no endpoint matched or the
        matched endpoint lacks *endpoint_type*.
    """
    endpoint = None
    for service in service_catalog:
        s_type = None
        try:
            s_type = service['type']
        except KeyError:
            msg = _('Encountered service with no "type": %s') % s_type
            # Fixed: Logger.warn is a deprecated alias of Logger.warning.
            LOG.warning(msg)
            continue
        if s_type == service_type:
            for ep in service['endpoints']:
                if endpoint_region is None or endpoint_region == ep['region']:
                    if endpoint is not None:
                        # This is a second match, abort
                        exc = exceptions.RegionAmbiguity
                        raise exc(region=endpoint_region)
                    endpoint = ep
    if endpoint and endpoint.get(endpoint_type):
        return endpoint[endpoint_type]
    else:
        raise exceptions.NoServiceEndpoint()
| |
import numpy as np
import operator
from scipy.sparse import (_sparsetools, isspmatrix, isspmatrix_csr,
csr_matrix, coo_matrix, csc_matrix, dia_matrix)
from scipy.sparse.sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, get_index_dtype)
from scipy.sparse.base import spmatrix, isspmatrix, SparseEfficiencyWarning
from warnings import warn
class fast_csr_matrix(csr_matrix):
"""
A subclass of scipy.sparse.csr_matrix that skips the data format
checks that are run everytime a new csr_matrix is created.
"""
def __init__(self, args=None, shape=None, dtype=None, copy=False):
if args is None: #Build zero matrix
if shape is None:
raise Exception('Shape must be given when building zero matrix.')
self.data = np.array([], dtype=complex)
self.indices = np.array([], dtype=np.int32)
self.indptr = np.zeros(shape[0]+1, dtype=np.int32)
self._shape = tuple(int(s) for s in shape)
else:
if args[0].shape[0] and args[0].dtype != complex:
raise TypeError('fast_csr_matrix allows only complex data.')
if args[1].shape[0] and args[1].dtype != np.int32:
raise TypeError('fast_csr_matrix allows only int32 indices.')
if args[2].shape[0] and args[1].dtype != np.int32:
raise TypeError('fast_csr_matrix allows only int32 indptr.')
self.data = np.array(args[0], dtype=complex, copy=copy)
self.indices = np.array(args[1], dtype=np.int32, copy=copy)
self.indptr = np.array(args[2], dtype=np.int32, copy=copy)
if shape is None:
self._shape = tuple([len(self.indptr)-1]*2)
else:
self._shape = tuple(int(s) for s in shape)
self.dtype = complex
self.maxprint = 50
self.format = 'csr'
def _binopt(self, other, op):
"""
Do the binary operation fn to two sparse matrices using
fast_csr_matrix only when other is also a fast_csr_matrix.
"""
# e.g. csr_plus_csr, csr_minus_csr, etc.
if not isinstance(other, fast_csr_matrix):
other = csr_matrix(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
actual_nnz = indptr[-1]
indices = indices[:actual_nnz]
data = data[:actual_nnz]
if actual_nnz < maxnnz // 2:
# too much waste, trim arrays
indices = indices.copy()
data = data.copy()
if isinstance(other, fast_csr_matrix) and (not op in bool_ops):
A = fast_csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)
else:
A = csr_matrix((data, indices, indptr), dtype=data.dtype, shape=self.shape)
return A
def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
if not isinstance(other, fast_csr_matrix):
other = csr_matrix(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1,1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1,1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == other.shape[0] and self.shape[1] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == other.shape[1] and self.shape[0] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Dense matrix.
if isdense(other):
if self.shape == other.shape:
ret = self.tocoo()
ret.data = np.multiply(ret.data, other[ret.row, ret.col]
).view(np.ndarray).ravel()
return ret
# Single element.
elif other.size == 1:
return self._mul_scalar(other.flat[0])
# Anything else.
return np.multiply(self.toarray(), other)
def _mul_sparse_matrix(self, other):
"""
Do the sparse matrix mult returning fast_csr_matrix only
when other is also fast_csr_matrix.
"""
M, _ = self.shape
_, N = other.shape
major_axis = self._swap((M, N))[0]
if isinstance(other, fast_csr_matrix):
A = zcsr_mult(self, other, sorted=1)
return A
other = csr_matrix(other) # convert to this format
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=M*N)
# scipy 1.5 renamed the older csr_matmat_pass1 to the much more
# descriptive csr_matmat_maxnnz, but also changed the call and logic
# structure of constructing the indices.
try:
fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
nnz = fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
except AttributeError:
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
fn = getattr(_sparsetools, self.format + '_matmat_pass1')
fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
indptr)
nnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
try:
fn = getattr(_sparsetools, self.format + '_matmat')
except AttributeError:
fn = getattr(_sparsetools, self.format + '_matmat_pass2')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = csr_matrix((data, indices, indptr), shape=(M, N))
return A
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return csr_matrix(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning)
all_true = _all_true(self.shape)
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.toarray() == other
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning)
#TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other,'_ne_')
all_true = _all_true(self.shape)
return all_true - res
else:
return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is inefficient",
SparseEfficiencyWarning)
all_true = _all_true(self.shape)
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.", SparseEfficiencyWarning)
all_true = _all_true(self.shape)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.toarray() != other
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other,'_ne_')
else:
return True
    def _inequality(self, other, op, op_name, bad_scalar_msg):
        """Shared implementation behind <, <=, >, and >= comparisons.

        :param other: scalar, dense array, or sparse matrix operand.
        :param op: Python operator (e.g. ``operator.lt``).
        :param op_name: matching binopt routine suffix (e.g. ``'_lt_'``).
        :param bad_scalar_msg: warning text used when a scalar comparison
            would produce a mostly-dense result.
        """
        # Scalar other.
        if isscalarlike(other):
            if 0 == other and op_name in ('_le_', '_ge_'):
                # x <= 0 / x >= 0 would be True on every implicit zero.
                raise NotImplementedError(" >= and <= don't work with 0.")
            elif op(0, other):
                # The comparison holds for the implicit zeros, so the result
                # is dense: materialize `other` and use the sparse-sparse path.
                warn(bad_scalar_msg, SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.result_type(other))
                other_arr.fill(other)
                other_arr = csr_matrix(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                return self._scalar_binopt(other, op)
        # Dense other.
        elif isdense(other):
            return op(self.toarray(), other)
        # Sparse other.
        elif isspmatrix(other):
            # TODO sparse broadcasting
            if self.shape != other.shape:
                raise ValueError("inconsistent shapes")
            elif self.format != other.format:
                other = other.asformat(self.format)
            if op_name not in ('_ge_', '_le_'):
                return self._binopt(other, op_name)
            # >= and <= are computed as the complement of the strict
            # opposite comparison (e.g. a <= b  ==  not (a > b)).
            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
            all_true = _all_true(self.shape)
            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
            return all_true - res
        else:
            raise ValueError("Operands could not be compared.")
    def _with_data(self,data,copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data. By default the structure arrays
        (i.e. .indptr and .indices) are copied.
        """
        # We need this just in case something like abs(data) gets called
        # does nothing if data.dtype is complex.
        # NOTE(review): asarray(..., dtype=complex) always up-casts
        # non-complex input; presumably this fast_csr format is
        # complex-only — confirm before relaxing.
        data = np.asarray(data, dtype=complex)
        if copy:
            return fast_csr_matrix((data,self.indices.copy(),self.indptr.copy()),
                                   shape=self.shape,dtype=data.dtype)
        else:
            # Share the structure arrays with self (no copies).
            return fast_csr_matrix((data,self.indices,self.indptr),
                                   shape=self.shape,dtype=data.dtype)
def transpose(self):
"""
Returns the transpose of the matrix, keeping
it in fast_csr format.
"""
return zcsr_transpose(self)
def trans(self):
"""
Same as transpose
"""
return zcsr_transpose(self)
def getH(self):
"""
Returns the conjugate-transpose of the matrix, keeping
it in fast_csr format.
"""
return zcsr_adjoint(self)
def adjoint(self):
"""
Same as getH
"""
return zcsr_adjoint(self)
def csr2fast(A, copy=False):
    """Return ``A`` as a ``fast_csr_matrix``.

    Inputs that are already fast_csr are returned unchanged unless
    ``copy`` is requested; otherwise the CSR arrays are handed to the
    fast_csr_matrix constructor, which performs its own type checking.
    """
    if isinstance(A, fast_csr_matrix) and not copy:
        return A
    return fast_csr_matrix((A.data, A.indices, A.indptr),
                           shape=A.shape, copy=copy)
def fast_identity(N):
    """Generates a sparse identity matrix in
    fast_csr format.

    :param N: dimension of the (N, N) identity matrix.
    """
    data = np.ones(N, dtype=complex)
    ind = np.arange(N, dtype=np.int32)
    # arange(N+1) already ends with N, so no explicit fix-up of the final
    # index-pointer entry is needed (the previous `ptr[-1] = N` was a no-op).
    ptr = np.arange(N+1, dtype=np.int32)
    return fast_csr_matrix((data,ind,ptr),shape=(N,N))
#Convenience functions
#--------------------
def _all_true(shape):
A = csr_matrix((np.ones(np.prod(shape), dtype=np.bool_),
np.tile(np.arange(shape[1],dtype=np.int32),shape[0]),
np.arange(0,np.prod(shape)+1,shape[1],dtype=np.int32)),
shape=shape)
return A
#Need to do some trailing imports here
#-------------------------------------
from qutip.cy.spmath import (zcsr_transpose, zcsr_adjoint, zcsr_mult)
| |
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
from __future__ import unicode_literals
import functools
import re
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.regex_helper import normalize
from django.utils.translation import get_language
from .exceptions import NoReverseMatch, Resolver404
from .utils import get_callable
class ResolverMatch(object):
    """Result of resolving a URL: the view callable plus the positional,
    keyword, and namespace information captured during resolution.
    Supports tuple-style unpacking into (func, args, kwargs).
    """
    def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.url_name = url_name
        # If a URLRegexResolver doesn't have a namespace or app_name, it
        # passes in an empty value — drop those before joining.
        self.app_names = [entry for entry in (app_names or []) if entry]
        self.app_name = ':'.join(self.app_names)
        self.namespaces = [entry for entry in (namespaces or []) if entry]
        self.namespace = ':'.join(self.namespaces)
        if hasattr(func, '__name__'):
            # A function-based view.
            self._func_path = '%s.%s' % (func.__module__, func.__name__)
        else:
            # A class-based view instance.
            self._func_path = '%s.%s' % (func.__class__.__module__, func.__class__.__name__)
        self.view_name = ':'.join(self.namespaces + [url_name or self._func_path])
    def __getitem__(self, index):
        # Legacy tuple behavior: match[0] is the view, etc.
        return (self.func, self.args, self.kwargs)[index]
    def __repr__(self):
        return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
            self._func_path, self.args, self.kwargs, self.url_name,
            self.app_names, self.namespaces,
        )
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
    """Return a (cached) root resolver for ``urlconf``, falling back to
    ``settings.ROOT_URLCONF`` when none is given."""
    if urlconf is not None:
        return RegexURLResolver(r'^/', urlconf)
    # Imported lazily so settings are only touched when actually needed.
    from django.conf import settings
    return RegexURLResolver(r'^/', settings.ROOT_URLCONF)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
    """Build (and cache) a namespaced resolver for the given parent URLconf
    pattern, so captured parameters in the parent pattern keep working."""
    inner_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
    return RegexURLResolver(r'^/', [inner_resolver])
class LocaleRegexProvider(object):
    """
    A mixin to provide a default regex property which can vary by active
    language.
    """
    def __init__(self, regex):
        # Either a plain regular-expression string or a translatable
        # (ugettext_lazy) string that resolves to one.
        self._regex = regex
        # Compiled patterns, cached per language code.
        self._regex_dict = {}
    @property
    def regex(self):
        """Compiled regular expression for the currently active language."""
        language_code = get_language()
        try:
            return self._regex_dict[language_code]
        except KeyError:
            pass
        if isinstance(self._regex, six.string_types):
            pattern = self._regex
        else:
            pattern = force_text(self._regex)
        try:
            compiled = re.compile(pattern, re.UNICODE)
        except re.error as e:
            raise ImproperlyConfigured(
                '"%s" is not a valid regular expression: %s' %
                (pattern, six.text_type(e))
            )
        self._regex_dict[language_code] = compiled
        return compiled
class RegexURLPattern(LocaleRegexProvider):
    """A single URL pattern: a (possibly translatable) regex mapped to a
    view callback, optional extra kwargs, and an optional name."""
    def __init__(self, regex, callback, default_args=None, name=None):
        LocaleRegexProvider.__init__(self, regex)
        self.callback = callback  # the view
        self.default_args = default_args or {}
        self.name = name
    def __repr__(self):
        return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
    def resolve(self, path):
        """Return a ResolverMatch if ``path`` matches this pattern, else None."""
        match = self.regex.search(path)
        if not match:
            return None
        # Named groups win: when any exist they become kwargs and the
        # anonymous groups are dropped; otherwise all groups are passed
        # as positional arguments.
        kwargs = match.groupdict()
        args = match.groups() if not kwargs else ()
        # In both cases the configured extra arguments are merged in.
        kwargs.update(self.default_args)
        return ResolverMatch(self.callback, args, kwargs, self.name)
    @cached_property
    def lookup_str(self):
        """
        A string that identifies the view (e.g. 'path.to.view_function' or
        'path.to.ClassBasedView').
        """
        callback = self.callback
        # Unwrap functools.partial layers. (Python 3.5 collapses nested
        # partials, so this could become a single "if" there.)
        while isinstance(callback, functools.partial):
            callback = callback.func
        if hasattr(callback, '__name__'):
            return '%s.%s' % (callback.__module__, callback.__name__)
        return '%s.%s' % (callback.__module__, callback.__class__.__name__)
class RegexURLResolver(LocaleRegexProvider):
    """
    Resolver for a set of URL patterns (a URLconf).

    ``resolve(path)`` walks the contained patterns and sub-resolvers and
    returns a ResolverMatch; ``_reverse_with_prefix`` performs the inverse
    (URL reversing).  The lookup tables used for reversing are built lazily
    per active language by ``_populate()``.
    """
    def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        LocaleRegexProvider.__init__(self, regex)
        # urlconf_name is the dotted Python path to the module defining
        # urlpatterns. It may also be an object with an urlpatterns attribute
        # or urlpatterns itself.
        self.urlconf_name = urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        self.namespace = namespace
        self.app_name = app_name
        # Per-language caches built by _populate(): reversing lookups,
        # namespace -> (prefix, resolver), and app_name -> namespaces.
        self._reverse_dict = {}
        self._namespace_dict = {}
        self._app_dict = {}
        # set of dotted paths to all functions and classes that are used in
        # urlpatterns
        self._callback_strs = set()
        self._populated = False
    def __repr__(self):
        if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
            # Don't bother to output the whole list, it can be huge
            urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
        else:
            urlconf_repr = repr(self.urlconf_name)
        return str('<%s %s (%s:%s) %s>') % (
            self.__class__.__name__, urlconf_repr, self.app_name,
            self.namespace, self.regex.pattern,
        )
    def _populate(self):
        """Build the reverse/namespace/app lookup tables for the currently
        active language and cache them on self."""
        lookups = MultiValueDict()
        namespaces = {}
        apps = {}
        language_code = get_language()
        for pattern in reversed(self.url_patterns):
            if isinstance(pattern, RegexURLPattern):
                self._callback_strs.add(pattern.lookup_str)
            p_pattern = pattern.regex.pattern
            # Strip the leading anchor so patterns can be concatenated.
            if p_pattern.startswith('^'):
                p_pattern = p_pattern[1:]
            if isinstance(pattern, RegexURLResolver):
                # Nested resolver (an include()).
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)
                    if pattern.app_name:
                        apps.setdefault(pattern.app_name, []).append(pattern.namespace)
                else:
                    # Un-namespaced include: merge its reversing entries
                    # into ours, prefixed with this resolver's pattern.
                    parent_pat = pattern.regex.pattern
                    for name in pattern.reverse_dict:
                        for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                            new_matches = normalize(parent_pat + pat)
                            lookups.appendlist(
                                name,
                                (
                                    new_matches,
                                    p_pattern + pat,
                                    dict(defaults, **pattern.default_kwargs),
                                )
                            )
                    for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)
                self._callback_strs.update(pattern._callback_strs)
            else:
                # Leaf pattern: register it under its callback and, when
                # named, under its name as well.
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
        self._reverse_dict[language_code] = lookups
        self._namespace_dict[language_code] = namespaces
        self._app_dict[language_code] = apps
        self._populated = True
    @property
    def reverse_dict(self):
        """Per-language MultiValueDict of reversing entries (lazily built)."""
        language_code = get_language()
        if language_code not in self._reverse_dict:
            self._populate()
        return self._reverse_dict[language_code]
    @property
    def namespace_dict(self):
        """Per-language mapping of namespace -> (prefix, resolver)."""
        language_code = get_language()
        if language_code not in self._namespace_dict:
            self._populate()
        return self._namespace_dict[language_code]
    @property
    def app_dict(self):
        """Per-language mapping of app_name -> list of namespaces."""
        language_code = get_language()
        if language_code not in self._app_dict:
            self._populate()
        return self._app_dict[language_code]
    def _is_callback(self, name):
        """True if dotted path ``name`` is one of this URLconf's views."""
        if not self._populated:
            self._populate()
        return name in self._callback_strs
    def resolve(self, path):
        """Match ``path`` against this resolver's patterns and return a
        ResolverMatch, or raise Resolver404 (recording what was tried)."""
        path = force_text(path)  # path may be a reverse_lazy object
        tried = []
        match = self.regex.search(path)
        if match:
            # Resolve the remainder of the path against the sub-patterns.
            new_path = path[match.end():]
            for pattern in self.url_patterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404 as e:
                    sub_tried = e.args[0].get('tried')
                    if sub_tried is not None:
                        tried.extend([pattern] + t for t in sub_tried)
                    else:
                        tried.append([pattern])
                else:
                    if sub_match:
                        # Merge captured arguments in match with submatch
                        sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
                        sub_match_dict.update(sub_match.kwargs)
                        # If there are *any* named groups, ignore all non-named groups.
                        # Otherwise, pass all non-named arguments as positional arguments.
                        sub_match_args = sub_match.args
                        if not sub_match_dict:
                            sub_match_args = match.groups() + sub_match.args
                        return ResolverMatch(
                            sub_match.func,
                            sub_match_args,
                            sub_match_dict,
                            sub_match.url_name,
                            [self.app_name] + sub_match.app_names,
                            [self.namespace] + sub_match.namespaces,
                        )
                    tried.append([pattern])
            raise Resolver404({'tried': tried, 'path': new_path})
        raise Resolver404({'path': path})
    @cached_property
    def urlconf_module(self):
        """The URLconf module (imported from a dotted path if necessary)."""
        if isinstance(self.urlconf_name, six.string_types):
            return import_module(self.urlconf_name)
        else:
            return self.urlconf_name
    @cached_property
    def url_patterns(self):
        # urlconf_module might be a valid set of patterns, so we default to it
        patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
        try:
            iter(patterns)
        except TypeError:
            msg = (
                "The included URLconf '{name}' does not appear to have any "
                "patterns in it. If you see valid patterns in the file then "
                "the issue is probably caused by a circular import."
            )
            raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
        return patterns
    def resolve_error_handler(self, view_type):
        """Return (callable, kwargs) for handler<view_type> (e.g. 404, 500),
        falling back to Django's default handlers."""
        callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
        if not callback:
            # No handler specified in file; use lazy import, since
            # django.conf.urls imports this file.
            from django.conf import urls
            callback = getattr(urls, 'handler%s' % view_type)
        return get_callable(callback), {}
    def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
        """Reverse ``lookup_view`` (a URL name or callable) into a URL,
        prefixed with ``_prefix``; raise NoReverseMatch on failure."""
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        text_args = [force_text(v) for v in args]
        text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}
        if not self._populated:
            self._populate()
        possibilities = self.reverse_dict.getlist(lookup_view)
        for possibility, pattern, defaults in possibilities:
            for result, params in possibility:
                if args:
                    # Positional reversing: arity must match exactly.
                    if len(args) != len(params):
                        continue
                    candidate_subs = dict(zip(params, text_args))
                else:
                    # Keyword reversing: supplied + default keys must cover
                    # exactly the pattern's parameters.
                    if (set(kwargs.keys()) | set(defaults.keys()) != set(params) |
                            set(defaults.keys())):
                        continue
                    matches = True
                    for k, v in defaults.items():
                        if kwargs.get(k, v) != v:
                            matches = False
                            break
                    if not matches:
                        continue
                    candidate_subs = text_kwargs
                # WSGI provides decoded URLs, without %xx escapes, and the URL
                # resolver operates on such URLs. First substitute arguments
                # without quoting to build a decoded URL and look for a match.
                # Then, if we have a match, redo the substitution with quoted
                # arguments in order to return a properly encoded URL.
                candidate_pat = _prefix.replace('%', '%%') + result
                if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE):
                    # safe characters from `pchar` definition of RFC 3986
                    url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@'))
                    # Don't allow construction of scheme relative urls.
                    if url.startswith('//'):
                        url = '/%%2F%s' % url[2:]
                    return url
        # lookup_view can be URL name or callable, but callables are not
        # friendly in error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view
        patterns = [pattern for (possibility, pattern, defaults) in possibilities]
        raise NoReverseMatch(
            "Reverse for '%s' with arguments '%s' and keyword "
            "arguments '%s' not found. %d pattern(s) tried: %s" %
            (lookup_view_s, args, kwargs, len(patterns), patterns)
        )
class LocaleRegexURLResolver(RegexURLResolver):
    """
    A URL resolver that always matches the active language code as URL prefix.
    Rather than taking a regex argument, we just override the ``regex``
    function to always return the active language-code as regex.
    """
    def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        super(LocaleRegexURLResolver, self).__init__(
            None, urlconf_name, default_kwargs, app_name, namespace,
        )
    @property
    def regex(self):
        code = get_language()
        if code not in self._regex_dict:
            # e.g. '^en/' — one compiled pattern cached per language.
            self._regex_dict[code] = re.compile('^%s/' % code, re.UNICODE)
        return self._regex_dict[code]
| |
"""
References:
[1] http://stackoverflow.com/questions/866465/sql-order-by-the-in-value-list
answer from "a_horse_with_no_name"
"""
import cPickle
import itertools
import logging
from smqtk.representation import DescriptorIndex
from smqtk.utils.errors import ReadOnlyError
try:
import psycopg2
except ImportError, ex:
logging.getLogger(__name__)\
.warning("Failed to import psycopg2: %s", str(ex))
psycopg2 = None
__author__ = "paul.tunison@kitware.com"
def norm_psql_cmd_string(s):
    """
    Collapse all runs of whitespace (including newlines) in ``s`` into
    single spaces, producing a one-line SQL command string.

    :rtype: str
    """
    tokens = s.split()
    return ' '.join(tokens)
# noinspection SqlNoDataSourceInspection
class PostgresDescriptorIndex (DescriptorIndex):
    """
    DescriptorIndex implementation that stores DescriptorElement references in
    a PostgreSQL database.
    A ``PostgresDescriptorIndex`` effectively controls the entire table. Thus
    a ``clear()`` call will remove everything from the table.
    PostgreSQL version support:
        - 9.4
    Table format:
        <uuid col>    TEXT  NOT NULL
        <element col> BYTEA NOT NULL
        <uuid_col> should be the primary key (we assume unique).
    We require that no column be labeled 'true', since the literal 'true' is
    used as a stand-in column value in some queries (a value-return shortcut).
    """
    SELECT_TMPL = norm_psql_cmd_string("""
        SELECT {col:s}
          FROM {table_name:s}
    """)
    SELECT_LIKE_TMPL = norm_psql_cmd_string("""
        SELECT {element_col:s}
          FROM {table_name:s}
         WHERE {uuid_col:s} like %(uuid_like)s
    """)
    # So we can ensure we get back elements in specified order
    #   - reference [1]
    SELECT_MANY_ORDERED_TMPL = norm_psql_cmd_string("""
        SELECT {table_name:s}.{element_col:s}
          FROM {table_name:s}
          JOIN (
            SELECT *
            FROM unnest(%(uuid_list)s) with ordinality
          ) AS __ordering__ ({uuid_col:s}, {uuid_col:s}_order)
            ON {table_name:s}.{uuid_col:s} = __ordering__.{uuid_col:s}
          ORDER BY __ordering__.{uuid_col:s}_order
    """)
    UPSERT_TMPL = norm_psql_cmd_string("""
        WITH upsert AS (
          UPDATE {table_name:s}
            SET {element_col:s} = %(element_val)s
            WHERE {uuid_col:s} = %(uuid_val)s
            RETURNING *
          )
        INSERT INTO {table_name:s}
          ({uuid_col:s}, {element_col:s})
        SELECT %(uuid_val)s, %(element_val)s
          WHERE NOT EXISTS (SELECT * FROM upsert)
    """)
    DELETE_LIKE_TMPL = norm_psql_cmd_string("""
        DELETE FROM {table_name:s}
         WHERE {uuid_col:s} like %(uuid_like)s
    """)
    # BUGFIX: previously hard-coded "RETURNING uid", which broke
    # remove_many_descriptors() whenever a non-default ``uuid_col`` was
    # configured. Use the configured column name instead.
    DELETE_MANY_TMPL = norm_psql_cmd_string("""
        DELETE FROM {table_name:s}
         WHERE {uuid_col:s} in %(uuid_tuple)s
         RETURNING {uuid_col:s}
    """)
    @classmethod
    def is_usable(cls):
        # Usable only when the optional psycopg2 dependency imported.
        return psycopg2 is not None
    def __init__(self, table_name='descriptor_index', uuid_col='uid',
                 element_col='element',
                 db_name='postgres', db_host=None, db_port=None, db_user=None,
                 db_pass=None, multiquery_batch_size=1000, pickle_protocol=-1,
                 read_only=False):
        """
        Initialize index instance.
        :param table_name: Name of the table to use.
        :type table_name: str
        :param uuid_col: Name of the column containing the UUID signatures.
        :type uuid_col: str
        :param element_col: Name of the table column containing the pickled
            descriptor elements.
        :type element_col: str
        :param db_name: The name of the database to connect to.
        :type db_name: str
        :param db_host: Host address of the Postgres server. If None, we
            assume the server is on the local machine and use the UNIX socket.
            This might be a required field on Windows machines (not tested yet).
        :type db_host: str | None
        :param db_port: Port the Postgres server is exposed on. If None, we
            assume the default port (5423).
        :type db_port: int | None
        :param db_user: Postgres user to connect as. If None, postgres
            defaults to using the current accessing user account name on the
            operating system.
        :type db_user: str | None
        :param db_pass: Password for the user we're connecting as. This may be
            None if no password is to be used.
        :type db_pass: str | None
        :param multiquery_batch_size: For queries that handle sending or
            receiving many queries at a time, batch queries based on this size.
            If this is None, then no batching occurs.
            The advantage of batching is that it reduces the memory impact for
            queries dealing with a very large number of elements (don't have to
            store the full query for all elements in RAM), but the transaction
            will be some amount slower due to splitting the query into multiple
            transactions.
        :type multiquery_batch_size: int | None
        :param pickle_protocol: Pickling protocol to use. We will use -1 by
            default (latest version, probably binary).
        :type pickle_protocol: int
        :param read_only: Only allow read actions against this index.
            Modification actions will throw a ReadOnlyError exceptions.
        :type read_only: bool
        """
        super(PostgresDescriptorIndex, self).__init__()
        self.table_name = table_name
        self.uuid_col = uuid_col
        self.element_col = element_col
        self.db_name = db_name
        self.db_host = db_host
        self.db_port = db_port
        self.db_user = db_user
        self.db_pass = db_pass
        self.multiquery_batch_size = multiquery_batch_size
        self.pickle_protocol = pickle_protocol
        self.read_only = bool(read_only)
        # Checking parameters where necessary
        if self.multiquery_batch_size is not None:
            self.multiquery_batch_size = int(self.multiquery_batch_size)
            assert self.multiquery_batch_size > 0, \
                "A given batch size must be greater than 0 in size " \
                "(given: %d)." % self.multiquery_batch_size
        # Python 2's cPickle supports protocols -1..2.
        assert -1 <= self.pickle_protocol <= 2, \
            ("Given pickle protocol is not in the known valid range. Given: %s"
             % self.pickle_protocol)
    def get_config(self):
        # Configuration mirror of __init__ keyword arguments.
        return {
            "table_name": self.table_name,
            "uuid_col": self.uuid_col,
            "element_col": self.element_col,
            "db_name": self.db_name,
            "db_host": self.db_host,
            "db_port": self.db_port,
            "db_user": self.db_user,
            "db_pass": self.db_pass,
            "multiquery_batch_size": self.multiquery_batch_size,
            "pickle_protocol": self.pickle_protocol,
            "read_only": self.read_only,
        }
    def _get_psql_connection(self):
        """
        :return: A new connection to the configured database
        :rtype: psycopg2._psycopg.connection
        """
        return psycopg2.connect(
            database=self.db_name,
            user=self.db_user,
            password=self.db_pass,
            host=self.db_host,
            port=self.db_port,
        )
    def _single_execute(self, execute_hook, yield_result_rows=False):
        """
        Perform a single execution in a new connection transaction. Handles
        connection/cursor acquisition and handling.
        :param execute_hook: Function controlling execution on a cursor. Takes
            the active cursor.
        :type execute_hook: (psycopg2._psycopg.cursor) -> None
        :param yield_result_rows: Optionally yield rows from each batch
            execution. False by default.
        :type yield_result_rows: bool
        :return: Iterator over result rows if ``yield_result_rows`` is True,
            otherwise None.
        :rtype: __generator | None
        """
        conn = self._get_psql_connection()
        try:
            with conn:
                with conn.cursor() as cur:
                    execute_hook(cur)
                    if yield_result_rows:
                        for r in cur:
                            yield r
        finally:
            # conn.__exit__ doesn't close connection, just the transaction
            conn.close()
    def _batch_execute(self, iterable, execute_hook,
                       yield_result_rows=False):
        """
        Due to this method optionally yielding values, calling this returns a
        generator. This must be iterated over for anything to occur even if
        nothing is to be actively yielded.
        :param iterable: Iterable of elements to batch
        :type iterable: collections.Iterable
        :param execute_hook: Function controlling execution on a cursor for
            a collected batch of elements. Takes the active cursor and a
            sequence of the current batch of elements.
        :type execute_hook: (psycopg2._psycopg.cursor, list) -> None
        :param yield_result_rows: Optionally yield rows from each batch
            execution. False by default.
        :type yield_result_rows: bool
        :return: Iterator over result rows if ``yield_result_rows`` is True,
            otherwise None.
        :rtype: __generator | None
        """
        self._log.debug("starting multi operation (batching: %s)",
                        self.multiquery_batch_size)
        # Lazy initialize -- only if there are elements to iterate over
        #: :type: None | psycopg2._psycopg.connection
        conn = None
        try:
            batch = []
            i = 0
            for e in iterable:
                if conn is None:
                    conn = self._get_psql_connection()
                batch.append(e)
                if self.multiquery_batch_size and \
                        len(batch) >= self.multiquery_batch_size:
                    i += 1
                    self._log.debug('-- batch %d (size: %d)', i, len(batch))
                    with conn:
                        with conn.cursor() as cur:
                            execute_hook(cur, batch)
                            if yield_result_rows:
                                for r in cur:
                                    yield r
                    batch = []
            # Flush any remaining partial batch.
            if batch:
                self._log.debug('-- tail batch (size: %d)', len(batch))
                with conn:
                    with conn.cursor() as cur:
                        execute_hook(cur, batch)
                        if yield_result_rows:
                            for r in cur:
                                yield r
        finally:
            # conn.__exit__ doesn't close connection, just the transaction
            if conn is not None:
                conn.close()
        self._log.debug('-- done')
    def count(self):
        """
        :return: Number of descriptor elements stored in this index.
        :rtype: int | long
        """
        # Reuse the LIKE template with a count(*) "column" and a match-all
        # pattern to count every row.
        q = self.SELECT_LIKE_TMPL.format(
            element_col='count(*)',
            table_name=self.table_name,
            uuid_col=self.uuid_col,
        )
        def exec_hook(cur):
            cur.execute(q, {'uuid_like': '%'})
        # There's only going to be one row returned with one element in it
        return list(self._single_execute(exec_hook, True))[0][0]
    def clear(self):
        """
        Clear this descriptor index's entries.
        """
        if self.read_only:
            raise ReadOnlyError("Cannot clear a read-only index.")
        q = self.DELETE_LIKE_TMPL.format(
            table_name=self.table_name,
            uuid_col=self.uuid_col,
        )
        def exec_hook(cur):
            cur.execute(q, {'uuid_like': '%'})
        # Drive the generator so the statement actually executes.
        list(self._single_execute(exec_hook))
    def has_descriptor(self, uuid):
        """
        Check if a DescriptorElement with the given UUID exists in this index.
        :param uuid: UUID to query for
        :type uuid: collections.Hashable
        :return: True if a DescriptorElement with the given UUID exists in this
            index, or False if not.
        :rtype: bool
        """
        q = self.SELECT_LIKE_TMPL.format(
            # hacking return value to something simple
            element_col='true',
            table_name=self.table_name,
            uuid_col=self.uuid_col,
        )
        def exec_hook(cur):
            cur.execute(q, {'uuid_like': str(uuid)})
        # Should either yield one or zero rows
        return bool(list(self._single_execute(exec_hook, True)))
    def add_descriptor(self, descriptor):
        """
        Add a descriptor to this index.
        Adding the same descriptor multiple times should not add multiple copies
        of the descriptor in the index (based on UUID). Added descriptors
        overwrite indexed descriptors based on UUID.
        :param descriptor: Descriptor to index.
        :type descriptor: smqtk.representation.DescriptorElement
        """
        if self.read_only:
            raise ReadOnlyError("Cannot clear a read-only index.")
        q = self.UPSERT_TMPL.format(
            table_name=self.table_name,
            uuid_col=self.uuid_col,
            element_col=self.element_col,
        )
        v = {
            'uuid_val': str(descriptor.uuid()),
            'element_val': psycopg2.Binary(
                cPickle.dumps(descriptor, self.pickle_protocol)
            )
        }
        def exec_hook(cur):
            cur.execute(q, v)
        list(self._single_execute(exec_hook))
    def add_many_descriptors(self, descriptors):
        """
        Add multiple descriptors at one time.
        Adding the same descriptor multiple times should not add multiple copies
        of the descriptor in the index (based on UUID). Added descriptors
        overwrite indexed descriptors based on UUID.
        :param descriptors: Iterable of descriptor instances to add to this
            index.
        :type descriptors:
            collections.Iterable[smqtk.representation.DescriptorElement]
        """
        if self.read_only:
            raise ReadOnlyError("Cannot clear a read-only index.")
        q = self.UPSERT_TMPL.format(
            table_name=self.table_name,
            uuid_col=self.uuid_col,
            element_col=self.element_col,
        )
        # Transform input elements into executemany parameter dictionaries.
        def iter_elements():
            for d in descriptors:
                yield {
                    'uuid_val': str(d.uuid()),
                    'element_val': psycopg2.Binary(
                        cPickle.dumps(d, self.pickle_protocol)
                    )
                }
        def exec_hook(cur, batch):
            cur.executemany(q, batch)
        self._log.debug("Adding many descriptors")
        list(self._batch_execute(iter_elements(), exec_hook))
    def get_descriptor(self, uuid):
        """
        Get the descriptor in this index that is associated with the given UUID.
        :param uuid: UUID of the DescriptorElement to get.
        :type uuid: collections.Hashable
        :raises KeyError: The given UUID doesn't associate to a
            DescriptorElement in this index.
        :return: DescriptorElement associated with the queried UUID.
        :rtype: smqtk.representation.DescriptorElement
        """
        q = self.SELECT_LIKE_TMPL.format(
            element_col=self.element_col,
            table_name=self.table_name,
            uuid_col=self.uuid_col,
        )
        v = {'uuid_like': str(uuid)}
        def eh(c):
            c.execute(q, v)
            if c.rowcount == 0:
                raise KeyError(uuid)
            elif c.rowcount != 1:
                raise RuntimeError("Found more than one entry for the given "
                                   "uuid '%s' (got: %d)"
                                   % (uuid, c.rowcount))
        r = list(self._single_execute(eh, True))
        return cPickle.loads(str(r[0][0]))
    def get_many_descriptors(self, uuids):
        """
        Get an iterator over descriptors associated to given descriptor UUIDs.
        :param uuids: Iterable of descriptor UUIDs to query for.
        :type uuids: collections.Iterable[collections.Hashable]
        :raises KeyError: A given UUID doesn't associate with a
            DescriptorElement in this index.
        :return: Iterator of descriptors associated to given uuid values.
        :rtype: __generator[smqtk.representation.DescriptorElement]
        """
        q = self.SELECT_MANY_ORDERED_TMPL.format(
            table_name=self.table_name,
            element_col=self.element_col,
            uuid_col=self.uuid_col,
        )
        # Cache UUIDs received in order so we can check when we miss one in
        # order to raise a KeyError.
        uuid_order = []
        def iterelems():
            for uid in uuids:
                uuid_order.append(uid)
                yield str(uid)
        def exec_hook(cur, batch):
            v = {'uuid_list': batch}
            # self._log.debug('query: %s', cur.mogrify(q, v))
            cur.execute(q, v)
        self._log.debug("Getting many descriptors")
        # The SELECT_MANY_ORDERED_TMPL query ensures that elements returned are
        # in the UUID order given to this method. Thus, if the iterated UUIDs
        # and iterated return rows do not exactly line up, the query join
        # failed to match a query UUID to something in the database.
        #   - We also check that the number of rows we got back is the same
        #     as elements yielded, else there were trailing UUIDs that did not
        #     match anything in the database.
        g = self._batch_execute(iterelems(), exec_hook, True)
        i = 0
        for r, expected_uuid in itertools.izip(g, uuid_order):
            d = cPickle.loads(str(r[0]))
            if d.uuid() != expected_uuid:
                raise KeyError(expected_uuid)
            yield d
            i += 1
        if len(uuid_order) != i:
            # just report the first one that's bad
            raise KeyError(uuid_order[i])
    def remove_descriptor(self, uuid):
        """
        Remove a descriptor from this index by the given UUID.
        :param uuid: UUID of the DescriptorElement to remove.
        :type uuid: collections.Hashable
        :raises KeyError: The given UUID doesn't associate to a
            DescriptorElement in this index.
        """
        if self.read_only:
            raise ReadOnlyError("Cannot clear a read-only index.")
        q = self.DELETE_LIKE_TMPL.format(
            table_name=self.table_name,
            uuid_col=self.uuid_col,
        )
        v = {'uuid_like': str(uuid)}
        def execute(c):
            c.execute(q, v)
            # Nothing deleted if rowcount == 0
            # (otherwise 1 when deleted a thing)
            if c.rowcount == 0:
                raise KeyError(uuid)
        list(self._single_execute(execute))
    def remove_many_descriptors(self, uuids):
        """
        Remove descriptors associated to given descriptor UUIDs from this index.
        :param uuids: Iterable of descriptor UUIDs to remove.
        :type uuids: collections.Iterable[collections.Hashable]
        :raises KeyError: A given UUID doesn't associate with a
            DescriptorElement in this index.
        """
        if self.read_only:
            raise ReadOnlyError("Cannot clear a read-only index.")
        q = self.DELETE_MANY_TMPL.format(
            table_name=self.table_name,
            uuid_col=self.uuid_col,
        )
        str_uuid_set = set(str(uid) for uid in uuids)
        v = {'uuid_tuple': tuple(str_uuid_set)}
        def execute(c):
            c.execute(q, v)
            # Check query UUIDs against rows that would actually be deleted.
            deleted_uuid_set = set(r[0] for r in c.fetchall())
            for uid in str_uuid_set:
                if uid not in deleted_uuid_set:
                    raise KeyError(uid)
        list(self._single_execute(execute))
    def iterkeys(self):
        """
        Return an iterator over indexed descriptor keys, which are their UUIDs.
        :rtype: collections.Iterator[collections.Hashable]
        """
        # Getting UUID through the element because the UUID might not be a
        # string type, and the true type is encoded with the DescriptorElement
        # instance.
        for d in self.iterdescriptors():
            yield d.uuid()
    def iterdescriptors(self):
        """
        Return an iterator over indexed descriptor element instances.
        :rtype: collections.Iterator[smqtk.representation.DescriptorElement]
        """
        def execute(c):
            c.execute(self.SELECT_TMPL.format(
                col=self.element_col,
                table_name=self.table_name
            ))
        for r in self._single_execute(execute, True):
            d = cPickle.loads(str(r[0]))
            yield d
    def iteritems(self):
        """
        Return an iterator over indexed descriptor key and instance pairs.
        :rtype: collections.Iterator[(collections.Hashable,
                                      smqtk.representation.DescriptorElement)]
        """
        for d in self.iterdescriptors():
            yield d.uuid(), d
| |
# Copyright (c) 2014 The Bitcoin Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-sequencerpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-sequencerpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from sequencerpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    # Offset the base P2P port by the node index plus a PID-derived value so
    # concurrent test runs on the same host don't collide.
    base = 11000 + os.getpid() % 999
    return base + n
def rpc_port(n):
    # RPC ports live in the 12000 range, offset the same way as p2p_port.
    base = 12000 + os.getpid() % 999
    return base + n
def check_json_precision():
    """Make sure json library being used does not lose precision converting
    SEQUENCE values (satoshi-level digits must survive a float round-trip)."""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # All nodes agree on height -> done.
        if all(h == heights[0] for h in heights):
            return
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        matches = 1
        for conn in rpc_connections[1:]:
            if set(conn.getrawmempool()) == reference:
                matches += 1
        # Every pool matched node 0's -> synced.
        if matches == len(rpc_connections):
            break
        time.sleep(1)
sequenced_processes = {}
def initialize_datadir(dirname, n):
    """Create (if needed) the datadir for node *n* under *dirname*, write a
    regtest sequence.conf with that node's ports, and return the path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
    ]
    with open(os.path.join(datadir, "sequence.conf"), 'w') as conf:
        conf.write("\n".join(conf_lines) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    sequenced and sequence-cli must be in search path.
    """
    # Generate the chain once into ./cache; subsequent runs just copy the
    # cached datadirs into test_dir, which is far faster than re-mining.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run sequenceds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("SEQUENCED", "sequenced"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Later nodes connect to node 0 to form a line topology.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            sequenced_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC interface is up.
            subprocess.check_call([ os.getenv("SEQUENCECLI", "sequence-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_sequenceds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the (possibly just-built) cache into the test directory.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in sequence.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_id in range(num_nodes):
        initialize_datadir(test_dir, node_id)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a sequenced and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    args = [ os.getenv("SEQUENCED", "sequenced"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    # Track the process so stop_node()/wait_sequenceds() can reap it later.
    sequenced_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    # -rpcwait blocks until the daemon's RPC interface is ready to serve.
    subprocess.check_call([ os.getenv("SEQUENCECLI", "sequence-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """
    Start multiple sequenceds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost)
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Ask node *i* to stop via RPC, wait for its process to exit, and stop
    tracking it."""
    proc = sequenced_processes[i]
    node.stop()
    proc.wait()
    del sequenced_processes[i]
def stop_nodes(nodes):
    """Ask every node to stop, then empty the list in place (dropping the
    proxies closes their connections as a side effect)."""
    for node in nodes:
        node.stop()
    del nodes[:]
def set_node_times(nodes, t):
    """Set the mock time on every node to *t* (unix timestamp)."""
    for conn in nodes:
        conn.setmocktime(t)
def wait_sequenceds():
    """Wait for every tracked sequenced process to cleanly exit, then
    forget all of them."""
    for proc in sequenced_processes.values():
        proc.wait()
    sequenced_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-shot connect *from_connection* to local node *node_num* and block
    until the version handshake completes."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    # Connect nodes a and b to each other in both directions.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for idx, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return idx
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >= 0)
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    inputs = []
    total_in = Decimal("0.00000000")
    # Pop shuffled utxos until we have enough value (or run out).
    while total_in < amount_needed and len(candidates) > 0:
        txout = candidates.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"],
                       "vout": txout["vout"],
                       "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[extra_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Gather amount + 2*fee so both the self-send and the final send are funded.
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    # Locate the output paying back to ourselves so we can spend it next.
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    txid, txhex = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransaction(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless thing1 == thing2."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if not thing1 > thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Raise AssertionError unless fun(*args, **kwds) raises *exc*."""
    try:
        fun(*args, **kwds)
    except exc:
        # Expected exception type observed: success.
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
| |
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
from pandas.tests.apply.common import frame_transform_kernels
from pandas.tests.frame.common import zip_frames
def unpack_obj(obj, klass, axis):
    """
    Helper to ensure we have the right type of object for a test parametrized
    over frame_or_series.
    """
    if klass is DataFrame:
        return obj
    extracted = obj["A"]
    if axis != 0:
        pytest.skip(f"Test is only for DataFrame with axis={axis}")
    return extracted
def test_transform_ufunc(axis, float_frame, frame_or_series):
    """Transform with a raw numpy ufunc matches applying the ufunc directly."""
    # GH 35964
    obj = unpack_obj(float_frame, frame_or_series, axis)
    with np.errstate(all="ignore"):
        f_sqrt = np.sqrt(obj)
    # ufunc
    result = obj.transform(np.sqrt, axis=axis)
    expected = f_sqrt
    tm.assert_equal(result, expected)
@pytest.mark.parametrize(
    "ops, names",
    [
        ([np.sqrt], ["sqrt"]),
        ([np.abs, np.sqrt], ["absolute", "sqrt"]),
        (np.array([np.sqrt]), ["sqrt"]),
        (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
    ],
)
def test_transform_listlike(axis, float_frame, ops, names):
    """A list-like of transforms yields a MultiIndex of (label, func name)."""
    # GH 35964
    other_axis = 1 if axis in {0, "index"} else 0
    with np.errstate(all="ignore"):
        expected = zip_frames([op(float_frame) for op in ops], axis=other_axis)
    # The func-name level is attached on the axis the transform ran over.
    if axis in {0, "index"}:
        expected.columns = MultiIndex.from_product([float_frame.columns, names])
    else:
        expected.index = MultiIndex.from_product([float_frame.index, names])
    result = float_frame.transform(ops, axis=axis)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ops", [[], np.array([])])
def test_transform_empty_listlike(float_frame, ops, frame_or_series):
    """An empty list-like of transforms raises a clear ValueError."""
    obj = unpack_obj(float_frame, frame_or_series, 0)
    with pytest.raises(ValueError, match="No transform functions were provided"):
        obj.transform(ops)
@pytest.mark.parametrize("box", [dict, Series])
def test_transform_dictlike(axis, float_frame, box):
    """Dict-like of transforms selects only the named label on each axis."""
    # GH 35964
    if axis == 0 or axis == "index":
        e = float_frame.columns[0]
        expected = float_frame[[e]].transform(np.abs)
    else:
        e = float_frame.index[0]
        expected = float_frame.iloc[[0]].transform(np.abs)
    result = float_frame.transform(box({e: np.abs}), axis=axis)
    tm.assert_frame_equal(result, expected)
def test_transform_dictlike_mixed():
    """Dict values may mix lists and scalars; result columns are a MultiIndex."""
    # GH 40018 - mix of lists and non-lists in values of a dictionary
    df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]})
    result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"})
    expected = DataFrame(
        [[1.0, 1, 1.0], [2.0, 4, 2.0]],
        columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]),
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "ops",
    [
        {},
        {"A": []},
        {"A": [], "B": "cumsum"},
        {"A": "cumsum", "B": []},
        {"A": [], "B": ["cumsum"]},
        {"A": ["cumsum"], "B": []},
    ],
)
def test_transform_empty_dictlike(float_frame, ops, frame_or_series):
    """Dict-likes with no effective transforms raise a clear ValueError."""
    obj = unpack_obj(float_frame, frame_or_series, 0)
    with pytest.raises(ValueError, match="No transform functions were provided"):
        obj.transform(ops)
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_udf(axis, float_frame, use_apply, frame_or_series):
    """A user-defined function transforms correctly via both code paths."""
    # GH 35964
    obj = unpack_obj(float_frame, frame_or_series, axis)

    # transform uses UDF either via apply or passing the entire DataFrame
    def func(x):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fallback
            raise ValueError
        return x + 1

    result = obj.transform(func, axis=axis)
    expected = obj + 1
    tm.assert_equal(result, expected)
# Kernels that succeed on any dtype; the remainder are expected to raise on
# an all-object frame and feed the bad-dtype / partial-failure tests below.
wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"]
frame_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail]
@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1])
def test_transform_bad_dtype(op, frame_or_series, request):
    """Transform kernels raise TypeError on an unsupported (object) dtype."""
    # GH 35964
    if op == "rank":
        request.node.add_marker(
            pytest.mark.xfail(
                raises=ValueError, reason="GH 40418: rank does not raise a TypeError"
            )
        )

    obj = DataFrame({"A": 3 * [object]})  # DataFrame that will fail on most transforms
    if frame_or_series is not DataFrame:
        obj = obj["A"]

    # tshift is deprecated
    warn = None if op != "tshift" else FutureWarning
    with tm.assert_produces_warning(warn):
        with pytest.raises(TypeError, match="unsupported operand|not supported"):
            obj.transform(op)
        with pytest.raises(TypeError, match="Transform function failed"):
            obj.transform([op])
        with pytest.raises(TypeError, match="Transform function failed"):
            obj.transform({"A": op})
        with pytest.raises(TypeError, match="Transform function failed"):
            obj.transform({"A": [op]})
@pytest.mark.parametrize("op", frame_kernels_raise)
def test_transform_partial_failure_typeerror(op):
    """When one column fails with TypeError, the rest still transform and a
    FutureWarning names the failed label."""
    # GH 35964
    if op == "rank":
        pytest.skip("GH 40418: rank does not raise a TypeError")

    # Using object makes most transform kernels fail
    df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]})

    expected = df[["B"]].transform([op])
    match = r"\['A'\] did not transform successfully"
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform([op])
    tm.assert_equal(result, expected)

    expected = df[["B"]].transform({"B": op})
    match = r"\['A'\] did not transform successfully"
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": op, "B": op})
    tm.assert_equal(result, expected)

    expected = df[["B"]].transform({"B": [op]})
    match = r"\['A'\] did not transform successfully"
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": [op], "B": [op]})
    tm.assert_equal(result, expected)

    # Per-function partial failure within a single label's list of ops.
    expected = df.transform({"A": ["shift"], "B": [op]})
    match = rf"\['{op}'\] did not transform successfully"
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": [op, "shift"], "B": [op]})
    tm.assert_equal(result, expected)
def test_transform_partial_failure_valueerror():
    """Same partial-failure semantics when the UDF raises ValueError."""
    # GH 40211
    match = ".*did not transform successfully"

    def op(x):
        # Fails on column A (small sums), succeeds on column B.
        if np.sum(np.sum(x)) < 10:
            raise ValueError
        return x

    df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]})

    expected = df[["B"]].transform([op])
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform([op])
    tm.assert_equal(result, expected)

    expected = df[["B"]].transform({"B": op})
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": op, "B": op})
    tm.assert_equal(result, expected)

    expected = df[["B"]].transform({"B": [op]})
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": [op], "B": [op]})
    tm.assert_equal(result, expected)

    expected = df.transform({"A": ["shift"], "B": [op]})
    with tm.assert_produces_warning(FutureWarning, match=match):
        result = df.transform({"A": [op, "shift"], "B": [op]})
    tm.assert_equal(result, expected)
@pytest.mark.parametrize("use_apply", [True, False])
def test_transform_passes_args(use_apply, frame_or_series):
    """Positional and keyword args are forwarded to the UDF on both paths."""
    # GH 35964
    # transform uses UDF either via apply or passing the entire DataFrame
    expected_args = [1, 2]
    expected_kwargs = {"c": 3}

    def f(x, a, b, c):
        # transform is using apply iff x is not a DataFrame
        if use_apply == isinstance(x, frame_or_series):
            # Force transform to fallback
            raise ValueError
        assert [a, b] == expected_args
        assert c == expected_kwargs["c"]
        return x

    frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs)
def test_transform_empty_dataframe():
    """Transforming an empty frame/series returns an equally empty result."""
    # https://github.com/pandas-dev/pandas/issues/39636
    empty = DataFrame([], columns=["col1", "col2"])
    frame_result = empty.transform(lambda col: col + 10)
    tm.assert_frame_equal(frame_result, empty)
    series_result = empty["col1"].transform(lambda col: col + 10)
    tm.assert_series_equal(series_result, empty["col1"])
| |
# Copyright (c) 2015, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
from mcrouter.test.mock_servers import DeadServer
from mcrouter.test.mock_servers import SleepServer
def randstring(n):
    """Return a random string of *n* lowercase hex characters."""
    hex_digits = "0123456789abcdef"
    return "".join(random.choice(hex_digits) for _ in range(n))
class TestMcrouterSanity(McrouterTestCase):
    """End-to-end ascii-protocol sanity suite: a real mcrouter routing to a
    pool of Memcached instances plus deliberately dead/sleeping servers to
    exercise failover behavior.

    NOTE: this module is Python 2 (dict.iteritems() is used below).
    """
    config = './mcrouter/test/test_ascii.json'

    def setUp(self):
        # Backing memcached pool; 11540 is the "gutter" failover target and
        # 11555 is a server that accepts connections but never responds.
        mc_ports = [
            11510, 11511, 11512, 11513,
            11520, 11521, 11522, 11523,
            11530, 11531, 11532, 11533,
            11541]
        mc_gut_port = 11540
        tmo_port = 11555
        # have to do these before starting mcrouter
        self.mcs = [self.add_server(Memcached(), logical_port=port)
                    for port in mc_ports]
        self.mc_gut = self.add_server(Memcached(), logical_port=mc_gut_port)
        self.mcs.append(self.mc_gut)
        self.add_server(SleepServer(), logical_port=tmo_port)
        for port in [65532, 65522]:
            self.add_server(DeadServer(), logical_port=port)
        self.mcrouter = self.add_mcrouter(self.config)

    def data(self, n):
        """ generate n random (key, value) pairs """
        # Prefixes select different routes in the test config.
        prefixes = ['foo:', 'bar:', 'baz:', 'wc:', 'lat:']
        keys = [random.choice(prefixes) + randstring(random.randint(3, 10))
                for i in range(n)]
        keys = list(set(keys))
        vals = [randstring(random.randint(3, 10)) for i in range(len(keys))]
        return zip(keys, vals)

    def test_basic(self):
        """ basic test that we get back what we put in """
        for k, v in self.data(100):
            self.mcrouter.set(k, v)
            got = self.mcrouter.get(k)
            self.assertEqual(v, got)

    def test_getset(self):
        """
        This test extends on the idea of test_basic() by doing a bunch more
        in mcrouter
        """
        data = self.data(5000)
        for k, v in data:
            self.mcrouter.set(k, v)
            got = self.mcrouter.get(k)
            self.assertEqual(v, got)

    def test_settouch(self):
        """
        This test extends on test_basic by doing a bunch more in mcrouter
        """
        data = self.data(5000)
        for k, v in data:
            self.mcrouter.set(k, v)
            self.assertEqual(self.mcrouter.touch(k, 100), "TOUCHED")
            got = self.mcrouter.get(k)
            self.assertEqual(v, got)

    def test_append_prepend(self):
        """
        This tests basic correctness of append/prepend.
        """
        k = "key"
        v = "value"
        suffix = "suffix"
        prefix = "prefix"
        # Non-existent key
        self.assertEqual(self.mcrouter.append(k, v), "NOT_STORED")
        self.assertEqual(self.mcrouter.prepend(k, v), "NOT_STORED")
        # Successful append/prepend
        self.mcrouter.set(k, v)
        self.assertEqual(self.mcrouter.get(k), v)
        self.assertEqual(self.mcrouter.append(k, suffix), "STORED")
        self.assertEqual(self.mcrouter.prepend(k, prefix), "STORED")
        self.assertEqual(self.mcrouter.get(k), prefix + v + suffix)

    def test_ops(self):
        # Exercise the full ascii command set: set/get, multiget,
        # add/replace/delete, append/prepend and incr/decr arithmetic.
        mcr = self.mcrouter
        n = 100
        data = self.data(n)
        for k, v in data:
            # set
            self.assertTrue(mcr.set(k, v))
            self.assertEqual(mcr.get(k), v)
        # multiget
        for i in range(2, n, 5):
            res = mcr.get([k for k, v in data[0:i]])
            self.assertEqual(len(res), i)
            for k, v in data[0:i]:
                self.assertEqual(mcr.get(k), v)
        for k, v in data:
            # add, replace
            v2 = randstring(random.randint(3, 10))
            self.assertFalse(mcr.add(k, v2))
            self.assertTrue(mcr.replace(k, v2))
            self.assertEqual(mcr.get(k), v2)
            # delete
            mcr.delete(k)
            self.assertIsNone(mcr.get(k))
            # add, replace
            self.assertFalse(mcr.replace(k, v))
            self.assertTrue(mcr.add(k, v))
            self.assertEqual(mcr.get(k), v)
            # append, prepend
            self.assertEqual(mcr.append(k, v2), "STORED")
            self.assertEqual(mcr.get(k), v + v2)
            self.assertEqual(mcr.prepend(k, v2), "STORED")
            self.assertEqual(mcr.get(k), v2 + v + v2)
            # arith
            i = 42
            mcr.set(k, i)
            self.assertEqual(mcr.incr(k), i + 1)
            self.assertEqual(mcr.get(k), str(i + 1))
            self.assertEqual(mcr.decr(k), i)
            self.assertEqual(mcr.get(k), str(i))
            self.assertEqual(mcr.incr(k, 4), i + 4)
            self.assertEqual(mcr.get(k), str(i + 4))
            self.assertEqual(mcr.decr(k, 4), i)
            self.assertEqual(mcr.get(k), str(i))

    def test_metaroute(self):
        """ get the route and verify if we actually use it """
        mcr = self.mcrouter
        mcds = dict([(mcd.addr[1], mcd) for mcd in self.mcs])
        for k, v in self.data(100):
            d2 = mcr.get("__mcrouter__.route(set,%s)" % k).split("\r\n")[0]
            p2 = int(d2.split(":")[1])  # host:port:protocol
            # verify that we actually use that route
            mcr.set(k, v)
            got = mcds[p2].get(k)
            self.assertEqual(got, v)

    def test_metaconfig(self):
        """ test __mcrouter__.config_file """
        mcr = self.mcrouter
        self.assertTrue(mcr.get("__mcrouter__.config_file"))
        self.assertTrue(mcr.get("__mcrouter__.config_md5_digest"))

    def test_down(self):
        """
        Test responses for down server. Ideally we'd verify that we're retrying
        when we should be, but no way to do that with black box, really. Maybe
        when some kind of stats are implemented it might expose that.
        """
        mcr = self.mcrouter
        k = 'down:foo'
        # get => None (NOT_FOUND)
        self.assertIsNone(mcr.get(k))
        # (set,append,prepend) => SERVER_ERRROR
        self.assertIsNone(mcr.set(k, 'abc'))
        self.assertEqual(mcr.append(k, 'abc'), "SERVER_ERROR")
        self.assertEqual(mcr.prepend(k, 'abc'), "SERVER_ERROR")
        # (delete,incr,decr,touch) => NOT_FOUND
        self.assertIsNone(mcr.delete(k))
        self.assertEqual(mcr.touch(k, 100), "NOT_FOUND")
        self.assertIsNone(mcr.incr(k))
        self.assertIsNone(mcr.decr(k))

    def test_failover(self):
        """
        The server in failover pool is not up, so it should failover
        to wildcard.
        The server in the tmo pool does not respond to any pings, so check
        to make sure it fails over to wildcard.
        """
        mcr = self.mcrouter
        mcd_gut = self.mc_gut
        # request should time out
        t1 = time.time()
        self.assertTrue(mcr.set("tmo:tko", "should time out"))
        self.assertGreater(time.time() - 0.5, t1)
        s = {}
        s['failover:'] = 100
        s['tmo:'] = 5
        for key, mx in s.iteritems():
            for i in range(1, mx):
                k = key + str(i)
                v = randstring(random.randint(3, 10))
                self.assertTrue(mcr.set(k, v))
                self.assertEqual(mcr.get(k), v)
                # Writes must have landed on the gutter box.
                self.assertEqual(mcd_gut.get(k), v)
            for i in range(1, mx):
                k = key + str(i)
                # delete failover is not enabled by default
                self.assertFalse(mcr.delete(k))
        # The sets being failed over should have a max expiration time
        # set of a few seconds for failover. The tmo pool should not
        # have an expiration time.
        k = "failover:expires"
        v = "failover:expires_value"
        self.assertTrue(mcr.set(k, v))
        time.sleep(4)
        self.assertIsNone(mcd_gut.get(k))
        k = "tmo:does_not_expire"
        v = "tmo:does_not_expire_value"
        self.assertTrue(mcr.set(k, v))
        time.sleep(4)
        self.assertEqual(mcd_gut.get(k), v)
        # Test the data miss path by setting the key in
        # the gutter box and reading through mcrouter
        # Then delete through mcrouter and check
        # gutter to ensure it has been deleted
        key = 'datamiss:'
        for i in range(1, 100):
            k = key + str(i)
            v = randstring(random.randint(3, 10))
            self.assertIsNone(mcr.get(k))
            self.assertTrue(mcd_gut.set(k, v))
            self.assertEqual(mcr.get(k), v)
            self.assertTrue(mcd_gut.delete(k))
            self.assertIsNone(mcr.get(k))
            self.assertTrue(mcd_gut.set(k, v))
            self.assertEqual(mcr.get(k), v)
            self.assertTrue(mcr.delete(k))
            self.assertIsNone(mcd_gut.get(k))

    def test_version(self):
        # mcrouter answers "version" with its own banner, not memcached's.
        v = self.mcrouter.version()
        self.assertTrue(v.startswith('VERSION mcrouter'))

    def test_server_stats(self):
        # "stats servers" reports one line per distinct destination server.
        stats = self.mcrouter.stats('servers')
        num_stats = 0
        for stat_key, stat_value in stats.iteritems():
            key_parts = stat_key.split(':')
            self.assertEqual(4, len(key_parts))  # IP:port:transport:protocol
            num_stats += 1
            value_parts = stat_value.split(' ')
            self.assertEqual(value_parts[0], 'avg_latency_us:0.000')
            self.assertEqual(value_parts[1], 'pending_reqs:0')
            self.assertEqual(value_parts[2], 'inflight_reqs:0')
        # Not sure if there is an easy way to automate this
        # Now that we have proxy destination - no of distinct servers
        self.assertEqual(17, num_stats)

    def test_bad_commands(self):
        # Unsupported commands get a SERVER_ERROR, not a hang or crash.
        m = self.mcrouter
        exp = "SERVER_ERROR Command not supported\r\n"
        bad_commands = [
            'flush_regex .*\r\n',
        ]
        for bc in bad_commands:
            self.assertEqual(m.issue_command(bc), exp)

    def test_bad_key(self):
        # Oversized keys (> 250 bytes) must be rejected client-side.
        m = self.mcrouter
        bad_key = 'foo:' + ('a' * 260)
        try:
            m.set(bad_key, bad_key)
            assert False, "Expected exception"
        except:
            pass
        try:
            m.get(bad_key)
            assert False, "Expected exception"
        except:
            pass
        self.assertEqual(m.append(bad_key, bad_key), "CLIENT_ERROR")
        self.assertEqual(m.prepend(bad_key, bad_key), "CLIENT_ERROR")
        self.assertEqual(m.touch(bad_key, 100), "CLIENT_ERROR")

    def test_bad_stats(self):
        # Unknown stats sub-commands yield a CLIENT_ERROR.
        m = self.mcrouter
        bad_stat_cmd = 'stats abcde\r\n'
        expected_resp = 'CLIENT_ERROR bad stats command\r\n'
        self.assertEqual(m.issue_command(bad_stat_cmd), expected_resp)

    def test_server_error_message(self):
        # Test involes trying to get a key that triggers a server error
        m = self.mcrouter
        exp = b"SERVER_ERROR returned error msg with binary data \xdd\xab\r\n"
        bad_command = 'set __mockmc__.trigger_server_error 0 0 1\r\n0\r\n'
        self.assertEqual(m.issue_command(bad_command), exp)

    def test_reject_policy(self):
        # Test the reject policy
        m = self.mcrouter
        exp = "SERVER_ERROR reject\r\n"
        bad_command = 'set rej:foo 0 0 3\r\nrej\r\n'
        self.assertEqual(m.issue_command(bad_command), exp)
class TestMcrouterSanityOverUmbrella(TestMcrouterSanity):
    # Re-run the entire sanity suite with the umbrella-protocol config.
    config = './mcrouter/test/test_umbrella.json'
| |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv2 import Fmat_original_hshv
from data_variable_hslv2 import Fmat_original_hslv
from data_variable_lshv2 import Fmat_original_lshv
from data_variable_lslv2 import Fmat_original_lslv
# Scaling function
def scaling(mat):
    """Standardize the three 81-row channels of *mat* and re-stack them.

    Rows 0:81, 81:162 and 162:243 are treated as separate channels.
    NOTE(review): only channels a and c are z-normalized; every scaling
    line for Fvec_b is commented out, so the middle channel passes through
    unchanged.  Confirm this asymmetry is intentional (the commented lines
    look like normalization experiments).
    """
    Fvec_a = mat[0:81,0:]
    Fvec_b = mat[81:162,0:]
    Fvec_c = mat[162:243,0:]
    # With Scaling
    max_a = np.max(abs(Fvec_a))
    min_a = np.min(abs(Fvec_a))
    mean_a = np.mean(Fvec_a)
    std_a = np.std(Fvec_a)
    #Fvec_a = (Fvec_a)/max_a
    #Fvec_a = (Fvec_a-mean_a)
    #Fvec_a = (Fvec_a-mean_a)/max_a
    Fvec_a = (Fvec_a-mean_a)/std_a
    # With Scaling
    max_b = np.max(abs(Fvec_b))
    min_b = np.min(abs(Fvec_b))
    mean_b = np.mean(Fvec_b)
    std_b = np.std(Fvec_b)
    #Fvec_b = (Fvec_b)/max_b
    #Fvec_b = (Fvec_b-mean_b)
    #Fvec_b = (Fvec_b-mean_b)/max_b
    #Fvec_b = (Fvec_b-mean_b)/std_b
    # With Scaling
    max_c = np.max(abs(Fvec_c))
    min_c = np.min(abs(Fvec_c))
    mean_c = np.mean(Fvec_c)
    std_c = np.std(Fvec_c)
    #Fvec_c = (Fvec_c)/max_c
    #Fvec_c = (Fvec_c-mean_c)
    #Fvec_c = (Fvec_c-mean_c)/max_c
    Fvec_c = (Fvec_c-mean_c)/std_c
    #Fvec_c = Fvec_c*np.max((max_a,max_b))/max_c
    Fvec = np.row_stack([Fvec_a,Fvec_b,Fvec_c])
    n_Fvec, m_Fvec = np.shape(Fvec)
    #print 'Feature_Vector_Shape:',n_Fvec, m_Fvec
    return Fvec
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
    """Split the paired feature matrices into 20 row-chunks (one per hidden
    state) and return per-chunk means plus a (20,2,2) covariance array.

    NOTE: Python 2 module (print statements below); DIVS = m/20 relies on
    integer division and would be a float under Python 3.
    NOTE(review): np.cov of the concatenated 1-D vector yields a scalar
    variance, which is broadcast into the whole 2x2 slot of ``cov`` —
    confirm a full 2x2 covariance was not intended here.
    """
    index = 0
    m,n = np.shape(fvec1)
    #print m,n
    mu_1 = np.zeros((20,1))
    mu_2 = np.zeros((20,1))
    cov = np.zeros((20,2,2))
    DIVS = m/20
    while (index < 20):
        m_init = index*DIVS
        temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
        temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
        temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
        temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
        mu_1[index] = np.mean(temp_fvec1)
        mu_2[index] = np.mean(temp_fvec2)
        cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
        # Debug dump for the first chunk only.
        if index == 0:
            print 'mean = ', mu_2[index]
            print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
            print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
            print cov[index,:,:]
            print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
            print scp.std(temp_fvec2)
        index = index+1
    return mu_1,mu_2,cov
if __name__ == '__main__':
# Scaling wrt all data
Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
Fmat_rm_hshv = Fmat_original_hshv[:,15:15]
Fmat_sf_hshv = scaling(Fmat_original_hshv[:,15:26])
Fmat_sm_hshv = scaling(Fmat_original_hshv[:,26:33])
Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:30])
Fmat_sf_hslv = scaling(Fmat_original_hslv[:,30:45])
Fmat_sm_hslv = scaling(Fmat_original_hslv[:,45:56])
Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
Fmat_rm_lshv = scaling(Fmat_original_lshv[:,15:16])
Fmat_sf_lshv = scaling(Fmat_original_lshv[:,16:23])
Fmat_sm_lshv = scaling(Fmat_original_lshv[:,23:32])
Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:28])
Fmat_sf_lslv = scaling(Fmat_original_lslv[:,28:37])
Fmat_sm_lslv = scaling(Fmat_original_lslv[:,37:45])
Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.05] * 20
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))))
mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))))
mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))))
mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))), (np.matrix(np.column_stack((Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = [0.0]*20
B_rm_hshv = [0.0]*20
B_sf_hshv = [0.0]*20
B_sm_hshv = [0.0]*20
for num_states in range(20):
B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
print "----"
#print B_sm_hshv
#print mu_sm_motion_hshv
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))
total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))
total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))
total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))
total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))
total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))
total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))
total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))
total_seq_rf_hshv = np.zeros((162,45))
total_seq_rm_hshv = np.zeros((162,29))
total_seq_sf_hshv = np.zeros((162,31))
total_seq_sm_hshv = np.zeros((162,28))
i = 0
j = 0
while i < 162:
total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
j=j+1
i=i+2
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = np.zeros((162,33))
total_seq_obj_force_hshv = Fmat_hshv[0:81,:]
total_seq_obj_motion_hshv = Fmat_hshv[162:243,:]
i = 0
j = 0
while i < 162:
total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
j=j+1
i=i+2
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:26])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,26:33])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:26])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,26:33])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:26])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,26:33])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:26])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,26:33])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
# Same leave-one-fold-out procedure as the HSHV section above, with HSLV held out.
# Per-category Gaussian parameters from the HSHV, LSHV and LSLV folds.
mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))))
mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))))
mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))))
mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
# One entry per HMM state (20 states): [[mu_force, mu_motion], flattened 2x2 covariance].
B_rf_hslv = [0.0]*20
B_rm_hslv = [0.0]*20
B_sf_hslv = [0.0]*20
B_sm_hslv = [0.0]*20
for num_states in range(20):
    B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
    B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
    B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
    B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
    # Debug dump of the SM covariance entries for each state.
    print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
print "----"
#print B_sm_hslv
#print mu_sm_motion_hslv
# generate RF, RM, SF, SM models from parameters
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_lshv[0:81,0:15], Fmat_lslv[0:81,0:15])))
total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_lshv[0:81,15:16], Fmat_lslv[0:81,15:28])))
total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_lshv[0:81,16:23], Fmat_lslv[0:81,28:37])))
total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_lshv[0:81,23:32], Fmat_lslv[0:81,37:45])))
total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_lshv[162:243,0:15], Fmat_lslv[162:243,0:15])))
total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_lshv[162:243,15:16], Fmat_lslv[162:243,15:28])))
total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_lshv[162:243,16:23], Fmat_lslv[162:243,28:37])))
total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_lshv[162:243,23:32], Fmat_lslv[162:243,37:45])))
# Interleaved (force, motion) row pairs; widths = total training trials per category
# (rf: 15+15+15=45, rm: 0+1+13=14, sf: 11+7+9=27, sm: 7+9+8=24).
total_seq_rf_hslv = np.zeros((162,45))
total_seq_rm_hslv = np.zeros((162,14))
total_seq_sf_hslv = np.zeros((162,27))
total_seq_sm_hslv = np.zeros((162,24))
i = 0
j = 0
while i < 162:
    # Even rows carry force samples, odd rows the matching motion samples.
    total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
    total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
    total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
    total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
    total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
    total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
    total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
    total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
    j=j+1
    i=i+2
# Transpose so each trial becomes one training sequence for ghmm.
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
# Baum-Welch re-estimation of each category model on its training sequences.
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
# HSLV fold has 56 test trials; interleave its force/motion rows the same way.
total_seq_obj_hslv = np.zeros((162,56))
total_seq_obj_force_hslv = Fmat_hslv[0:81,:]
total_seq_obj_motion_hslv = Fmat_hslv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
    total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
    j=j+1
    i=i+2
# One-hot rows: rf/rm/sf/sm_hslv[0,k] == 1 iff trial k was classified as that category.
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
    test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
    new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
    #print new_test_seq_obj_hslv
    ts_obj_hslv = new_test_seq_obj_hslv
    #print np.shape(ts_obj_hslv)
    final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
    # Find Viterbi Path
    path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
    path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
    path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
    path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
    # Classify by the model with the highest Viterbi log-likelihood.
    obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
    if obj_hslv == path_rf_obj_hslv[1]:
        rf_hslv[0,k] = 1
    elif obj_hslv == path_rm_obj_hslv[1]:
        rm_hslv[0,k] = 1
    elif obj_hslv == path_sf_obj_hslv[1]:
        sf_hslv[0,k] = 1
    else:
        sm_hslv[0,k] = 1
    k = k+1
#print rf_hshv.T
# Accumulate confusion matrix (HSLV true columns: rf 0:15, rm 15:30, sf 30:45, sm 45:56).
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:56])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:56])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:56])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:56])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
# Same leave-one-fold-out procedure as above, with LSHV held out.
# Per-category Gaussian parameters from the HSHV, HSLV and LSLV folds.
mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lslv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lslv[162:243,0:15])))))
mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lslv[0:81,15:28])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lslv[162:243,15:28])))))
mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lslv[0:81,28:37])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lslv[162:243,28:37])))))
mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lslv[0:81,37:45])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lslv[162:243,37:45])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
# One entry per HMM state (20 states): [[mu_force, mu_motion], flattened 2x2 covariance].
B_rf_lshv = [0.0]*20
B_rm_lshv = [0.0]*20
B_sf_lshv = [0.0]*20
B_sm_lshv = [0.0]*20
for num_states in range(20):
    B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
    B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
    B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
    B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
    # Debug dump of the SM covariance entries for each state.
    print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
print "----"
#print B_sm_lshv
#print mu_sm_motion_lshv
# generate RF, RM, SF, SM models from parameters
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lslv[0:81,0:15])))
total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lslv[0:81,15:28])))
total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lslv[0:81,28:37])))
total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lslv[0:81,37:45])))
total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lslv[162:243,0:15])))
total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lslv[162:243,15:28])))
total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lslv[162:243,28:37])))
total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lslv[162:243,37:45])))
# Interleaved (force, motion) row pairs; widths = total training trials per category
# (rf: 15+15+15=45, rm: 0+15+13=28, sf: 11+15+9=35, sm: 7+11+8=26).
total_seq_rf_lshv = np.zeros((162,45))
total_seq_rm_lshv = np.zeros((162,28))
total_seq_sf_lshv = np.zeros((162,35))
total_seq_sm_lshv = np.zeros((162,26))
i = 0
j = 0
while i < 162:
    # Even rows carry force samples, odd rows the matching motion samples.
    total_seq_rf_lshv[i] = total_seq_rf_force_lshv[j]
    total_seq_rf_lshv[i+1] = total_seq_rf_motion_lshv[j]
    total_seq_rm_lshv[i] = total_seq_rm_force_lshv[j]
    total_seq_rm_lshv[i+1] = total_seq_rm_motion_lshv[j]
    total_seq_sf_lshv[i] = total_seq_sf_force_lshv[j]
    total_seq_sf_lshv[i+1] = total_seq_sf_motion_lshv[j]
    total_seq_sm_lshv[i] = total_seq_sm_force_lshv[j]
    total_seq_sm_lshv[i+1] = total_seq_sm_motion_lshv[j]
    j=j+1
    i=i+2
# Transpose so each trial becomes one training sequence for ghmm.
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
# Baum-Welch re-estimation of each category model on its training sequences.
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
# LSHV fold has 32 test trials; interleave its force/motion rows the same way.
total_seq_obj_lshv = np.zeros((162,32))
total_seq_obj_force_lshv = Fmat_lshv[0:81,:]
total_seq_obj_motion_lshv = Fmat_lshv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_lshv[i] = total_seq_obj_force_lshv[j]
    total_seq_obj_lshv[i+1] = total_seq_obj_motion_lshv[j]
    j=j+1
    i=i+2
# One-hot rows: rf/rm/sf/sm_lshv[0,k] == 1 iff trial k was classified as that category.
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
    test_seq_obj_lshv = (np.array(total_seq_obj_lshv[:,k]).T).tolist()
    new_test_seq_obj_lshv = np.array(test_seq_obj_lshv)
    #print new_test_seq_obj_lshv
    ts_obj_lshv = new_test_seq_obj_lshv
    #print np.shape(ts_obj_lshv)
    final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
    # Find Viterbi Path
    path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
    path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
    path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
    path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
    # Classify by the model with the highest Viterbi log-likelihood.
    obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
    if obj_lshv == path_rf_obj_lshv[1]:
        rf_lshv[0,k] = 1
    elif obj_lshv == path_rm_obj_lshv[1]:
        rm_lshv[0,k] = 1
    elif obj_lshv == path_sf_obj_lshv[1]:
        sf_lshv[0,k] = 1
    else:
        sm_lshv[0,k] = 1
    k = k+1
#print rf_lshv.T
# Accumulate confusion matrix (LSHV true columns: rf 0:15, rm 15:16, sf 16:23, sm 23:32).
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:23])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,23:32])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:23])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,23:32])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:23])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,23:32])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:23])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,23:32])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
# Same leave-one-fold-out procedure as above, with LSLV held out.
# Per-category Gaussian parameters from the HSHV, HSLV and LSHV folds.
mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15])))))
mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16])))))
mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23])))))
mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32])))), (np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
# One entry per HMM state (20 states): [[mu_force, mu_motion], flattened 2x2 covariance].
B_rf_lslv = [0.0]*20
B_rm_lslv = [0.0]*20
B_sf_lslv = [0.0]*20
B_sm_lslv = [0.0]*20
for num_states in range(20):
    B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
    B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
    B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
    B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
    # Debug dump of the SM covariance entries for each state.
    print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
print "----"
#print B_sm_lslv
#print mu_sm_motion_lslv
# generate RF, RM, SF, SM models from parameters
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,0:15], Fmat_hslv[0:81,0:15], Fmat_lshv[0:81,0:15])))
total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:15], Fmat_hslv[0:81,15:30], Fmat_lshv[0:81,15:16])))
total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,15:26], Fmat_hslv[0:81,30:45], Fmat_lshv[0:81,16:23])))
total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:81,26:33], Fmat_hslv[0:81,45:56], Fmat_lshv[0:81,23:32])))
total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,0:15], Fmat_hslv[162:243,0:15], Fmat_lshv[162:243,0:15])))
total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:15], Fmat_hslv[162:243,15:30], Fmat_lshv[162:243,15:16])))
total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,15:26], Fmat_hslv[162:243,30:45], Fmat_lshv[162:243,16:23])))
total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[162:243,26:33], Fmat_hslv[162:243,45:56], Fmat_lshv[162:243,23:32])))
# Interleaved (force, motion) row pairs; widths = total training trials per category
# (rf: 15+15+15=45, rm: 0+15+1=16, sf: 11+15+7=33, sm: 7+11+9=27).
total_seq_rf_lslv = np.zeros((162,45))
total_seq_rm_lslv = np.zeros((162,16))
total_seq_sf_lslv = np.zeros((162,33))
total_seq_sm_lslv = np.zeros((162,27))
i = 0
j = 0
while i < 162:
    # Even rows carry force samples, odd rows the matching motion samples.
    total_seq_rf_lslv[i] = total_seq_rf_force_lslv[j]
    total_seq_rf_lslv[i+1] = total_seq_rf_motion_lslv[j]
    total_seq_rm_lslv[i] = total_seq_rm_force_lslv[j]
    total_seq_rm_lslv[i+1] = total_seq_rm_motion_lslv[j]
    total_seq_sf_lslv[i] = total_seq_sf_force_lslv[j]
    total_seq_sf_lslv[i+1] = total_seq_sf_motion_lslv[j]
    total_seq_sm_lslv[i] = total_seq_sm_force_lslv[j]
    total_seq_sm_lslv[i+1] = total_seq_sm_motion_lslv[j]
    j=j+1
    i=i+2
# Transpose so each trial becomes one training sequence for ghmm.
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
# Baum-Welch re-estimation of each category model on its training sequences.
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
# LSLV fold has 45 test trials; interleave its force/motion rows the same way.
total_seq_obj_lslv = np.zeros((162,45))
total_seq_obj_force_lslv = Fmat_lslv[0:81,:]
total_seq_obj_motion_lslv = Fmat_lslv[162:243,:]
i = 0
j = 0
while i < 162:
    total_seq_obj_lslv[i] = total_seq_obj_force_lslv[j]
    total_seq_obj_lslv[i+1] = total_seq_obj_motion_lslv[j]
    j=j+1
    i=i+2
# One-hot rows: rf/rm/sf/sm_lslv[0,k] == 1 iff trial k was classified as that category.
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
    test_seq_obj_lslv = (np.array(total_seq_obj_lslv[:,k]).T).tolist()
    new_test_seq_obj_lslv = np.array(test_seq_obj_lslv)
    #print new_test_seq_obj_lslv
    ts_obj_lslv = new_test_seq_obj_lslv
    #print np.shape(ts_obj_lslv)
    final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
    # Find Viterbi Path
    path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
    path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
    path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
    path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
    # Classify by the model with the highest Viterbi log-likelihood.
    obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
    if obj_lslv == path_rf_obj_lslv[1]:
        rf_lslv[0,k] = 1
    elif obj_lslv == path_rm_obj_lslv[1]:
        rm_lslv[0,k] = 1
    elif obj_lslv == path_sf_obj_lslv[1]:
        sf_lslv[0,k] = 1
    else:
        sm_lslv[0,k] = 1
    k = k+1
#print rf_lslv.T
# Accumulate confusion matrix (LSLV true columns: rf 0:15, rm 15:28, sf 28:37, sm 37:45).
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:37])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,37:45])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:37])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,37:45])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:37])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,37:45])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:37])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,37:45])
#print cmat
############################################################################################################################################
# Plot the accumulated 4x4 confusion matrix (predictions vs. targets) and save it.
# Fixes: removed the duplicated "Plot Confusion Matrix" comment line and replaced the
# manual while-loop counters with idiomatic for loops over the 4x4 cells.
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
# Annotate each cell with its count (y axis is flipped, hence 3.5-i).
for i in range(4):
    for j in range(4):
        pp.text(j+0.5, 3.5-i, cmat[i][j])
pp.savefig('results_force_motion_20_states.png')
pp.show()
| |
# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for filter.py."""
import unittest
from webkitpy.style.filter import _CategoryFilter as CategoryFilter
from webkitpy.style.filter import validate_filter_rules
from webkitpy.style.filter import FilterConfiguration
# On Testing __eq__() and __ne__():
#
# In the tests below, we deliberately do not use assertEqual() or
# assertNotEquals() to test __eq__() or __ne__(). We do this to be
# very explicit about what we are testing, especially in the case
# of assertNotEquals().
#
# Part of the reason is that it is not immediately clear what
# expression the unittest module uses to assert "not equals" -- the
# negation of __eq__() or __ne__(), which are not necessarily
# equivalent expressions in Python. For example, from Python's "Data
# Model" documentation--
#
# "There are no implied relationships among the comparison
# operators. The truth of x==y does not imply that x!=y is
# false. Accordingly, when defining __eq__(), one should
# also define __ne__() so that the operators will behave as
# expected."
#
# (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
class ValidateFilterRulesTest(unittest.TestCase):

    """Tests validate_filter_rules() function."""

    def test_validate_filter_rules(self):
        known_categories = ["tabs", "whitespace", "build/include"]

        # Rules that must be rejected: missing +/- prefix, bad characters,
        # leading whitespace, or a category that is not in the known list.
        rejected_rules = (
            "tabs",
            "*tabs",
            " tabs",
            " +tabs",
            "+whitespace/newline",
            "+xxx",
        )

        # Rules that must be accepted, including a prefix of a known category.
        accepted_rules = (
            "+tabs",
            "-tabs",
            "+build",
        )

        for rule in rejected_rules:
            self.assertRaises(ValueError, validate_filter_rules,
                              [rule], known_categories)

        for rule in accepted_rules:
            # Acceptance means no exception is raised.
            validate_filter_rules([rule], known_categories)
class CategoryFilterTest(unittest.TestCase):

    """Tests CategoryFilter class.

    Fix: local variables named ``filter`` shadowed the builtin; renamed to
    ``category_filter`` throughout. Test behavior is unchanged.
    """

    def test_init(self):
        """Test __init__ method."""
        # Test that the attributes are getting set correctly.
        category_filter = CategoryFilter(["+"])
        self.assertEqual(["+"], category_filter._filter_rules)

    def test_init_default_arguments(self):
        """Test __init__ method default arguments."""
        category_filter = CategoryFilter()
        self.assertEqual([], category_filter._filter_rules)

    def test_str(self):
        """Test __str__ "to string" operator."""
        category_filter = CategoryFilter(["+a", "-b"])
        self.assertEqual(str(category_filter), "+a,-b")

    def test_eq(self):
        """Test __eq__ equality function."""
        filter1 = CategoryFilter(["+a", "+b"])
        filter2 = CategoryFilter(["+a", "+b"])
        filter3 = CategoryFilter(["+b", "+a"])

        # See the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertTrue(filter1.__eq__(filter2))
        self.assertFalse(filter1.__eq__(filter3))

    def test_ne(self):
        """Test __ne__ inequality function."""
        # By default, __ne__ always returns true on different objects.
        # Thus, just check the distinguishing case to verify that the
        # code defines __ne__.
        #
        # Also, see the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))

    def test_should_check(self):
        """Test should_check() method."""
        category_filter = CategoryFilter()
        self.assertTrue(category_filter.should_check("everything"))
        # Check a second time to exercise cache.
        self.assertTrue(category_filter.should_check("everything"))

        category_filter = CategoryFilter(["-"])
        self.assertFalse(category_filter.should_check("anything"))
        # Check a second time to exercise cache.
        self.assertFalse(category_filter.should_check("anything"))

        # The last matching rule wins: "-" rejects all, then "+ab" re-enables
        # categories starting with "ab" (and vice versa below).
        category_filter = CategoryFilter(["-", "+ab"])
        self.assertTrue(category_filter.should_check("abc"))
        self.assertFalse(category_filter.should_check("a"))

        category_filter = CategoryFilter(["+", "-ab"])
        self.assertFalse(category_filter.should_check("abc"))
        self.assertTrue(category_filter.should_check("a"))
class FilterConfigurationTest(unittest.TestCase):

    """Tests FilterConfiguration class."""

    def _config(self, base_rules, path_specific, user_rules):
        """Return a FilterConfiguration instance."""
        return FilterConfiguration(base_rules=base_rules,
                                   path_specific=path_specific,
                                   user_rules=user_rules)

    def test_init(self):
        """Test __init__ method."""
        # Test that the attributes are getting set correctly.
        # We use parameter values that are different from the defaults.
        base_rules = ["-"]
        path_specific = [(["path"], ["+a"])]
        user_rules = ["+"]

        config = self._config(base_rules, path_specific, user_rules)

        self.assertEqual(base_rules, config._base_rules)
        self.assertEqual(path_specific, config._path_specific)
        self.assertEqual(user_rules, config._user_rules)

    def test_default_arguments(self):
        # Every attribute should default to an empty list.
        config = FilterConfiguration()

        self.assertEqual([], config._base_rules)
        self.assertEqual([], config._path_specific)
        self.assertEqual([], config._user_rules)

    def test_eq(self):
        """Test __eq__ method."""
        # See the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration()))

        # A difference in any single constructor argument must break
        # equality with the all-defaults configuration.  Each value below
        # differs from its default.
        default_config = FilterConfiguration()
        non_default_kwargs = [
            {"base_rules": ["-"]},
            {"path_specific": [(["path"], ["+a"])]},
            {"user_rules": ["+"]},
        ]
        for kwargs in non_default_kwargs:
            self.assertFalse(
                default_config.__eq__(FilterConfiguration(**kwargs)))

    def test_ne(self):
        """Test __ne__ method."""
        # By default, __ne__ always returns true on different objects.
        # Thus, just check the distinguishing case to verify that the
        # code defines __ne__.
        #
        # Also, see the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration()))

    def test_base_rules(self):
        """Test effect of base_rules on should_check()."""
        config = self._config(["-b"], [], [])

        self.assertTrue(config.should_check("a", "path"))
        self.assertFalse(config.should_check("b", "path"))

    def test_path_specific(self):
        """Test effect of path_rules_specifier on should_check()."""
        path_rules = [(["path1"], ["+b"]),
                      (["path2"], ["+c"])]
        config = self._config(["-"], path_rules, [])

        self.assertFalse(config.should_check("c", "path1"))
        self.assertTrue(config.should_check("c", "path2"))
        # Test that first match takes precedence.
        self.assertFalse(config.should_check("c", "path2/path1"))

    def test_path_with_different_case(self):
        """Test a path that differs only in case."""
        config = self._config(["-"], [(["Foo/"], ["+whitespace"])], [])

        self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt"))
        self.assertTrue(config.should_check("whitespace", "Foo/bar.txt"))
        # Test different case.
        self.assertTrue(config.should_check("whitespace", "FOO/bar.txt"))

    def test_user_rules(self):
        """Test effect of user_rules on should_check()."""
        config = self._config(["-"], [], ["+b"])

        self.assertFalse(config.should_check("a", "path"))
        self.assertTrue(config.should_check("b", "path"))
| |
from heapy.util import *
class pqueue_min:
    """
    Min-priority queue: a binary heap of (key, weight) tuples plus an
    index mapping keys to their position in the heap (so item weights
    can be located and modified quickly).

    min - O(log(n))
    insert - O(log(n)) (average: O(1))
    update - O(log(n)) (average: O(1))
    Useful for:
      - Dijkstra
      - Rolling Median
      - A*
    """
    def __init__(self, l=None):
        # _index maps key -> position of its (key, weight) tuple in self._l.
        self._index = {}
        if l is None:
            self._l = []
        else:
            self._l = build_heap(l, self._index)

    # remove and return the min
    def pop(self, n=None):
        """
        Remove and return the min tuple, or the tuple for the specified key.

        n - optional key whose entry should be removed instead of the min

        Returns
            tuple (key, weight), or None if the queue is empty
        """
        if len(self._l) == 0:
            return None
        if n is None:  # default gets the min
            i = 0
        else:  # if a key is supplied, remove that entry
            i = self._index[n]
        m = self._l[i]
        del self._index[m[0]]
        # Refill the vacated slot with the last heap element and restore the
        # invariant.  Fix: the original unconditionally did `self._l[i] = n`
        # after popping, which raised IndexError when the removed entry was
        # itself the last element (i == len(self._l) after the pop).
        last = self._l.pop()
        if i < len(self._l):
            self._l[i] = last
            self._index[last[0]] = i
            # Fix: when an arbitrary key is removed, the replacement may
            # violate the heap property upward as well as downward; the
            # original only ever sifted down.  Weight is tuple slot [1],
            # parent of i is (i - 1) // 2 (0-based heap, root at index 0 --
            # consistent with pop()'s use of index 0 as the min).
            if i > 0 and last[1] < self._l[(i - 1) // 2][1]:
                up_heapify(self._l, i, self._index)
            else:
                down_heapify(self._l, i, self._index)
        return m

    # add an element
    def push(self, t):
        """
        Add an element as a key/weight pair; if the key is already present,
        its weight is updated instead.

        t - tuple (key, weight)
        """
        # existing element?
        if t[0] in self._index:
            return self._update(t)
        self._l.append(t)
        i = len(self._l) - 1
        self._index[t[0]] = i
        up_heapify(self._l, i, self._index)  #_siftup

    # return the min (without removal)
    def peek(self):
        """Return the minimum (as a tuple) without removing it, or None."""
        if len(self._l) == 0:
            return None
        return self._l[0]

    def remove(self, n):
        """Remove the element with the specified key, and return its tuple."""
        return self.pop(n)

    def _update(self, t):
        # Change the weight of an existing key and re-heapify from its slot:
        # a smaller weight can only move up, a larger one only down.
        n = t[0]
        w = t[1]
        i = self._index[n]
        w0 = self._l[i][1]
        if w0 == w:
            return
        self._l[i] = t
        if w < w0:
            up_heapify(self._l, i, self._index)
        else:
            down_heapify(self._l, i, self._index)

    # container methods
    def __len__(self):
        return len(self._l)

    def __contains__(self, item):
        return item in self._index

    def __getitem__(self, key):
        return self._l[self._index[key]][1]

    def __setitem__(self, key, value):
        self.push((key, value))
class pqueue_max:
    """
    Max-priority queue: a binary heap of (item, weight) tuples plus an
    index mapping items to their position in the heap (so item weights
    can be located and modified quickly).

    max - O(log(n))
    insert - O(log(n)) (average: O(1))
    update - O(log(n)) (average: O(1))
    Useful for:
      - Dijkstra
      - Rolling Median
      - A*
    """
    def __init__(self, l=None):
        # _index maps key -> position of its (key, weight) tuple in self._l.
        self._index = {}
        if l is None:
            self._l = []
        else:
            self._l = build_heap_max(l, self._index)

    # remove and return the max
    def pop(self, t=None):
        """
        Remove and return the max tuple, or the entry matching t's key.

        t - optional tuple whose first element is the key to remove
            (NOTE(review): unlike pqueue_min.pop, which takes a bare key --
            kept as-is for backward compatibility)

        Returns
            tuple (item, weight), or None if the queue is empty
        """
        if len(self._l) == 0:
            return None
        if t is None:  # default gets the max
            i = 0
        else:  # if an item is supplied, remove it
            i = self._index[t[0]]
        m = self._l[i]
        del self._index[m[0]]
        # Refill the vacated slot with the last heap element and restore the
        # invariant.  Fix: the original unconditionally did `self._l[i] = n`
        # after popping, which raised IndexError when the removed entry was
        # itself the last element (i == len(self._l) after the pop).
        last = self._l.pop()
        if i < len(self._l):
            self._l[i] = last
            self._index[last[0]] = i
            # Fix: the replacement may violate the max-heap property upward
            # as well as downward; sift up when heavier than its parent.
            if i > 0 and last[1] > self._l[(i - 1) // 2][1]:
                up_heapify_max(self._l, i, self._index)
            else:
                down_heapify_max(self._l, i, self._index)
        return m

    # add an element
    def push(self, t):
        """
        Add an element as an item/weight pair; if the item is already
        present, its weight is updated instead.

        t - tuple (item, weight)
        """
        # existing element?
        if t[0] in self._index:
            return self._update(t)
        self._l.append(t)
        i = len(self._l) - 1
        self._index[t[0]] = i
        up_heapify_max(self._l, i, self._index)  #_siftup

    # return the max (without removal)
    def peek(self):
        """Return the maximum (as a tuple) without removing it, or None."""
        if len(self._l) == 0:
            return None
        return self._l[0]

    def remove(self, t):
        """Remove the entry matching t's key, and return its tuple.

        Added for parity with pqueue_min.remove().
        """
        return self.pop(t)

    def _update(self, t):
        # Change the weight of an existing key and re-heapify from its slot.
        # Fix: in a max-heap a *larger* weight moves up and a smaller one
        # moves down; the original sifted up on w < w0 (the min-heap
        # direction), leaving the heap invariant broken after updates.
        n = t[0]
        w = t[1]
        i = self._index[n]
        w0 = self._l[i][1]
        if w0 == w:
            return
        self._l[i] = t
        if w > w0:
            up_heapify_max(self._l, i, self._index)
        else:
            down_heapify_max(self._l, i, self._index)

    # container methods
    def __len__(self):
        return len(self._l)

    def __contains__(self, item):
        return item in self._index

    def __getitem__(self, key):
        return self._l[self._index[key]][1]

    def __setitem__(self, key, value):
        self.push((key, value))
| |
#!/usr/bin/env python
"""The main script for the RunSnakeRun profile viewer"""
import wx, sys, os, logging, traceback
log = logging.getLogger( __name__ )
import ConfigParser
try:
from wx.py import editor, editwindow
except ImportError, err:
log.info( 'No editor available: %s', err )
editor = None
from gettext import gettext as _
import pstats
from squaremap import squaremap
from runsnakerun import pstatsloader,pstatsadapter, meliaeloader, meliaeadapter
from runsnakerun import listviews
from runsnakerun import homedirectory
# Platform flags consulted elsewhere in the module to tweak UI behaviour.
if sys.platform == 'win32':
    windows = True
else:
    windows = False
if sys.platform == 'darwin':
    osx = True
else:
    osx = False
# NOTE(review): `log` is already bound identically near the top of the
# module; this rebinding is harmless but redundant.
log = logging.getLogger(__name__)
# Unique wx command IDs for the menu items and toolbar tools.
ID_OPEN = wx.NewId()
ID_OPEN_MEMORY = wx.NewId()
ID_EXIT = wx.NewId()
ID_TREE_TYPE = wx.NewId()  # NOTE(review): appears unused in this file
ID_PACKAGE_VIEW = wx.NewId()
ID_PERCENTAGE_VIEW = wx.NewId()
ID_ROOT_VIEW = wx.NewId()
ID_BACK_VIEW = wx.NewId()
ID_UP_VIEW = wx.NewId()
ID_DEEPER_VIEW = wx.NewId()
ID_SHALLOWER_VIEW = wx.NewId()
ID_MORE_SQUARE = wx.NewId()
# Column definitions shared by all the profile (pstats) list views.
# Column titles run through gettext; sortDefault marks the initial sort
# column ('Cum'), percentPossible marks columns that can be shown as
# percentages of the overall run time.
PROFILE_VIEW_COLUMNS = [
    listviews.ColumnDefinition(
        name = _('Name'),
        attribute = 'name',
        defaultOrder = True,
        targetWidth = 50,
    ),
    listviews.ColumnDefinition(
        name = _('Calls'),
        attribute = 'calls',
        defaultOrder = False,
        targetWidth = 50,
    ),
    listviews.ColumnDefinition(
        name = _('RCalls'),
        attribute = 'recursive',
        defaultOrder = False,
        targetWidth = 40,
    ),
    listviews.ColumnDefinition(
        name = _('Local'),
        attribute = 'local',
        format = '%0.5f',
        defaultOrder = False,
        percentPossible = True,
        targetWidth = 50,
    ),
    listviews.ColumnDefinition(
        name = _('/Call'),
        attribute = 'localPer',
        defaultOrder = False,
        format = '%0.5f',
        targetWidth = 50,
    ),
    listviews.ColumnDefinition(
        name = _('Cum'),
        attribute = 'cumulative',
        format = '%0.5f',
        percentPossible = True,
        targetWidth = 50,
        defaultOrder = False,
        sortDefault = True,
    ),
    listviews.ColumnDefinition(
        name = _('/Call'),
        attribute = 'cumulativePer',
        format = '%0.5f',
        defaultOrder = False,
        targetWidth = 50,
    ),
    listviews.ColumnDefinition(
        name = _('File'),
        attribute = 'filename',
        sortOn = ('filename', 'lineno', 'directory',),
        defaultOrder = True,
        targetWidth = 70,
    ),
    listviews.ColumnDefinition(
        name = _('Line'),
        attribute = 'lineno',
        sortOn = ('filename', 'lineno', 'directory'),
        defaultOrder = True,
        targetWidth = 30,
    ),
    listviews.ColumnDefinition(
        name = _('Directory'),
        attribute = 'directory',
        sortOn = ('directory', 'filename', 'lineno'),
        defaultOrder = True,
        targetWidth = 90,
    ),
]
# Longest value string shown before truncation with an ellipsis.
MAX_NAME_LEN = 64

def mem_name(x):
    """Return a display name for a meliae memory record (a dict).

    Prefers the record's 'name'; falls back to its 'value' (truncated to
    MAX_NAME_LEN characters when it is a long string), else ''.
    """
    name = x.get('name')
    if name:
        return name
    value = x.get('value')
    if not value:
        return ''
    if isinstance(value, (str, unicode)) and len(value) > MAX_NAME_LEN:
        return value[:MAX_NAME_LEN - 3] + '...'
    return value
# Column definitions used when viewing a Meliae memory dump; rows are
# dict records, so DictColumn (optionally with a getter callable) is used
# instead of attribute-based ColumnDefinition.
MEMORY_VIEW_COLUMNS = [
    listviews.DictColumn(
        name = _('Type'),
        attribute = 'type',
        targetWidth = 20,
        defaultOrder = True,
    ),
    listviews.DictColumn(
        name = _('Name'),
        attribute = 'name',
        targetWidth = 20,
        getter = mem_name,
        defaultOrder = True,
    ),
    listviews.DictColumn(
        name = _('Cum'),
        attribute = 'totsize',
        targetWidth = 5,
        defaultOrder = False,
        format = '%0.1f',
        percentPossible = True,
        sortDefault = True,
    ),
    listviews.DictColumn(
        name = _('Local'),
        attribute = 'size',
        defaultOrder = False,
        format = '%0.1f',
        percentPossible = True,
        targetWidth = 5,
    ),
    listviews.DictColumn(
        name = _('Children'),
        attribute = 'rsize',
        format = '%0.1f',
        percentPossible = True,
        defaultOrder = False,
        targetWidth = 5,
    ),
    listviews.DictColumn(
        name = _('/Refs'),
        attribute = 'parents',
        defaultOrder = False,
        targetWidth = 4,
        # Count of referrers; tolerate records without a 'parents' key.
        getter = lambda x: len(x.get('parents',())),
    ),
    listviews.DictColumn(
        name = _('Refs/'),
        attribute = 'children',
        defaultOrder = False,
        targetWidth = 4,
        # Count of referents; tolerate records without a 'children' key.
        getter = lambda x: len(x.get('children',())),
    ),
]
class MainFrame(wx.Frame):
"""The root frame for the display of a single data-set"""
loader = None
percentageView = False
historyIndex = -1
activated_node = None
selected_node = None
viewType = 'functions'
viewTypeTool = None
TBFLAGS = (
wx.TB_HORIZONTAL
#| wx.NO_BORDER
| wx.TB_FLAT
)
def __init__(
self, parent=None, id=-1,
title=_("Run Snake Run"),
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE|wx.CLIP_CHILDREN,
name= _("RunSnakeRun"),
config_parser=None,
):
"""Initialise the Frame"""
wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
# TODO: toolbar for back, up, root, directory-view, percentage view
self.adapter = pstatsadapter.PStatsAdapter()
self.CreateControls(config_parser)
self.history = [] # set of (activated_node, selected_node) pairs...
icon = self.LoadRSRIcon()
if icon:
self.SetIcon( icon )
def CreateControls(self, config_parser):
"""Create our sub-controls"""
self.CreateMenuBar()
self.SetupToolBar()
self.CreateStatusBar()
self.leftSplitter = wx.SplitterWindow(
self
)
self.rightSplitter = wx.SplitterWindow(
self.leftSplitter
)
self.listControl = listviews.DataView(
self.leftSplitter,
columns = PROFILE_VIEW_COLUMNS,
name='mainlist',
)
self.squareMap = squaremap.SquareMap(
self.rightSplitter,
padding = 6,
labels = True,
adapter = self.adapter,
square_style = True,
)
self.tabs = wx.Notebook(
self.rightSplitter,
)
self.CreateSourceWindow(self.tabs)
self.calleeListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='callee',
)
self.allCalleeListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='allcallee',
)
self.allCallerListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='allcaller',
)
self.callerListControl = listviews.DataView(
self.tabs,
columns = PROFILE_VIEW_COLUMNS,
name='caller',
)
self.ProfileListControls = [
self.listControl,
self.calleeListControl,
self.allCalleeListControl,
self.callerListControl,
self.allCallerListControl,
]
self.tabs.AddPage(self.calleeListControl, _('Callees'), True)
self.tabs.AddPage(self.allCalleeListControl, _('All Callees'), False)
self.tabs.AddPage(self.callerListControl, _('Callers'), False)
self.tabs.AddPage(self.allCallerListControl, _('All Callers'), False)
if editor:
self.tabs.AddPage(self.sourceCodeControl, _('Source Code'), False)
self.rightSplitter.SetSashSize(10)
# calculate size as proportional value for initial display...
self.LoadState( config_parser )
width, height = self.GetSizeTuple()
rightsplit = 2 * (height // 3)
leftsplit = width // 3
self.rightSplitter.SplitHorizontally(self.squareMap, self.tabs,
rightsplit)
self.leftSplitter.SplitVertically(self.listControl, self.rightSplitter,
leftsplit)
squaremap.EVT_SQUARE_HIGHLIGHTED(self.squareMap,
self.OnSquareHighlightedMap)
squaremap.EVT_SQUARE_SELECTED(self.listControl,
self.OnSquareSelectedList)
squaremap.EVT_SQUARE_SELECTED(self.squareMap, self.OnSquareSelectedMap)
squaremap.EVT_SQUARE_ACTIVATED(self.squareMap, self.OnNodeActivated)
for control in self.ProfileListControls:
squaremap.EVT_SQUARE_ACTIVATED(control, self.OnNodeActivated)
squaremap.EVT_SQUARE_HIGHLIGHTED(control,
self.OnSquareHighlightedList)
self.moreSquareViewItem.Check(self.squareMap.square_style)
def CreateMenuBar(self):
"""Create our menu-bar for triggering operations"""
menubar = wx.MenuBar()
menu = wx.Menu()
menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file'))
menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file'))
menu.AppendSeparator()
menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window'))
menubar.Append(menu, _('&File'))
menu = wx.Menu()
self.packageMenuItem = menu.AppendCheckItem(
ID_PACKAGE_VIEW, _('&File View'),
_('View time spent by package/module')
)
self.percentageMenuItem = menu.AppendCheckItem(
ID_PERCENTAGE_VIEW, _('&Percentage View'),
_('View time spent as percent of overall time')
)
self.rootViewItem = menu.Append(
ID_ROOT_VIEW, _('&Root View (Home)'),
_('View the root of the tree')
)
self.backViewItem = menu.Append(
ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history')
)
self.upViewItem = menu.Append(
ID_UP_VIEW, _('&Up'),
_('Go "up" to the parent of this node with the largest cumulative total')
)
self.moreSquareViewItem = menu.AppendCheckItem(
ID_MORE_SQUARE, _('&Hierarchic Squares'),
_('Toggle hierarchic squares in the square-map view')
)
# This stuff isn't really all that useful for profiling,
# it's more about how to generate graphics to describe profiling...
self.deeperViewItem = menu.Append(
ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views')
)
self.shallowerViewItem = menu.Append(
ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views')
)
# wx.ToolTip.Enable(True)
menubar.Append(menu, _('&View'))
self.viewTypeMenu =wx.Menu( )
menubar.Append(self.viewTypeMenu, _('View &Type'))
self.SetMenuBar(menubar)
wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True))
wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile)
wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory)
wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView)
wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView)
wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView)
wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView)
wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView)
wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView)
wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle)
def LoadRSRIcon( self ):
try:
from runsnakerun.resources import rsricon_png
return getIcon( rsricon_png.data )
except Exception, err:
return None
sourceCodeControl = None
def CreateSourceWindow(self, tabs):
"""Create our source-view window for tabs"""
if editor and self.sourceCodeControl is None:
self.sourceCodeControl = wx.py.editwindow.EditWindow(
self.tabs, -1
)
self.sourceCodeControl.SetText(u"")
self.sourceFileShown = None
self.sourceCodeControl.setDisplayLineNumbers(True)
def SetupToolBar(self):
"""Create the toolbar for common actions"""
tb = self.CreateToolBar(self.TBFLAGS)
tsize = (24, 24)
tb.ToolBitmapSize = tsize
open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,
tsize)
tb.AddLabelTool(ID_OPEN, "Open", open_bmp, shortHelp="Open",
longHelp="Open a (c)Profile trace file")
if not osx:
tb.AddSeparator()
# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)
self.rootViewTool = tb.AddLabelTool(
ID_ROOT_VIEW, _("Root View"),
wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),
shortHelp=_("Display the root of the current view tree (home view)")
)
self.rootViewTool = tb.AddLabelTool(
ID_BACK_VIEW, _("Back"),
wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),
shortHelp=_("Back to the previously activated node in the call tree")
)
self.upViewTool = tb.AddLabelTool(
ID_UP_VIEW, _("Up"),
wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),
shortHelp=_("Go one level up the call tree (highest-percentage parent)")
)
if not osx:
tb.AddSeparator()
# TODO: figure out why the control is sizing the label incorrectly on Linux
self.percentageViewTool = wx.CheckBox(tb, -1, _("Percent "))
self.percentageViewTool.SetToolTip(wx.ToolTip(
_("Toggle display of percentages in list views")))
tb.AddControl(self.percentageViewTool)
wx.EVT_CHECKBOX(self.percentageViewTool,
self.percentageViewTool.GetId(), self.OnPercentageView)
self.viewTypeTool= wx.Choice( tb, -1, choices= getattr(self.loader,'ROOTS',[]) )
self.viewTypeTool.SetToolTip(wx.ToolTip(
_("Switch between different hierarchic views of the data")))
wx.EVT_CHOICE( self.viewTypeTool, self.viewTypeTool.GetId(), self.OnViewTypeTool )
tb.AddControl( self.viewTypeTool )
tb.Realize()
def OnViewTypeTool( self, event ):
"""When the user changes the selection, make that our selection"""
new = self.viewTypeTool.GetStringSelection()
if new != self.viewType:
self.viewType = new
self.OnRootView( event )
def ConfigureViewTypeChoices( self, event=None ):
"""Configure the set of View types in the toolbar (and menus)"""
self.viewTypeTool.SetItems( getattr( self.loader, 'ROOTS', [] ))
if self.loader and self.viewType in self.loader.ROOTS:
self.viewTypeTool.SetSelection( self.loader.ROOTS.index( self.viewType ))
# configure the menu with the available choices...
def chooser( typ ):
def Callback( event ):
if typ != self.viewType:
self.viewType = typ
self.OnRootView( event )
return Callback
# Clear all previous items
for item in self.viewTypeMenu.GetMenuItems():
self.viewTypeMenu.DeleteItem( item )
if self.loader and self.loader.ROOTS:
for root in self.loader.ROOTS:
item = wx.MenuItem(
self.viewTypeMenu, -1, root.title(),
_("View hierarchy by %(name)s")%{
'name': root.title(),
},
kind=wx.ITEM_RADIO,
)
item.SetCheckable( True )
self.viewTypeMenu.AppendItem( item )
item.Check( root == self.viewType )
wx.EVT_MENU( self, item.GetId(), chooser( root ))
def OnOpenFile(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN|wx.FD_MULTIPLE)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load(*paths)
else:
self.load(*paths)
def OnOpenMemory(self, event):
"""Request to open a new profile file"""
dialog = wx.FileDialog(self, style=wx.OPEN)
if dialog.ShowModal() == wx.ID_OK:
path = dialog.GetPath()
if self.loader:
# we've already got a displayed data-set, open new window...
frame = MainFrame()
frame.Show(True)
frame.load_memory(path)
else:
self.load_memory(path)
def OnShallowerView(self, event):
if not self.squareMap.max_depth:
new_depth = self.squareMap.max_depth_seen or 0 - 1
else:
new_depth = self.squareMap.max_depth - 1
self.squareMap.max_depth = max((1, new_depth))
self.squareMap.Refresh()
def OnDeeperView(self, event):
if not self.squareMap.max_depth:
new_depth = 1
else:
new_depth = self.squareMap.max_depth + 1
self.squareMap.max_depth = max((self.squareMap.max_depth_seen or 0,
new_depth))
self.squareMap.Refresh()
def OnPackageView(self, event):
self.SetPackageView(not self.directoryView)
def SetPackageView(self, directoryView):
"""Set whether to use directory/package based view"""
self.directoryView = not self.directoryView
self.packageMenuItem.Check(self.directoryView)
self.packageViewTool.SetValue(self.directoryView)
if self.loader:
self.SetModel(self.loader)
self.RecordHistory()
def OnPercentageView(self, event):
"""Handle percentage-view event from menu/toolbar"""
self.SetPercentageView(not self.percentageView)
def SetPercentageView(self, percentageView):
"""Set whether to display percentage or absolute values"""
self.percentageView = percentageView
self.percentageMenuItem.Check(self.percentageView)
self.percentageViewTool.SetValue(self.percentageView)
total = self.adapter.value( self.loader.get_root( self.viewType ) )
for control in self.ProfileListControls:
control.SetPercentage(self.percentageView, total)
self.adapter.SetPercentage(self.percentageView, total)
def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
if parents:
if not selected_parent:
parents.sort(key = lambda a: self.adapter.value(node, a))
selected_parent = parents[-1]
class event:
node = selected_parent
self.OnNodeActivated(event)
else:
self.SetStatusText(_('No parents for the currently selected node: %(node_name)s')
% dict(node_name=self.adapter.label(node)))
else:
self.SetStatusText(_('No currently selected node'))
def OnBackView(self, event):
"""Request to move backward in the history"""
self.historyIndex -= 1
try:
self.RestoreHistory(self.history[self.historyIndex])
except IndexError, err:
self.SetStatusText(_('No further history available'))
def OnRootView(self, event):
"""Reset view to the root of the tree"""
self.adapter, tree, rows = self.RootNode()
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
self.ConfigureViewTypeChoices()
def OnNodeActivated(self, event):
"""Double-click or enter on a node in some control..."""
self.activated_node = self.selected_node = event.node
self.squareMap.SetModel(event.node, self.adapter)
self.squareMap.SetSelected( event.node )
if editor:
if self.SourceShowFile(event.node):
if hasattr(event.node,'lineno'):
self.sourceCodeControl.GotoLine(event.node.lineno)
self.RecordHistory()
def SourceShowFile(self, node):
"""Show the given file in the source-code view (attempt it anyway)"""
filename = self.adapter.filename( node )
if filename and self.sourceFileShown != filename:
try:
data = open(filename).read()
except Exception, err:
# TODO: load from zips/eggs? What about .pyc issues?
return None
else:
#self.sourceCodeControl.setText(data)
self.sourceCodeControl.ClearAll()
self.sourceCodeControl.AppendText( data )
return filename
def OnSquareHighlightedMap(self, event):
self.SetStatusText(self.adapter.label(event.node))
self.listControl.SetIndicated(event.node)
text = self.squareMap.adapter.label(event.node)
self.squareMap.SetToolTipString(text)
self.SetStatusText(text)
def OnSquareHighlightedList(self, event):
self.SetStatusText(self.adapter.label(event.node))
self.squareMap.SetHighlight(event.node, propagate=False)
def OnSquareSelectedList(self, event):
self.SetStatusText(self.adapter.label(event.node))
self.squareMap.SetSelected(event.node)
self.OnSquareSelected(event)
self.RecordHistory()
def OnSquareSelectedMap(self, event):
self.listControl.SetSelected(event.node)
self.OnSquareSelected(event)
self.RecordHistory()
def OnSquareSelected(self, event):
"""Update all views to show selection children/parents"""
self.selected_node = event.node
self.calleeListControl.integrateRecords(self.adapter.children( event.node) )
self.callerListControl.integrateRecords(self.adapter.parents( event.node) )
#self.allCalleeListControl.integrateRecords(event.node.descendants())
#self.allCallerListControl.integrateRecords(event.node.ancestors())
def OnMoreSquareToggle( self, event ):
"""Toggle the more-square view (better looking, but more likely to filter records)"""
self.squareMap.square_style = not self.squareMap.square_style
self.squareMap.Refresh()
self.moreSquareViewItem.Check(self.squareMap.square_style)
restoringHistory = False
def RecordHistory(self):
"""Add the given node to the history-set"""
if not self.restoringHistory:
record = self.activated_node
if self.historyIndex < -1:
try:
del self.history[self.historyIndex+1:]
except AttributeError, err:
pass
if (not self.history) or record != self.history[-1]:
self.history.append(record)
del self.history[:-200]
self.historyIndex = -1
def RestoreHistory(self, record):
self.restoringHistory = True
try:
activated = record
class activated_event:
node = activated
if activated:
self.OnNodeActivated(activated_event)
self.squareMap.SetSelected(activated_event.node)
self.listControl.SetSelected(activated_event.node)
finally:
self.restoringHistory = False
def load(self, *filenames):
"""Load our dataset (iteratively)"""
if len(filenames) == 1:
if os.path.basename( filenames[0] ) == 'index.coldshot':
return self.load_coldshot( os.path.dirname( filenames[0]) )
elif os.path.isdir( filenames[0] ):
return self.load_coldshot( filenames[0] )
try:
self.loader = pstatsloader.PStatsLoader(*filenames)
self.ConfigureViewTypeChoices()
self.SetModel( self.loader )
self.viewType = self.loader.ROOTS[0]
self.SetTitle(_("Run Snake Run: %(filenames)s")
% {'filenames': ', '.join(filenames)[:120]})
except (IOError, OSError, ValueError,MemoryError), err:
self.SetStatusText(
_('Failure during load of %(filenames)s: %(err)s'
) % dict(
filenames=" ".join([repr(x) for x in filenames]),
err=err
))
def load_memory(self, filename ):
self.viewType = 'memory'
for view in self.ProfileListControls:
view.SetColumns( MEMORY_VIEW_COLUMNS )
self.loader = meliaeloader.Loader( filename )
self.ConfigureViewTypeChoices()
self.viewType = self.loader.ROOTS[0]
self.SetModel( self.loader )
def load_coldshot(self, dirname ):
from runsnakerun import coldshotadapter
self.loader = coldshotadapter.Loader( dirname )
self.loader.load()
self.ConfigureViewTypeChoices()
self.viewType = self.loader.ROOTS[0]
self.SetModel( self.loader )
def SetModel(self, loader):
"""Set our overall model (a loader object) and populate sub-controls"""
self.loader = loader
self.adapter, tree, rows = self.RootNode()
self.listControl.integrateRecords(rows.values())
self.activated_node = tree
self.squareMap.SetModel(tree, self.adapter)
self.RecordHistory()
def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows
def SaveState( self, config_parser ):
"""Retrieve window state to be restored on the next run..."""
if not config_parser.has_section( 'window' ):
config_parser.add_section( 'window' )
if self.IsMaximized():
config_parser.set( 'window', 'maximized', str(True))
else:
config_parser.set( 'window', 'maximized', str(False))
size = self.GetSizeTuple()
position = self.GetPositionTuple()
config_parser.set( 'window', 'width', str(size[0]) )
config_parser.set( 'window', 'height', str(size[1]) )
config_parser.set( 'window', 'x', str(position[0]) )
config_parser.set( 'window', 'y', str(position[1]) )
for control in self.ProfileListControls:
control.SaveState( config_parser )
return config_parser
def LoadState( self, config_parser ):
"""Set our window state from the given config_parser instance"""
if not config_parser:
return
if (
not config_parser.has_section( 'window' ) or (
config_parser.has_option( 'window','maximized' ) and
config_parser.getboolean( 'window', 'maximized' )
)
):
self.Maximize(True)
try:
width,height,x,y = [
config_parser.getint( 'window',key )
for key in ['width','height','x','y']
]
self.SetPosition( (x,y))
self.SetSize( (width,height))
except ConfigParser.NoSectionError, err:
# the file isn't written yet, so don't even warn...
pass
except Exception, err:
# this is just convenience, if it breaks in *any* way, ignore it...
log.error(
"Unable to load window preferences, ignoring: %s", traceback.format_exc()
)
try:
font_size = config_parser.getint('window', 'font_size')
except Exception:
pass # use the default, by default
else:
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
font.SetPointSize(font_size)
for ctrl in self.ProfileListControls:
ctrl.SetFont(font)
for control in self.ProfileListControls:
control.LoadState( config_parser )
self.config = config_parser
wx.EVT_CLOSE( self, self.OnCloseWindow )
def OnCloseWindow( self, event=None ):
try:
self.SaveState( self.config )
config = config_file()
temp = config + '~'
self.config.write( open( temp,'w') )
os.rename( temp, config )
except Exception, err:
log.error( "Unable to write window preferences, ignoring: %s", traceback.format_exc())
self.Destroy()
class RunSnakeRunApp(wx.App):
    """Basic application for holding the viewing Frame"""
    handler = wx.PNGHandler()

    def OnInit(self):
        """Initialise the application"""
        wx.Image.AddHandler(self.handler)
        frame = MainFrame(config_parser=load_config())
        frame.Show(True)
        self.SetTopWindow(frame)
        args = sys.argv[1:]
        if args:
            if args[0] == '-m':
                # Memory-dump mode: expects a Meliae dump file after -m.
                if args[1:]:
                    wx.CallAfter(frame.load_memory, args[1])
                else:
                    log.warn('No memory file specified')
            else:
                wx.CallAfter(frame.load, *args)
        return True
class MeliaeViewApp(wx.App):
    """Application entry point that only views Meliae memory dumps"""
    handler = wx.PNGHandler()

    def OnInit(self):
        """Initialise the application"""
        wx.Image.AddHandler(self.handler)
        frame = MainFrame(config_parser=load_config())
        frame.Show(True)
        self.SetTopWindow(frame)
        args = sys.argv[1:]
        if args:
            wx.CallAfter(frame.load_memory, args[0])
        else:
            log.warn('No memory file specified')
        return True
def getIcon( data ):
    """Return the data from the resource as a wxIcon"""
    import cStringIO
    # decode the PNG bytes via an in-memory stream, then convert
    # image -> bitmap -> icon
    image = wx.ImageFromStream( cStringIO.StringIO( data ) )
    icon = wx.EmptyIcon()
    icon.CopyFromBitmap( wx.BitmapFromImage( image ) )
    return icon
def config_directory():
base = homedirectory.appdatadirectory()
directory = os.path.join( base, 'RunSnakeRun' )
if not os.path.exists( directory ):
os.makedirs( directory )
return directory
def config_file():
    """Return the full path of the user's runsnake.conf preferences file"""
    return os.path.join( config_directory(), 'runsnake.conf' )
def load_config( ):
    """Load stored preferences into a SafeConfigParser (empty if no file yet)"""
    parser = ConfigParser.SafeConfigParser()
    target = config_file()
    if os.path.exists( target ):
        parser.read( target )
    return parser
# Command-line usage text for the runsnake entry points.
usage = """runsnake.py profilefile
runsnake.py -m meliae.memoryfile
profilefile -- a file generated by a HotShot profile run from Python
"""
def main():
    """Mainloop for the application"""
    logging.basicConfig(level=logging.INFO)
    RunSnakeRunApp(0).MainLoop()
def meliaemain():
    """Mainloop for the meliae memory-dump viewer application"""
    logging.basicConfig(level=logging.INFO)
    MeliaeViewApp(0).MainLoop()
if __name__ == "__main__":
    # NOTE(review): main() calls basicConfig again; the second call is a
    # no-op, so this duplication is harmless.
    logging.basicConfig(level=logging.INFO)
    main()
| |
import logging
from typing import List, Optional, Union
import numpy as np
from gi.repository import Gtk
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg
from matplotlib.figure import Figure
from sastool.misc.basicfit import findpeak_single
from .functions import savefiguretoclipboard
from .plotimage import PlotImageWindow
from .toolwindow import ToolWindow
from ...core.devices import Motor
from ...core.instrument.instrument import Instrument
# Module-level logger; the level is pinned to INFO here rather than being
# left to the application's logging configuration.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ScanGraph(ToolWindow):
    """Tool window plotting scan curves: live while a scan runs ("scan
    mode") or for a completed data set ("view mode" with a movable
    cursor, peak fitting and motor-movement shortcuts)."""
    widgets_to_make_insensitive = ['buttonbox', 'scalebox']
    def __init__(self, signals: List[str], data: Union[np.ndarray, int], windowtitle: Union[int, str], comment: str,
                 instrument: Optional[Instrument] = None):
        """if data is an integer, we assume scanning mode, up to this number of data points. It can also be a numpy
        structured array with dtype `[(s,float) for s in signals]`: then we are in plotting mode. If in scan mode,
        self._dataindex <len(self._data). In plotting mode, self._dataindex>=len(self._data).
        signals should be a list of signal labels. The first one is the abscissa. This list has to have at least two
        elements.
        if instrument is given, motors can be moved.
        """
        # guards against re-entrant updates between the cursor scale
        # widget and redraw_cursor()
        self._in_scalechanged = False
        self.fig = None
        self.axes = None
        self.canvas = None
        self.toolbox = None
        if isinstance(windowtitle, int):
            windowtitle = 'Scan #{:d}'.format(windowtitle)
        self.comment = comment
        if isinstance(data, np.ndarray):
            self._data = data
            self._dataindex = len(self._data)
        elif isinstance(data, int):
            self._data = np.zeros(data, dtype=[(s, float) for s in signals])
            self._dataindex = 0
        else:
            raise TypeError('Unknown type for data: %s' % type(data))
        if len(signals) < 2:
            raise ValueError('At least one signal has to be given apart from the abscissa')
        # NOTE(review): `instrument` is dereferenced unconditionally here
        # even though the signature allows instrument=None -- confirm that
        # callers always pass an Instrument.
        if instrument.online:
            self.required_devices = ['Motor_' + self.abscissaname]
        self._cursorindex = 0
        self._cursor = None
        self._lastimage = None
        self._lastpeakposition = None
        super().__init__('core_scangraph.glade', 'scangraph',
                         instrument, windowtitle)
    def init_gui(self, *args, **kwargs):
        """Build the matplotlib canvas, extra toolbar buttons and the counter treeview."""
        self.fig = Figure()
        self.axes = self.fig.add_subplot(1, 1, 1)
        self.canvas = FigureCanvasGTK3Agg(self.fig)
        self.canvas.set_size_request(-1, 400)
        self.toolbox = NavigationToolbar2GTK3(self.canvas, self.widget)
        b = Gtk.ToolButton(icon_widget=Gtk.Image.new_from_icon_name('view-refresh', Gtk.IconSize.LARGE_TOOLBAR),
                           label='Redraw')
        b.set_tooltip_text('Redraw the signals')
        b.connect('clicked', lambda b_: self.redraw_signals())
        self.toolbox.insert(b, 9)
        b = Gtk.ToolButton(icon_widget=Gtk.Image.new_from_icon_name('edit-copy', Gtk.IconSize.LARGE_TOOLBAR),
                           label='Copy')
        b.set_tooltip_text('Copy the image to the clipboard')
        b.connect('clicked', lambda b_, f=self.fig: savefiguretoclipboard(f))
        self.toolbox.insert(b, 9)
        # pack the figure into the appropriate vbox
        figbox = self.builder.get_object('figbox')
        figbox.pack_start(self.canvas, True, True, 0)
        figbox.pack_start(self.toolbox, False, True, 0)
        # adjust the treeview of the counters
        counterview = self.builder.get_object('counterview')
        assert isinstance(counterview, Gtk.TreeView)
        countermodel = counterview.get_model()
        assert isinstance(countermodel, Gtk.ListStore)
        # the model columns are:
        # signal name, visibility, scaling adjustment, scale value.
        for c in self.signals:
            countermodel.append((c, c != 'FSN', Gtk.Adjustment(
                value=1.0, lower=0.0, upper=1.0e6, step_increment=1.0,
                page_increment=10.0, page_size=0.0), 1.0))
        # create the needed treeview columns
        # Signal name column
        tc = Gtk.TreeViewColumn('Signal', Gtk.CellRendererText(), text=0)
        tc.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        counterview.append_column(tc)
        # Signal visibility column
        cr = Gtk.CellRendererToggle()
        cr.connect('toggled', self.on_column_visibility_changed, countermodel)
        tc = Gtk.TreeViewColumn('Show', cr, active=1)
        tc.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        counterview.append_column(tc)
        # Signal scaling column
        cr = Gtk.CellRendererSpin()
        cr.set_property('digits', 2)
        cr.set_property('editable', True)
        cr.connect('edited', self.on_scaling_edited, countermodel)
        tc = Gtk.TreeViewColumn('Scaling', cr, adjustment=2, text=3)
        tc.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        counterview.append_column(tc)
        # Select the second counter: first is always the FSN of the image.
        it = countermodel.get_iter_first()
        it = countermodel.iter_next(it)
        counterview.get_selection().select_iter(it)
        del it
        # set visibility of the buttonbox and the cursor movement box.
        self.builder.get_object('buttonbox').set_visible(not self.is_scan_mode())
        self.builder.get_object('scalebox').set_visible(not self.is_scan_mode())
        if not self.is_scan_mode():
            self.start_view_mode()
        # if we have self.instrument, make motor moving buttons visible and sensitive.
        self.builder.get_object('move_to_cursor_button').set_visible(self.instrument.online)
        self.builder.get_object('move_to_peak_button').set_visible(self.instrument.online)
        self.builder.get_object('move_to_cursor_button').set_sensitive(self.instrument.online)
        self.builder.get_object('move_to_peak_button').set_sensitive(False)
        self.redraw_signals()
    def is_scan_mode(self) -> bool:
        """Decide if we are in scan mode: if the data array is not yet full."""
        return self._dataindex < len(self._data)
    def start_view_mode(self):
        """Switch to view mode: show cursor controls and configure the cursor scale widget."""
        if self.is_scan_mode():
            raise ValueError('Cannot start view mode: a scan is running.')
        if not len(self._data) or not self._dataindex:
            # empty scan
            self.error_message('No scan points.')
            return
        # set button box and cursor box visible.
        self.builder.get_object('buttonbox').set_visible(True)
        self.builder.get_object('scalebox').set_visible(True)
        # adjust the limits and increments of the cursor movement scale widget
        abscissa = self.abscissa
        self.builder.get_object('cursorscale').set_range(
            abscissa.min(),
            abscissa.max())
        step = (abscissa.max() - abscissa.min()) / (len(abscissa) - 1)
        self.builder.get_object('cursorscale').set_increments(step, 10 * step)
        self.builder.get_object('cursorscale').set_value(abscissa[self._cursorindex])
        # we don't need to `self.redraw_cursor()` because self.redraw_signals() already took care of it.
    def truncate_scan(self):
        """Can be used for user-broken scans"""
        self._data = self._data[:self._dataindex]
        self.start_view_mode()
    def append_data(self, datatuple):
        """Append a new scan point"""
        if not self.is_scan_mode():
            raise ValueError('Cannot append data: not in scan mode')
        self._data[self._dataindex] = datatuple
        self._dataindex += 1
        self.redraw_signals()
        if not self.is_scan_mode(): # self._dataindex reached len(self._data)
            self.start_view_mode()
    def new_image(self, matrix, param, mask):
        """Store the latest detector image and refresh the 2D display (param/mask unused here)."""
        self._lastimage = matrix
        self.redraw_2dimage()
    def on_column_visibility_changed(self, cellrenderer, treepath, model):
        # toggle the 'visible' flag of the clicked signal row
        model[treepath][1] = not model[treepath][1]
        self.redraw_signals()
    def on_scaling_edited(self, cellrenderer, treepath, newvalue, model):
        # keep the spin adjustment and the displayed scale value in sync
        model[treepath][2].set_value(float(newvalue))
        model[treepath][3] = float(newvalue)
        self.redraw_signals()
    @property
    def abscissaname(self):
        # the first field of the structured array is the abscissa
        return self._data.dtype.names[0]
    @property
    def signals(self):
        # all fields except the first are plottable signals
        return self._data.dtype.names[1:]
    @property
    def visible_signals(self):
        # names of signals whose 'Show' toggle is active in the counter list
        return [row[0] for row in self.builder.get_object('counterstore') if row[1]]
    @property
    def abscissa(self):
        return self._data[self.abscissaname]
    def __len__(self):
        # number of scan points recorded so far
        return self._dataindex
    def redraw_cursor(self):
        """Redraw the yellow cursor band and the legend of signal values at the cursor."""
        if self.is_scan_mode() or (not len(self._data)) or (not self._dataindex):
            # do not draw cursor in scan mode and when no points are available
            return
        try:
            self._cursor.remove()
        except AttributeError:
            pass
        finally:
            self._cursor = None
        cursorpos = self.abscissa[self._cursorindex]
        cursorwidth = (self.abscissa.max() - self.abscissa.min()) / (len(self) - 1) / 5
        self._cursor = self.axes.axvspan(cursorpos - cursorwidth * 0.5, cursorpos + cursorwidth * 0.5,
                                         facecolor='yellow', alpha=0.5)
        self.axes.legend(self.axes.lines,
                         ['%s: %f' % (s, self._data[s][self._cursorindex]) for s in self.visible_signals],
                         fontsize='small', loc='best')
        self.canvas.draw()
        if not self._in_scalechanged:
            # update the scale widget without re-triggering on_scalechanged
            self._in_scalechanged = True
            try:
                self.builder.get_object('cursorscale').set_value(cursorpos)
            finally:
                self._in_scalechanged = False
        self.redraw_2dimage()
    def redraw_2dimage(self):
        """Show the 2D detector image at the cursor (view mode) or the last image (scan mode)."""
        if not self.builder.get_object('show2d_checkbutton').get_active():
            return
        if not self.is_scan_mode():
            fsn = int(self._data['FSN'][self._cursorindex])
            data = self.instrument.services['filesequence'].load_cbf(
                self.instrument.config['path']['prefixes']['scn'], fsn)
            imgindex = self._cursorindex + 1
        else:
            data = self._lastimage
            if self._lastimage is None:
                return
            imgindex = self._dataindex
        mask = self.instrument.services['filesequence'].get_mask(self.instrument.config['scan']['mask_total'])
        piw = PlotImageWindow.get_latest_window()
        piw.set_image(data)
        piw.set_distance(self.instrument.config['geometry']['dist_sample_det'])
        piw.set_wavelength(self.instrument.config['geometry']['wavelength'])
        piw.set_pixelsize(self.instrument.config['geometry']['pixelsize'])
        piw.set_beampos(self.instrument.config['geometry']['beamposy'],
                        self.instrument.config['geometry']['beamposx'])
        piw.set_mask(mask)
        piw.set_title('{:d}/{:d} point of {}'.format(imgindex, len(self), self.widget.get_title()))
    def redraw_signals(self):
        """Clear the axes and re-plot every visible signal (scaled), then redraw the cursor."""
        try:
            self._cursor.remove()
        except AttributeError:
            pass
        finally:
            self._cursor = None
        # any previous peak fit is invalidated by a redraw
        self._lastpeakposition = None
        self.builder.get_object('move_to_peak_button').set_sensitive(False)
        self.axes.clear()
        if not self._dataindex:
            # no data point, do not plot anything.
            return
        model = self.builder.get_object('counterstore')
        for row in model:
            if not row[1]:
                continue # signal not visible
            signal = row[0] # signal name
            scaling = row[3] # scaling factor
            self.axes.plot(self.abscissa[0:self._dataindex],
                           self._data[signal][0:self._dataindex] * scaling, '.-', label=signal)
        self.axes.legend(loc='best', fontsize='small')
        self.axes.xaxis.set_label_text(self.abscissaname)
        if self.comment is not None:
            self.axes.set_title(self.comment)
        self.redraw_cursor()
        self.canvas.draw()
    def on_gofirst(self, button):
        # cursor to the first scan point
        self._cursorindex = 0
        self.redraw_cursor()
    def on_goprevious(self, button):
        self._cursorindex = max(0, self._cursorindex - 1)
        self.redraw_cursor()
    def on_gonext(self, button):
        self._cursorindex = min(self._dataindex - 1, self._cursorindex + 1)
        self.redraw_cursor()
    def on_golast(self, button):
        # cursor to the last recorded scan point
        self._cursorindex = self._dataindex - 1
        self.redraw_cursor()
    def on_scalechanged(self, scale):
        """Handler for the cursor scale widget: snap the cursor to the nearest abscissa point."""
        if self._in_scalechanged:
            return
        self._in_scalechanged = True
        try:
            val = scale.get_value()
            self._cursorindex = np.abs(self.abscissa - val).argmin()
            scale.set_value(self.abscissa[self._cursorindex])
            self.redraw_cursor()
        finally:
            self._in_scalechanged = False
    def on_cursortomax(self, button):
        """Move the cursor to the maximum of the selected signal."""
        model, it = self.builder.get_object('counterview').get_selection().get_selected()
        if it is None:
            return
        signal = model[it][0]
        self._cursorindex = self._data[signal].argmax()
        self.redraw_cursor()
    def on_cursortomin(self, button):
        """Move the cursor to the minimum of the selected signal."""
        model, it = self.builder.get_object('counterview').get_selection().get_selected()
        if it is None:
            return
        signal = model[it][0]
        self._cursorindex = self._data[signal].argmin()
        self.redraw_cursor()
    def on_show2d_toggled(self, checkbutton):
        if checkbutton.get_active():
            self.redraw_2dimage()
    def on_fitpeak(self, menuentry: Gtk.MenuItem):
        """Fit a peak to the selected signal within the currently zoomed area.

        The menu item name encodes the curve type plus a trailing sign
        character: '0' (either sign), '+' (positive peak), '-' (negative peak).
        """
        curvetype = menuentry.get_name()[:-1]
        if menuentry.get_name().endswith('0'):
            signs = (1, -1)
        elif menuentry.get_name().endswith('+'):
            signs = (1,)
        elif menuentry.get_name().endswith('-'):
            signs = (-1,)
        else:
            raise ValueError(menuentry.get_name())
        model, it = self.builder.get_object('counterview').get_selection().get_selected()
        if it is None:
            return False
        signalname = model[it][0]
        abscissa = self.abscissa
        signal = self._data[signalname]
        left, right, bottom, top = self.axes.axis()
        # only fit the points currently visible inside the zoomed axes
        index = (abscissa >= left) & (abscissa <= right) & (signal <= top) & (signal >= bottom)
        try:
            position, hwhm, baseline, amplitude, stat = findpeak_single(abscissa[index], signal[index], None,
                                                                        return_stat=True, curve=curvetype, signs=signs)
        except ValueError:
            self.error_message('Fitting error: Probably no points of the selected curve are in the zoomed area.')
            return
        x = np.linspace(abscissa[index].min(), abscissa[index].max(), index.sum() * 5)
        assert isinstance(x, np.ndarray)
        if curvetype == 'Gaussian':
            y = amplitude * np.exp(-0.5 * (x - position) ** 2 / hwhm ** 2) + baseline
        elif curvetype == 'Lorentzian':
            y = amplitude * hwhm ** 2 / (hwhm ** 2 + (position - x) ** 2) + baseline
        else:
            raise ValueError(curvetype)
        self.axes.plot(x, y, 'r-', label='Fit')
        # fit results appear to be error-value objects exposing .val --
        # presumably from sastool; confirm against findpeak_single.
        self.axes.text(position.val, amplitude.val + baseline.val, str(position), ha='center', va='bottom')
        self.canvas.draw()
        self._lastpeakposition = position
        self.builder.get_object('move_to_peak_button').set_sensitive(True)
    def on_movetocursor(self, button):
        """Move the scanned motor to the current cursor position."""
        self.set_sensitive(False, 'Moving motor {} to cursor.'.format(self.abscissaname), ['move_to_cursor_button'])
        try:
            self.instrument.motors[self.abscissaname].moveto(self.abscissa[self._cursorindex])
        except Exception as exc:
            self.error_message('Cannot move motor: {}'.format(exc.args[0]))
            # re-enable immediately on failure; on success on_motor_stop does it
            self.set_sensitive(True)
    def on_motor_stop(self, motor: Motor, targetreached: bool):
        if not self.get_sensitive():
            # the motor was moving because of a Move to cursor or
            # Move to peak operation
            self.set_sensitive(True)
    def on_movetopeak(self, button):
        """Move the scanned motor to the last fitted peak position."""
        self.set_sensitive(False, 'Moving motor {} to peak.'.format(self.abscissaname), ['move_to_peak_button'])
        try:
            self.instrument.motors[self.abscissaname].moveto(self._lastpeakposition.val)
        except Exception as exc:
            self.error_message('Cannot move motor: {}'.format(exc.args[0]))
    def on_showallsignals(self, button):
        for row in self.builder.get_object('counterstore'):
            row[1] = True
        self.redraw_signals()
    def on_hideallsignals(self, button):
        for row in self.builder.get_object('counterstore'):
            row[1] = False
        self.redraw_signals()
    def on_differentiate(self, button):
        """Open a new ScanGraph showing the finite-difference derivative of this scan."""
        newdata = np.zeros(self._dataindex - 1, dtype=self._data.dtype)
        abscissaname = self.abscissaname
        steps = self.abscissa[1:self._dataindex] - self.abscissa[0:self._dataindex - 1]
        for field in self._data.dtype.names:
            if field == abscissaname:
                continue
            newdata[field] = (self._data[field][1:self._dataindex] - self._data[field][0:self._dataindex - 1]) / steps
        # new abscissa: midpoints of the original intervals
        newdata[abscissaname] = 0.5 * (
            self.abscissa[1:self._dataindex] + self.abscissa[0:self._dataindex - 1])
        sg = self.__class__(self._data.dtype.names, newdata, 'Derivative of ' + self.widget.get_title(),
                            'Derivative of ' + self.comment, self.instrument)
        sg.show_all()
    def on_integrate(self, button):
        """Open a new ScanGraph with per-interval trapezoid areas of this scan."""
        newdata = np.zeros(self._dataindex - 1, dtype=self._data.dtype)
        abscissaname = self.abscissaname
        steps = self.abscissa[1:self._dataindex] - self.abscissa[0:self._dataindex - 1]
        for field in self._data.dtype.names:
            newdata[field] = (self._data[field][1:self._dataindex] + self._data[field][
                                                                     0:self._dataindex - 1]) * 0.5 * steps
        # new abscissa: midpoints of the original intervals (overwrites the
        # value computed for this field in the loop above)
        newdata[abscissaname] = 0.5 * (
            self.abscissa[1:self._dataindex] + self.abscissa[0:self._dataindex - 1])
        sg = self.__class__(self._data.dtype.names, newdata, 'Integral of ' + self.widget.get_title(),
                            'Integral of ' + self.comment, self.instrument)
        sg.show_all()
    def cleanup(self):
        """Release figure/canvas/toolbar references and drop the data array."""
        assert isinstance(self.fig, Figure)
        self.fig.clear()
        del self.axes
        del self.fig
        del self.canvas
        del self.toolbox
        del self._data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.