# NOTE(review): dataset-extraction artifact (markdown table header) — this dump
# concatenates three unrelated Python files; boundaries are marked below.
import datetime
import time
from django.db import models
from django.db.models import Q
from django.db.utils import DatabaseError
from django.test import TestCase
from django.utils import unittest
from google.appengine.api.datastore import Get, Key
from ..db.utils import get_cursor, set_cursor
from .testmodels import FieldsWithOptionsModel, EmailModel, DateTimeModel, \
OrderedModel, BlobModel
class FilterTest(TestCase):
    """Queryset filter/order/slice tests for the App Engine datastore backend.

    ``setUp`` creates four fixture entities per model; the class-level lists
    below are zipped positionally, so item *i* of each list describes fixture
    entity *i*.

    NOTE(review): ``assertEquals`` is a deprecated alias of ``assertEqual``;
    it is kept unchanged in this documentation-only pass.
    """

    floats = [5.3, 2.6, 9.1, 1.58]
    emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
              'rinnengan@sage.de', 'rasengan@naruto.com']
    datetimes = [datetime.datetime(2010, 1, 1, 0, 0, 0, 0),
                 datetime.datetime(2010, 12, 31, 23, 59, 59, 999999),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, 0),
                 datetime.datetime(2013, 7, 28, 22, 30, 20, 50)]

    def setUp(self):
        # NOTE(review): the loop variable ``float`` shadows the builtin;
        # left byte-identical here (documentation-only pass).
        for index, (float, email, datetime_value) in enumerate(zip(
                FilterTest.floats, FilterTest.emails, FilterTest.datetimes)):
            # Ensure distinct times when saving entities.
            time.sleep(0.01)
            self.last_save_datetime = datetime.datetime.now()
            self.last_save_time = self.last_save_datetime.time()
            ordered_instance = OrderedModel(priority=index, pk=index + 1)
            ordered_instance.save()
            FieldsWithOptionsModel(floating_point=float,
                                   integer=int(float), email=email,
                                   time=self.last_save_time,
                                   foreign_key=ordered_instance).save()
            EmailModel(email=email).save()
            DateTimeModel(datetime=datetime_value).save()

    def test_startswith(self):
        # startswith on a plain field and on a primary-key (email) field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__startswith='r').order_by('email')],
            ['rasengan@naruto.com', 'rinnengan@sage.de'])
        self.assertEquals(
            [entity.email for entity in EmailModel.objects
                .filter(email__startswith='r').order_by('email')],
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_gt(self):
        # Test gt on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__gt=3.1).order_by('floating_point')],
            [5.3, 9.1])
        # Test gt on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3).order_by('integer')],
            [5, 9])
        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__gt='as').order_by('email')],
            ['rasengan@naruto.com', 'rinnengan@sage.de',
             'sharingan@uchias.com', ])
        # Test ForeignKeys with id.
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__gt=2)]),
            ['rasengan@naruto.com', 'rinnengan@sage.de'])
        # And with instance.
        ordered_instance = OrderedModel.objects.get(priority=1)
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__gt=ordered_instance)]),
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_lt(self):
        # Test lt on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__lt=3.1).order_by('floating_point')],
            [1.58, 2.6])
        # Test lt on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__lt=3).order_by('integer')],
            [1, 2])
        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lt='as').order_by('email')],
            ['app-engine@scholardocs.com', ])
        # Filter on datetime.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(time__lt=self.last_save_time).order_by('time')],
            ['app-engine@scholardocs.com', 'sharingan@uchias.com',
             'rinnengan@sage.de'])
        # Test ForeignKeys with id.
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__lt=3)]),
            ['app-engine@scholardocs.com', 'sharingan@uchias.com'])
        # And with instance.
        ordered_instance = OrderedModel.objects.get(priority=2)
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .filter(foreign_key__lt=ordered_instance)]),
            ['app-engine@scholardocs.com', 'sharingan@uchias.com'])

    def test_gte(self):
        # Test gte on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__gte=2.6).order_by('floating_point')],
            [2.6, 5.3, 9.1])
        # Test gte on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__gte=2).order_by('integer')],
            [2, 5, 9])
        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__gte='rinnengan@sage.de').order_by('email')],
            ['rinnengan@sage.de', 'sharingan@uchias.com', ])

    def test_lte(self):
        # Test lte on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__lte=5.3).order_by('floating_point')],
            [1.58, 2.6, 5.3])
        # Test lte on integer.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .filter(integer__lte=5).order_by('integer')],
            [1, 2, 5])
        # Test filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de').order_by('email')],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com',
             'rinnengan@sage.de'])

    def test_equals(self):
        # Test equality filter on primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email='rinnengan@sage.de').order_by('email')],
            ['rinnengan@sage.de'])

    def test_is_null(self):
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__isnull=True).count(), 0)
        # NOTE(review): ``integer=5.4`` passes a float to an integer field;
        # kept as-is — presumably exercises field coercion. TODO confirm.
        FieldsWithOptionsModel(
            integer=5.4, email='shinra.tensai@sixpaths.com',
            time=datetime.datetime.now().time()).save()
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__isnull=True).count(), 1)
        # XXX: These filters will not work because of a Django bug.
        # self.assertEquals(FieldsWithOptionsModel.objects.filter(
        #     foreign_key=None).count(), 1)
        # (it uses left outer joins if checked against isnull)
        # self.assertEquals(FieldsWithOptionsModel.objects.filter(
        #     foreign_key__isnull=True).count(), 1)

    def test_exclude(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .all().exclude(floating_point__lt=9.1)
                .order_by('floating_point')],
            ['rinnengan@sage.de', ])
        # Test exclude with ForeignKey.
        ordered_instance = OrderedModel.objects.get(priority=1)
        self.assertEquals(
            sorted([entity.email for entity in FieldsWithOptionsModel.objects
                .all().exclude(foreign_key__gt=ordered_instance)]),
            ['app-engine@scholardocs.com', 'sharingan@uchias.com'])

    def test_exclude_pk(self):
        self.assertEquals(
            [entity.pk for entity in OrderedModel.objects
                .exclude(pk__in=[2, 3]).order_by('pk')],
            [1, 4])

    def test_chained_filter(self):
        # Additionally tests count :)
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__lt=5.3, floating_point__gt=2.6).count(), 0)
        # Test across multiple columns. On App Engine only one filter
        # is allowed to be an inequality filter.
        self.assertEquals(
            [(entity.floating_point, entity.integer)
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__lte=5.3, integer=2)
                .order_by('floating_point')],
            [(2.6, 2), ])
        # Test multiple filters including the primary_key field.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__gte='rinnengan@sage.de', integer=2)
                .order_by('email')],
            ['sharingan@uchias.com', ])
        # Test in filter on primary key with another arbitrary filter.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__in=['rinnengan@sage.de',
                                   'sharingan@uchias.com'],
                        integer__gt=2)
                .order_by('integer')],
            ['rinnengan@sage.de', ])
        # Test exceptions.
        # Test multiple filters exception when filtered and not ordered
        # against the first filter.
        self.assertRaises(
            DatabaseError,
            lambda: FieldsWithOptionsModel.objects
                .filter(email__gte='rinnengan@sage.de', floating_point=5.3)
                .order_by('floating_point')[0])
        # Test exception if filtered across multiple columns with
        # inequality filter.
        self.assertRaises(
            DatabaseError,
            FieldsWithOptionsModel.objects
                .filter(floating_point__lte=5.3, integer__gte=2)
                .order_by('floating_point').get)
        # Test exception if filtered across multiple columns with
        # inequality filter with exclude.
        self.assertRaises(
            DatabaseError,
            FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de')
                .exclude(floating_point__lt=9.1).order_by('email').get)
        self.assertRaises(
            DatabaseError,
            lambda: FieldsWithOptionsModel.objects
                .all().exclude(floating_point__lt=9.1).order_by('email')[0])
        # TODO: Maybe check all possible exceptions.

    def test_slicing(self):
        # Test slicing on filter with primary_key.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de')
                .order_by('email')[:2]],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com', ])
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__lte='rinnengan@sage.de')
                .order_by('email')[1:2]],
            ['rasengan@naruto.com', ])
        # Test on non pk field.
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .all().order_by('integer')[:2]],
            [1, 2, ])
        # Stepped slice: every second entity by email.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .all().order_by('email')[::2]],
            ['app-engine@scholardocs.com', 'rinnengan@sage.de'])

    def test_cursor(self):
        # Walk the result set one entity at a time using datastore cursors,
        # verifying each page resumes exactly where the previous one ended.
        results = list(FieldsWithOptionsModel.objects.all())
        cursor = None
        for item in results:
            query = FieldsWithOptionsModel.objects.all()[:1]
            if cursor is not None:
                query = set_cursor(query, cursor)
            # NOTE(review): ``next`` shadows the builtin; left unchanged.
            next = query[0]
            self.assertEqual(next.pk, item.pk)
            cursor = get_cursor(query)
        # After the last entity the cursor must yield an empty page.
        query = set_cursor(FieldsWithOptionsModel.objects.all(), cursor)
        self.assertEqual(list(query[:1]), [])

    def test_Q_objects(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(Q(email__lte='rinnengan@sage.de'))
                .order_by('email')][:2],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com', ])
        self.assertEquals(
            [entity.integer for entity in FieldsWithOptionsModel.objects
                .exclude(Q(integer__lt=5) | Q(integer__gte=9))
                .order_by('integer')],
            [5, ])
        # Combining positional Q with an OR-ed Q is unsupported here.
        self.assertRaises(
            TypeError,
            FieldsWithOptionsModel.objects
                .filter(Q(floating_point=9.1), Q(integer=9) | Q(integer=2)))

    def test_pk_in(self):
        # Test pk__in with field name email.
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(email__in=['app-engine@scholardocs.com',
                                   'rasengan@naruto.com'])],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com'])

    def test_in(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__in=[5.3, 2.6, 1.58])
                .filter(integer__in=[1, 5, 9])],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com'])

    def test_in_with_pk_in(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__in=[5.3, 2.6, 1.58])
                .filter(email__in=['app-engine@scholardocs.com',
                                   'rasengan@naruto.com'])],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com'])

    def test_in_with_order_by(self):
        # Local model: an __in filter fans out into multiple queries whose
        # merged results must still respect the requested ordering.
        class Post(models.Model):
            writer = models.IntegerField()
            order = models.IntegerField()
        Post(writer=1, order=1).save()
        Post(writer=1, order=2).save()
        Post(writer=1, order=3).save()
        Post(writer=2, order=4).save()
        Post(writer=2, order=5).save()
        posts = Post.objects.filter(writer__in=[1, 2]).order_by('order')
        orders = [post.order for post in posts]
        self.assertEqual(orders, range(1, 6))
        posts = Post.objects.filter(writer__in=[1, 2]).order_by('-order')
        orders = [post.order for post in posts]
        self.assertEqual(orders, range(5, 0, -1))

    def test_inequality(self):
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .exclude(floating_point=5.3).filter(integer__in=[1, 5, 9])],
            ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_values(self):
        # Test values().
        self.assertEquals(
            [entity['pk'] for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3).order_by('integer').values('pk')],
            ['app-engine@scholardocs.com', 'rinnengan@sage.de'])
        self.assertEquals(FieldsWithOptionsModel.objects
            .filter(integer__gt=3).order_by('integer').values('pk').count(), 2)
        # These queries first fetch the whole entity and then only
        # return the desired fields selected in .values.
        self.assertEquals(
            [entity['integer'] for entity in FieldsWithOptionsModel.objects
                .filter(email__startswith='r')
                .order_by('email').values('integer')],
            [1, 9])
        self.assertEquals(
            [entity['floating_point']
             for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3)
                .order_by('integer').values('floating_point')],
            [5.3, 9.1])
        # Test values_list.
        self.assertEquals(
            [entity[0] for entity in FieldsWithOptionsModel.objects
                .filter(integer__gt=3).order_by('integer').values_list('pk')],
            ['app-engine@scholardocs.com', 'rinnengan@sage.de'])

    def test_range(self):
        # Test range on float.
        self.assertEquals(
            [entity.floating_point
             for entity in FieldsWithOptionsModel.objects
                .filter(floating_point__range=(2.6, 9.1))
                .order_by('floating_point')],
            [2.6, 5.3, 9.1])
        # Test range on pk.
        self.assertEquals(
            [entity.pk for entity in FieldsWithOptionsModel.objects
                .filter(pk__range=('app-engine@scholardocs.com',
                                   'rinnengan@sage.de'))
                .order_by('pk')],
            ['app-engine@scholardocs.com', 'rasengan@naruto.com',
             'rinnengan@sage.de'])
        # Test range on date/datetime objects.
        start_time = self.last_save_datetime - datetime.timedelta(minutes=1)
        self.assertEquals(
            [entity.email for entity in FieldsWithOptionsModel.objects
                .filter(time__range=(start_time, self.last_save_time))
                .order_by('time')],
            ['app-engine@scholardocs.com', 'sharingan@uchias.com',
             'rinnengan@sage.de', 'rasengan@naruto.com'])

    def test_date(self):
        # Test year on date range boundaries.
        self.assertEquals(
            [entity.datetime for entity in DateTimeModel.objects
                .filter(datetime__year=2010).order_by('datetime')],
            [datetime.datetime(2010, 1, 1, 0, 0, 0, 0),
             datetime.datetime(2010, 12, 31, 23, 59, 59, 999999)])
        # Test year on non boundary date.
        self.assertEquals(
            [entity.datetime for entity in DateTimeModel.objects
                .filter(datetime__year=2013).order_by('datetime')],
            [datetime.datetime(2013, 7, 28, 22, 30, 20, 50)])

    def test_auto_now(self):
        # auto_now fields must change on every save.
        time.sleep(0.1)
        entity = DateTimeModel.objects.all()[0]
        auto_now = entity.datetime_auto_now
        entity.save()
        entity = DateTimeModel.objects.get(pk=entity.pk)
        self.assertNotEqual(auto_now, entity.datetime_auto_now)

    def test_auto_now_add(self):
        # auto_now_add fields must be set once and then stay stable.
        time.sleep(0.1)
        entity = DateTimeModel.objects.all()[0]
        auto_now_add = entity.datetime_auto_now_add
        entity.save()
        entity = DateTimeModel.objects.get(pk=entity.pk)
        self.assertEqual(auto_now_add, entity.datetime_auto_now_add)

    def test_latest(self):
        self.assertEquals(FieldsWithOptionsModel.objects
            .latest('time').floating_point, 1.58)

    def test_blob(self):
        # Round-trip a BlobModel through the ORM and verify the raw
        # datastore entity holds the same bytes.
        x = BlobModel(data='lalala')
        x.full_clean()
        x.save()
        e = Get(Key.from_path(BlobModel._meta.db_table, x.pk))
        self.assertEqual(e['data'], x.data)
        x = BlobModel.objects.all()[0]
        self.assertEqual(e['data'], x.data)
# --- end of file 1 (App Engine filter tests); file 2 (CPython bool tests) follows ---
# Test properties of bool promised by PEP 285
import unittest
from test import support
import os
class BoolTest(unittest.TestCase):
    """Verify the behavioral guarantees PEP 285 makes for ``bool``:
    it is a non-subclassable subclass of ``int``, ``True``/``False`` are
    singletons, arithmetic demotes to ``int``, and builtin/stdlib
    predicates return genuine ``bool`` instances (checked with assertIs)."""

    def test_subclass(self):
        # bool is final: subclassing must fail at class-creation time.
        try:
            class C(bool):
                pass
        except TypeError:
            pass
        else:
            self.fail("bool should not be subclassable")
        self.assertRaises(TypeError, int.__new__, bool, 0)

    def test_print(self):
        try:
            with open(support.TESTFN, "w") as fo:
                print(False, True, file=fo)
            with open(support.TESTFN, "r") as fi:
                self.assertEqual(fi.read(), 'False True\n')
        finally:
            os.remove(support.TESTFN)

    def test_repr(self):
        # repr round-trips through eval.
        self.assertEqual(repr(False), 'False')
        self.assertEqual(repr(True), 'True')
        self.assertEqual(eval(repr(False)), False)
        self.assertEqual(eval(repr(True)), True)

    def test_str(self):
        self.assertEqual(str(False), 'False')
        self.assertEqual(str(True), 'True')

    def test_int(self):
        # Conversion preserves value but not identity (returns plain int).
        self.assertEqual(int(False), 0)
        self.assertIsNot(int(False), False)
        self.assertEqual(int(True), 1)
        self.assertIsNot(int(True), True)

    def test_float(self):
        self.assertEqual(float(False), 0.0)
        self.assertIsNot(float(False), False)
        self.assertEqual(float(True), 1.0)
        self.assertIsNot(float(True), True)

    def test_math(self):
        # Arithmetic on bools yields ints (assertIsNot checks demotion),
        # while &, |, ^ between two bools stay bool.
        self.assertEqual(+False, 0)
        self.assertIsNot(+False, False)
        self.assertEqual(-False, 0)
        self.assertIsNot(-False, False)
        self.assertEqual(abs(False), 0)
        self.assertIsNot(abs(False), False)
        self.assertEqual(+True, 1)
        self.assertIsNot(+True, True)
        self.assertEqual(-True, -1)
        self.assertEqual(abs(True), 1)
        self.assertIsNot(abs(True), True)
        self.assertEqual(~False, -1)
        self.assertEqual(~True, -2)
        self.assertEqual(False+2, 2)
        self.assertEqual(True+2, 3)
        self.assertEqual(2+False, 2)
        self.assertEqual(2+True, 3)
        self.assertEqual(False+False, 0)
        self.assertIsNot(False+False, False)
        self.assertEqual(False+True, 1)
        self.assertIsNot(False+True, True)
        self.assertEqual(True+False, 1)
        self.assertIsNot(True+False, True)
        self.assertEqual(True+True, 2)
        self.assertEqual(True-True, 0)
        self.assertIsNot(True-True, False)
        self.assertEqual(False-False, 0)
        self.assertIsNot(False-False, False)
        self.assertEqual(True-False, 1)
        self.assertIsNot(True-False, True)
        self.assertEqual(False-True, -1)
        self.assertEqual(True*1, 1)
        self.assertEqual(False*1, 0)
        self.assertIsNot(False*1, False)
        self.assertEqual(True/1, 1)
        self.assertIsNot(True/1, True)
        self.assertEqual(False/1, 0)
        self.assertIsNot(False/1, False)
        self.assertEqual(True%1, 0)
        self.assertIsNot(True%1, False)
        self.assertEqual(True%2, 1)
        self.assertIsNot(True%2, True)
        self.assertEqual(False%1, 0)
        self.assertIsNot(False%1, False)
        for b in False, True:
            for i in 0, 1, 2:
                self.assertEqual(b**i, int(b)**i)
                self.assertIsNot(b**i, bool(int(b)**i))
        # bool op bool -> bool; mixing in an int demotes to int.
        for a in False, True:
            for b in False, True:
                self.assertIs(a&b, bool(int(a)&int(b)))
                self.assertIs(a|b, bool(int(a)|int(b)))
                self.assertIs(a^b, bool(int(a)^int(b)))
                self.assertEqual(a&int(b), int(a)&int(b))
                self.assertIsNot(a&int(b), bool(int(a)&int(b)))
                self.assertEqual(a|int(b), int(a)|int(b))
                self.assertIsNot(a|int(b), bool(int(a)|int(b)))
                self.assertEqual(a^int(b), int(a)^int(b))
                self.assertIsNot(a^int(b), bool(int(a)^int(b)))
                self.assertEqual(int(a)&b, int(a)&int(b))
                self.assertIsNot(int(a)&b, bool(int(a)&int(b)))
                self.assertEqual(int(a)|b, int(a)|int(b))
                self.assertIsNot(int(a)|b, bool(int(a)|int(b)))
                self.assertEqual(int(a)^b, int(a)^int(b))
                self.assertIsNot(int(a)^b, bool(int(a)^int(b)))
        # Comparisons and membership tests return the singletons.
        self.assertIs(1==1, True)
        self.assertIs(1==0, False)
        self.assertIs(0<1, True)
        self.assertIs(1<0, False)
        self.assertIs(0<=0, True)
        self.assertIs(1<=0, False)
        self.assertIs(1>0, True)
        self.assertIs(1>1, False)
        self.assertIs(1>=1, True)
        self.assertIs(0>=1, False)
        self.assertIs(0!=1, True)
        self.assertIs(0!=0, False)
        x = [1]
        self.assertIs(x is x, True)
        self.assertIs(x is not x, False)
        self.assertIs(1 in x, True)
        self.assertIs(0 in x, False)
        self.assertIs(1 not in x, False)
        self.assertIs(0 not in x, True)
        x = {1: 2}
        self.assertIs(x is x, True)
        self.assertIs(x is not x, False)
        self.assertIs(1 in x, True)
        self.assertIs(0 in x, False)
        self.assertIs(1 not in x, False)
        self.assertIs(0 not in x, True)
        self.assertIs(not True, False)
        self.assertIs(not False, True)

    def test_convert(self):
        self.assertRaises(TypeError, bool, 42, 42)
        self.assertIs(bool(10), True)
        self.assertIs(bool(1), True)
        self.assertIs(bool(-1), True)
        self.assertIs(bool(0), False)
        self.assertIs(bool("hello"), True)
        self.assertIs(bool(""), False)
        self.assertIs(bool(), False)

    def test_keyword_args(self):
        # bool() takes no keyword arguments.
        with self.assertRaisesRegex(TypeError, 'keyword argument'):
            bool(x=10)

    def test_format(self):
        self.assertEqual("%d" % False, "0")
        self.assertEqual("%d" % True, "1")
        self.assertEqual("%x" % False, "0")
        self.assertEqual("%x" % True, "1")

    def test_hasattr(self):
        self.assertIs(hasattr([], "append"), True)
        self.assertIs(hasattr([], "wobble"), False)

    def test_callable(self):
        self.assertIs(callable(len), True)
        self.assertIs(callable(1), False)

    def test_isinstance(self):
        # Every bool is an int, but no plain int is a bool.
        self.assertIs(isinstance(True, bool), True)
        self.assertIs(isinstance(False, bool), True)
        self.assertIs(isinstance(True, int), True)
        self.assertIs(isinstance(False, int), True)
        self.assertIs(isinstance(1, bool), False)
        self.assertIs(isinstance(0, bool), False)

    def test_issubclass(self):
        self.assertIs(issubclass(bool, int), True)
        self.assertIs(issubclass(int, bool), False)

    def test_contains(self):
        self.assertIs(1 in {}, False)
        self.assertIs(1 in {1:1}, True)

    def test_string(self):
        # str predicates must return real bools.
        self.assertIs("xyz".endswith("z"), True)
        self.assertIs("xyz".endswith("x"), False)
        self.assertIs("xyz0123".isalnum(), True)
        self.assertIs("@#$%".isalnum(), False)
        self.assertIs("xyz".isalpha(), True)
        self.assertIs("@#$%".isalpha(), False)
        self.assertIs("0123".isdigit(), True)
        self.assertIs("xyz".isdigit(), False)
        self.assertIs("xyz".islower(), True)
        self.assertIs("XYZ".islower(), False)
        self.assertIs("0123".isdecimal(), True)
        self.assertIs("xyz".isdecimal(), False)
        self.assertIs("0123".isnumeric(), True)
        self.assertIs("xyz".isnumeric(), False)
        self.assertIs(" ".isspace(), True)
        self.assertIs("\xa0".isspace(), True)
        self.assertIs("\u3000".isspace(), True)
        self.assertIs("XYZ".isspace(), False)
        self.assertIs("X".istitle(), True)
        self.assertIs("x".istitle(), False)
        self.assertIs("XYZ".isupper(), True)
        self.assertIs("xyz".isupper(), False)
        self.assertIs("xyz".startswith("x"), True)
        self.assertIs("xyz".startswith("z"), False)

    def test_boolean(self):
        # Mixed bool/int bitwise ops demote to int; bool/bool stay bool.
        self.assertEqual(True & 1, 1)
        self.assertNotIsInstance(True & 1, bool)
        self.assertIs(True & True, True)
        self.assertEqual(True | 1, 1)
        self.assertNotIsInstance(True | 1, bool)
        self.assertIs(True | True, True)
        self.assertEqual(True ^ 1, 0)
        self.assertNotIsInstance(True ^ 1, bool)
        self.assertIs(True ^ True, False)

    def test_fileclosed(self):
        try:
            with open(support.TESTFN, "w") as f:
                self.assertIs(f.closed, False)
            self.assertIs(f.closed, True)
        finally:
            os.remove(support.TESTFN)

    def test_types(self):
        # types are always true.
        for t in [bool, complex, dict, float, int, list, object,
                  set, str, tuple, type]:
            self.assertIs(bool(t), True)

    def test_operator(self):
        import operator
        self.assertIs(operator.truth(0), False)
        self.assertIs(operator.truth(1), True)
        self.assertIs(operator.not_(1), False)
        self.assertIs(operator.not_(0), True)
        self.assertIs(operator.contains([], 1), False)
        self.assertIs(operator.contains([1], 1), True)
        self.assertIs(operator.lt(0, 0), False)
        self.assertIs(operator.lt(0, 1), True)
        self.assertIs(operator.is_(True, True), True)
        self.assertIs(operator.is_(True, False), False)
        self.assertIs(operator.is_not(True, True), False)
        self.assertIs(operator.is_not(True, False), True)

    def test_marshal(self):
        # Serialization must preserve the singletons' identity.
        import marshal
        self.assertIs(marshal.loads(marshal.dumps(True)), True)
        self.assertIs(marshal.loads(marshal.dumps(False)), False)

    def test_pickle(self):
        import pickle
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertIs(pickle.loads(pickle.dumps(True, proto)), True)
            self.assertIs(pickle.loads(pickle.dumps(False, proto)), False)

    def test_picklevalues(self):
        # Test for specific backwards-compatible pickle values
        import pickle
        self.assertEqual(pickle.dumps(True, protocol=0), b"I01\n.")
        self.assertEqual(pickle.dumps(False, protocol=0), b"I00\n.")
        self.assertEqual(pickle.dumps(True, protocol=1), b"I01\n.")
        self.assertEqual(pickle.dumps(False, protocol=1), b"I00\n.")
        self.assertEqual(pickle.dumps(True, protocol=2), b'\x80\x02\x88.')
        self.assertEqual(pickle.dumps(False, protocol=2), b'\x80\x02\x89.')

    def test_convert_to_bool(self):
        # Verify that TypeError occurs when bad things are returned
        # from __bool__(). This isn't really a bool test, but
        # it's related.
        check = lambda o: self.assertRaises(TypeError, bool, o)
        class Foo(object):
            def __bool__(self):
                return self
        check(Foo())

        class Bar(object):
            def __bool__(self):
                return "Yes"
        check(Bar())

        class Baz(int):
            def __bool__(self):
                return self
        check(Baz())

        # __bool__() must return a bool not an int
        class Spam(int):
            def __bool__(self):
                return 1
        check(Spam())

        class Eggs:
            def __len__(self):
                return -1
        self.assertRaises(ValueError, bool, Eggs())

    def test_from_bytes(self):
        self.assertIs(bool.from_bytes(b'\x00'*8, 'big'), False)
        self.assertIs(bool.from_bytes(b'abcd', 'little'), True)

    def test_sane_len(self):
        # this test just tests our assumptions about __len__
        # this will start failing if __len__ changes assertions
        for badval in ['illegal', -1, 1 << 32]:
            class A:
                def __len__(self):
                    return badval
            try:
                bool(A())
            except (Exception) as e_bool:
                try:
                    len(A())
                except (Exception) as e_len:
                    self.assertEqual(str(e_bool), str(e_len))

    def test_blocked(self):
        # __bool__ = None blocks truth testing even if __len__ exists.
        class A:
            __bool__ = None
        self.assertRaises(TypeError, bool, A())

        class B:
            def __len__(self):
                return 10
            __bool__ = None
        self.assertRaises(TypeError, bool, B())

    def test_real_and_imag(self):
        # The numeric protocol attributes are plain ints, not bools.
        self.assertEqual(True.real, 1)
        self.assertEqual(True.imag, 0)
        self.assertIs(type(True.real), int)
        self.assertIs(type(True.imag), int)
        self.assertEqual(False.real, 0)
        self.assertEqual(False.imag, 0)
        self.assertIs(type(False.real), int)
        self.assertIs(type(False.imag), int)
def test_main():
    # Entry point used by the CPython regression-test driver: run the
    # whole BoolTest case through the shared harness.
    suite = BoolTest
    support.run_unittest(suite)
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    test_main()
# --- end of file 2 (CPython bool tests); file 3 (SAGA SGE job adaptor) follows ---
# -*- coding: utf-8 -*-
__author__ = "Andre Merzky, Christian P.-Llamas, Ole Weidner, Thomas Schatz, Alexander Grill"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" SGE job adaptor implementation
"""
import saga.utils.pty_shell
import saga.url as surl
import saga.adaptors.base
import saga.adaptors.cpi.job
from saga.job.constants import *
import os
import re
import time
from cgi import parse_qs
from StringIO import StringIO
from datetime import datetime
# Shortcuts for the CPI call decorators applied to adaptor entry points.
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL

# Matches one qstat job-state line: "<state> <MM/DD/YYYY hh:mm:ss> <rest>".
_QSTAT_JOB_STATE_RE = re.compile(r"^([^ ]+) ([0-9]{2}/[0-9]{2}/[0-9]{4} [0-9]{2}:[0-9]{2}:[0-9]{2}) (.+)$")
class SgeKeyValueParser(object):
    """Iterate over ``key value`` lines emitted by SGE commands.

    Accepts either a raw string or a file-like object, folds
    backslash-continued (multi-line) values into one logical line,
    optionally strips a suffix from every key, and can restrict
    iteration to a set of wanted keys.  Works as an iterator yielding
    ``(key, value)`` tuples, or as a dictionary via :meth:`as_dict`.
    (Python 2 iterator protocol: ``next`` / ``basestring``.)
    """

    # One "key value" line: key is the first space-free token.
    KEY_VALUE_RE = re.compile(r"^([^ ]+) +(.+)$")

    def __init__(self, stream, filter_keys=None, key_suffix=None):
        """
        :param stream: a string or a file-like object implementing readline()
        :param filter_keys: an iterable with the keys of interest, or None
        :param key_suffix: a key suffix to strip while parsing, or None
        """
        # Normalize plain strings into a readline()-capable object.
        self.stream = StringIO(stream) if isinstance(stream, basestring) else stream
        self.filter_keys = None if filter_keys is None else set(filter_keys)
        self.key_suffix = key_suffix

    def next(self):
        """Return the next accepted (key, value) pair.

        :raises StopIteration: when the underlying stream is exhausted.
        """
        pair = None
        while pair is None:
            raw = self.stream.readline()
            if not raw:
                raise StopIteration
            raw = raw.rstrip(" \n")
            # Fold backslash-continued lines into a single logical line.
            while raw.endswith("\\"):
                raw = raw[:-1] + self.stream.readline().rstrip(" \n").lstrip(" ")
            matched = self.KEY_VALUE_RE.match(raw)
            if matched is None:
                continue
            key, value = matched.groups()
            if self.key_suffix is not None and key.endswith(self.key_suffix):
                key = key[:-len(self.key_suffix)]
            if self.filter_keys is not None and key not in self.filter_keys:
                continue  # not a key of interest; keep scanning
            pair = (key, value)
        return pair

    def __iter__(self):
        return self

    def as_dict(self):
        """Consume the stream and return all accepted pairs as a dict.

        :return: a dictionary of key-value pairs parsed from an SGE command.
        """
        result = {}
        for key, value in self:
            result[key] = value
        return result
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
    """Record *message* at error level on *logger*, then raise it.

    :param message: text to log and to pass to the exception constructor.
    :param exception: exception class instantiated with *message* and raised.
    :param logger: logger that receives the error record.
    """
    # Log first so the failure is recorded even if the caller swallows it.
    logger.error(message)
    raise exception(message)
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.sgejob"
_ADAPTOR_SCHEMAS = ["sge", "sge+ssh", "sge+gsissh"]
_ADAPTOR_OPTIONS = [
{
'category' : 'saga.adaptor.sgejob',
'name' : 'purge_on_start',
'type' : bool,
'default' : True,
'valid_options' : [True, False],
'documentation' : '''Purge temporary job information for all
jobs which are older than a number of days.
The number of days can be configured with <purge_older_than>.''',
'env_variable' : None
},
{
'category' : 'saga.adaptor.sgejob',
'name' : 'purge_older_than',
'type' : int,
'default' : 30,
#'valid_options' : [True, False],
'documentation' : '''When <purge_on_start> is enabled this specifies the number
of days to consider a temporary file older enough to be deleted.''',
'env_variable' : None
},
]
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"jdes_attributes": [saga.job.NAME,
saga.job.EXECUTABLE,
saga.job.ARGUMENTS,
saga.job.ENVIRONMENT,
saga.job.INPUT,
saga.job.OUTPUT,
saga.job.ERROR,
saga.job.QUEUE,
saga.job.PROJECT,
saga.job.WALL_TIME_LIMIT,
saga.job.WORKING_DIRECTORY,
saga.job.SPMD_VARIATION,
saga.job.TOTAL_CPU_COUNT,
saga.job.PROCESSES_PER_HOST,
saga.job.TOTAL_PHYSICAL_MEMORY],
"job_attributes": [saga.job.EXIT_CODE,
saga.job.EXECUTION_HOSTS,
saga.job.CREATED,
saga.job.STARTED,
saga.job.FINISHED],
"metrics": [saga.job.STATE],
"contexts": {"ssh": "SSH public/private keypair",
"x509": "GSISSH X509 proxy context",
"userpass": "username/password pair (ssh)"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name": _ADAPTOR_NAME,
"cfg_options": _ADAPTOR_OPTIONS,
"capabilities": _ADAPTOR_CAPABILITIES,
"description": """
The SGE (Sun/Oracle Grid Engine) adaptor allows to run and manage jobs on
`SGE <http://en.wikipedia.org/wiki/Oracle_Grid_Engine>`_ controlled HPC clusters.
""",
"example": "examples/jobs/sgejob.py",
"schemas": {"sge": "connect to a local cluster",
"sge+ssh": "conenct to a remote cluster via SSH",
"sge+gsissh": "connect to a remote cluster via GSISSH"}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
"name" : _ADAPTOR_NAME,
"version" : "v0.1",
"schemas" : _ADAPTOR_SCHEMAS,
"capabilities" : _ADAPTOR_CAPABILITIES,
"cpis": [
{
"type": "saga.job.Service",
"class": "SGEJobService"
},
{
"type": "saga.job.Job",
"class": "SGEJob"
}
]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.base.Base):
    """ this is the actual adaptor class, which gets loaded by SAGA (i.e. by
        the SAGA engine), and which registers the CPI implementation classes
        which provide the adaptor's functionality.
    """

    # ----------------------------------------------------------------
    #
    def __init__(self):
        saga.adaptors.base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

        # Job ids look like '[rm-url]-[native-id]'.  Use a raw string so the
        # \[ / \] escapes reach the regex engine unmangled (a non-raw string
        # relies on invalid string escapes, which newer Pythons warn about).
        self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
        self.opts = self.get_config(_ADAPTOR_NAME)

        # Cleanup policy for temporary per-job files (see _ADAPTOR_OPTIONS).
        self.purge_on_start = self.opts['purge_on_start'].get_value()
        self.purge_older_than = self.opts['purge_older_than'].get_value()

    # ----------------------------------------------------------------
    #
    def sanity_check(self):
        # FIXME: also check for gsissh
        pass

    # ----------------------------------------------------------------
    #
    def parse_id(self, id):
        """Split the id '[rm]-[pid]' into its parts and return them.

        :raises saga.BadParameter: if *id* does not match the expected form.
        """
        match = self.id_re.match(id)
        if not match or len(match.groups()) != 2:
            raise saga.BadParameter("Cannot parse job id '%s'" % id)
        return (match.group(1), match.group(2))
###############################################################################
#
class SGEJobService (saga.adaptors.cpi.job.Service):
""" implements saga.adaptors.cpi.job.Service
"""
# ----------------------------------------------------------------
#
    def __init__(self, api, adaptor):
        # Initialize the CPI base class, then keep a handle on the adaptor
        # so instance methods can reach shared configuration.
        _cpi_base = super(SGEJobService, self)
        _cpi_base.__init__(api, adaptor)
        self._adaptor = adaptor
# ----------------------------------------------------------------
#
def __del__(self):
self.finalize(kill_shell=True)
# ----------------------------------------------------------------
#
    @SYNC_CALL
    def init_instance(self, adaptor_state, rm_url, session):
        """ Service instance constructor.

            Parses the resource-manager URL (scheme and query parameters),
            opens a PTY shell to the SGE frontend and probes the remote
            system via initialize().

            :param adaptor_state: adaptor-private state dict (unused here)
            :param rm_url: job service URL, e.g. 'sge+ssh://host?queue=long'
            :param session: saga session used for the shell connection
            :return: the API object this CPI instance belongs to
        """
        self.rm = rm_url
        self.session = session
        self.pe_list = list()           # parallel environments, filled by initialize()
        self.jobs = dict()              # cache: saga job id -> job info dict
        self.queue = None
        self.memreqs = None
        self.shell = None
        self.mandatory_memreqs = list()
        self.accounting = False
        # remote directory for per-job info files written by the job scripts
        self.temp_path = "$HOME/.saga/adaptors/sge_job"
        rm_scheme = rm_url.scheme
        pty_url = surl.Url (rm_url)
        # this adaptor supports options that can be passed via the
        # 'query' component of the job service URL.
        if rm_url.query is not None:
            for key, val in parse_qs(rm_url.query).iteritems():
                if key == 'queue':
                    self.queue = val[0]
                elif key == 'memreqs':
                    self.memreqs = val[0]
        # we need to extract the scheme for PTYShell. That's basically the
        # job.Service Url without the sge+ part. We use the PTYShell to execute
        # SGE commands either locally or via gsissh or ssh.
        if rm_scheme == "sge":
            pty_url.scheme = "fork"
        elif rm_scheme == "sge+ssh":
            pty_url.scheme = "ssh"
        elif rm_scheme == "sge+gsissh":
            pty_url.scheme = "gsissh"
        # these are the commands that we need in order to interact with SGE.
        # the adaptor will try to find them during initialize(self) and bail
        # out in case they are not available.
        self._commands = {'qstat': None,
                          'qsub': None,
                          'qdel': None,
                          'qconf': None,
                          'qacct': None}
        self.shell = saga.utils.pty_shell.PTYShell(pty_url, self.session)
        # self.shell.set_initialize_hook(self.initialize)
        # self.shell.set_finalize_hook(self.finalize)
        self.initialize()
        return self.get_api ()
# ----------------------------------------------------------------
#
def close (self) :
if self.shell :
self.shell.finalize (True)
# ----------------------------------------------------------------
#
def initialize(self):
# check if all required sge tools are available
for cmd in self._commands.keys():
ret, out, _ = self.shell.run_sync("which %s " % cmd)
if ret != 0:
message = "Error finding SGE tools: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
path = out.strip() # strip removes newline
ret, out, _ = self.shell.run_sync("%s -help" % cmd)
if ret != 0:
# fix for a bug in certain qstat versions that return
# '1' after a successfull qstat -help:
# https://github.com/saga-project/saga-python/issues/163
if cmd == 'qstat':
version = out.strip().split('\n')[0]
else:
message = "Error finding SGE tools: %s" % out
log_error_and_raise(message, saga.NoSuccess,
self._logger)
else:
# version is reported in the first row of the
# help screen, e.g., GE 6.2u5_1
version = out.strip().split('\n')[0]
# add path and version to the command dictionary
self._commands[cmd] = {"path": "unset GREP_OPTIONS; %s" % path,
"version": version}
self._logger.info("Found SGE tools: %s" % self._commands)
# determine the available processing elements
ret, out, _ = self.shell.run_sync('%s -spl' %
(self._commands['qconf']['path']))
if ret != 0:
message = "Error running 'qconf': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
for pe in out.split('\n'):
if pe != '':
self.pe_list.append(pe)
self._logger.debug("Available processing elements: %s" %
(self.pe_list))
# find out mandatory and optional memory attributes
ret, out, _ = self.shell.run_sync('%s -sc' % (self._commands['qconf']['path']))
if ret != 0:
message = "Error running 'qconf': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
mandatory_attrs = []
optional_attrs = []
for line in out.split('\n'):
if (line != '') and (line[0] != '#'):
[name, _, att_type, _, requestable, _, _, _] = line.split()
if att_type == 'MEMORY' and requestable == 'YES':
optional_attrs.append(name)
elif att_type == 'MEMORY' and requestable == 'FORCED':
mandatory_attrs.append(name)
self._logger.debug("Optional memory attributes: %s" % (mandatory_attrs))
self._logger.debug("Mandatory memory attributes: %s" % (optional_attrs))
# find out user specified memory attributes in job.Service URL
if self.memreqs is None:
flags = []
else:
flags, _ = self.__parse_memreqs(self.memreqs)
# if there are mandatory memory attributes store them and check that they were specified in the job.Service URL
if not (mandatory_attrs == []):
self.mandatory_memreqs = mandatory_attrs
missing_flags = []
for attr in mandatory_attrs:
if not attr in flags:
missing_flags.append(attr)
if not (missing_flags == []):
message = "The following memory attribute(s) are mandatory in your SGE environment and thus " \
"must be specified in the job service URL: %s" % ' '.join(missing_flags)
log_error_and_raise(message, saga.BadParameter, self._logger)
# if memory attributes were specified in the job.Service URL, check that they correspond to existing optional or mandatory memory attributes
invalid_attrs = []
for f in flags:
if not (f in optional_attrs or f in mandatory_attrs):
invalid_attrs.append(f)
if not (invalid_attrs == []):
message = "The following memory attribute(s) were specified in the job.Service URL but are not valid " \
"memory attributes in your SGE environment: %s" % ' '.join(invalid_attrs)
log_error_and_raise(message, saga.BadParameter, self._logger)
# check if accounting is activated
qres = self.__kvcmd_results('qconf', '-sconf', filter_keys=["reporting_params"])
self.accounting = "reporting_params" in qres and "accounting=true" in qres["reporting_params"]
self._logger.info("Accounting is %sabled" % ("en" if self.accounting else "dis"))
# purge temporary files
if self._adaptor.purge_on_start:
cmd = "find $HOME/.saga/adaptors/sge_job" \
" -type f -mtime +%d -print -delete | wc -l" % self._adaptor.purge_older_than
ret, out, _ = self.shell.run_sync(cmd)
if ret == 0 and out != "0":
self._logger.info("Purged %s temporary files" % out)
# ----------------------------------------------------------------
#
def finalize(self, kill_shell=False):
if kill_shell :
if self.shell :
self.shell.finalize (True)
# ----------------------------------------------------------------
#
# private members
#
def __sge_to_saga_jobstate(self, sge_state):
"""
Translates an SGE one-letter state to SAGA
"""
try:
if sge_state.startswith("d"):
# when a qdel is done the state is prefixed with a d while the termination signal is queued
sge_state = sge_state[1:]
return {
'c' : saga.job.DONE,
'E' : saga.job.RUNNING,
'H' : saga.job.PENDING,
'qw' : saga.job.PENDING,
'r' : saga.job.RUNNING,
't' : saga.job.RUNNING,
'w' : saga.job.PENDING,
's' : saga.job.PENDING,
'X' : saga.job.CANCELED,
'Eqw' : saga.job.FAILED
}[sge_state]
except:
return saga.job.UNKNOWN
def __parse_memreqs(self, s):
"""
Simple parser for getting memory requirements flags and multipliers from the memreqs part of the job.Service url
"""
flags = []
multipliers = []
while len(s) != 0:
# find multiplier
m = re.match(r'\d+\.?\d*|\d*\.?\d+', s)
if m:
multipliers.append(float(s[m.start():m.end()]))
s = s[m.end():]
else:
multipliers.append(1.0)
# find flag
pos = s.find('~')
if pos < 0:
flags.append(s)
s = ''
else:
flags.append(s[:pos])
s = s[pos+1:]
return flags, multipliers
def __kvcmd_results(self, cmd, cmd_args, *args, **kwargs):
"""
Runs a SGE command that returns key-value pairs as result and parses the results.
:param cmd: command alias
:param cmd_args: command arguments
:param args: parser arguments
:param kwargs: parser keyword arguments
:returns: a dictionary if succeeded or None otherwise
"""
ret, out, _ = self.shell.run_sync('%s %s' % (self._commands[cmd]['path'], cmd_args))
if ret == 0:
return SgeKeyValueParser(out, *args, **kwargs).as_dict()
return None
def __remote_mkdir(self, path):
"""
Creates a directory on the remote host.
:param path: the remote directory to be created.
"""
# check if the path exists
ret, out, _ = self.shell.run_sync(
"(test -d %s && echo -n 0) || (mkdir -p %s && echo -n 1)" % (path, path))
if ret == 0 and out == "1":
self._logger.info("Remote directory created: %s" % path)
elif ret != 0:
# something went wrong
message = "Couldn't create remote directory - %s" % (out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
def __job_info_from_accounting(self, sge_job_id, max_retries=10):
""" Returns job information from the SGE accounting using qacct.
It may happen that when the job exits from the queue system the results in
the accounting database take some time to appear. To avoid premature failing
several tries can be done (up to a maximum) with delays of 1 second in between.
:param sge_job_id: SGE job id
:param max_retries: The maximum number of retries in case qacct fails
:return: job information dictionary
"""
job_info = None
retries = max_retries
while job_info is None and retries > 0:
retries -= 1
qres = self.__kvcmd_results('qacct', "-j %s | grep -E '%s'" % (
sge_job_id, "hostname|qsub_time|start_time|end_time|exit_status|failed"))
if qres is not None: # ok, extract job info from qres
# hostname sge
# qsub_time Mon Jun 24 17:24:43 2013
# start_time Mon Jun 24 17:24:50 2013
# end_time Mon Jun 24 17:44:50 2013
# failed 0
# exit_status 0
job_info = dict(
state=saga.job.DONE if qres.get("failed") == "0" else saga.job.FAILED,
exec_hosts=qres.get("hostname"),
returncode=int(qres.get("exit_status", -1)),
create_time=qres.get("qsub_time"),
start_time=qres.get("start_time"),
end_time=qres.get("end_time"),
gone=False)
elif retries > 0:
# sometimes there is a lapse between the job exits from the queue and
# its information enters in the accounting database
# let's run qacct again after a delay
time.sleep(1)
return job_info
def __remote_job_info_path(self, sge_job_id="$JOB_ID"):
"""
Returns the path of the remote job info file.
:param sge_job_id: the SGE job id, if omitted an enviroment variable representing the job id will be used.
:return: path to the remote job info file
"""
return "%s/%s" % (self.temp_path, sge_job_id)
def __clean_remote_job_info(self, sge_job_id):
"""
Removes the temporary remote file containing job info.
:param sge_job_id: the SGE job id
"""
path = self.__remote_job_info_path(sge_job_id)
ret, out, _ = self.shell.run_sync("rm %s" % path)
if ret != 0:
self._logger.debug("Remote job info couldn't be removed: %s" % path)
def __get_remote_job_info(self, sge_job_id):
"""
Obtains the job info from a temporary remote file created by the qsub script.
:param sge_job_id: the SGE job id
:return: a dictionary with the job info
"""
ret, out, _ = self.shell.run_sync("cat %s" % self.__remote_job_info_path(sge_job_id))
if ret != 0:
return None
qres = SgeKeyValueParser(out, key_suffix=":").as_dict()
if "signal" in qres:
state = saga.job.CANCELED
elif "exit_status" in qres:
state = saga.job.DONE
else:
state = saga.job.RUNNING
job_info = dict(
state=state,
exec_hosts=qres.get("hostname"),
returncode=int(qres.get("exit_status", -1)),
create_time=qres.get("qsub_time"),
start_time=qres.get("start_time"),
end_time=qres.get("end_time"),
gone=False)
return job_info
def __generate_qsub_script(self, jd):
"""
Generates an SGE script from a SAGA job description
:param jd: job descriptor
:return: the qsub script
"""
# SGE parameters
sge_params = ["#$ -S /bin/bash"]
if jd.name is not None:
sge_params += ["#$ -N %s" % jd.name]
sge_params += ["#$ -V"]
if jd.environment is not None and len(jd.environment) > 0:
env_list = ",".join(["%s=%s" % (key, value) for key, value in jd.environment.items()])
sge_params += ["#$ -v %s" % env_list]
if jd.working_directory is not None:
sge_params += ["#$ -wd %s" % jd.working_directory]
if jd.output is not None:
sge_params += ["#$ -o %s" % jd.output]
if jd.error is not None:
sge_params += ["#$ -e %s" % jd.error]
if jd.wall_time_limit is not None:
hours = jd.wall_time_limit / 60
minutes = jd.wall_time_limit % 60
sge_params += ["#$ -l h_rt=%s:%s:00" % (str(hours), str(minutes))]
queue = self.queue or jd.queue
if queue is not None:
sge_params += ["#$ -q %s" % queue]
if jd.project is not None:
sge_params += ["#$ -A %s" % str(jd.project)]
if jd.job_contact is not None:
sge_params += ["#$ -m be", "#$ -M %s" % jd.contact]
# memory requirements - TOTAL_PHYSICAL_MEMORY
# it is assumed that the value passed through jd is always in Megabyte
if jd.total_physical_memory is not None:
# this is (of course) not the same for all SGE installations. some
# use virtual_free, some use a combination of mem_req / h_vmem.
# It is very annoying. We need some sort of configuration variable
# that can control this. Yes, ugly and not very saga-ish, but
# the only way to do this, IMHO...
if self.memreqs is None:
raise Exception("When using 'total_physical_memory' with the SGE adaptor, the query parameters "
"of the job.Service URL must define the attributes used by your particular instance "
"of SGE to control memory allocation.\n"
"'virtual_free', 'h_vmem' or 'mem_req' are commonly encountered examples of "
"such attributes.\n"
"A valid job.Service URL could be for instance:\n"
"'sge+ssh://myserver.edu?memreqs=virtual_free~1.5h_vmem'\n"
"here the attribute 'virtual_free' would be set to 'total_physical_memory' and "
"the attribute 'h_vmem' would be set to 1.5*'total_physical_memory', "
"'~' is used as a separator.")
flags, multipliers = self.__parse_memreqs(self.memreqs)
for flag, mult in zip(flags, multipliers):
sge_params += ["#$ -l %s=%sm" % (flag, int(round(mult * int(jd.total_physical_memory))))]
# check spmd variation. this translates to the SGE qsub -pe flag.
if jd.spmd_variation is not None:
if jd.spmd_variation not in self.pe_list:
raise Exception("'%s' is not a valid option for jd.spmd_variation. "
"Valid options are: %s" % (jd.spmd_variation, self.pe_list))
# if no cores are requested at all, we default to 1
# we need to translate the # cores requested into
# multiplicity, i.e., if one core is requested and
# the cluster consists of 16-way SMP nodes, we will
# request 16. If 17 cores are requested, we will
# request 32... and so on ... self.__ppn represents
# the core count per single node
#count = int(int(jd.total_cpu_count) / int(ppn))
#if int(jd.total_cpu_count) % int(ppn) != 0:
# count = count + 1
#count = count * int(ppn)
sge_params += ["#$ -pe %s %s" % (jd.spmd_variation, jd.total_cpu_count or 1)]
elif jd.total_cpu_count is not None and jd.total_cpu_count > 1:
raise Exception("jd.total_cpu_count requires that jd.spmd_variation is not empty. "
"Valid options for jd.spmd_variation are: %s" % (self.pe_list))
# convert sge params into an string
sge_params = "\n".join(sge_params)
# Job info, executable and arguments
job_info_path = self.__remote_job_info_path()
script_body = [
'function aborted() {',
' echo Aborted with signal $1.',
' echo "signal: $1" >>%s' % job_info_path,
' echo "end_time: $(LC_ALL=en_US.utf8 date \'+%%a %%b %%d %%H:%%M:%%S %%Y\')" >>%s' % job_info_path,
' exit -1',
'}',
'mkdir -p %s' % self.temp_path,
'for sig in SIGHUP SIGINT SIGQUIT SIGTERM SIGUSR1 SIGUSR2; do trap "aborted $sig" $sig; done',
'echo "hostname: $HOSTNAME" >%s' % job_info_path,
'echo "qsub_time: %s" >>%s' % (datetime.now().strftime("%a %b %d %H:%M:%S %Y"), job_info_path),
'echo "start_time: $(LC_ALL=en_US.utf8 date \'+%%a %%b %%d %%H:%%M:%%S %%Y\')" >>%s' % job_info_path
]
exec_n_args = None
if jd.executable is not None:
exec_n_args = jd.executable
if jd.arguments is not None:
exec_n_args += " %s" % " ".join(jd.arguments)
elif jd.arguments is not None:
raise Exception("jd.arguments defined without jd.executable being defined")
if exec_n_args is not None:
script_body += [exec_n_args]
script_body += [
'echo "exit_status: $?" >>%s' % job_info_path,
'echo "end_time: $(LC_ALL=en_US.utf8 date \'+%%a %%b %%d %%H:%%M:%%S %%Y\')" >>%s' % job_info_path
]
# convert exec and args into an string and
# escape all double quotes and dollar signs, otherwise 'echo |'
# further down won't work.
# only escape '$' in args and exe. not in the params
script_body = "\n".join(script_body).replace('$', '\\$')
sgescript = "\n#!/bin/bash \n%s \n%s" % (sge_params, script_body)
return sgescript.replace('"', '\\"')
# ----------------------------------------------------------------
#
# Adaptor internal methods
#
def _job_run(self, jd):
"""
Runs a job via qsub
"""
if self.queue is not None and jd.queue is not None and self.queue != jd.queue:
self._logger.warning("Job service was instantiated explicitly with 'queue=%s', "
"but job description tries to a different queue: '%s'. Using '%s'." % (
self.queue, jd.queue, self.queue))
# In SGE environments with mandatory memory attributes, 'total_physical_memory' must be specified
if len(self.mandatory_memreqs) != 0 and jd.total_physical_memory is None:
log_error_and_raise("Your SGE environments has mandatory memory attributes, so 'total_physical_memory' "
"must be specified in your job descriptor", saga.BadParameter, self._logger)
try:
# create a SGE job script from SAGA job description
script = self.__generate_qsub_script(jd)
self._logger.info("Generated SGE script: %s" % script)
except Exception, ex:
log_error_and_raise(str(ex), saga.BadParameter, self._logger)
# try to create the working/output/error directories (if defined)
# WARNING: this assumes a shared filesystem between login node and
# compute nodes.
if jd.working_directory is not None and len(jd.working_directory) > 0:
self.__remote_mkdir(jd.working_directory)
if jd.output is not None and len(jd.output) > 0:
self.__remote_mkdir(os.path.dirname(jd.output))
if jd.error is not None and len(jd.error) > 0:
self.__remote_mkdir(os.path.dirname(jd.output))
# submit the SGE script
# Now we want to execute the script. This process consists of two steps:
# (1) we create a temporary file with 'mktemp' and write the contents of
# the generated PBS script into it
# (2) we call 'qsub <tmpfile>' to submit the script to the queueing system
cmdline = """SCRIPTFILE=`mktemp -t SAGA-Python-SGEJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s -notify $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['qsub']['path'])
#cmdline = 'echo "%s" | %s -notify' % (script, self._commands['qsub']['path'])
ret, out, _ = self.shell.run_sync(cmdline)
if ret != 0:
# something went wrong
message = "Error running job via 'qsub': %s. Commandline was: %s" % (out, cmdline)
log_error_and_raise(message, saga.NoSuccess, self._logger)
# stdout contains the job id:
# Your job 1036608 ("testjob") has been submitted
sge_job_id = None
for line in out.split('\n'):
if line.find("Your job") != -1:
sge_job_id = line.split()[2]
if sge_job_id is None:
message = "Couldn't parse job id from 'qsub' output: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
job_id = "[%s]-[%s]" % (self.rm, sge_job_id)
self._logger.info("Submitted SGE job with id: %s" % job_id)
# add job to internal list of known jobs.
self.jobs[job_id] = {
'state': saga.job.PENDING,
'exec_hosts': None,
'returncode': None,
'create_time': None,
'start_time': None,
'end_time': None,
'gone': False
}
return job_id
# ----------------------------------------------------------------
#
def _retrieve_job(self, job_id):
""" retrieve job information
:param job_id: SAGA job id
:return: job information dictionary
"""
rm, pid = self._adaptor.parse_id(job_id)
# check the state of the job
ret, out, _ = self.shell.run_sync(
"%s | tail -n+3 | awk '($1==%s) {{print $5,$6,$7,$8}}'" % (
self._commands['qstat']['path'], pid))
out = out.strip()
job_info = None
if ret == 0 and len(out) > 0: # job is still in the queue
# output is something like
# r 06/24/2013 17:24:50
m = _QSTAT_JOB_STATE_RE.match(out)
if m is None: # something wrong with the result of qstat
message = "Unexpected qstat results retrieving job info:\n%s" % out.rstrip()
log_error_and_raise(message, saga.NoSuccess, self._logger)
state, start_time, queue = m.groups()
# Convert start time into POSIX format
try:
dt = datetime.strptime(start_time, "%m/%d/%Y %H:%M:%S")
start_time = dt.strftime("%a %b %d %H:%M:%S %Y")
except:
start_time = None
if state not in ["r", "t", "s", "S", "T", "d", "E", "Eqw"]:
start_time = None
exec_host = None
if "@" in queue:
queue, exec_host = queue.split("@")
exec_host = exec_host.rstrip()
if self.accounting and state == "Eqw": # if it is an Eqw job it is better to retrieve the information from qacct
job_info = self.__job_info_from_accounting(pid)
# TODO remove the job from the queue ?
# self.__shell_run("%s %s" % (self._commands['qdel']['path'], pid))
if job_info is None: # use qstat -j pid
qres = self.__kvcmd_results('qstat', "-j %s | grep -E 'submission_time|sge_o_host'" % pid,
key_suffix=":")
if qres is not None: # when qstat fails it will fall back to qacct
# output is something like
# submission_time: Mon Jun 24 17:24:43 2013
# sge_o_host: sge
job_info = dict(
state=self.__sge_to_saga_jobstate(state),
exec_hosts=exec_host or qres.get("sge_o_host"),
returncode=None, # it can not be None because it will be casted to int()
create_time=qres.get("submission_time"),
start_time=start_time,
end_time=None,
gone=False)
# if job already finished or there was an error with qstat
# try to read the remote job info
if job_info is None:
job_info = self.__get_remote_job_info(pid)
# none of the previous methods gave us job info
# if accounting is activated use qacct
if self.accounting and job_info is None:
job_info = self.__job_info_from_accounting(pid)
if job_info is None: # Oooops, we couldn't retrieve information from SGE
message = "Couldn't reconnect to job '%s'" % job_id
log_error_and_raise(message, saga.NoSuccess, self._logger)
self._logger.debug("job_info(%s)=[%s]" % (pid, ", ".join(["%s=%s" % (k, str(job_info[k])) for k in [
"state", "returncode", "exec_hosts", "create_time", "start_time", "end_time", "gone"]])))
return job_info
# ----------------------------------------------------------------
#
def _job_get_info(self, job_id):
""" get job attributes
"""
# if we don't have the job in our dictionary, we don't want it
if job_id not in self.jobs:
message = "Unkown job ID: %s. Can't update state." % job_id
log_error_and_raise(message, saga.NoSuccess, self._logger)
# prev. info contains the info collect when _job_get_info
# was called the last time
prev_info = self.jobs[job_id]
# if the 'gone' flag is set, there's no need to query the job
# state again. it's gone forever
if prev_info['gone'] is True:
self._logger.warning("Job information is not available anymore.")
return prev_info
# if the job is in a terminal state don't expect it to change anymore
if prev_info["state"] in [saga.job.CANCELED, saga.job.FAILED, saga.job.DONE]:
return prev_info
# retrieve updated job information
curr_info = self._retrieve_job(job_id)
if curr_info is None:
prev_info["gone"] = True
return prev_info
# update the job info cache and return it
self.jobs[job_id] = curr_info
return curr_info
# ----------------------------------------------------------------
#
def _job_get_state(self, job_id):
""" get the job's state
"""
# check if we have already reach a terminal state
if self.jobs[job_id]['state'] == saga.job.CANCELED \
or self.jobs[job_id]['state'] == saga.job.FAILED \
or self.jobs[job_id]['state'] == saga.job.DONE:
return self.jobs[job_id]['state']
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['state']
# ----------------------------------------------------------------
#
def _job_get_exit_code(self, job_id):
""" get the job's exit code
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['returncode'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
ret = self.jobs[job_id]['returncode']
# FIXME: 'None' should cause an exception
if ret == None : return None
else : return int(ret)
# ----------------------------------------------------------------
#
def _job_get_execution_hosts(self, job_id):
""" get the job's exit code
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['exec_hosts'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['exec_hosts']
# ----------------------------------------------------------------
#
def _job_get_create_time(self, job_id):
""" get the job's creation time
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['create_time'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['create_time']
# ----------------------------------------------------------------
#
def _job_get_start_time(self, job_id):
""" get the job's start time
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['start_time'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['start_time']
# ----------------------------------------------------------------
#
def _job_get_end_time(self, job_id):
""" get the job's end time
"""
# check if we can / should update
if (self.jobs[job_id]['gone'] is not True) \
and (self.jobs[job_id]['end_time'] is None):
self.jobs[job_id] = self._job_get_info(job_id=job_id)
return self.jobs[job_id]['end_time']
# ----------------------------------------------------------------
#
def _job_cancel(self, job_id):
""" cancel the job via 'qdel'
"""
rm, pid = self._adaptor.parse_id(job_id)
ret, out, _ = self.shell.run_sync("%s %s\n" \
% (self._commands['qdel']['path'], pid))
if ret != 0:
message = "Error canceling job via 'qdel': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
self.__clean_remote_job_info(pid)
# assume the job was succesfully canceld
self.jobs[job_id]['state'] = saga.job.CANCELED
# ----------------------------------------------------------------
#
def _job_wait(self, job_id, timeout):
""" wait for the job to finish or fail
"""
time_start = time.time()
time_now = time_start
rm, pid = self._adaptor.parse_id(job_id)
while True:
state = self._job_get_state(job_id=job_id)
if state == saga.job.UNKNOWN :
log_error_and_raise("cannot get job state", saga.IncorrectState, self._logger)
if state == saga.job.DONE or \
state == saga.job.FAILED or \
state == saga.job.CANCELED:
self.__clean_remote_job_info(pid)
return True
# avoid busy poll
time.sleep(0.5)
# check if we hit timeout
if timeout >= 0:
time_now = time.time()
if time_now - time_start > timeout:
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def create_job(self, jd):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
"job_description": jd,
"job_schema": self.rm.schema,
"reconnect": False
}
return saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_job(self, jobid):
""" Implements saga.adaptors.cpi.job.Service.get_job()
"""
# try to get some information about this job
job_info = self._retrieve_job(jobid)
# save it into our job dictionary.
self.jobs[jobid] = job_info
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
# TODO: fill job description
"job_description": saga.job.Description(),
"job_schema": self.rm.schema,
"reconnect": True,
"reconnect_jobid": jobid
}
return saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
return self.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def list(self):
""" implements saga.adaptors.cpi.job.Service.list()
"""
ids = []
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s | grep `whoami`"\
% self._commands['qstat']['path'])
if ret != 0 and len(out) > 0:
message = "Failed to list jobs via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
elif ret != 0 and len(out) == 0:
# qstat | grep `whoami` exits with 1 if the list is empty
pass
else:
for line in out.split("\n"):
if len(line.split()) > 1:
jobid = "[%s]-[%s]" % (self.rm, line.split()[0])
ids.append(jobid)
return ids
# # ----------------------------------------------------------------
# #
# def container_run (self, jobs) :
# self._logger.debug ("container run: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.run ()
#
#
# # ----------------------------------------------------------------
# #
# def container_wait (self, jobs, mode, timeout) :
# self._logger.debug ("container wait: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.wait ()
#
#
# # ----------------------------------------------------------------
# #
# def container_cancel (self, jobs) :
# self._logger.debug ("container cancel: %s" % str(jobs))
# raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class SGEJob (saga.adaptors.cpi.job.Job):
    """ implements saga.adaptors.cpi.job.Job
    """

    def __init__(self, api, adaptor):

        # initialize parent class
        _cpi_base = super(SGEJob, self)
        _cpi_base.__init__(api, adaptor)

    @SYNC_CALL
    def init_instance(self, job_info):
        """ implements saga.adaptors.cpi.job.Job.init_instance()
        """
        # init_instance is called for every new saga.job.Job object
        # that is created
        self.jd = job_info["job_description"]
        self.js = job_info["job_service"]

        if job_info['reconnect'] is True:
            # reconnecting to a job that was already submitted
            self._id = job_info['reconnect_jobid']
            self._started = True
        else:
            self._id = None
            self._started = False

        return self.get_api()

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_description(self):
        """ implements saga.adaptors.cpi.job.Job.get_description()
        """
        return self.jd

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_state(self):
        """ implements saga.adaptors.cpi.job.Job.get_state()
        """
        # (docstring typo 'mplements' fixed)
        if self._started is False:
            # jobs that are not started are always in 'NEW' state
            return saga.job.NEW
        else:
            return self.js._job_get_state(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def wait(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.wait()
        """
        if self._started is False:
            log_error_and_raise("Can't wait for job that hasn't been started",
                                saga.IncorrectState, self._logger)
        else:
            self.js._job_wait(self._id, timeout)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def cancel(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.cancel()
        """
        if self._started is False:
            # BUGFIX: the error message used to say "Can't wait for job",
            # copy-pasted from wait()
            log_error_and_raise("Can't cancel job that hasn't been started",
                                saga.IncorrectState, self._logger)
        else:
            self.js._job_cancel(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def run(self):
        """ implements saga.adaptors.cpi.job.Job.run()
        """
        self._id = self.js._job_run(self.jd)
        self._started = True

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_service_url(self):
        """ implements saga.adaptors.cpi.job.Job.get_service_url()
        """
        return self.js.rm

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_id(self):
        """ implements saga.adaptors.cpi.job.Job.get_id()
        """
        return self._id

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_exit_code(self):
        """ implements saga.adaptors.cpi.job.Job.get_exit_code()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_exit_code(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_created(self):
        """ implements saga.adaptors.cpi.job.Job.get_created()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_create_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_started(self):
        """ implements saga.adaptors.cpi.job.Job.get_started()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_start_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_finished(self):
        """ implements saga.adaptors.cpi.job.Job.get_finished()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_end_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_execution_hosts(self):
        """ implements saga.adaptors.cpi.job.Job.get_execution_hosts()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_execution_hosts(self._id)
| |
from __future__ import division, absolute_import, print_function
# Code common to build tools
import sys
import warnings
import copy
import binascii
import textwrap
from numpy.distutils.misc_util import mingw32
#-------------------
# Versioning support
#-------------------
# How to change C_API_VERSION ?
#   - increase C_API_VERSION value
#   - record the hash for the new C API with the cversions.py script
#   and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
# exception for released version.

# Binary compatibility version number. This number is increased whenever the
# C-API is changed such that binary compatibility is broken, i.e. whenever a
# recompile of extension modules is needed.
C_ABI_VERSION = 0x01000009

# Minor API version.  This number is increased whenever a change is made to the
# C-API -- whether it breaks binary compatibility or not.  Some changes, such
# as adding a function pointer to the end of the function table, can be made
# without breaking binary compatibility.  In this case, only the C_API_VERSION
# (*not* C_ABI_VERSION) would be increased.  Whenever binary compatibility is
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
#
# History of (C_API_VERSION -> numpy release series):
# 0x00000008 - 1.7.x
# 0x00000009 - 1.8.x
# 0x00000009 - 1.9.x
# 0x0000000a - 1.10.x
# 0x0000000a - 1.11.x
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
# 0x0000000c - 1.15.x
# 0x0000000d - 1.16.x
C_API_VERSION = 0x0000000d
class MismatchCAPIWarning(Warning):
    """Warning emitted by check_api_version() when the recorded C API
    checksum does not match the current one (i.e. the API files changed
    without C_API_VERSION being bumped)."""
    pass
def is_released(config):
    """Return True if a released version of numpy is detected."""
    from distutils.version import LooseVersion

    version = config.get_version('../version.py')
    if version is None:
        raise ValueError("Could not get version")
    # Release versions parse into at most three components (e.g. 1.16.2);
    # anything longer (e.g. a '...dev0' suffix) is a development build.
    parsed = LooseVersion(vstring=version).version
    return len(parsed) <= 3
def get_api_versions(apiversion, codegen_dir):
    """
    Return current C API checksum and the recorded checksum.

    Return current C API checksum and the recorded checksum for the given
    version of the C API version.
    """
    # The hash of the current API is computed from the .txt files living in
    # the code_generators directory, so make that directory importable first.
    sys.path.insert(0, codegen_dir)
    try:
        genapi = __import__('genapi')
        numpy_api = __import__('numpy_api')
        current_hash = genapi.fullapi_hash(numpy_api.full_api)
        recorded_hashes = genapi.get_versions_hash()
    finally:
        del sys.path[0]
    return current_hash, recorded_hashes[apiversion]
def check_api_version(apiversion, codegen_dir):
    """Emits a MismatchCAPIWarning if the C API version needs updating."""
    curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)

    # A differing hash means the api .txt files in codegen_dir were updated
    # without the API version being updated.  Any modification in those .txt
    # files should be reflected in the api and eventually abi versions.
    # To compute the checksum of the current API, use numpy/core/cversions.py
    if curapi_hash != api_hash:
        msg = ("API mismatch detected, the C API version "
               "numbers have to be updated. Current C api version is %d, "
               "with checksum %s, but recorded checksum for C API version %d "
               "in core/codegen_dir/cversions.txt is %s. If functions were "
               "added in the C API, you have to update C_API_VERSION in %s."
               )
        warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
                             __file__),
                      MismatchCAPIWarning, stacklevel=2)
# Mandatory functions: if not found, fail the build
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
"floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
"acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "ftello", "fseeko",
"strtoll", "strtoull", "cbrt", "strtold_l", "fallocate",
"backtrace", "madvise"]
OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
"xmmintrin.h", # SSE
"emmintrin.h", # SSE2
"immintrin.h", # AVX
"features.h", # for glibc version linux
"xlocale.h", # see GH#8367
"dlfcn.h", # dladdr
"sys/mman.h", #madvise
]
# optional gcc compiler builtins and their call arguments and optional a
# required header and definition name (HAVE_ prepended)
# call arguments are required as the compiler will do strict signature checking
OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_isinf", '5.'),
("__builtin_isfinite", '5.'),
("__builtin_bswap32", '5u'),
("__builtin_bswap64", '5u'),
("__builtin_expect", '5, 0'),
("__builtin_mul_overflow", '5, 5, (int*)5'),
# broken on OSX 10.11, make sure its not optimized away
("volatile int r = __builtin_cpu_supports", '"sse"',
"stdio.h", "__BUILTIN_CPU_SUPPORTS"),
("volatile int r = __builtin_cpu_supports", '"avx512f"',
"stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"),
# MMX only needed for icc, but some clangs don't have it
("_m_from_int64", '0', "emmintrin.h"),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
"xmmintrin.h"), # SSE
("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
("__builtin_prefetch", "(float*)0, 0, 3"),
# check that the linker can handle avx
("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
"stdio.h", "LINK_AVX"),
("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
"stdio.h", "LINK_AVX2"),
("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"',
"stdio.h", "LINK_AVX512F"),
("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
]
# function attributes
# tested via "int %s %s(void *);" % (attribute, name)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
'attribute_optimize_unroll_loops'),
('__attribute__((optimize("O3")))',
'attribute_optimize_opt_3'),
('__attribute__((nonnull (1)))',
'attribute_nonnull'),
('__attribute__((target ("avx")))',
'attribute_target_avx'),
('__attribute__((target ("avx2")))',
'attribute_target_avx2'),
('__attribute__((target ("avx512f")))',
'attribute_target_avx512f'),
]
# function attributes with intrinsics
# To ensure your compiler can compile avx intrinsics with just the attributes
# gcc 4.8.4 support attributes but not with intrisics
# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
'attribute_target_avx2_with_intrinsics',
'__m256 temp = _mm256_set1_ps(1.0); temp = \
_mm256_fmadd_ps(temp, temp, temp)',
'immintrin.h'),
('__attribute__((target("avx512f")))',
'attribute_target_avx512f_with_intrinsics',
'__m512 temp = _mm512_set1_ps(1.0)',
'immintrin.h'),
]
# variable attributes tested via "int %s a" % attribute
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = [
"expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
"ftello", "fseeko"
]
# C99 functions: float and long double versions
C99_FUNCS = [
"sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
"rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
"asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
"pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
"nextafter", "cbrt"
]
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
C99_COMPLEX_TYPES = [
'complex double', 'complex float', 'complex long double'
]
C99_COMPLEX_FUNCS = [
"cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
"catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
"cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
]
def fname2def(name):
    """Return the HAVE_<NAME> preprocessor macro for a function name."""
    return "HAVE_" + name.upper()
def sym2def(symbol):
    """Turn a (possibly multi-word) symbol into an upper-case define name,
    dropping spaces entirely."""
    return symbol.replace(' ', '').upper()
def type2def(symbol):
    """Turn a C type name into an upper-case define name, mapping spaces
    to underscores (e.g. 'long double' -> 'LONG_DOUBLE')."""
    return symbol.replace(' ', '_').upper()
# Code to detect long double representation taken from MPFR m4 macro
def check_long_double_representation(cmd):
    """Compile a small probe embedding a 'long double' between two marker
    byte sequences, dump the object file, and return a string naming the
    detected long double binary layout (see long_double_representation).

    ``cmd`` is a distutils config command providing ``_check_compiler``,
    ``_compile`` and ``compiler`` attributes.
    """
    cmd._check_compiler()
    body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}

    # Disable whole program optimization (the default on vs2015, with python 3.5+)
    # which generates intermediary object files and prevents checking the
    # float representation.
    if sys.platform == "win32" and not mingw32():
        try:
            cmd.compiler.compile_options.remove("/GL")
        except (AttributeError, ValueError):
            pass
    # Disable multi-file interprocedural optimization in the Intel compiler on Linux
    # which generates intermediary object files and prevents checking the
    # float representation.
    elif (sys.platform != "win32"
            and cmd.compiler.compiler_type.startswith('intel')
            and '-ipo' in cmd.compiler.cc_exe):
        newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
        cmd.compiler.set_executables(
            compiler=newcompiler,
            compiler_so=newcompiler,
            compiler_cxx=newcompiler,
            linker_exe=newcompiler,
            linker_so=newcompiler + ' -shared'
        )

    # We need to use _compile because we need the object filename
    src, obj = cmd._compile(body, None, None, 'c')
    try:
        ltype = long_double_representation(pyod(obj))
        return ltype
    except ValueError:
        # try linking to support CC="gcc -flto" or icc -ipo
        # struct needs to be volatile so it isn't optimized away
        # additionally "clang -flto" requires the foo struct to be used
        body = body.replace('struct', 'volatile struct')
        body += "int main(void) { return foo.before[0]; }\n"
        src, obj = cmd._compile(body, None, None, 'c')
        cmd.temp_files.append("_configtest")
        cmd.compiler.link_executable([obj], "_configtest")
        ltype = long_double_representation(pyod("_configtest"))
        return ltype
    finally:
        cmd._clean()
LONG_DOUBLE_REPRESENTATION_SRC = r"""
/* "before" is 16 bytes to ensure there's no padding between it and "x".
* We're not expecting any "long double" bigger than 16 bytes or with
* alignment requirements stricter than 16 bytes. */
typedef %(type)s test_type;
struct {
char before[16];
test_type x;
char after[8];
} foo = {
{ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
-123456789.0,
{ '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
};
"""
def pyod(filename):
    """Python implementation of the od UNIX utility (od -b, more exactly).

    Parameters
    ----------
    filename : str
        name of the file to get the dump from.

    Returns
    -------
    out : seq
        list of lines of od output

    Note
    ----
    We only implement enough to get the necessary information for long double
    representation, this is not intended as a compatible replacement for od.
    """
    def _dump_py2():
        # Python 2: file bytes arrive as 1-char strings.
        rows = []
        with open(filename, 'rb') as fid:
            octs = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
        for start in range(0, len(octs), 16):
            row = ['%07d' % int(oct(start))]
            row.extend('%03d' % c for c in octs[start:start + 16])
            rows.append(" ".join(row))
        return rows

    def _dump_py3():
        # Python 3: file bytes arrive as ints; strip the '0o' oct prefix.
        rows = []
        with open(filename, 'rb') as fid:
            octs = [oct(byte)[2:] for byte in fid.read()]
        for start in range(0, len(octs), 16):
            row = ['%07d' % int(oct(start)[2:])]
            row.extend('%03d' % int(o) for o in octs[start:start + 16])
            rows.append(" ".join(row))
        return rows

    if sys.version_info[0] < 3:
        return _dump_py2()
    return _dump_py3()
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
              '001', '043', '105', '147', '211', '253', '315', '357']
_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']

_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
                       '031', '300', '000', '000']
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
                       '031', '300', '000', '000', '000', '000', '000', '000']
_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
                          '242', '240', '000', '000', '000', '000']
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
                      '000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
                     ['000'] * 8)
_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
                     ['000'] * 8)

def long_double_representation(lines):
    """Given a binary dump as given by GNU od -b, look for long double
    representation."""
    # The probe binary embeds a 16-byte before-marker, the long double value
    # and an 8-byte after-marker.  Slide a 32-byte window over the dump until
    # the window ends with the after-marker; the bytes between the markers
    # are the long double:
    #  - 12 content bytes: 80 bits Intel representation
    #  - 16 content bytes: 80 bits Intel representation (64 bits) or quad precision
    #  -  8 content bytes: same as double (not implemented yet)
    window = [''] * 32
    candidate = None
    for line in lines:
        # Skip the first word: od -b prefixes each line with an offset.
        for word in line.split()[1:]:
            window.pop(0)
            window.append(word)

            # The window only holds the long double once it ends with the
            # after-marker.
            if window[-8:] != _AFTER_SEQ:
                continue
            candidate = list(window)
            # 12 content bytes: only 32 - 8 - 12 = 12 "before" bytes remain
            # visible; the first 4 slid out of the window already.
            if window[:12] == _BEFORE_SEQ[4:]:
                content = window[12:-8]
                if content == _INTEL_EXTENDED_12B:
                    return 'INTEL_EXTENDED_12_BYTES_LE'
                if content == _MOTOROLA_EXTENDED_12B:
                    return 'MOTOROLA_EXTENDED_12_BYTES_BE'
            # 16 content bytes: 32 - 8 - 16 = 8 "before" bytes remain.
            elif window[:8] == _BEFORE_SEQ[8:]:
                content = window[8:-8]
                if content == _INTEL_EXTENDED_16B:
                    return 'INTEL_EXTENDED_16_BYTES_LE'
                elif content == _IEEE_QUAD_PREC_BE:
                    return 'IEEE_QUAD_BE'
                elif content == _IEEE_QUAD_PREC_LE:
                    return 'IEEE_QUAD_LE'
                elif content == _IBM_DOUBLE_DOUBLE_LE:
                    return 'IBM_DOUBLE_DOUBLE_LE'
                elif content == _IBM_DOUBLE_DOUBLE_BE:
                    return 'IBM_DOUBLE_DOUBLE_BE'
            # 8 content bytes: the whole 16-byte before-marker is visible.
            elif window[:16] == _BEFORE_SEQ:
                content = window[16:-8]
                if content == _IEEE_DOUBLE_LE:
                    return 'IEEE_DOUBLE_LE'
                elif content == _IEEE_DOUBLE_BE:
                    return 'IEEE_DOUBLE_BE'

    if candidate is not None:
        raise ValueError("Unrecognized format (%s)" % candidate)
    else:
        # We never detected the after_sequence
        raise ValueError("Could not lock sequences (%s)" % candidate)
def check_for_right_shift_internal_compiler_error(cmd):
    """
    On our arm CI, this fails with an internal compilation error

    The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:

        <source>: In function 'right_shift':
        <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
                ip1[i] = ip1[i] >> in2;
                       ^
        Please submit a full bug report,
        with preprocessed source if appropriate.
        See <http://gcc.gnu.org/bugs.html> for instructions.
        Compiler returned: 1

    This function returns True if this compiler bug is present, and we need to
    turn off optimization for the function
    """
    cmd._check_compiler()
    # First make sure the optimize attribute itself is understood, so that a
    # failure of the probe below can only come from the shift construct.
    if not cmd.try_compile(textwrap.dedent("""\
        __attribute__((optimize("O3"))) void right_shift() {}
        """), None, None):
        return False
    compiled_cleanly = cmd.try_compile(textwrap.dedent("""\
        typedef long the_type;  /* fails also for unsigned and long long */
        __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
            for (int i = 0; i < n; i++) {
                if (in2 < (the_type)sizeof(the_type) * 8) {
                    ip1[i] = ip1[i] >> in2;
                }
            }
        }
        """), None, None)
    return not compiled_cleanly
| |
"""
sentry.models.group
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import re
import six
import time
import warnings
from base64 import b16decode, b16encode
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import buffer
from sentry.constants import (
DEFAULT_LOGGER_NAME, EVENT_ORDERING_KEY, LOG_LEVELS, MAX_CULPRIT_LENGTH
)
from sentry.db.models import (
BaseManager, BoundedBigIntegerField, BoundedIntegerField,
BoundedPositiveIntegerField, FlexibleForeignKey, GzippedDictField, Model,
sane_repr
)
from sentry.utils.http import absolute_uri
from sentry.utils.numbers import base32_decode, base32_encode
from sentry.utils.strings import strip, truncatechars
logger = logging.getLogger(__name__)

# Matches "<callsign><separator><id>" where the separator is whitespace,
# an underscore or a dash.
_short_id_re = re.compile(r'^(.*?)(?:[\s_-])([A-Za-z0-9-._]+)$')


def looks_like_short_id(value):
    """Return True when *value* has the two-part qualified short-id shape."""
    stripped = (value or '').strip()
    return _short_id_re.match(stripped) is not None
# TODO(dcramer): pull in enum library
class GroupStatus(object):
    """Integer constants persisted in ``Group.status``."""
    UNRESOLVED = 0
    RESOLVED = 1
    MUTED = 2
    # Deletion/merge states are transitional and are not exposed as
    # choices on the Group.status field below.
    PENDING_DELETION = 3
    DELETION_IN_PROGRESS = 4
    PENDING_MERGE = 5
def get_group_with_redirect(id, queryset=None):
    """
    Retrieve a group by ID, checking the redirect table if the requested group
    does not exist. Returns a two-tuple of ``(object, redirected)``.
    """
    if queryset is None:
        queryset = Group.objects.all()
        # When not passing a queryset, we want to read from cache
        getter = Group.objects.get_from_cache
    else:
        getter = queryset.get

    try:
        return getter(id=id), False
    except Group.DoesNotExist as error:
        from sentry.models import GroupRedirect

        redirect_ids = GroupRedirect.objects.filter(
            previous_group_id=id,
        ).values_list('group_id', flat=True)
        try:
            # ``redirect_ids`` is used as a subquery here.
            return queryset.get(id=redirect_ids), True
        except Group.DoesNotExist:
            raise error  # raise original `DoesNotExist`
class GroupManager(BaseManager):
    """Manager for ``Group``; also used for related-field descriptors."""
    use_for_related_fields = True

    def by_qualified_short_id(self, org, short_id):
        # Resolve a "CALLSIGN-BASE32ID" string to a Group within *org*;
        # raises Group.DoesNotExist on any parse or lookup failure.
        match = _short_id_re.match(short_id.strip())
        if match is None:
            raise Group.DoesNotExist()
        callsign, id = match.groups()
        callsign = callsign.lower()
        try:
            short_id = base32_decode(id)
            # We need to make sure the short id is not overflowing the
            # field's max or the lookup will fail with an assertion error.
            max_id = Group._meta.get_field_by_name('short_id')[0].MAX_VALUE
            if short_id > max_id:
                raise ValueError()
        except ValueError:
            raise Group.DoesNotExist()
        return Group.objects.get(
            project__organization=org,
            project__slug=callsign,
            short_id=short_id,
        )

    def from_kwargs(self, project, **kwargs):
        # Normalize raw event kwargs through the EventManager and persist.
        from sentry.event_manager import EventManager

        manager = EventManager(kwargs)
        manager.normalize()
        return manager.save(project)

    def add_tags(self, group, tags):
        # Buffered increment of tag counters; each tag item is either
        # (key, value) or (key, value, data).
        from sentry.models import TagValue, GroupTagValue

        project_id = group.project_id
        date = group.last_seen

        for tag_item in tags:
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project_id': project_id,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            buffer.incr(GroupTagValue, {
                'times_seen': 1,
            }, {
                'group_id': group.id,
                'key': key,
                'value': value,
            }, {
                'project': project_id,
                'last_seen': date,
            })
class Group(Model):
    """
    Aggregated message which summarizes a set of Events.
    """
    __core__ = False

    project = FlexibleForeignKey('sentry.Project', null=True)
    logger = models.CharField(
        max_length=64, blank=True, default=DEFAULT_LOGGER_NAME, db_index=True)
    level = BoundedPositiveIntegerField(
        choices=LOG_LEVELS.items(), default=logging.ERROR, blank=True,
        db_index=True)
    message = models.TextField()
    # Stored in the legacy 'view' column.
    culprit = models.CharField(
        max_length=MAX_CULPRIT_LENGTH, blank=True, null=True,
        db_column='view')
    num_comments = BoundedPositiveIntegerField(default=0, null=True)
    platform = models.CharField(max_length=64, null=True)
    # Only the first three GroupStatus values are offered as choices; the
    # transitional deletion/merge statuses are set programmatically.
    status = BoundedPositiveIntegerField(default=0, choices=(
        (GroupStatus.UNRESOLVED, _('Unresolved')),
        (GroupStatus.RESOLVED, _('Resolved')),
        (GroupStatus.MUTED, _('Muted')),
    ), db_index=True)
    times_seen = BoundedPositiveIntegerField(default=1, db_index=True)
    last_seen = models.DateTimeField(default=timezone.now, db_index=True)
    first_seen = models.DateTimeField(default=timezone.now, db_index=True)
    first_release = FlexibleForeignKey('sentry.Release', null=True,
                                       on_delete=models.PROTECT)
    resolved_at = models.DateTimeField(null=True, db_index=True)
    # active_at should be the same as first_seen by default
    active_at = models.DateTimeField(null=True, db_index=True)
    time_spent_total = BoundedIntegerField(default=0)
    time_spent_count = BoundedIntegerField(default=0)
    score = BoundedIntegerField(default=0)
    is_public = models.NullBooleanField(default=False, null=True)
    data = GzippedDictField(blank=True, null=True)
    short_id = BoundedBigIntegerField(null=True)

    objects = GroupManager()

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_groupedmessage'
        verbose_name_plural = _('grouped messages')
        verbose_name = _('grouped message')
        permissions = (
            ("can_view", "Can view"),
        )
        index_together = (
            ('project', 'first_release'),
        )
        unique_together = (
            ('project', 'short_id'),
        )

    __repr__ = sane_repr('project_id')

    def __unicode__(self):
        return "(%s) %s" % (self.times_seen, self.error())

    def save(self, *args, **kwargs):
        # Backfill the timestamp chain: last_seen -> first_seen -> active_at.
        if not self.last_seen:
            self.last_seen = timezone.now()
        if not self.first_seen:
            self.first_seen = self.last_seen
        if not self.active_at:
            self.active_at = self.first_seen
        # We limit what we store for the message body
        self.message = strip(self.message)
        if self.message:
            self.message = truncatechars(self.message.splitlines()[0], 255)
        super(Group, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return absolute_uri(reverse('sentry-group', args=[
            self.organization.slug, self.project.slug, self.id]))

    @property
    def qualified_short_id(self):
        # "PROJECTSLUG-BASE32ID", or None when no short_id was assigned.
        if self.short_id is not None:
            return '%s-%s' % (
                self.project.slug.upper(),
                base32_encode(self.short_id),
            )

    @property
    def event_set(self):
        from sentry.models import Event
        return Event.objects.filter(group_id=self.id)

    def is_over_resolve_age(self):
        # Auto-resolve window, in hours, from the project option; 0/None
        # disables auto-resolution.
        resolve_age = self.project.get_option('sentry:resolve_age', None)
        if not resolve_age:
            return False
        return self.last_seen < timezone.now() - timedelta(hours=int(resolve_age))

    def is_muted(self):
        return self.get_status() == GroupStatus.MUTED

    def is_resolved(self):
        return self.get_status() == GroupStatus.RESOLVED

    def get_status(self):
        # XXX(dcramer): GroupSerializer reimplements this logic
        from sentry.models import GroupSnooze

        if self.status == GroupStatus.MUTED:
            try:
                snooze = GroupSnooze.objects.get(group=self)
            except GroupSnooze.DoesNotExist:
                pass
            else:
                # XXX(dcramer): if the snooze row exists then we need
                # to confirm its still valid
                if snooze.until > timezone.now():
                    return GroupStatus.MUTED
                else:
                    return GroupStatus.UNRESOLVED

        if self.status == GroupStatus.UNRESOLVED and self.is_over_resolve_age():
            return GroupStatus.RESOLVED
        return self.status

    def get_share_id(self):
        # Opaque public share token: base16 of "<project_id>.<id>".
        return b16encode(
            ('{}.{}'.format(self.project_id, self.id)).encode('utf-8')
        ).lower().decode('utf-8')

    @classmethod
    def from_share_id(cls, share_id):
        # Inverse of get_share_id(); raises DoesNotExist on malformed input.
        if not share_id:
            raise cls.DoesNotExist

        try:
            project_id, group_id = b16decode(share_id.upper()).decode('utf-8').split('.')
        except (ValueError, TypeError):
            raise cls.DoesNotExist

        if not (project_id.isdigit() and group_id.isdigit()):
            raise cls.DoesNotExist

        return cls.objects.get(project=project_id, id=group_id)

    def get_score(self):
        # Frequency-weighted recency score.
        return int(math.log(self.times_seen) * 600 + float(time.mktime(self.last_seen.timetuple())))

    def get_latest_event(self):
        # Cached on the instance; picks the newest of the 5 most recent
        # events according to EVENT_ORDERING_KEY.
        from sentry.models import Event

        if not hasattr(self, '_latest_event'):
            latest_events = sorted(
                Event.objects.filter(
                    group_id=self.id,
                ).order_by('-datetime')[0:5],
                key=EVENT_ORDERING_KEY,
                reverse=True,
            )
            try:
                self._latest_event = latest_events[0]
            except IndexError:
                self._latest_event = None
        return self._latest_event

    def get_oldest_event(self):
        # Cached on the instance; mirror image of get_latest_event().
        from sentry.models import Event

        if not hasattr(self, '_oldest_event'):
            oldest_events = sorted(
                Event.objects.filter(
                    group_id=self.id,
                ).order_by('datetime')[0:5],
                key=EVENT_ORDERING_KEY,
            )
            try:
                self._oldest_event = oldest_events[0]
            except IndexError:
                self._oldest_event = None
        return self._oldest_event

    def get_unique_tags(self, tag, since=None, order_by='-times_seen'):
        # TODO(dcramer): this has zero test coverage and is a critical path
        from sentry.models import GroupTagValue

        queryset = GroupTagValue.objects.filter(
            group=self,
            key=tag,
        )
        if since:
            queryset = queryset.filter(last_seen__gte=since)
        return queryset.values_list(
            'value',
            'times_seen',
            'first_seen',
            'last_seen',
        ).order_by(order_by)

    def get_tags(self, with_internal=True):
        # Returns [{'key': ..., 'label': ...}, ...] sorted by label; cached
        # on the instance.  ``with_internal=False`` hides 'sentry:' tags.
        from sentry.models import GroupTagKey, TagKey

        if not hasattr(self, '_tag_cache'):
            group_tags = GroupTagKey.objects.filter(
                group=self,
                project=self.project,
            )
            if not with_internal:
                group_tags = group_tags.exclude(key__startswith='sentry:')

            group_tags = list(group_tags.values_list('key', flat=True))

            tag_keys = dict(
                (t.key, t)
                for t in TagKey.objects.filter(
                    project=self.project,
                    key__in=group_tags
                )
            )

            results = []
            for key in group_tags:
                try:
                    tag_key = tag_keys[key]
                except KeyError:
                    # No TagKey row: derive a human label from the key.
                    label = key.replace('_', ' ').title()
                else:
                    label = tag_key.get_label()

                results.append({
                    'key': key,
                    'label': label,
                })

            self._tag_cache = sorted(results, key=lambda x: x['label'])

        return self._tag_cache

    def error(self):
        return self.message
    error.short_description = _('error')

    def has_two_part_message(self):
        message = strip(self.message)
        return '\n' in message or len(message) > 100

    @property
    def title(self):
        culprit = strip(self.culprit)
        if culprit:
            return culprit
        return self.message

    @property
    def message_short(self):
        # First line of the message, truncated to 100 chars, with a
        # placeholder for empty messages.
        message = strip(self.message)
        if not message:
            message = '<unlabeled message>'
        else:
            message = truncatechars(message.splitlines()[0], 100)
        return message

    @property
    def organization(self):
        return self.project.organization

    @property
    def team(self):
        return self.project.team

    @property
    def checksum(self):
        warnings.warn('Group.checksum is no longer used', DeprecationWarning)
        return ''

    def get_email_subject(self):
        return '[%s] %s: %s' % (
            self.project.get_full_name().encode('utf-8'),
            six.text_type(self.get_level_display()).upper().encode('utf-8'),
            self.message_short.encode('utf-8')
        )
| |
# -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class bitmarket (Exchange):
def describe(self):
return self.deep_extend(super(bitmarket, self).describe(), {
'id': 'bitmarket',
'name': 'BitMarket',
'countries': ['PL', 'EU'],
'rateLimit': 1500,
'hasCORS': False,
'hasFetchOHLCV': True,
'hasWithdraw': True,
'timeframes': {
'90m': '90m',
'6h': '6h',
'1d': '1d',
'1w': '7d',
'1M': '1m',
'3M': '3m',
'6M': '6m',
'1y': '1y',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27767256-a8555200-5ef9-11e7-96fd-469a65e2b0bd.jpg',
'api': {
'public': 'https://www.bitmarket.net',
'private': 'https://www.bitmarket.pl/api2/', # last slash is critical
},
'www': [
'https://www.bitmarket.pl',
'https://www.bitmarket.net',
],
'doc': [
'https://www.bitmarket.net/docs.php?file=api_public.html',
'https://www.bitmarket.net/docs.php?file=api_private.html',
'https://github.com/bitmarket-net/api',
],
},
'api': {
'public': {
'get': [
'json/{market}/ticker',
'json/{market}/orderbook',
'json/{market}/trades',
'json/ctransfer',
'graphs/{market}/90m',
'graphs/{market}/6h',
'graphs/{market}/1d',
'graphs/{market}/7d',
'graphs/{market}/1m',
'graphs/{market}/3m',
'graphs/{market}/6m',
'graphs/{market}/1y',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orders',
'trades',
'history',
'withdrawals',
'tradingdesk',
'tradingdeskStatus',
'tradingdeskConfirm',
'cryptotradingdesk',
'cryptotradingdeskStatus',
'cryptotradingdeskConfirm',
'withdraw',
'withdrawFiat',
'withdrawPLNPP',
'withdrawFiatFast',
'deposit',
'transfer',
'transfers',
'marginList',
'marginOpen',
'marginClose',
'marginCancel',
'marginModify',
'marginBalanceAdd',
'marginBalanceRemove',
'swapList',
'swapOpen',
'swapClose',
],
},
},
'markets': {
'BCH/PLN': {'id': 'BCCPLN', 'symbol': 'BCH/PLN', 'base': 'BCH', 'quote': 'PLN'},
'BTG/PLN': {'id': 'BTGPLN', 'symbol': 'BTG/PLN', 'base': 'BTG', 'quote': 'PLN'},
'BTC/PLN': {'id': 'BTCPLN', 'symbol': 'BTC/PLN', 'base': 'BTC', 'quote': 'PLN'},
'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
'LTC/PLN': {'id': 'LTCPLN', 'symbol': 'LTC/PLN', 'base': 'LTC', 'quote': 'PLN'},
'LTC/BTC': {'id': 'LTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC'},
'LiteMineX/BTC': {'id': 'LiteMineXBTC', 'symbol': 'LiteMineX/BTC', 'base': 'LiteMineX', 'quote': 'BTC'},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.45 / 100,
'maker': 0.15 / 100,
'tiers': {
'taker': [
[0, 0.45 / 100],
[99.99, 0.44 / 100],
[299.99, 0.43 / 100],
[499.99, 0.42 / 100],
[999.99, 0.41 / 100],
[1999.99, 0.40 / 100],
[2999.99, 0.39 / 100],
[4999.99, 0.38 / 100],
[9999.99, 0.37 / 100],
[19999.99, 0.36 / 100],
[29999.99, 0.35 / 100],
[49999.99, 0.34 / 100],
[99999.99, 0.33 / 100],
[199999.99, 0.32 / 100],
[299999.99, 0.31 / 100],
[499999.99, 0.0 / 100],
],
'maker': [
[0, 0.15 / 100],
[99.99, 0.14 / 100],
[299.99, 0.13 / 100],
[499.99, 0.12 / 100],
[999.99, 0.11 / 100],
[1999.99, 0.10 / 100],
[2999.99, 0.9 / 100],
[4999.99, 0.8 / 100],
[9999.99, 0.7 / 100],
[19999.99, 0.6 / 100],
[29999.99, 0.5 / 100],
[49999.99, 0.4 / 100],
[99999.99, 0.3 / 100],
[199999.99, 0.2 / 100],
[299999.99, 0.1 / 100],
[499999.99, 0.0 / 100],
],
},
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.0008,
'LTC': 0.005,
'BCH': 0.0008,
'BTG': 0.0008,
'DOGE': 1,
'EUR': 2,
'PLN': 2,
},
'deposit': {
'BTC': 0,
'LTC': 0,
'BCH': 0,
'BTG': 0,
'DOGE': 25,
'EUR': 2, # SEPA. Transfer INT(SHA): 5 EUR
'PLN': 0,
},
},
},
})
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostInfo()
data = response['data']
balance = data['balances']
result = {'info': data}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
account = self.account()
if currency in balance['available']:
account['free'] = balance['available'][currency]
if currency in balance['blocked']:
account['used'] = balance['blocked'][currency]
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
orderbook = self.publicGetJsonMarketOrderbook(self.extend({
'market': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
return {
'bids': orderbook['bids'],
'asks': orderbook['asks'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
def fetch_ticker(self, symbol, params={}):
ticker = self.publicGetJsonMarketTicker(self.extend({
'market': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
vwap = float(ticker['vwap'])
baseVolume = float(ticker['volume'])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': vwap,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market=None):
side = 'buy' if (trade['type'] == 'bid') else 'sell'
timestamp = trade['date'] * 1000
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': None,
'type': None,
'side': side,
'price': trade['price'],
'amount': trade['amount'],
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = self.publicGetJsonMarketTrades(self.extend({
'market': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='90m', since=None, limit=None):
return [
ohlcv['time'] * 1000,
float(ohlcv['open']),
float(ohlcv['high']),
float(ohlcv['low']),
float(ohlcv['close']),
float(ohlcv['vol']),
]
def fetch_ohlcv(self, symbol, timeframe='90m', since=None, limit=None, params={}):
self.load_markets()
method = 'publicGetGraphsMarket' + self.timeframes[timeframe]
market = self.market(symbol)
response = getattr(self, method)(self.extend({
'market': market['id'],
}, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
response = self.privatePostTrade(self.extend({
'market': self.market_id(symbol),
'type': side,
'amount': amount,
'rate': price,
}, params))
result = {
'info': response,
}
if 'id' in response['order']:
result['id'] = response['id']
return result
def cancel_order(self, id, symbol=None, params={}):
return self.privatePostCancel({'id': id})
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
    def withdraw(self, currency, amount, address, params={}):
        """Withdraw *amount* of *currency*.

        Fiat withdrawals (EUR/PLN) go through ``privatePostWithdrawFiat``
        and require extra params: 'account' (always), 'account2' (SWIFT
        code, EUR only) and 'withdrawal_note' (PLN only). Any other
        currency is treated as crypto and sent to *address* via
        ``privatePostWithdraw``.

        Raises:
            ExchangeError: when a required fiat parameter is missing.
        """
        self.load_markets()
        method = None
        request = {
            'currency': currency,
            'quantity': amount,
        }
        if self.is_fiat(currency):
            method = 'privatePostWithdrawFiat'
            if 'account' in params:
                request['account'] = params['account'] # bank account code for withdrawal
            else:
                raise ExchangeError(self.id + ' requires account parameter to withdraw fiat currency')
            if 'account2' in params:
                request['account2'] = params['account2'] # bank SWIFT code(EUR only)
            else:
                # SWIFT code is mandatory only for EUR transfers
                if currency == 'EUR':
                    raise ExchangeError(self.id + ' requires account2 parameter to withdraw EUR')
            if 'withdrawal_note' in params:
                request['withdrawal_note'] = params['withdrawal_note'] # a 10-character user-specified withdrawal note(PLN only)
            else:
                # the note is mandatory only for PLN transfers
                if currency == 'PLN':
                    raise ExchangeError(self.id + ' requires withdrawal_note parameter to withdraw PLN')
        else:
            method = 'privatePostWithdraw'
            request['address'] = address
        response = getattr(self, method)(self.extend(request, params))
        # NOTE(review): the raw response is echoed as 'id' — the endpoint
        # apparently returns no dedicated transaction id; confirm.
        return {
            'info': response,
            'id': response,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url/body/headers for a request.

        Public endpoints interpolate *params* into '<path>.json' on the url.
        Private endpoints POST a urlencoded body carrying a 'tonce' nonce and
        the method name, signed with HMAC-SHA512 of the account secret.
        """
        url = self.urls['api'][api]
        if api == 'public':
            url += '/' + self.implode_params(path + '.json', params)
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            query = self.extend({
                'tonce': nonce,
                'method': path,
            }, params)
            # the signature is computed over the exact urlencoded body
            body = self.urlencode(query)
            headers = {
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import re
import math
from io import open
from collections import OrderedDict
import numpy as np
from monty.json import MSONable, MontyDecoder
from pymatgen.core.structure import Molecule, Structure
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.lattice import Lattice
"""
This module implements classes for generating/parsing Lammps data file i.e
the file that defines the system configuration(atomic positions, bonds,
angles and dihedrals) + values of various fit parameters.
Restrictions:
The ATOMS section in the data file that defines the atomic positions
is assumed to be in the following format(atom style = full, this is the
superset of several other atom styles such as angle, bond, atomic, charge
and molecular):
atom_id, molecule_id, atom_type, charge(optional), x, y, z
For more info, please refer to: http://lammps.sandia.gov/doc/read_data.html
"""
__author__ = 'Kiran Mathew'
__email__ = "kmathew@lbl.gov"
__credits__ = 'Brandon Wood'
# exhaustive (as far as I know) list of lammps data file keywords
# keywords that may appear in the header portion of a data file
HEADER_KEYWORDS = {"atoms", "bonds", "angles", "dihedrals", "impropers",
                   "atom types", "bond types", "angle types", "dihedral types",
                   "improper types", "extra bond per atom", "extra angle per atom",
                   "extra dihedral per atom", "extra improper per atom",
                   "extra special per atom", "ellipsoids", "lines", "triangles",
                   "bodies", "xlo xhi", "ylo yhi", "zlo zhi", "xy xz yz"}
# body section names; parse_data_file lower-cases each line and matches it
# against this set to detect the start of a new section
SECTION_KEYWORDS = {"atoms", "velocities", "masses", "ellipsoids", "lines",
                    "triangles", "bodies", "bonds", "angles", "dihedrals",
                    "impropers", "pair coeffs", "pairij coeffs", "bond coeffs",
                    "angle coeffs", "dihedral coeffs", "improper coeffs",
                    "bondbond coeffs", "bondangle coeffs",
                    "middlebondtorsion coeffs", "endbondtorsion coeffs",
                    "angletorsion coeffs", "angleangletorsion coeffs",
                    "bondbond13 coeffs", "angleangle coeffs"}
class LammpsData(MSONable):
    """
    Basic Lammps data: just the atoms section

    Args:
        box_size (list): [[x_min, x_max], [y_min,y_max], [z_min,z_max]]
        atomic_masses (list): [[atom type, mass],...]
        atoms_data (list): [[atom id, mol id, atom type, charge, x, y, z ...], ... ]
        box_tilt (list): [xy, xz, yz] for non-orthogonal systems
        atom_style (str): 'full', 'charge' or 'atomic'; controls how the
            atoms_data rows are interpreted.
    """
    TEMPLATE = """Generated by pymatgen.io.lammps.data.LammpsData
{natoms} atoms
{natom_types} atom types
{xlo:.6f} {xhi:.6f} xlo xhi
{ylo:.6f} {yhi:.6f} ylo yhi
{zlo:.6f} {zhi:.6f} zlo zhi
{tilt}
Masses
{masses}
Atoms
{atoms}
"""

    def __init__(self, box_size, atomic_masses, atoms_data, box_tilt=None, atom_style='full'):
        self.box_size = box_size
        # counts are derived from the data lists so they always agree
        self.natoms = len(atoms_data)
        self.natom_types = len(atomic_masses)
        # materialize so dict views (e.g. OrderedDict.values()) survive reuse
        self.atomic_masses = list(atomic_masses)
        self.atoms_data = atoms_data
        self.box_tilt = box_tilt
        self.atom_style = atom_style

    def __str__(self, significant_figures=6):
        """
        string representation of LammpsData

        Args:
            significant_figures (int): No. of significant figures to
                output. Default to 6.

        Returns:
            String representation of the data file
        """
        float_fmt = '%.{}f'.format(significant_figures)

        def list_str(l, accu_sw=False):
            """
            Need to use accu_sw to control if to use float_format or not.
            Since for atomic mass, Lammps cannot read in formatted value.
            """
            if accu_sw:
                return "\n".join([" ".join([str(float_fmt % x) for x in ad])
                                  for ad in l])
            else:
                return "\n".join([" ".join([str(x) for x in ad])
                                  for ad in l])

        # instance attributes supply natoms/natom_types for the template
        d = dict(self.__dict__)
        return LammpsData.TEMPLATE.format(
            xlo=self.box_size[0][0],
            xhi=self.box_size[0][1],
            ylo=self.box_size[1][0],
            yhi=self.box_size[1][1],
            zlo=self.box_size[2][0],
            zhi=self.box_size[2][1],
            tilt="{:.6f} {:.6f} {:.6f} xy xz yz".format(
                self.box_tilt[0], self.box_tilt[1], self.box_tilt[2]) if self.box_tilt else "",
            masses=list_str(self.atomic_masses),
            atoms=list_str(self.atoms_data, accu_sw=True),
            **d
        )

    @property
    def structure(self):
        """
        Transform from LammpsData file to a pymatgen structure object

        Return:
            A pymatgen structure object
        """
        # map lammps atom type id --> Element, matched by atomic mass
        species_map = {}
        for sp in self.atomic_masses:
            for el in Element:
                if abs(el.atomic_mass - sp[1]) < 0.05:
                    species_map[sp[0]] = el
        # box edge lengths, each axis from its own [min, max] pair.
        # Bugfix: the z edge previously subtracted box_size[0][0] (the x
        # minimum), giving a wrong cell unless x and z share the same origin.
        xhi = self.box_size[0][1] - self.box_size[0][0]
        yhi = self.box_size[1][1] - self.box_size[1][0]
        zhi = self.box_size[2][1] - self.box_size[2][0]
        xy, xz, yz = self.box_tilt if self.box_tilt is not None else [0.0, 0.0, 0.0]
        # lattice parameters from the lammps triclinic-cell convention, see
        # http://lammps.sandia.gov/doc/Section_howto.html#howto-12
        a = xhi
        b = np.sqrt(yhi ** 2 + xy ** 2)
        c = np.sqrt(zhi ** 2 + xz ** 2 + yz ** 2)
        gamma = math.degrees(math.acos(xy / b))
        beta = math.degrees(math.acos(xz / c))
        alpha = math.degrees(math.acos((yhi * yz + xy * xz) / b / c))
        lattice = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
        species = []
        coords = []
        for d in self.atoms_data:
            if self.atom_style == 'full':
                # row: atom id, mol id, atom type, charge, x, y, z
                if d[3] != 0:
                    species.append(Specie(species_map[d[2]].symbol, d[3]))
                else:
                    # Bugfix: the atom type is d[2]; the original indexed
                    # d[1] (the molecule id), mapping to the wrong element.
                    species.append(species_map[d[2]])
                coords.append(d[4:7])
            elif self.atom_style == 'charge':
                # row: atom id, atom type, charge, x, y, z
                if d[2] != 0:
                    species.append(Specie(species_map[d[1]].symbol, d[2]))
                else:
                    species.append(species_map[d[1]])
                coords.append(d[3:6])
            elif self.atom_style == 'atomic':
                # row: atom id, atom type, x, y, z
                species.append(species_map[d[1]])
                coords.append(d[2:5])
            else:
                raise RuntimeError('data style not implemented')
        return Structure(lattice, species, coords, coords_are_cartesian=True)

    @staticmethod
    def check_box_size(structure, box_size, translate=False):
        """
        For Molecule objects: check the box size and if necessary translate
        the molecule so that all the sites are contained within the bounding box.
        Structure objects: compute the tilt. See
        http://lammps.sandia.gov/doc/Section_howto.html#howto-12

        Args:
            structure(Structure/Molecule)
            box_size (list): [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
            translate (bool): if true move the molecule to the center of the
                new box.

        Returns:
            box_size, box_tilt
        """
        box_tilt = None
        if isinstance(structure, Molecule):
            box_size = box_size or [[0, 10], [0, 10], [0, 10]]
            box_lengths_req = [
                np.max(structure.cart_coords[:, i]) - np.min(structure.cart_coords[:, i])
                for i in range(3)]
            box_lengths = [min_max[1] - min_max[0] for min_max in box_size]
            try:
                np.testing.assert_array_less(box_lengths_req, box_lengths)
            except AssertionError:
                # box too small: grow each edge by 10% over the requirement
                # and force a recentering of the molecule
                box_size = [[0.0, np.ceil(i * 1.1)] for i in box_lengths_req]
                print("Minimum required box lengths {} larger than the provided "
                      "box lengths{}. Resetting the box size to {}".format(
                          box_lengths_req, box_lengths, box_size))
                translate = True
            if translate:
                com = structure.center_of_mass
                new_com = [(side[1] + side[0]) / 2 for side in box_size]
                translate_by = np.array(new_com) - np.array(com)
                structure.translate_sites(range(len(structure)), translate_by)
        # Structure: derive the box and tilt factors from the lattice
        else:
            a, b, c = structure.lattice.abc
            m = structure.lattice.matrix.copy()
            xhi = a
            xy = np.dot(m[1], m[0] / xhi)
            yhi = np.sqrt(b ** 2 - xy ** 2)
            xz = np.dot(m[2], m[0] / xhi)
            yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
            zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
            box_size = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
            box_tilt = [xy, xz, yz]
        return box_size, box_tilt

    def write_file(self, filename, significant_figures=6):
        """
        write lammps data input file from the string representation
        of the data.

        Args:
            filename (string): data file name
            significant_figures (int): No. of significant figures to
                output. Default to 6.
        """
        with open(filename, 'w') as f:
            f.write(self.__str__(significant_figures=significant_figures))

    @staticmethod
    def get_basic_system_info(structure):
        """
        Return basic system info from the given structure.

        Args:
            structure (Structure/Molecule)

        Returns:
            number of atoms, number of atom types, mapping between the atom
            id and corresponding atomic masses
        """
        s = structure.copy()
        if isinstance(s, Structure):
            # oxidation states would otherwise produce distinct species
            s.remove_oxidation_states()
        natoms = len(s)
        natom_types = len(s.symbol_set)
        elements = s.composition.elements
        # atom type ids are assigned in order of increasing atomic mass
        elements = sorted(elements, key=lambda el: el.atomic_mass)
        atomic_masses_dict = OrderedDict(
            [(el.symbol, [i + 1, float(el.data["Atomic mass"])])
             for i, el in enumerate(elements)])
        return natoms, natom_types, atomic_masses_dict

    @classmethod
    def get_atoms_data(cls, structure, atomic_masses_dict, set_charge=True):
        """
        return the atoms data:
        Molecule:
            atom_id, molecule tag, atom_type, charge(if present else 0),
            x, y, z.
            The molecule_tag is set to 1(i.e the whole structure corresponds to
            just one molecule).
            This corresponds to lammps command: "atom_style charge" or
            "atom_style full"
        Structure:
            atom_id, atom_type, species oxidation state, x, y, z
            atom_style = atomic/charge

        Args:
            structure (Structure/Molecule)
            atomic_masses_dict (dict):
                { atom symbol : [atom_id, atomic mass], ... }
            set_charge (bool): whether or not to set the charge field in Atoms

        Returns:
            For Molecule:
                [[atom_id, molecule tag, atom_type, charge(if present),
                x, y, z], ... ]
            For Structure:
                [[atom_id, atom_type, charge(if present), x, y, z], ... ]
        """
        # for structure, create new lattice. Lammps data requires lattice
        # vector a in the x-direction and the a,b plane perpendicular to z.
        if not isinstance(structure, Molecule):
            box_size = None
            box_size, box_tilt = cls.check_box_size(structure, box_size)
            matrix = np.array([[box_size[0][1], 0, 0], [box_tilt[0], box_size[1][1], 0],
                               [box_tilt[1], box_tilt[2], box_size[2][1]]])
            new_lattice = Lattice(matrix)
            # rotate the original structure according to the new lattice.
            structure.modify_lattice(new_lattice)
        atoms_data = []
        # molecule tag only emitted for Molecules (atom_style molecular/full)
        mol_id = 1 if isinstance(structure, Molecule) else None
        for i, site in enumerate(structure):
            atom_type = atomic_masses_dict[site.specie.symbol][0]
            line = [i + 1]
            line += [mol_id] if mol_id else []
            line.append(atom_type)
            if set_charge:
                if isinstance(site, PeriodicSite):
                    # periodic sites carry charge via the oxidation state
                    charge = getattr(site.specie, "oxi_state", 0.0)
                else:
                    charge = getattr(site, "charge", 0.0)
                line.append(charge)
            line.extend([site.x, site.y, site.z])
            atoms_data.append(line)
        return atoms_data

    @classmethod
    def from_structure(cls, input_structure, box_size=None, set_charge=True,
                       translate=True):
        """
        Set LammpsData from the given structure or molecule object. If the input
        structure is a Molecule and if it doesnt fit in the input box then the
        box size is updated based on the max and min site coordinates of the
        molecules.

        Args:
            input_structure (Molecule/Structure)
            box_size (list): [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
            set_charge (bool): whether or not to set the charge field in
                Atoms. If true, the charge will be non-zero only if the
                input_structure has the "charge" site property set.
            translate (bool): if true move the molecule to the center of the
                new box(it that is required).

        Returns:
            LammpsData
        """
        box_size, box_tilt = cls.check_box_size(input_structure, box_size,
                                                translate=translate)
        natoms, natom_types, atomic_masses_dict = \
            cls.get_basic_system_info(input_structure.copy())
        atoms_data = cls.get_atoms_data(input_structure, atomic_masses_dict,
                                        set_charge=set_charge)
        atom_style = 'full' if isinstance(input_structure, Molecule) else 'charge'
        return cls(box_size, atomic_masses_dict.values(), atoms_data,
                   box_tilt=box_tilt, atom_style=atom_style)

    @classmethod
    def from_file(cls, data_file, atom_style="full"):
        """
        Return LammpsData object from the data file.
        Note: use this to read in data files that conform with
        atom_style = charge or atomic or full

        Args:
            data_file (string): data file name
            atom_style (string): "full" or "charge" or "atomic"

        Returns:
            LammpsData
        """
        atoms_data = []
        data = parse_data_file(data_file)
        atomic_masses = [[int(x[0]), float(x[1])] for x in data["masses"]]
        box_size = [data['x'], data['y'], data['z']]
        # leading int columns: id + type (+ mol id for charge/full styles)
        int_limit = 2 if atom_style == "atomic" else 3
        if "atoms" in data:
            for x in data["atoms"]:
                atoms_data.append([int(xi) for xi in x[:int_limit]] + x[int_limit:])
        box_tilt = data.get("xy-xz-yz", None)
        return cls(box_size, atomic_masses, atoms_data, box_tilt=box_tilt, atom_style=atom_style)

    def as_dict(self):
        """MSON-serializable dict representation, merged with any kwargs."""
        d = MSONable.as_dict(self)
        if hasattr(self, "kwargs"):
            d.update(**self.kwargs)
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct from an as_dict() representation ('@'-keys ignored)."""
        decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items()
                   if not k.startswith("@")}
        return cls(**decoded)
class LammpsForceFieldData(LammpsData):
    """
    Sets Lammps data input file from force field parameters. It is recommended
    that the the convenience method from_forcefield_and_topology be used to
    create the object.

    Args:
        box_size (list): [[x_min,x_max], [y_min,y_max], [z_min,z_max]]
        atomic_masses (list): [ [atom type, atomic mass], ... ]
        pair_coeffs (list): pair coefficients,
            [[unique id, sigma, epsilon ], ... ]
        bond_coeffs (list): bond coefficients,
            [[unique id, value1, value2 ], ... ]
        angle_coeffs (list): angle coefficients,
            [[unique id, value1, value2, value3 ], ... ]
        dihedral_coeffs (list): dihedral coefficients,
            [[unique id, value1, value2, value3, value4], ... ]
        improper_coeffs (list): improper dihedral coefficients,
            [[unique id, value1, value2, value3, value4], ... ]
        atoms_data (list): [[atom id, mol id, atom type, charge, x,y,z, ...], ... ]
        bonds_data (list): [[bond id, bond type, value1, value2], ... ]
        angles_data (list): [[angle id, angle type, value1, value2, value3], ... ]
        dihedrals_data (list):
            [[dihedral id, dihedral type, value1, value2, value3, value4], ... ]
        imdihedrals_data (list):
            [[improper dihedral id, improper dihedral type, value1, value2,
            value3, value4], ... ]
    """
    # Bugfix: the Impropers section previously interpolated {dihedrals}
    # again, so the improper dihedral data was never written out.
    TEMPLATE = """
Generated by pymatgen.io.lammps.data.LammpsForceFieldData
{natoms} atoms
{nbonds} bonds
{nangles} angles
{ndihedrals} dihedrals
{nimdihedrals} impropers
{natom_types} atom types
{nbond_types} bond types
{nangle_types} angle types
{ndihedral_types} dihedral types
{nimdihedral_types} improper types
{xlo:.6f} {xhi:.6f} xlo xhi
{ylo:.6f} {yhi:.6f} ylo yhi
{zlo:.6f} {zhi:.6f} zlo zhi
{tilt}
Masses
{masses}
Pair Coeffs
{pair_coeffs}
Bond Coeffs
{bond_coeffs}
Angle Coeffs
{angle_coeffs}
Dihedral Coeffs
{dihedral_coeffs}
Improper Coeffs
{improper_coeffs}
Atoms
{atoms}
Bonds
{bonds}
Angles
{angles}
Dihedrals
{dihedrals}
Impropers
{impropers}
"""

    def __init__(self, box_size, atomic_masses, pair_coeffs, bond_coeffs,
                 angle_coeffs, dihedral_coeffs, improper_coeffs, atoms_data,
                 bonds_data, angles_data, dihedrals_data, imdihedrals_data):
        # the base class handles box, masses and atoms
        super(LammpsForceFieldData, self).__init__(box_size, atomic_masses,
                                                   atoms_data)
        # number of types
        self.nbond_types = len(bond_coeffs)
        self.nangle_types = len(angle_coeffs)
        self.ndihedral_types = len(dihedral_coeffs)
        self.nimdihedral_types = len(improper_coeffs)
        # number of parameters
        self.nbonds = len(bonds_data)
        self.nangles = len(angles_data)
        self.ndihedrals = len(dihedrals_data)
        self.nimdihedrals = len(imdihedrals_data)
        # coefficients
        self.pair_coeffs = pair_coeffs
        self.bond_coeffs = bond_coeffs
        self.angle_coeffs = angle_coeffs
        self.dihedral_coeffs = dihedral_coeffs
        self.improper_coeffs = improper_coeffs
        # data
        self.bonds_data = bonds_data
        self.angles_data = angles_data
        self.dihedrals_data = dihedrals_data
        self.imdihedrals_data = imdihedrals_data

    def __str__(self, significant_figures=6):
        """
        Args:
            significant_figures (int): No. of significant figures to
                output. Default to 6.

        returns a string of lammps data input file
        """
        float_fmt = '%.{}f'.format(significant_figures)

        def list_str(l, accu_sw=False):
            """
            Need to use accu_sw to control if to use float_format or not.
            Since for atomic mass, Lammps cannot read in formatted value.
            """
            if accu_sw:
                return "\n".join([" ".join([str(float_fmt % x) for x in ad])
                                  for ad in l])
            else:
                return "\n".join([" ".join([str(x) for x in ad])
                                  for ad in l])

        d = {k: v for k, v in self.__dict__.items()}
        # pre-render the coefficient tables so they can be injected via **d
        for k in ["pair", "bond", "angle", "dihedral", "improper"]:
            d["%s_coeffs" % k] = list_str(d["%s_coeffs" % k])
        output = LammpsForceFieldData.TEMPLATE.format(
            xlo=self.box_size[0][0],
            xhi=self.box_size[0][1],
            ylo=self.box_size[1][0],
            yhi=self.box_size[1][1],
            zlo=self.box_size[2][0],
            zhi=self.box_size[2][1],
            tilt="{:.6f} {:.6f} {:.6f} xy xz yz".format(
                self.box_tilt[0], self.box_tilt[1], self.box_tilt[2]) if self.box_tilt else "",
            masses=list_str(self.atomic_masses),
            atoms=list_str(self.atoms_data, accu_sw=True),
            bonds=list_str(self.bonds_data, accu_sw=True),
            angles=list_str(self.angles_data, accu_sw=True),
            dihedrals=list_str(self.dihedrals_data, accu_sw=True),
            impropers=list_str(self.imdihedrals_data, accu_sw=True),
            **d
        )
        return output

    @staticmethod
    def get_basic_system_info(molecule):
        """Return natoms, natom_types and a label -> [type id, mass] map."""
        natoms = len(molecule)
        atom_types = set(molecule.site_properties.get("ff_map", molecule.symbol_set))
        natom_types = len(atom_types)
        elements = {}
        for s in molecule:
            # Consistency fix: test the site being processed (as
            # get_atoms_data does), not molecule[0], so a partially tagged
            # molecule cannot raise AttributeError.
            label = str(s.ff_map) if hasattr(s, "ff_map") else s.specie.symbol
            elements[label] = float(s.specie.atomic_mass)
        elements_items = list(elements.items())
        # type ids are assigned in order of increasing atomic mass
        elements_items = sorted(elements_items, key=lambda el_item: el_item[1])
        atomic_masses_dict = OrderedDict([(el_item[0], [i + 1, el_item[1]])
                                          for i, el_item in enumerate(elements_items)])
        return natoms, natom_types, atomic_masses_dict

    @staticmethod
    def get_param_coeff(forcefield, param_name, atom_types_map=None):
        """
        get the parameter coefficients and mapping from the force field.

        Args:
            forcefield (ForceField): ForceField object
            param_name (string): name of the parameter for which
                the coefficients are to be set.
            atom_types_map (dict): maps atom type to the atom type id.
                Used to set the pair coeffs.
                e.g. {"C2": [3], "H2": [1], "H1": [2]}

        Returns:
            [[parameter id, value1, value2, ... ], ... ] and
            {parameter key: parameter id, ...}
        """
        if hasattr(forcefield, param_name):
            param = getattr(forcefield, param_name)
            param_coeffs = []
            param_map = {}
            if param_name == "pairs":
                # pair coeffs are ordered by the atom type id from
                # atom_types_map, not by insertion order; no map is built
                for item in param.items():
                    key = item[0][0]
                    param_coeffs.append([atom_types_map[key][0]] + list(item[1]))
                param_coeffs = sorted(param_coeffs, key=lambda ii: ii[0])
            elif param:
                for i, item in enumerate(param.items()):
                    param_coeffs.append([i + 1] + list(item[1]))
                    param_map[item[0]] = i + 1
            return param_coeffs, param_map
        else:
            raise AttributeError

    @staticmethod
    def get_atoms_data(mols, mols_number, molecule, atomic_masses_dict,
                       topologies, atom_to_mol=None):
        """
        Return the atoms data.

        Args:
            mols (list): list of Molecule objects.
            mols_number (list): number of each type of molecule in mols list.
            molecule (Molecule): the molecule assembled from the molecules
                in the mols list.
            topologies (list): list of Topology objects, one for each molecule
                type in mols list
            atom_to_mol (dict): maps atom_id --> [mol_type, mol_id,
                local atom id in the mol with id mol_id]

        Returns:
            atoms_data: [[atom id, mol type, atom type, charge, x, y, z], ... ]
            molid_to_atomid: [ [global atom id 1, id 2, ..], ...], the
                index will be the global mol id
        """
        atoms_data = []
        molid_to_atomid = []
        nmols = len(mols)
        # set up map atom_to_mol:
        # atom_id --> [mol_type, mol_id, local atom id in the mol with id mol id]
        # set up map molid_to_atomid:
        # gobal molecule id --> [[atom_id1, atom_id2,...], ...]
        # This assumes that the atomic order in the assembled molecule can be
        # obtained from the atomic order in the constituent molecules.
        if not atom_to_mol:
            atom_to_mol = {}
            molid_to_atomid = []
            shift_ = 0
            mol_id = 0
            for mol_type in range(nmols):
                natoms = len(mols[mol_type])
                for num_mol_id in range(mols_number[mol_type]):
                    tmp = []
                    for mol_atom_id in range(natoms):
                        atom_id = num_mol_id * natoms + mol_atom_id + shift_
                        atom_to_mol[atom_id] = [mol_type, mol_id, mol_atom_id]
                        tmp.append(atom_id)
                    mol_id += 1
                    molid_to_atomid.append(tmp)
                shift_ += len(mols[mol_type]) * mols_number[mol_type]
        # set atoms data from the molecule assembly consisting of
        # molecules from mols list with their count from mol_number list.
        # atom id, mol id, atom type, charge from topology, x, y, z
        for i, site in enumerate(molecule):
            label = str(site.ff_map) if hasattr(site, "ff_map") else site.specie.symbol
            atom_type = atomic_masses_dict[label][0]
            atom_id = i + 1
            mol_type = atom_to_mol[i][0] + 1
            mol_id = atom_to_mol[i][1] + 1
            mol_atom_id = atom_to_mol[i][2] + 1
            charge = 0.0
            # charge comes from the topology of this molecule type, if set
            if hasattr(topologies[0], "charges"):
                if topologies[mol_type - 1].charges:
                    charge = topologies[mol_type - 1].charges[mol_atom_id - 1]
            atoms_data.append([atom_id, mol_id, atom_type, charge,
                               site.x, site.y, site.z])
        return atoms_data, molid_to_atomid

    @staticmethod
    def get_param_data(param_name, param_map, mols, mols_number, topologies,
                       molid_to_atomid):
        """
        set the data for the parameter named param_name from the topology.

        Args:
            param_name (string): parameter name, example: "bonds"
            param_map (dict):
                { mol_type: {parameter_key : unique parameter id, ... }, ... }
                example: {0: {("c1","c2"): 1}} ==> c1-c2 bond in mol_type=0
                has the global id of 1
            mols (list): list of molecules.
            mols_number (list): number of each type of molecule in mols list.
            topologies (list): list of Topology objects, one for each molecule
                type in mols list
            molid_to_atomid (list): [ [gloabal atom id 1, id 2, ..], ...],
                the index is the global mol id

        Returns:
            [ [parameter id, parameter type, global atom id1, global atom id2, ...], ... ]
        """
        param_data = []
        if hasattr(topologies[0], param_name) and getattr(topologies[0], param_name):
            nmols = len(mols)
            mol_id = 0
            skip = 0
            shift_ = 0
            # set the parameter data using the topology info
            # example: loop over all bonds in the system
            # mol_id --> global molecule id
            # mol_type --> type of molecule
            # mol_param_id --> local parameter id in that molecule
            for mol_type in range(nmols):
                # connectivity info list for this molecule type; fetched once
                # per type (the original re-fetched it on every iteration)
                param_obj = getattr(topologies[mol_type], param_name)
                nparams = len(param_obj)
                for num_mol_id in range(mols_number[mol_type]):
                    for mol_param_id in range(nparams):
                        param_id = num_mol_id * nparams + mol_param_id + shift_
                        # connectivity info(local atom ids and type) for the
                        # parameter with the local id 'mol_param_id'.
                        # example: single bond = [i, j, bond_type]
                        param = param_obj[mol_param_id]
                        param_atomids = []
                        # loop over local atom ids that constitute the parameter
                        # for the molecule type, mol_type
                        # example: single bond = [i,j,bond_label]
                        for atomid in param[:-1]:
                            # local atom id to global atom id
                            global_atom_id = molid_to_atomid[mol_id][atomid]
                            param_atomids.append(global_atom_id + 1)
                        param_type = tuple(param[-1])
                        param_type_reversed = tuple(reversed(param_type))
                        # example: get the unique number id for the bond_type;
                        # parameter keys are direction-agnostic
                        if param_type in param_map:
                            key = param_type
                        elif param_type_reversed in param_map:
                            key = param_type_reversed
                        else:
                            key = None
                        if key:
                            param_type_id = param_map[key]
                            param_data.append(
                                [param_id + 1 - skip, param_type_id] + param_atomids)
                        else:
                            # unknown parameter types are dropped; ids of the
                            # remaining entries stay contiguous via 'skip'
                            skip += 1
                            print("{} or {} Not available".format(param_type,
                                                                  param_type_reversed))
                    mol_id += 1
                shift_ += nparams * mols_number[mol_type]
        return param_data

    @staticmethod
    def from_forcefield_and_topology(mols, mols_number, box_size, molecule,
                                     forcefield, topologies):
        """
        Return LammpsForceFieldData object from force field and topology info
        for the 'molecule' assembled from the constituent molecules specified
        in the 'mols' list with their count specified in the 'mols_number' list.

        Args:
            mols (list): List of Molecule objects
            mols_number (list): List of number of molecules of each
                molecule type in mols
            box_size (list): [[x_min,x_max], [y_min,y_max], [z_min,z_max]]
            molecule (Molecule): The molecule that is assembled from mols
                and mols_number
            forcefield (ForceFiled): Force filed information
            topologies (list): List of Topology objects, one for each
                molecule type in mols.

        Returns:
            LammpsForceFieldData
        """
        natoms, natom_types, atomic_masses_dict = \
            LammpsForceFieldData.get_basic_system_info(molecule.copy())
        box_size, _ = LammpsForceFieldData.check_box_size(molecule, box_size)
        # set the coefficients and map from the force field
        # bonds
        bond_coeffs, bond_map = \
            LammpsForceFieldData.get_param_coeff(forcefield, "bonds")
        # angles
        angle_coeffs, angle_map = \
            LammpsForceFieldData.get_param_coeff(forcefield, "angles")
        # pair coefficients
        pair_coeffs, _ = \
            LammpsForceFieldData.get_param_coeff(forcefield, "pairs",
                                                 atomic_masses_dict)
        # dihedrals
        dihedral_coeffs, dihedral_map = \
            LammpsForceFieldData.get_param_coeff(forcefield, "dihedrals")
        # improper dihedrals
        improper_coeffs, imdihedral_map = \
            LammpsForceFieldData.get_param_coeff(forcefield, "imdihedrals")
        # atoms data. topology used for setting charge if present
        atoms_data, molid_to_atomid = LammpsForceFieldData.get_atoms_data(
            mols, mols_number, molecule, atomic_masses_dict, topologies)
        # set the other data from the molecular topologies
        # bonds
        bonds_data = LammpsForceFieldData.get_param_data(
            "bonds", bond_map, mols, mols_number, topologies, molid_to_atomid)
        # angles
        angles_data = LammpsForceFieldData.get_param_data(
            "angles", angle_map, mols, mols_number, topologies, molid_to_atomid)
        # dihedrals
        dihedrals_data = LammpsForceFieldData.get_param_data(
            "dihedrals", dihedral_map, mols, mols_number, topologies,
            molid_to_atomid)
        # improper dihedrals
        imdihedrals_data = LammpsForceFieldData.get_param_data(
            "imdihedrals", imdihedral_map, mols, mols_number, topologies,
            molid_to_atomid)
        return LammpsForceFieldData(box_size, atomic_masses_dict.values(),
                                    pair_coeffs, bond_coeffs,
                                    angle_coeffs, dihedral_coeffs,
                                    improper_coeffs, atoms_data,
                                    bonds_data, angles_data, dihedrals_data,
                                    imdihedrals_data)

    @staticmethod
    def _get_coeffs(data, name):
        """Coefficient rows under *name* with the leading id cast to int."""
        val = []
        if name in data:
            for x in data[name]:
                val.append([int(x[0])] + x[1:])
        return val

    @staticmethod
    def _get_non_atoms_data(data, name):
        """Connectivity rows under *name* with every column cast to int."""
        val = []
        if name in data:
            for x in data[name]:
                val.append([int(xi) for xi in x])
        return val

    @classmethod
    def from_file(cls, data_file):
        """
        Return LammpsForceFieldData object from the data file. It is assumed
        that the forcefield parameter sections for pairs, bonds, angles,
        dihedrals and improper dihedrals are named as follows(not case sensitive):
        "Pair Coeffs", "Bond Coeffs", "Angle Coeffs", "Dihedral Coeffs" and
        "Improper Coeffs". For "Pair Coeffs", values for factorial(n_atom_types)
        pairs must be specified.

        Args:
            data_file (string): the data file name

        Returns:
            LammpsForceFieldData
        """
        atoms_data = []
        data = parse_data_file(data_file)
        atomic_masses = [[int(x[0]), float(x[1])] for x in data["masses"]]
        box_size = [data['x'], data['y'], data['z']]
        pair_coeffs = cls._get_coeffs(data, "pair-coeffs")
        bond_coeffs = cls._get_coeffs(data, "bond-coeffs")
        angle_coeffs = cls._get_coeffs(data, "angle-coeffs")
        dihedral_coeffs = cls._get_coeffs(data, "dihedral-coeffs")
        improper_coeffs = cls._get_coeffs(data, "improper-coeffs")
        if "atoms" in data:
            # first three columns (id, mol id, type) are integral
            for x in data["atoms"]:
                atoms_data.append([int(xi) for xi in x[:3]] + x[3:])
        bonds_data = cls._get_non_atoms_data(data, "bonds")
        angles_data = cls._get_non_atoms_data(data, "angles")
        dihedral_data = cls._get_non_atoms_data(data, "dihedrals")
        imdihedral_data = cls._get_non_atoms_data(data, "impropers")
        return cls(box_size, atomic_masses, pair_coeffs,
                   bond_coeffs, angle_coeffs,
                   dihedral_coeffs, improper_coeffs,
                   atoms_data, bonds_data, angles_data,
                   dihedral_data, imdihedral_data)
def parse_data_file(filename):
    """
    A very general parser for arbitrary lammps data files.

    Header lines ("N atoms", "N atom types", box bounds, tilt factors) are
    stored under descriptive keys ("natoms", "atom-types", "x", "xy-xz-yz",
    ...); each recognized section keyword starts a list of float rows stored
    under the lower-cased, hyphenated section name (e.g. "pair-coeffs").

    Args:
        filename (str): path to the data file

    Returns:
        dict
    """
    data = {}
    count_pattern = re.compile(r'^\s*(\d+)\s+([a-zA-Z]+)$')
    types_pattern = re.compile(r'^\s*(\d+)\s+([a-zA-Z]+)\s+types$')
    box_pattern = re.compile(r'^\s*([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+([xyz])lo\s+([xyz])hi$')
    tilt_pattern = re.compile(r'^\s*([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+([0-9eE.+-]+)\s+xy\s+xz\s+yz$')
    key = None  # section currently being read, None while in the header
    with open(filename) as f:
        for line in f:
            # strip trailing comments and surrounding whitespace
            line = line.split("#")[0].strip()
            if not line:
                continue
            if line.lower() in SECTION_KEYWORDS:
                # start a new section, e.g. "Pair Coeffs" -> "pair-coeffs"
                key = line.lower().replace(" ", "-")
                data[key] = []
            elif key and key in data:
                # data row inside the current section
                data[key].append([float(x) for x in line.split()])
            else:
                # header line: evaluate each pattern at most once
                # (the original ran every .search twice per line)
                m = types_pattern.search(line)
                if m:
                    data["{}-types".format(m.group(2))] = int(m.group(1))
                    continue
                m = box_pattern.search(line)
                if m:
                    data[m.group(3)] = [float(m.group(1)), float(m.group(2))]
                    continue
                m = count_pattern.search(line)
                if m:
                    tokens = line.split(" ", 1)
                    data["n{}".format(tokens[-1])] = int(tokens[0])
                    continue
                m = tilt_pattern.search(line)
                if m:
                    data["xy-xz-yz"] = [float(m.group(1)),
                                        float(m.group(2)),
                                        float(m.group(3))]
    return data
| |
import datetime
import pytest
from pymysql.err import Warning
import aiomysql
@pytest.mark.run_loop
async def test_issue_3(connection):
    """ undefined methods datetime_or_None, date_or_None """
    conn = connection
    cur = await conn.cursor()
    await cur.execute("drop table if exists issue3")
    await cur.execute(
        "create table issue3 (d date, t time, dt datetime, ts timestamp)")
    try:
        await cur.execute(
            "insert into issue3 (d, t, dt, ts) values (%s,%s,%s,%s)",
            (None, None, None, None))
        # NULL date/time/datetime columns must decode to None
        for column in ("d", "t", "dt"):
            await cur.execute("select {} from issue3".format(column))
            row = await cur.fetchone()
            assert row[0] is None
        # the timestamp column may auto-fill, so accept None or a datetime
        await cur.execute("select ts from issue3")
        row = await cur.fetchone()
        assert type(row[0]) in (type(None), datetime.datetime)
    finally:
        await cur.execute("drop table issue3")
@pytest.mark.run_loop
async def test_issue_4(connection):
""" can't retrieve TIMESTAMP fields """
conn = connection
c = await conn.cursor()
await c.execute("drop table if exists issue4")
await c.execute("create table issue4 (ts timestamp)")
try:
await c.execute("insert into issue4 (ts) values (now())")
await c.execute("select ts from issue4")
r = await c.fetchone()
assert isinstance(r[0], datetime.datetime)
finally:
await c.execute("drop table issue4")
@pytest.mark.run_loop
async def test_issue_5(connection):
""" query on information_schema.tables fails """
conn = connection
cur = await conn.cursor()
await cur.execute("select * from information_schema.tables")
@pytest.mark.run_loop
async def test_issue_6(connection_creator):
# test for exception: TypeError: ord() expected a character,
# but string of length 0 found
conn = await connection_creator(db='mysql')
c = await conn.cursor()
assert conn.db == 'mysql'
await c.execute("select * from user")
await conn.ensure_closed()
@pytest.mark.run_loop
async def test_issue_8(connection):
    """ Primary Key and Index error when selecting data """
    conn = connection
    c = await conn.cursor()
    await c.execute("drop table if exists test")
    await c.execute("""CREATE TABLE `test` (
    `station` int(10) NOT NULL DEFAULT '0',
    `dh` datetime NOT NULL DEFAULT '2020-04-25 22:39:12',
    `echeance` int(1) NOT NULL DEFAULT '0', `me` double DEFAULT NULL,
    `mo` double DEFAULT NULL, PRIMARY
    KEY (`station`,`dh`,`echeance`)) ENGINE=MyISAM DEFAULT
    CHARSET=latin1;""")
    try:
        # Selecting from an empty table must work both before and after
        # adding a secondary index.
        await c.execute("SELECT * FROM test")
        assert 0 == c.rowcount
        await c.execute(
            "ALTER TABLE `test` ADD INDEX `idx_station` (`station`)")
        await c.execute("SELECT * FROM test")
        assert 0 == c.rowcount
    finally:
        await c.execute("drop table test")
@pytest.mark.run_loop
async def test_issue_13(connection):
    """ can't handle large result fields """
    conn = connection
    cur = await conn.cursor()
    await cur.execute("drop table if exists issue13")
    try:
        await cur.execute("create table issue13 (t text)")
        # ticket says 18k
        size = 18 * 1024
        await cur.execute("insert into issue13 (t) values (%s)",
                          ("x" * size,))
        await cur.execute("select t from issue13")
        # a single equality assert keeps failure output manageable even
        # though the values are huge
        r = await cur.fetchone()
        assert "x" * size == r[0]
    finally:
        await cur.execute("drop table issue13")
@pytest.mark.run_loop
async def test_issue_15(connection):
    """ query should be expanded before perform character encoding """
    # Non-ASCII parameters must survive the encode/interpolate cycle.
    conn = connection
    c = await conn.cursor()
    await c.execute("drop table if exists issue15")
    await c.execute("create table issue15 (t varchar(32))")
    try:
        await c.execute("insert into issue15 (t) values (%s)",
                        (u'\xe4\xf6\xfc',))
        await c.execute("select t from issue15")
        r = await c.fetchone()
        assert u'\xe4\xf6\xfc' == r[0]
    finally:
        await c.execute("drop table issue15")
@pytest.mark.run_loop
async def test_issue_16(connection):
    """ Patch for string and tuple escaping """
    conn = connection
    c = await conn.cursor()
    await c.execute("drop table if exists issue16")
    await c.execute("create table issue16 (name varchar(32) "
                    "primary key, email varchar(32))")
    try:
        await c.execute("insert into issue16 (name, email) values "
                        "('pete', 'floydophone')")
        await c.execute("select email from issue16 where name=%s",
                        ("pete",))
        r = await c.fetchone()
        assert "floydophone" == r[0]
    finally:
        await c.execute("drop table issue16")
@pytest.mark.skip(
    "test_issue_17() requires a custom, legacy MySQL configuration and "
    "will not be run.")
@pytest.mark.run_loop
async def test_issue_17(connection, connection_creator, mysql_params):
    """ could not connect mysql use passwod """
    # Creates a password-protected user via GRANT, reconnects as that
    # user, and verifies the previously inserted row is readable.
    conn = connection
    c = await conn.cursor()
    db = mysql_params['db']
    # grant access to a table to a user with a password
    try:
        await c.execute("drop table if exists issue17")
        await c.execute(
            "create table issue17 (x varchar(32) primary key)")
        await c.execute(
            "insert into issue17 (x) values ('hello, world!')")
        await c.execute("grant all privileges on %s.issue17 to "
                        "'issue17user'@'%%' identified by '1234'"
                        % db)
        await conn.commit()
        conn2 = await connection_creator(user="issue17user",
                                         passwd="1234")
        c2 = await conn2.cursor()
        await c2.execute("select x from issue17")
        r = await c2.fetchone()
        # BUG FIX: the original `assert "hello == world!", r[0]` only
        # asserted a non-empty string literal (always true, with r[0] as
        # the message). Compare the fetched value to the inserted row.
        assert "hello, world!" == r[0]
    finally:
        await c.execute("drop table issue17")
@pytest.mark.run_loop
async def test_issue_34(connection_creator):
    # Connecting to a closed port must raise OperationalError 2003
    # ("Can't connect to MySQL server").
    try:
        await connection_creator(host="localhost", port=1237,
                                 user="root", unix_socket=None)
        pytest.fail()
    except aiomysql.OperationalError as e:
        assert 2003 == e.args[0]
    except Exception:
        # NOTE(review): on old pytest versions Failed subclassed
        # Exception, so the fail() above would be re-caught here; the
        # test still fails, but the original message is lost -- confirm.
        pytest.fail()
@pytest.mark.run_loop
async def test_issue_33(connection_creator):
    # Non-ASCII (utf-8) table and column data must work end to end when
    # the connection charset is utf8.
    conn = await connection_creator(charset='utf8')
    c = await conn.cursor()
    try:
        await c.execute(
            b"drop table if exists hei\xc3\x9fe".decode("utf8"))
        await c.execute(
            b"create table hei\xc3\x9fe (name varchar(32))".decode("utf8"))
        await c.execute(b"insert into hei\xc3\x9fe (name) "
                        b"values ('Pi\xc3\xb1ata')".
                        decode("utf8"))
        await c.execute(
            b"select name from hei\xc3\x9fe".decode("utf8"))
        r = await c.fetchone()
        assert b"Pi\xc3\xb1ata".decode("utf8") == r[0]
    finally:
        await c.execute(b"drop table hei\xc3\x9fe".decode("utf8"))
@pytest.mark.skip("This test requires manual intervention")
@pytest.mark.run_loop
async def test_issue_35(connection):
    # Killing the server mid-query must surface as OperationalError 2013
    # ("Lost connection to MySQL server during query").
    conn = connection
    c = await conn.cursor()
    print("sudo killall -9 mysqld within the next 10 seconds")
    try:
        await c.execute("select sleep(10)")
        pytest.fail()
    except aiomysql.OperationalError as e:
        assert 2013 == e.args[0]
@pytest.mark.run_loop
async def test_issue_36(connection_creator):
    """KILLing a connection server-side must break it for later queries.

    Finds this connection's own thread id in ``show processlist``, kills
    it, verifies the old cursor now fails, and confirms the process is
    gone when viewed from a fresh connection.
    """
    conn = await connection_creator()
    c = await conn.cursor()
    # kill connections[0]
    await c.execute("show processlist")
    kill_id = None
    rows = await c.fetchall()
    for row in rows:
        # Renamed from `id`, which shadowed the builtin. Row layout is
        # (Id, User, Host, db, Command, Time, State, Info).
        process_id = row[0]
        info = row[7]
        if info == "show processlist":
            kill_id = process_id
            break
    try:
        # now nuke the connection
        await conn.kill(kill_id)
        # make sure this connection has broken
        await c.execute("show tables")
        pytest.fail()
    except Exception:
        pass
    # check the process list from the other connection
    conn2 = await connection_creator()
    c = await conn2.cursor()
    await c.execute("show processlist")
    rows = await c.fetchall()
    ids = [row[0] for row in rows]
    assert kill_id not in ids
@pytest.mark.run_loop
async def test_issue_37(connection):
    # User variables: reading an unset @foo yields one NULL row, and a
    # SET statement reports zero result rows.
    conn = connection
    c = await conn.cursor()
    assert 1 == (await c.execute("SELECT @foo"))
    r = await c.fetchone()
    assert (None,) == r
    assert 0 == (await c.execute("SET @foo = 'bar'"))
    await c.execute("set @foo = 'bar'")
@pytest.mark.run_loop
async def test_issue_38(connection):
    """ Inserting a ~1MB blob into a mediumblob column must succeed. """
    conn = connection
    c = await conn.cursor()
    # reduced size for most default mysql installs
    datum = "a" * 1024 * 1023
    try:
        await c.execute("drop table if exists issue38")
        await c.execute(
            "create table issue38 (id integer, data mediumblob)")
        await c.execute("insert into issue38 values (1, %s)",
                        (datum,))
    finally:
        await c.execute("drop table issue38")
@pytest.mark.run_loop
async def disabled_test_issue_54(connection):
    # Disabled: stresses the parser with an enormous (~100k clause)
    # WHERE expression; kept for reference.
    conn = connection
    c = await conn.cursor()
    await c.execute("drop table if exists issue54")
    big_sql = "select * from issue54 where "
    big_sql += " and ".join("%d=%d" % (i, i) for i in range(0, 100000))
    try:
        await c.execute(
            "create table issue54 (id integer primary key)")
        await c.execute("insert into issue54 (id) values (7)")
        await c.execute(big_sql)
        r = await c.fetchone()
        assert 7 == r[0]
    finally:
        await c.execute("drop table issue54")
@pytest.mark.run_loop
async def test_issue_66(connection):
    """ 'Connection' object has no attribute 'insert_id' """
    conn = connection
    c = await conn.cursor()
    # Before any insert the last auto-generated id is 0.
    assert 0 == conn.insert_id()
    try:
        await c.execute("drop table if exists issue66")
        await c.execute("create table issue66 (id integer primary "
                        "key auto_increment, x integer)")
        await c.execute("insert into issue66 (x) values (1)")
        await c.execute("insert into issue66 (x) values (1)")
        assert 2 == conn.insert_id()
    finally:
        await c.execute("drop table issue66")
@pytest.mark.run_loop
async def test_issue_79(connection):
    """ Duplicate field overwrites the previous one in the result
        of DictCursor """
    # With DictCursor, the second occurrence of a duplicate column name
    # in a join must be exposed under a table-qualified key ('b.value').
    conn = connection
    c = await conn.cursor(aiomysql.cursors.DictCursor)
    await c.execute("drop table if exists a")
    await c.execute("drop table if exists b")
    await c.execute("""CREATE TABLE a (id int, value int)""")
    await c.execute("""CREATE TABLE b (id int, value int)""")
    a = (1, 11)
    b = (1, 22)
    try:
        await c.execute("insert into a values (%s, %s)", a)
        await c.execute("insert into b values (%s, %s)", b)
        await c.execute("SELECT * FROM a inner join b on a.id = b.id")
        r, *_ = await c.fetchall()
        assert r['id'] == 1
        assert r['value'] == 11
        assert r['b.value'] == 22
    finally:
        await c.execute("drop table a")
        await c.execute("drop table b")
@pytest.mark.run_loop
async def test_issue_95(connection):
    """ Leftover trailing OK packet for "CALL my_sp" queries """
    conn = connection
    cur = await conn.cursor()
    await cur.execute("DROP PROCEDURE IF EXISTS `foo`")
    await cur.execute("""CREATE PROCEDURE `foo` ()
    BEGIN
        SELECT 1;
    END""")
    try:
        # The query after CALL must not be confused by the procedure's
        # trailing OK packet.
        await cur.execute("""CALL foo()""")
        await cur.execute("""SELECT 1""")
        r = await cur.fetchone()
        assert r[0] == 1
    finally:
        await cur.execute("DROP PROCEDURE IF EXISTS `foo`")
@pytest.mark.run_loop
async def test_issue_114(connection_creator):
    """ autocommit is not set after reconnecting with ping() """
    conn = await connection_creator(charset="utf8")
    await conn.autocommit(False)
    c = await conn.cursor()
    await c.execute("""select @@autocommit;""")
    r = await c.fetchone()
    assert not r[0]
    await conn.ensure_closed()
    # ping() reconnects a closed connection; autocommit=False must
    # survive the reconnect.
    await conn.ping()
    await c.execute("""select @@autocommit;""")
    r = await c.fetchone()
    assert not r[0]
    await conn.ensure_closed()
    # Ensure autocommit() is still working
    conn = await connection_creator(charset="utf8")
    c = await conn.cursor()
    await c.execute("""select @@autocommit;""")
    r = await c.fetchone()
    assert not r[0]
    await conn.ensure_closed()
    await conn.ping()
    await conn.autocommit(True)
    await c.execute("""select @@autocommit;""")
    r = await c.fetchone()
    assert r[0]
    await conn.ensure_closed()
@pytest.mark.run_loop
async def test_issue_175(connection):
    """ The number of fields returned by server is read in wrong way """
    # Column counts above 250 exercise the length-encoded integer path
    # of the column-count packet.
    conn = connection
    cur = await conn.cursor()
    for length in (200, 300):
        cols = ', '.join('c{0} integer'.format(i) for i in range(length))
        sql = 'create table test_field_count ({0})'.format(cols)
        try:
            await cur.execute(sql)
            await cur.execute('select * from test_field_count')
            assert len(cur.description) == length
        finally:
            await cur.execute('drop table if exists test_field_count')
# MySQL will get you to renegotiate if sent a cleartext password
@pytest.mark.run_loop
async def test_issue_323(mysql_server, loop, recwarn):
    # Inserting raw binary data must not emit MySQL Warnings (the bytes
    # must be escaped/hex-encoded, not passed through as text).
    async with aiomysql.create_pool(**mysql_server['conn_params'],
                                    loop=loop) as pool:
        async with pool.get() as conn:
            async with conn.cursor() as cur:
                drop_db = "DROP DATABASE IF EXISTS bugtest;"
                await cur.execute(drop_db)
                create_db = "CREATE DATABASE bugtest;"
                await cur.execute(create_db)
                create_table = """CREATE TABLE IF NOT EXISTS `bugtest`.`testtable` (
                    `id` INT UNSIGNED NOT NULL AUTO_INCREMENT,
                    `bindata` VARBINARY(200) NOT NULL,
                    PRIMARY KEY (`id`)
                );"""
                await cur.execute(create_table)
            try:
                # recwarn captures warnings raised so far; clear them so
                # only warnings from the INSERT below are inspected.
                recwarn.clear()
                async with conn.cursor() as cur:
                    await cur.execute("INSERT INTO `bugtest`.`testtable` "
                                      "(bindata) VALUES (%s);",
                                      (b'\xB0\x17',))
                    warnings = [warn for warn in recwarn.list
                                if warn.category is Warning]
                    assert len(warnings) == 0, \
                        "Got unexpected MySQL warning {}".\
                        format(' '.join(str(x) for x in warnings))
                    await cur.execute("SELECT * FROM `bugtest`.`testtable`;")
                    rows = await cur.fetchall()
                    assert len(rows) == 1, "Table should have 1 row"
            finally:
                async with conn.cursor() as cur:
                    await cur.execute("DELETE FROM `bugtest`.`testtable`;")
| |
import sqlalchemy as sa
from sqlalchemy import schema, types, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_schemadisplay import create_schema_graph
from pyswip import Prolog
import argparse
from frozendict import frozendict
# Report text printed when a nonequivalent-control-group design applies.
NONEQUIV_CONTROL_GROUP_DESC = """Nonequivalent Control Group Design
---------------------------------
This design exploits pre-test and a post-test, but
it cannot be assumed that the group receiving
the treatment and the control group
were equivalent before treatment was applied,
so any differences in the post-test may actually be
a result of this inequivalence. Validity
can be strengthened by finding a subset of the
data set for which treatment is quasi-random.
"""
# Report text printed when a counterbalanced design applies.
COUNTERBALANCED_DESC = """Counterbalanced Designs
---------------------------------
These designs assume that treatment has been
assigned on a rotating basis, and each unit
has experienced each treatment.
"""
def main():
    """CLI entry point: load or derive the Prolog rule set, assert it
    into a fresh engine, and report applicable QEDs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--rules", dest="rule_path", default=None)
    parser.add_argument("--db-source", dest="db_path", default="postgresql://dgarant@localhost:5432/movielens")
    args = parser.parse_args()
    if args.rule_path:
        # Pre-computed rules: one Prolog clause per line.
        with open(args.rule_path, 'r') as rule_handle:
            rules = [r.strip() for r in rule_handle.readlines()]
    else:
        # Otherwise derive schema facts by reflecting the live database.
        rules = build_schema_rules(args.db_path)
    rules.extend(register_qeds())
    prolog = Prolog()
    for rule in rules:
        print(rule)
        prolog.assertz(rule)
    # NOTE(review): the outcome attribute is hard-coded here -- confirm
    # whether it should be a CLI argument.
    report_on_qeds(prolog, "movie_gross")
def report_on_qeds(prolog, outcome):
    """Print the quasi-experimental designs applicable to *outcome*.

    Queries the Prolog engine for each supported design and lists the
    candidate treatment attributes found for it.
    """
    nonequiv_query = "nonequivControlGroup({0}, T)".format(outcome)
    nonequiv_matches = list(get_unique_results(prolog, nonequiv_query))
    if nonequiv_matches:
        print(NONEQUIV_CONTROL_GROUP_DESC)
        print("Candidate treatments for outcome {0}:".format(outcome))
        for binding in nonequiv_matches:
            print("\t{0}".format(binding["T"]))
        print("\n")
    counterbalanced_query = "counterbalancedDesign({0}, T)".format(outcome)
    counterbalanced_matches = list(
        get_unique_results(prolog, counterbalanced_query))
    if counterbalanced_matches:
        print(COUNTERBALANCED_DESC)
        print("Candidate treatments for outcome {0}:".format(outcome))
        for binding in counterbalanced_matches:
            print("\t{0}".format(binding["T"]))
def get_unique_results(prolog, query_string):
    """Yield Prolog query solutions, skipping duplicate bindings.

    Bindings are deduplicated by hashing a frozendict snapshot of each
    solution dict.
    """
    seen_bindings = set()
    for binding in prolog.query(query_string, catcherrors=False):
        frozen = frozendict(binding)
        if frozen not in seen_bindings:
            seen_bindings.add(frozen)
            yield binding
def build_schema_rules(db_path):
    """Connect to a database, reflect its schema, and return a list of
    Prolog facts (strings) describing tables, columns, keys, and
    foreign-key relationships."""
    # connect to the database and reflect metadata
    engine = create_engine(db_path)
    metadata = schema.MetaData()
    metadata.reflect(bind=engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # build up a knowledge base from the metadata
    kb = []
    for t in metadata.sorted_tables:
        # table()/recordCount() facts
        for fact in convert_table(session, t):
            register_fact(kb, fact)
        for col in t.columns:
            # attribute()/dataType()/levels() facts per column
            for fact in convert_attribute(session, t, col):
                register_fact(kb, fact)
            if col.primary_key:
                register_fact(kb, convert_pk(t, col))
            # related()/cardinality()/key()/averageManySize() facts
            for fk in col.foreign_keys:
                for fact in convert_fk(session, t, fk):
                    register_fact(kb, fact)
    return kb
def register_qeds():
    """Build the Prolog rule set used to detect applicable QEDs.

    Returns the rules as a list of clause strings; the concrete schema
    facts they operate over come from build_schema_rules().
    """
    kb = []
    # Two tables are directly related if a foreign key links them in
    # either direction.
    register_rule(kb, "tablesDirectlyRelated(X, Y) :- related(Y, X, R)")
    register_rule(kb, "tablesDirectlyRelated(X, Y) :- related(X, Y, R)")
    # Transitive closure over direct relations; P is the visited list.
    register_rule(kb, "tablesRelatedByPath(X, Y, P) :- tablesDirectlyRelated(X, Y)")
    register_rule(kb, "tablesRelatedByPath(X, Y, P) :- tablesDirectlyRelated(Z, X), \+ member(Z, P), tablesRelatedByPath(Z, Y, [X|P])")
    register_rule(kb, "tablesRelatedByPath(X, Y) :- tablesRelatedByPath(X, Y, [])")
    register_rule(kb, "attributesRelatedByPath(X, Y) :- attribute(X, T1), attribute(Y, T1)")
    register_rule(kb, "attributesRelatedByPath(X, Y) :- attribute(X, T1), attribute(Y, T2), tablesRelatedByPath(T1, T2)")
    # BUG FIX: dataType/2 facts store the abstract type emitted by
    # convert_type() -- the lowercase atom 'numeric' -- not raw SQL type
    # names. The previous clauses matched dataType(X, INTEGER) etc.,
    # where capitalized INTEGER/BIGINT/NUMERIC are Prolog *variables*,
    # so every attribute counted as numeric.
    register_rule(kb, "isNumeric(X) :- dataType(X, numeric)")
    register_rule(kb, "variesWithTime(T, O) :- attribute(O, OTable), attributesRelatedByPath(T, O), attribute(E, OTable), " +
            "dataType(E, time), attribute(E2, TTable), attribute(T, TTable), dataType(E2, time)")
    register_rule(kb, "suitableAsTreatment(T, O) :- attribute(O, T1), recordCount(T1, OutRecords), " +
            "isNumeric(O), levels(T, TreatLevels), TreatLevels < 30, OutRecords / TreatLevels > 20, T \= O")
    register_rule(kb, "nonequivControlGroup(Out, Treat) :- suitableAsTreatment(Treat, Out), variesWithTime(Treat, Out)")
    # BUG FIX: the levels/2 goal previously used the unbound variable T
    # instead of Treat, so it tested whether *any* attribute had more
    # than 3 levels rather than the candidate treatment itself.
    register_rule(kb, "counterbalancedDesign(Out, Treat) :- suitableAsTreatment(Treat, Out), variesWithTime(Treat, Out), levels(Treat, TreatLevels), TreatLevels > 3")
    register_rule(kb, "qed(Out, Treat) :- nonequivControlGroup(Out, Treat)")
    return kb
def register_rule(kb, rule):
    """Append a Prolog rule string to the knowledge base *kb* in place."""
    kb += [rule]
def register_fact(kb, fact):
    """Append a Prolog fact string to the knowledge base *kb* in place."""
    kb.extend([fact])
def convert_table(session, table):
    """Build Prolog facts for *table*: table/1 and recordCount/2."""
    num_records = session.execute(table.count()).first()[0]
    label = to_identifier(str(table))
    facts = ["table({0})".format(label)]
    facts.append("recordCount({0}, {1})".format(label, num_records))
    return facts
def convert_type(typename):
    """Map a SQL type name onto one of the abstract Prolog data types.

    Returns 'numeric', 'string', or 'time'; raises ValueError for any
    unrecognized type name.
    """
    if typename in ("INTEGER", "BIGINT", "NUMERIC"):
        return "numeric"
    if typename.startswith("VARCHAR"):
        return "string"
    if typename.startswith("TIMESTAMP"):
        return "time"
    raise ValueError("Unknown data type: {0}".format(typename))
def convert_attribute(session, table, attr):
    """Build Prolog facts for a column: attribute/2, dataType/2, levels/2."""
    attr_label = to_identifier(str(attr))
    # NOTE(review): the table term here is the raw table object, while
    # convert_table() emits table()/recordCount() facts through
    # to_identifier(); the atoms only line up when the table name
    # contains no "." -- confirm for schema-qualified names.
    facts = ["attribute({0}, {1})".format(attr_label, table),
            "dataType({0}, {1})".format(attr_label,
                convert_type(str(attr.type)))]
    # number of distinct values gives the number of
    # levels if this were to be a treatment
    num_distinct = session.query(sa.func.count(sa.distinct(attr))).first()[0]
    facts.append("levels({0}, {1})".format(attr_label, num_distinct))
    return facts
def convert_pk(table, key):
    """Build a primaryKey/2 fact linking *key* to its table name."""
    key_label = to_identifier(str(key))
    return "primaryKey({0}, {1})".format(key_label, table.name)
def convert_fk(session, table, key):
    """Build Prolog facts for a foreign key: related/3, cardinality/3,
    key/2 for both endpoints, and averageManySize/2."""
    # key.column is the referenced ("one") side; key.parent is the
    # referring ("many") side.
    oneColumn = key.column
    manyColumn = key.parent
    rname = key.name
    rules = ["related({0}, {1}, {2})".format(oneColumn.table, manyColumn.table, rname),
            "cardinality(OneCard, ManyCard, {0})".format(rname)]
    rules.append("key({0}, {1})".format(to_identifier(str(oneColumn)), rname))
    rules.append("key({0}, {1})".format(to_identifier(str(manyColumn)), rname))
    # select avg count of many-side elements,
    # grouping by primary key of 1-side
    num_distinct_ref = session.query(oneColumn.table).join(manyColumn.table).group_by(oneColumn).count()
    total_rows = session.execute(manyColumn.table.count()).first()[0]
    rules.append("averageManySize({0}, {1})".format(rname, total_rows / float(num_distinct_ref)))
    return rules
def to_identifier(name):
    """Turn a dotted SQL name into a Prolog-safe identifier."""
    return "_".join(name.split("."))
def create_schema_image(metadata):
    """Render the reflected schema to a temporary PNG and display it."""
    graph = create_schema_graph(metadata=metadata,
        show_datatypes=True, show_indexes=False, rankdir='LR')
    # NOTE(review): `import Image` is the legacy PIL (Python 2)
    # spelling; under Pillow this would need `from PIL import Image`.
    import tempfile, Image
    with tempfile.NamedTemporaryFile(suffix=".png") as fout:
        graph.write(fout.name, format="png")
        Image.open(fout.name).show()
if __name__ == "__main__":
    main()
# The empty module-level string below appears to be leftover scaffolding.
"""
"""
| |
from __future__ import unicode_literals
import difflib
import errno
import json
import os
import posixpath
import re
import socket
import sys
import threading
import unittest
import warnings
from collections import Counter
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
    """
    Normalize *value* to a list: None becomes an empty list, an existing
    list is returned unchanged, and anything else is wrapped in a
    one-element list.
    """
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]
def assert_and_parse_html(self, html, user_msg, msg):
    """Parse *html* into a comparable DOM; fail the test (via self.fail,
    which raises) if the markup is invalid."""
    try:
        dom = parse_html(html)
    except HTMLParseError as e:
        standardMsg = '%s\n%s' % (msg, e.msg)
        self.fail(self._formatMessage(user_msg, standardMsg))
    return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
    """Context manager asserting that exactly ``num`` queries ran on
    ``connection`` while the ``with`` block executed."""
    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num
        super(_AssertNumQueriesContext, self).__init__(connection)
    def __exit__(self, exc_type, exc_value, traceback):
        super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
        # Don't mask an exception raised inside the ``with`` block.
        if exc_type is not None:
            return
        executed = len(self)
        query_log = '\n'.join(
            query['sql'] for query in self.captured_queries
        )
        self.test_case.assertEqual(
            executed, self.num,
            "%d queries executed, %d expected\nCaptured queries were:\n%s" % (
                executed, self.num, query_log
            )
        )
class _AssertTemplateUsedContext(object):
    """Context manager asserting that ``template_name`` was rendered
    inside the ``with`` block (tracked via the template_rendered signal)."""
    def __init__(self, test_case, template_name):
        self.test_case = test_case
        self.template_name = template_name
        self.rendered_templates = []
        self.rendered_template_names = []
        self.context = ContextList()
    def on_template_render(self, sender, signal, template, context, **kwargs):
        # Signal receiver: record every template rendered plus a copy of
        # its context for later inspection.
        self.rendered_templates.append(template)
        self.rendered_template_names.append(template.name)
        self.context.append(copy(context))
    def test(self):
        # Pass condition; overridden by the "not used" subclass.
        return self.template_name in self.rendered_template_names
    def message(self):
        return '%s was not rendered.' % self.template_name
    def __enter__(self):
        template_rendered.connect(self.on_template_render)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        template_rendered.disconnect(self.on_template_render)
        # Don't mask an exception raised inside the ``with`` block.
        if exc_type is not None:
            return
        if not self.test():
            message = self.message()
            if len(self.rendered_templates) == 0:
                message += ' No template was rendered.'
            else:
                message += ' Following templates were rendered: %s' % (
                    ', '.join(self.rendered_template_names))
            self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
    """Inverse of _AssertTemplateUsedContext: fails if the template
    *was* rendered."""
    def test(self):
        return self.template_name not in self.rendered_template_names
    def message(self):
        return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
    # The class we'll use for the test client self.client.
    # Can be overridden in derived classes.
    client_class = Client
    # Settings overrides applied for the whole class by setUpClass
    # (populated by the override_settings / modify_settings decorators).
    _overridden_settings = None
    _modified_settings = None
    # Tests shouldn't be allowed to query the database since
    # this base class doesn't enforce any isolation.
    allow_database_queries = False
    @classmethod
    def setUpClass(cls):
        # Enable class-level setting overrides and, unless explicitly
        # allowed, block all database access for the class's tests.
        super(SimpleTestCase, cls).setUpClass()
        if cls._overridden_settings:
            cls._cls_overridden_context = override_settings(**cls._overridden_settings)
            cls._cls_overridden_context.enable()
        if cls._modified_settings:
            cls._cls_modified_context = modify_settings(cls._modified_settings)
            cls._cls_modified_context.enable()
        if not cls.allow_database_queries:
            # Swap each connection's cursor factory for a callable that
            # raises a descriptive AssertionError on any query attempt.
            for alias in connections:
                connection = connections[alias]
                connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
    @classmethod
    def tearDownClass(cls):
        # Undo everything setUpClass installed, in reverse order.
        if not cls.allow_database_queries:
            for alias in connections:
                connection = connections[alias]
                # _CursorFailure keeps the original cursor in .wrapped.
                connection.cursor = connection.cursor.wrapped
        if hasattr(cls, '_cls_modified_context'):
            cls._cls_modified_context.disable()
            delattr(cls, '_cls_modified_context')
        if hasattr(cls, '_cls_overridden_context'):
            cls._cls_overridden_context.disable()
            delattr(cls, '_cls_overridden_context')
        super(SimpleTestCase, cls).tearDownClass()
    def __call__(self, result=None):
        """
        Wrapper around default __call__ method to perform common Django test
        set up. This means that user-defined Test Cases aren't required to
        include a call to super().setUp().
        """
        testMethod = getattr(self, self._testMethodName)
        skipped = (getattr(self.__class__, "__unittest_skip__", False) or
                   getattr(testMethod, "__unittest_skip__", False))
        if not skipped:
            try:
                self._pre_setup()
            except Exception:
                # Report setup failures as test errors instead of
                # crashing the runner.
                result.addError(self, sys.exc_info())
                return
        super(SimpleTestCase, self).__call__(result)
        if not skipped:
            try:
                self._post_teardown()
            except Exception:
                result.addError(self, sys.exc_info())
                return
    def _pre_setup(self):
        """Performs any pre-test setup. This includes:
        * Creating a test client.
        * If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
        * Clearing the mail test outbox.
        """
        self.client = self.client_class()
        self._urlconf_setup()
        mail.outbox = []
    def _urlconf_setup(self):
        # Deprecated path: a per-test-case `urls` attribute swaps in a
        # custom ROOT_URLCONF for the duration of the test.
        if hasattr(self, 'urls'):
            warnings.warn(
                "SimpleTestCase.urls is deprecated and will be removed in "
                "Django 2.0. Use @override_settings(ROOT_URLCONF=...) "
                "in %s instead." % self.__class__.__name__,
                RemovedInDjango20Warning, stacklevel=2)
            set_urlconf(None)
            self._old_root_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = self.urls
            clear_url_caches()
    def _post_teardown(self):
        """Performs any post-test things. This includes:
        * Putting back the original ROOT_URLCONF if it was changed.
        """
        self._urlconf_teardown()
    def _urlconf_teardown(self):
        # Only restore if _urlconf_setup actually swapped the urlconf.
        if hasattr(self, '_old_root_urlconf'):
            set_urlconf(None)
            settings.ROOT_URLCONF = self._old_root_urlconf
            clear_url_caches()
    def settings(self, **kwargs):
        """
        A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
        """
        return override_settings(**kwargs)
    def modify_settings(self, **kwargs):
        """
        A context manager that temporarily applies changes a list setting and
        reverts back to the original value when exiting the context.
        """
        return modify_settings(**kwargs)
    def assertRedirects(self, response, expected_url, status_code=302,
                        target_status_code=200, host=None, msg_prefix='',
                        fetch_redirect_response=True):
        """Asserts that a response redirected to a specific URL, and that the
        redirect URL can be loaded.
        Note that assertRedirects won't work for external links since it uses
        TestClient to do a request (use fetch_redirect_response=False to check
        such links without fetching them).
        """
        if host is not None:
            warnings.warn(
                "The host argument is deprecated and no longer used by assertRedirects",
                RemovedInDjango21Warning, stacklevel=2
            )
        if msg_prefix:
            msg_prefix += ": "
        if hasattr(response, 'redirect_chain'):
            # The request was a followed redirect
            self.assertTrue(len(response.redirect_chain) > 0,
                msg_prefix + "Response didn't redirect as expected: Response"
                " code was %d (expected %d)" %
                    (response.status_code, status_code))
            self.assertEqual(response.redirect_chain[0][1], status_code,
                msg_prefix + "Initial response didn't redirect as expected:"
                " Response code was %d (expected %d)" %
                    (response.redirect_chain[0][1], status_code))
            # The final hop of the chain is what the caller ends up on.
            url, status_code = response.redirect_chain[-1]
            scheme, netloc, path, query, fragment = urlsplit(url)
            self.assertEqual(response.status_code, target_status_code,
                msg_prefix + "Response didn't redirect as expected: Final"
                " Response code was %d (expected %d)" %
                    (response.status_code, target_status_code))
        else:
            # Not a followed redirect
            self.assertEqual(response.status_code, status_code,
                msg_prefix + "Response didn't redirect as expected: Response"
                " code was %d (expected %d)" %
                    (response.status_code, status_code))
            url = response.url
            scheme, netloc, path, query, fragment = urlsplit(url)
            if fetch_redirect_response:
                redirect_response = response.client.get(path, QueryDict(query),
                                                        secure=(scheme == 'https'))
                # Get the redirection page, using the same client that was used
                # to obtain the original response.
                self.assertEqual(redirect_response.status_code, target_status_code,
                    msg_prefix + "Couldn't retrieve redirection page '%s':"
                    " response code was %d (expected %d)" %
                        (path, redirect_response.status_code, target_status_code))
        if url != expected_url:
            # For temporary backwards compatibility, try to compare with a relative url
            e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
            relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
            if url == relative_url:
                warnings.warn(
                    "assertRedirects had to strip the scheme and domain from the "
                    "expected URL, as it was always added automatically to URLs "
                    "before Django 1.9. Please update your expected URLs by "
                    "removing the scheme and domain.",
                    RemovedInDjango21Warning, stacklevel=2)
                expected_url = relative_url
        self.assertEqual(url, expected_url,
            msg_prefix + "Response redirected to '%s', expected '%s'" %
                (url, expected_url))
    def _assert_contains(self, response, text, status_code, msg_prefix, html):
        """Shared implementation for assertContains/assertNotContains:
        returns (text_repr, real_count, msg_prefix) for the callers'
        assertions."""
        # If the response supports deferred rendering and hasn't been rendered
        # yet, then ensure that it does get rendered before proceeding further.
        if (hasattr(response, 'render') and callable(response.render)
                and not response.is_rendered):
            response.render()
        if msg_prefix:
            msg_prefix += ": "
        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        if response.streaming:
            content = b''.join(response.streaming_content)
        else:
            content = response.content
        if not isinstance(text, bytes) or html:
            # Compare as text using the response's charset.
            text = force_text(text, encoding=response.charset)
            content = content.decode(response.charset)
            text_repr = "'%s'" % text
        else:
            text_repr = repr(text)
        if html:
            # Structural comparison on parsed DOMs rather than raw strings.
            content = assert_and_parse_html(self, content, None,
                "Response's content is not valid HTML:")
            text = assert_and_parse_html(self, text, None,
                "Second argument is not valid HTML:")
        real_count = content.count(text)
        return (text_repr, real_count, msg_prefix)
    def assertContains(self, response, text, count=None, status_code=200,
                       msg_prefix='', html=False):
        """
        Asserts that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected), and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter - the assertion is true
        if the text occurs at least once in the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)
        if count is not None:
            self.assertEqual(real_count, count,
                msg_prefix + "Found %d instances of %s in response"
                " (expected %d)" % (real_count, text_repr, count))
        else:
            self.assertTrue(real_count != 0,
                msg_prefix + "Couldn't find %s in response" % text_repr)
    def assertNotContains(self, response, text, status_code=200,
                          msg_prefix='', html=False):
        """
        Asserts that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected), and that
        ``text`` doesn't occur in the content of the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)
        self.assertEqual(real_count, 0,
            msg_prefix + "Response should not contain %s" % text_repr)
    def assertFormError(self, response, form, field, errors, msg_prefix=''):
        """
        Asserts that a form used to render the response has a specific field
        error.

        Pass ``field=None`` (or any falsy value) to check the form's
        non-field errors instead of a specific field's errors.
        """
        if msg_prefix:
            msg_prefix += ": "
        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + "Response did not use any contexts to "
                      "render the response")
        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)
        # Search all contexts for the error.
        found_form = False
        for i, context in enumerate(contexts):
            if form not in context:
                continue
            found_form = True
            for err in errors:
                if field:
                    if field in context[form].errors:
                        # The field has errors; the expected error must be
                        # among them.
                        field_errors = context[form].errors[field]
                        self.assertTrue(err in field_errors,
                            msg_prefix + "The field '%s' on form '%s' in"
                            " context %d does not contain the error '%s'"
                            " (actual errors: %s)" %
                            (field, form, i, err, repr(field_errors)))
                    elif field in context[form].fields:
                        # The field exists on the form but is error-free.
                        self.fail(msg_prefix + "The field '%s' on form '%s'"
                                  " in context %d contains no errors" %
                                  (field, form, i))
                    else:
                        # No such field on the form at all.
                        self.fail(msg_prefix + "The form '%s' in context %d"
                                  " does not contain the field '%s'" %
                                  (form, i, field))
                else:
                    # No field given: look among the form's non-field errors.
                    non_field_errors = context[form].non_field_errors()
                    self.assertTrue(err in non_field_errors,
                        msg_prefix + "The form '%s' in context %d does not"
                        " contain the non-field error '%s'"
                        " (actual errors: %s)" %
                        (form, i, err, non_field_errors))
        if not found_form:
            self.fail(msg_prefix + "The form '%s' was not used to render the"
                      " response" % form)
    def assertFormsetError(self, response, formset, form_index, field, errors,
                           msg_prefix=''):
        """
        Asserts that a formset used to render the response has a specific error.

        For field errors, specify the ``form_index`` and the ``field``.
        For non-field errors, specify the ``form_index`` and the ``field`` as
        None.
        For non-form errors, specify ``form_index`` as None and the ``field``
        as None.
        """
        # Add punctuation to msg_prefix
        if msg_prefix:
            msg_prefix += ": "
        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + 'Response did not use any contexts to '
                      'render the response')
        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)
        # Search all contexts for the error.
        found_formset = False
        for i, context in enumerate(contexts):
            if formset not in context:
                continue
            found_formset = True
            for err in errors:
                if field is not None:
                    # Case 1: field-level error on one form of the formset.
                    if field in context[formset].forms[form_index].errors:
                        field_errors = context[formset].forms[form_index].errors[field]
                        self.assertTrue(err in field_errors,
                            msg_prefix + "The field '%s' on formset '%s', "
                            "form %d in context %d does not contain the "
                            "error '%s' (actual errors: %s)" %
                            (field, formset, form_index, i, err,
                             repr(field_errors)))
                    elif field in context[formset].forms[form_index].fields:
                        # The field exists on the form but has no errors.
                        self.fail(msg_prefix + "The field '%s' "
                                  "on formset '%s', form %d in "
                                  "context %d contains no errors" %
                                  (field, formset, form_index, i))
                    else:
                        # No such field on the form at all.
                        self.fail(msg_prefix + "The formset '%s', form %d in "
                                  "context %d does not contain the field '%s'" %
                                  (formset, form_index, i, field))
                elif form_index is not None:
                    # Case 2: non-field error on one particular form.
                    non_field_errors = context[formset].forms[form_index].non_field_errors()
                    self.assertFalse(len(non_field_errors) == 0,
                        msg_prefix + "The formset '%s', form %d in "
                        "context %d does not contain any non-field "
                        "errors." % (formset, form_index, i))
                    self.assertTrue(err in non_field_errors,
                        msg_prefix + "The formset '%s', form %d "
                        "in context %d does not contain the "
                        "non-field error '%s' "
                        "(actual errors: %s)" %
                        (formset, form_index, i, err,
                         repr(non_field_errors)))
                else:
                    # Case 3: non-form error on the formset as a whole.
                    non_form_errors = context[formset].non_form_errors()
                    self.assertFalse(len(non_form_errors) == 0,
                        msg_prefix + "The formset '%s' in "
                        "context %d does not contain any "
                        "non-form errors." % (formset, i))
                    self.assertTrue(err in non_form_errors,
                        msg_prefix + "The formset '%s' in context "
                        "%d does not contain the "
                        "non-form error '%s' (actual errors: %s)" %
                        (formset, i, err, repr(non_form_errors)))
        if not found_formset:
            self.fail(msg_prefix + "The formset '%s' was not used to render "
                      "the response" % formset)
    def _assert_template_used(self, response, template_name, msg_prefix):
        """
        Common logic for assertTemplateUsed()/assertTemplateNotUsed().

        Returns a ``(context_mgr_template, template_names, msg_prefix)``
        triple: ``context_mgr_template`` is set when the caller should act as
        a context manager, otherwise ``template_names`` lists the names of
        the templates the response rendered.
        """
        if response is None and template_name is None:
            raise TypeError('response and/or template_name argument must be provided')
        if msg_prefix:
            msg_prefix += ": "
        if template_name is not None and response is not None and not hasattr(response, 'templates'):
            raise ValueError(
                "assertTemplateUsed() and assertTemplateNotUsed() are only "
                "usable on responses fetched using the Django test Client."
            )
        if not hasattr(response, 'templates') or (response is None and template_name):
            if response:
                # A single positional argument was given: it is really the
                # template name, and the caller wants context-manager use.
                template_name = response
                response = None
            # use this template with context manager
            return template_name, None, msg_prefix
        template_names = [t.name for t in response.templates if t.name is not
                          None]
        return None, template_names, msg_prefix
    def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
        """
        Asserts that the template with the provided name was used in rendering
        the response. Also usable as context manager.
        """
        context_mgr_template, template_names, msg_prefix = self._assert_template_used(
            response, template_name, msg_prefix)
        if context_mgr_template:
            # Use assertTemplateUsed as context manager.
            return _AssertTemplateUsedContext(self, context_mgr_template)
        if not template_names:
            self.fail(msg_prefix + "No templates used to render the response")
        self.assertTrue(template_name in template_names,
            msg_prefix + "Template '%s' was not a template used to render"
            " the response. Actual template(s) used: %s" %
            (template_name, ', '.join(template_names)))
        if count is not None:
            # Optionally also pin how many times the template was rendered.
            self.assertEqual(template_names.count(template_name), count,
                msg_prefix + "Template '%s' was expected to be rendered %d "
                "time(s) but was actually rendered %d time(s)." %
                (template_name, count, template_names.count(template_name)))
    def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
        """
        Asserts that the template with the provided name was NOT used in
        rendering the response. Also usable as context manager.
        """
        context_mgr_template, template_names, msg_prefix = self._assert_template_used(
            response, template_name, msg_prefix)
        if context_mgr_template:
            # Use assertTemplateNotUsed as context manager.
            return _AssertTemplateNotUsedContext(self, context_mgr_template)
        # Direct assertion mode: the name must be absent from what rendered.
        self.assertFalse(template_name in template_names,
            msg_prefix + "Template '%s' was used unexpectedly in rendering"
            " the response" % template_name)
    def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
        """
        Asserts that the message in a raised exception matches the passed
        value.

        Args:
            expected_exception: Exception class expected to be raised.
            expected_message: expected error message string value.
            args: Function to be called and extra positional args.
            kwargs: Extra kwargs.
        """
        # callable_obj was a documented kwarg in Django 1.8 and older.
        callable_obj = kwargs.pop('callable_obj', None)
        if callable_obj:
            args = (callable_obj,) + args
        # re.escape() so the message is matched literally rather than being
        # interpreted as a regular expression.
        return six.assertRaisesRegex(self, expected_exception,
                re.escape(expected_message), *args, **kwargs)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs),
fieldclass)
    def assertHTMLEqual(self, html1, html2, msg=None):
        """
        Asserts that two HTML snippets are semantically the same.
        Whitespace in most cases is ignored, and attribute ordering is not
        significant. The passed-in arguments must be valid HTML.
        """
        dom1 = assert_and_parse_html(self, html1, msg,
            'First argument is not valid HTML:')
        dom2 = assert_and_parse_html(self, html2, msg,
            'Second argument is not valid HTML:')
        if dom1 != dom2:
            standardMsg = '%s != %s' % (
                safe_repr(dom1, True), safe_repr(dom2, True))
            # Append a line-by-line ndiff of the normalized DOMs so the
            # mismatch is easy to locate in large documents.
            diff = ('\n' + '\n'.join(difflib.ndiff(
                six.text_type(dom1).splitlines(),
                six.text_type(dom2).splitlines())))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None,
'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None,
'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, needle, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
Usual JSON non-significant whitespace rules apply as the heavyweight
is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
Usual JSON non-significant whitespace rules apply as the heavyweight
is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
    """TestCase variant that resets database state by flushing (not rolling
    back a transaction) after every test."""

    # Subclasses can ask for resetting of auto increment sequence before each
    # test case
    reset_sequences = False

    # Subclasses can enable only a subset of apps for faster tests
    available_apps = None

    # Subclasses can define fixtures which will be automatically installed.
    fixtures = None

    # If transactions aren't available, Django will serialize the database
    # contents into a fixture during setup and flush and reload them
    # during teardown (as flush does not restore data from migrations).
    # This can be slow; this flag allows enabling on a per-case basis.
    serialized_rollback = False

    # Since tests will be wrapped in a transaction, or serialized if they
    # are not available, we allow queries to be run.
    allow_database_queries = True

    def _pre_setup(self):
        """Performs any pre-test setup. This includes:
        * If the class has an 'available_apps' attribute, restricting the app
          registry to these applications, then firing post_migrate -- it must
          run with the correct set of applications for the test case.
        * If the class has a 'fixtures' attribute, installing these fixtures.
        """
        super(TransactionTestCase, self)._pre_setup()
        if self.available_apps is not None:
            apps.set_available_apps(self.available_apps)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting='INSTALLED_APPS',
                                 value=self.available_apps,
                                 enter=True)
            for db_name in self._databases_names(include_mirrors=False):
                emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
        try:
            self._fixture_setup()
        except Exception:
            # Undo the app-registry restriction before propagating, since
            # _post_teardown() won't run when setup fails.
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(sender=settings._wrapped.__class__,
                                     setting='INSTALLED_APPS',
                                     value=settings.INSTALLED_APPS,
                                     enter=False)
            raise

    @classmethod
    def _databases_names(cls, include_mirrors=True):
        # If the test case has a multi_db=True flag, act on all databases,
        # including mirrors or not. Otherwise, just on the default DB.
        if getattr(cls, 'multi_db', False):
            return [alias for alias in connections
                    if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']]
        else:
            return [DEFAULT_DB_ALIAS]

    def _reset_sequences(self, db_name):
        """Reset auto-increment sequences on ``db_name`` (backends that
        support it only) so tests see predictable pk values."""
        conn = connections[db_name]
        if conn.features.supports_sequence_reset:
            sql_list = conn.ops.sequence_reset_by_name_sql(
                no_style(), conn.introspection.sequence_list())
            if sql_list:
                with transaction.atomic(using=db_name):
                    cursor = conn.cursor()
                    for sql in sql_list:
                        cursor.execute(sql)

    def _fixture_setup(self):
        """Optionally reset sequences, restore serialized migration data,
        and load declared fixtures into each test database."""
        for db_name in self._databases_names(include_mirrors=False):
            # Reset sequences
            if self.reset_sequences:
                self._reset_sequences(db_name)
            # If we need to provide replica initial data from migrated apps,
            # then do so.
            if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
                # Deserialization must see the full app registry, so lift the
                # available_apps restriction around it.
                if self.available_apps is not None:
                    apps.unset_available_apps()
                connections[db_name].creation.deserialize_db_from_string(
                    connections[db_name]._test_serialized_contents
                )
                if self.available_apps is not None:
                    apps.set_available_apps(self.available_apps)
            if self.fixtures:
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures,
                             **{'verbosity': 0, 'database': db_name})

    def _should_reload_connections(self):
        # Hook: TestCase overrides this to skip reconnecting when the
        # transactional isolation makes it unnecessary.
        return True

    def _post_teardown(self):
        """Performs any post-test things. This includes:
        * Flushing the contents of the database, to leave a clean slate. If
          the class has an 'available_apps' attribute, post_migrate isn't fired.
        * Force-closing the connection, so the next test gets a clean cursor.
        """
        try:
            self._fixture_teardown()
            super(TransactionTestCase, self)._post_teardown()
            if self._should_reload_connections():
                # Some DB cursors include SQL statements as part of cursor
                # creation. If you have a test that does a rollback, the effect
                # of these statements is lost, which can affect the operation of
                # tests (e.g., losing a timezone setting causing objects to be
                # created with the wrong time). To make sure this doesn't
                # happen, get a clean connection at the start of every test.
                for conn in connections.all():
                    conn.close()
        finally:
            # Always undo the app-registry restriction, even if teardown
            # raised.
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(sender=settings._wrapped.__class__,
                                     setting='INSTALLED_APPS',
                                     value=settings.INSTALLED_APPS,
                                     enter=False)

    def _fixture_teardown(self):
        # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
        # when flushing only a subset of the apps
        for db_name in self._databases_names(include_mirrors=False):
            # Flush the database
            call_command('flush', verbosity=0, interactive=False,
                         database=db_name, reset_sequences=False,
                         allow_cascade=self.available_apps is not None,
                         inhibit_post_migrate=self.available_apps is not None)

    def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
        """Asserts that queryset ``qs``, mapped through ``transform``, yields
        ``values`` (order-insensitively when ``ordered`` is False)."""
        items = six.moves.map(transform, qs)
        if not ordered:
            return self.assertEqual(Counter(items), Counter(values), msg=msg)
        values = list(values)
        # For example qs.iterator() could be passed as qs, but it does not
        # have 'ordered' attribute.
        if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
            raise ValueError("Trying to compare non-ordered queryset "
                             "against more than one ordered values")
        return self.assertEqual(list(items), values, msg=msg)

    def assertNumQueries(self, num, func=None, *args, **kwargs):
        """Asserts that exactly ``num`` queries run while calling ``func``;
        with no ``func``, returns a context manager doing the same check."""
        using = kwargs.pop("using", DEFAULT_DB_ALIAS)
        conn = connections[using]
        context = _AssertNumQueriesContext(self, num, conn)
        if func is None:
            return context
        with context:
            func(*args, **kwargs)
def connections_support_transactions():
    """
    Returns True if all connections support transactions.
    """
    # Explicit loop with an early exit instead of all(...) over a genexp.
    for conn in connections.all():
        if not conn.features.supports_transactions:
            return False
    return True
class TestCase(TransactionTestCase):
    """
    Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
    test isolation.

    In most situations, TestCase should be preferred to TransactionTestCase as
    it allows faster execution. However, there are some situations where using
    TransactionTestCase might be necessary (e.g. testing some transactional
    behavior).

    On database backends with no transaction support, TestCase behaves as
    TransactionTestCase.
    """
    @classmethod
    def _enter_atomics(cls):
        """Helper method to open atomic blocks for multiple databases"""
        atomics = {}
        for db_name in cls._databases_names():
            atomics[db_name] = transaction.atomic(using=db_name)
            atomics[db_name].__enter__()
        return atomics

    @classmethod
    def _rollback_atomics(cls, atomics):
        """Rollback atomic blocks opened through the previous method"""
        # Exit in reverse order of entry so nesting unwinds correctly.
        for db_name in reversed(cls._databases_names()):
            transaction.set_rollback(True, using=db_name)
            atomics[db_name].__exit__(None, None, None)

    @classmethod
    def setUpClass(cls):
        super(TestCase, cls).setUpClass()
        if not connections_support_transactions():
            # Without transactions there is nothing class-wide to wrap;
            # per-test flushing (TransactionTestCase behavior) applies.
            return
        cls.cls_atomics = cls._enter_atomics()
        if cls.fixtures:
            for db_name in cls._databases_names(include_mirrors=False):
                try:
                    call_command('loaddata', *cls.fixtures, **{
                        'verbosity': 0,
                        'commit': False,
                        'database': db_name,
                    })
                except Exception:
                    # Unwind the class-level atomics before propagating,
                    # since tearDownClass() won't run after a setup failure.
                    cls._rollback_atomics(cls.cls_atomics)
                    raise
        cls.setUpTestData()

    @classmethod
    def tearDownClass(cls):
        if connections_support_transactions():
            cls._rollback_atomics(cls.cls_atomics)
            for conn in connections.all():
                conn.close()
        super(TestCase, cls).tearDownClass()

    @classmethod
    def setUpTestData(cls):
        """Load initial data for the TestCase"""
        pass

    def _should_reload_connections(self):
        # Rolled-back transactions leave the connection usable; only reload
        # when falling back to the flush-based behavior.
        if connections_support_transactions():
            return False
        return super(TestCase, self)._should_reload_connections()

    def _fixture_setup(self):
        if not connections_support_transactions():
            # If the backend does not support transactions, we should reload
            # class data before each test
            self.setUpTestData()
            return super(TestCase, self)._fixture_setup()
        assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
        self.atomics = self._enter_atomics()

    def _fixture_teardown(self):
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_teardown()
        self._rollback_atomics(self.atomics)
class CheckCondition(object):
    """Descriptor class for deferred condition checking.

    Wraps a zero-argument callable and re-evaluates it on every attribute
    access, so the value reflects the state at access time rather than at
    decoration time.
    """
    def __init__(self, cond_func):
        self.cond_func = cond_func

    def __get__(self, obj, objtype):
        # Works for both class-level and instance-level attribute access.
        result = self.cond_func()
        return result
def _deferredSkip(condition, reason):
    """Like unittest.skipIf(), except ``condition`` is a zero-argument
    callable that is evaluated at test run time instead of at decoration
    time."""
    def decorator(test_func):
        if not (isinstance(test_func, type) and
                issubclass(test_func, unittest.TestCase)):
            # Plain test function/method: wrap it so the condition is
            # checked on every call.
            @wraps(test_func)
            def skip_wrapper(*args, **kwargs):
                if condition():
                    raise unittest.SkipTest(reason)
                return test_func(*args, **kwargs)
            test_item = skip_wrapper
        else:
            # Assume a class is decorated
            test_item = test_func
            # CheckCondition defers evaluation until unittest reads the
            # __unittest_skip__ attribute.
            test_item.__unittest_skip__ = CheckCondition(condition)
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIfDBFeature(*features):
    """
    Skip a test if a database has at least one of the named features.
    """
    def _has_any_feature():
        # Evaluated lazily, at test run time, by _deferredSkip().
        return any(getattr(connection.features, feature, False)
                   for feature in features)
    return _deferredSkip(
        _has_any_feature,
        "Database has feature(s) %s" % ", ".join(features)
    )
def skipUnlessDBFeature(*features):
    """
    Skip a test unless a database has all the named features.
    """
    def _lacks_some_feature():
        # Evaluated lazily, at test run time, by _deferredSkip().
        return not all(getattr(connection.features, feature, False)
                       for feature in features)
    return _deferredSkip(
        _lacks_some_feature,
        "Database doesn't support feature(s): %s" % ", ".join(features)
    )
def skipUnlessAnyDBFeature(*features):
    """
    Skip a test unless a database has any of the named features.
    """
    def _lacks_all_features():
        # Evaluated lazily, at test run time, by _deferredSkip().
        return not any(getattr(connection.features, feature, False)
                       for feature in features)
    return _deferredSkip(
        _lacks_all_features,
        "Database doesn't support any of the feature(s): %s" % ", ".join(features)
    )
class QuietWSGIRequestHandler(WSGIRequestHandler):
    """
    Just a regular WSGIRequestHandler except it doesn't log to the standard
    output any of the requests received, so as to not clutter the output for
    the tests' results.
    """
    def log_message(self, *args):
        # Intentionally a no-op: discard all per-request logging during
        # tests. The original signature absorbed ``self`` through *args;
        # an explicit ``self`` is the idiomatic method signature and is
        # call-compatible for all existing callers.
        pass
class FSFilesHandler(WSGIHandler):
    """
    WSGI middleware that intercepts calls to a directory, as defined by one of
    the *_ROOT settings, and serves those files, publishing them under *_URL.

    Subclasses provide get_base_dir() and get_base_url().
    """
    def __init__(self, application):
        # The wrapped WSGI application that handles non-file requests.
        self.application = application
        # Parsed tuple of the subclass's *_URL setting; index 1 is the
        # network location, index 2 the path.
        self.base_url = urlparse(self.get_base_url())
        super(FSFilesHandler, self).__init__()

    def _should_handle(self, path):
        """
        Checks if the path should be handled. Ignores the path if:
        * the host is provided as part of the base_url
        * the request's path isn't under the media path (or equal)
        """
        return path.startswith(self.base_url[2]) and not self.base_url[1]

    def file_path(self, url):
        """
        Returns the relative path to the file on disk for the given URL.
        """
        relative_url = url[len(self.base_url[2]):]
        return url2pathname(relative_url)

    def get_response(self, request):
        """Serve a matching file, falling back to the wrapped application
        when the path isn't ours or the file is missing."""
        from django.http import Http404
        if self._should_handle(request.path):
            try:
                return self.serve(request)
            except Http404:
                # File not found on disk: let the wrapped app respond.
                pass
        return super(FSFilesHandler, self).get_response(request)

    def serve(self, request):
        """Serve the file referenced by the request path from the base dir."""
        os_rel_path = self.file_path(request.path)
        os_rel_path = posixpath.normpath(unquote(os_rel_path))
        # Emulate behavior of django.contrib.staticfiles.views.serve() when it
        # invokes staticfiles' finders functionality.
        # TODO: Modify if/when that internal API is refactored
        final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
        return serve(request, final_rel_path, document_root=self.get_base_dir())

    def __call__(self, environ, start_response):
        # WSGI entry point: dispatch to the wrapped application unless the
        # requested path falls under our base URL.
        if not self._should_handle(get_path_info(environ)):
            return self.application(environ, start_response)
        return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
    """
    Handler for serving static files. A private class that is meant to be used
    solely as a convenience by LiveServerThread.
    """
    def get_base_dir(self):
        # Files are read from STATIC_ROOT ...
        return settings.STATIC_ROOT

    def get_base_url(self):
        # ... and published under STATIC_URL.
        return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
    """
    Handler for serving the media files. A private class that is meant to be
    used solely as a convenience by LiveServerThread.
    """
    def get_base_dir(self):
        # Files are read from MEDIA_ROOT ...
        return settings.MEDIA_ROOT

    def get_base_url(self):
        # ... and published under MEDIA_URL.
        return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
    """
    Thread for running a live http server while the tests are running.
    """
    def __init__(self, host, possible_ports, static_handler, connections_override=None):
        self.host = host
        # Filled in by run() once a free port from possible_ports is bound.
        self.port = None
        self.possible_ports = possible_ports
        # Signaled when the server is up -- or when startup failed; waiters
        # must check ``error`` afterwards.
        self.is_ready = threading.Event()
        self.error = None
        self.static_handler = static_handler
        self.connections_override = connections_override
        super(LiveServerThread, self).__init__()

    def run(self):
        """
        Sets up the live server and databases, and then loops over handling
        http requests.
        """
        if self.connections_override:
            # Override this thread's database connections with the ones
            # provided by the main thread.
            for alias, conn in self.connections_override.items():
                connections[alias] = conn
        try:
            # Create the handler for serving static and media files
            handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
            # Go through the list of possible ports, hoping that we can find
            # one that is free to use for the WSGI server.
            for index, port in enumerate(self.possible_ports):
                try:
                    self.httpd = WSGIServer(
                        (self.host, port), QuietWSGIRequestHandler)
                except socket.error as e:
                    if (index + 1 < len(self.possible_ports) and
                            e.errno == errno.EADDRINUSE):
                        # This port is already in use, so we go on and try with
                        # the next one in the list.
                        continue
                    else:
                        # Either none of the given ports are free or the error
                        # is something else than "Address already in use". So
                        # we let that error bubble up to the main thread.
                        raise
                else:
                    # A free port was found.
                    self.port = port
                    break
            self.httpd.set_app(handler)
            self.is_ready.set()
            self.httpd.serve_forever()
        except Exception as e:
            # Expose the failure to the main thread via ``error`` and unblock
            # anyone waiting on is_ready.
            self.error = e
            self.is_ready.set()

    def terminate(self):
        # Safe to call even if startup failed before ``httpd`` was created.
        if hasattr(self, 'httpd'):
            # Stop the WSGI server
            self.httpd.shutdown()
            self.httpd.server_close()
class LiveServerTestCase(TransactionTestCase):
    """
    Does basically the same as TransactionTestCase but also launches a live
    http server in a separate thread so that the tests may use another testing
    framework, such as Selenium for example, instead of the built-in dummy
    client.
    Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless using in-memory
    sqlite) and each thread needs to commit all their transactions so that the
    other thread can see the changes.
    """
    static_handler = _StaticFilesHandler

    @property
    def live_server_url(self):
        # Base URL of the background server, e.g. "http://localhost:8081".
        return 'http://%s:%s' % (
            self.server_thread.host, self.server_thread.port)

    @classmethod
    def setUpClass(cls):
        super(LiveServerTestCase, cls).setUpClass()
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
                # Explicitly enable thread-shareability for this connection
                conn.allow_thread_sharing = True
                connections_override[conn.alias] = conn
        # Launch the live server's thread
        specified_address = os.environ.get(
            'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')
        # The specified ports may be of the form '8000-8010,8080,9200-9300'
        # i.e. a comma-separated list of ports or ranges of ports, so we break
        # it down into a detailed list of all possible ports.
        possible_ports = []
        try:
            host, port_ranges = specified_address.split(':')
            for port_range in port_ranges.split(','):
                # A port range can be of either form: '8000' or '8000-8010'.
                extremes = list(map(int, port_range.split('-')))
                assert len(extremes) in [1, 2]
                if len(extremes) == 1:
                    # Port range of the form '8000'
                    possible_ports.append(extremes[0])
                else:
                    # Port range of the form '8000-8010'
                    for port in range(extremes[0], extremes[1] + 1):
                        possible_ports.append(port)
        except Exception:
            # Re-raise as ImproperlyConfigured while preserving the original
            # traceback for debugging.
            msg = 'Invalid address ("%s") for live server.' % specified_address
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
        cls.server_thread = LiveServerThread(host, possible_ports,
                                             cls.static_handler,
                                             connections_override=connections_override)
        cls.server_thread.daemon = True
        cls.server_thread.start()
        # Wait for the live server to be ready
        cls.server_thread.is_ready.wait()
        if cls.server_thread.error:
            # Clean up behind ourselves, since tearDownClass won't get called in
            # case of errors.
            cls._tearDownClassInternal()
            raise cls.server_thread.error

    @classmethod
    def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() for some
        # reasons has raised an exception.
        if hasattr(cls, 'server_thread'):
            # Terminate the live server's thread
            cls.server_thread.terminate()
            cls.server_thread.join()
        # Restore sqlite in-memory database connections' non-shareability
        for conn in connections.all():
            if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
                conn.allow_thread_sharing = False

    @classmethod
    def tearDownClass(cls):
        cls._tearDownClassInternal()
        super(LiveServerTestCase, cls).tearDownClass()
| |
from test.support import verbose, run_unittest, import_module, reap_children
# Skip these tests if either fcntl or termios is not available.
fcntl = import_module('fcntl')
import_module('termios')
import errno
import pty
import os
import sys
import select
import signal
import socket
import unittest
# Fixed payloads echoed through the pty in the tests below.
TEST_STRING_1 = b"I wish to buy a fish license.\n"
TEST_STRING_2 = b"For my pet fish, Eric.\n"
# ``debug`` prints progress messages only when test.support.verbose is set;
# otherwise it is a no-op.
if verbose:
    def debug(msg):
        print(msg)
else:
    def debug(msg):
        pass
def normalize_output(data):
    """Undo platform-specific newline mangling, returning ``data`` with a
    plain ``\\n`` line ending.

    Some operating systems do conversions on newline. We could possibly fix
    that with the appropriate termios.tcsetattr()s, but the right combination
    is platform-specific (Tru64, IRIX). So just normalize the output, allowing
    the known per-platform mangled forms while still rejecting other
    differences (extra whitespace, trailing garbage, etc.).
    """
    # OSF/1 (Tru64) apparently turns \n into \r\r\n; IRIX turns it into \r\n.
    # Check the longer suffix first so \r\r\n is not mistaken for \r\n.
    for mangled in (b'\r\r\n', b'\r\n'):
        if data.endswith(mangled):
            return data.replace(mangled, b'\n')
    return data
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
# XXX(nnorwitz): these tests leak fds when there is an error.
class PtyTest(unittest.TestCase):
    def setUp(self):
        # isatty() and close() can hang on some platforms. Set an alarm
        # before running the test to make sure we don't hang forever.
        self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
        # 10 seconds is ample headroom for the small reads/writes below.
        signal.alarm(10)
    def tearDown(self):
        # remove alarm, restore old alarm handler
        signal.alarm(0)
        signal.signal(signal.SIGALRM, self.old_alarm)
    def handle_sig(self, sig, frame):
        # SIGALRM handler installed by setUp(): the alarm firing means a pty
        # call hung, so fail the test outright.
        self.fail("isatty hung")
def test_basic(self):
try:
debug("Calling master_open()")
master_fd, slave_name = pty.master_open()
debug("Got master_fd '%d', slave_name '%s'" %
(master_fd, slave_name))
debug("Calling slave_open(%r)" % (slave_name,))
slave_fd = pty.slave_open(slave_name)
debug("Got slave_fd '%d'" % slave_fd)
except OSError:
# " An optional feature could not be imported " ... ?
raise unittest.SkipTest("Pseudo-terminals (seemingly) not functional.")
self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')
# Solaris requires reading the fd before anything is returned.
# My guess is that since we open and close the slave fd
# in master_open(), we need to read the EOF.
# Ensure the fd is non-blocking in case there's nothing to read.
orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
try:
s1 = os.read(master_fd, 1024)
self.assertEqual(b'', s1)
except OSError as e:
if e.errno != errno.EAGAIN:
raise
# Restore the original flags.
fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags)
debug("Writing to slave_fd")
os.write(slave_fd, TEST_STRING_1)
s1 = os.read(master_fd, 1024)
self.assertEqual(b'I wish to buy a fish license.\n',
normalize_output(s1))
debug("Writing chunked output")
os.write(slave_fd, TEST_STRING_2[:5])
os.write(slave_fd, TEST_STRING_2[5:])
s2 = os.read(master_fd, 1024)
self.assertEqual(b'For my pet fish, Eric.\n', normalize_output(s2))
os.close(slave_fd)
os.close(master_fd)
def test_fork(self):
debug("calling pty.fork()")
pid, master_fd = pty.fork()
if pid == pty.CHILD:
# stdout should be connected to a tty.
if not os.isatty(1):
debug("Child's fd 1 is not a tty?!")
os._exit(3)
# After pty.fork(), the child should already be a session leader.
# (on those systems that have that concept.)
debug("In child, calling os.setsid()")
try:
os.setsid()
except OSError:
# Good, we already were session leader
debug("Good: OSError was raised.")
pass
except AttributeError:
# Have pty, but not setsid()?
debug("No setsid() available?")
pass
except:
# We don't want this error to propagate, escaping the call to
# os._exit() and causing very peculiar behavior in the calling
# regrtest.py !
# Note: could add traceback printing here.
debug("An unexpected error was raised.")
os._exit(1)
else:
debug("os.setsid() succeeded! (bad!)")
os._exit(2)
os._exit(4)
else:
debug("Waiting for child (%d) to finish." % pid)
# In verbose mode, we have to consume the debug output from the
# child or the child will block, causing this test to hang in the
# parent's waitpid() call. The child blocks after a
# platform-dependent amount of data is written to its fd. On
# Linux 2.6, it's 4000 bytes and the child won't block, but on OS
# X even the small writes in the child above will block it. Also
# on Linux, the read() will throw an OSError (input/output error)
# when it tries to read past the end of the buffer but the child's
# already exited, so catch and discard those exceptions. It's not
# worth checking for EIO.
while True:
try:
data = os.read(master_fd, 80)
except OSError:
break
if not data:
break
sys.stdout.write(str(data.replace(b'\r\n', b'\n'),
encoding='ascii'))
##line = os.read(master_fd, 80)
##lines = line.replace('\r\n', '\n').split('\n')
##if False and lines != ['In child, calling os.setsid()',
## 'Good: OSError was raised.', '']:
## raise TestFailed("Unexpected output from child: %r" % line)
(pid, status) = os.waitpid(pid, 0)
res = status >> 8
debug("Child (%d) exited with status %d (%d)." % (pid, res, status))
if res == 1:
self.fail("Child raised an unexpected exception in os.setsid()")
elif res == 2:
self.fail("pty.fork() failed to make child a session leader.")
elif res == 3:
self.fail("Child spawned by pty.fork() did not have a tty as stdout")
elif res != 4:
self.fail("pty.fork() failed for unknown reasons.")
##debug("Reading from master_fd now that the child has exited")
##try:
## s1 = os.read(master_fd, 1024)
##except os.error:
## pass
##else:
## raise TestFailed("Read from master_fd did not raise exception")
os.close(master_fd)
# pty.fork() passed.
class SmallPtyTests(unittest.TestCase):
"""These tests don't spawn children or hang."""
def setUp(self):
self.orig_stdin_fileno = pty.STDIN_FILENO
self.orig_stdout_fileno = pty.STDOUT_FILENO
self.orig_pty_select = pty.select
self.fds = [] # A list of file descriptors to close.
self.select_rfds_lengths = []
self.select_rfds_results = []
def tearDown(self):
pty.STDIN_FILENO = self.orig_stdin_fileno
pty.STDOUT_FILENO = self.orig_stdout_fileno
pty.select = self.orig_pty_select
for fd in self.fds:
try:
os.close(fd)
except:
pass
def _pipe(self):
pipe_fds = os.pipe()
self.fds.extend(pipe_fds)
return pipe_fds
def _mock_select(self, rfds, wfds, xfds):
# This will raise IndexError when no more expected calls exist.
self.assertEqual(self.select_rfds_lengths.pop(0), len(rfds))
return self.select_rfds_results.pop(0), [], []
def test__copy_to_each(self):
"""Test the normal data case on both master_fd and stdin."""
read_from_stdout_fd, mock_stdout_fd = self._pipe()
pty.STDOUT_FILENO = mock_stdout_fd
mock_stdin_fd, write_to_stdin_fd = self._pipe()
pty.STDIN_FILENO = mock_stdin_fd
socketpair = socket.socketpair()
masters = [s.fileno() for s in socketpair]
self.fds.extend(masters)
# Feed data. Smaller than PIPEBUF. These writes will not block.
os.write(masters[1], b'from master')
os.write(write_to_stdin_fd, b'from stdin')
# Expect two select calls, the last one will cause IndexError
pty.select = self._mock_select
self.select_rfds_lengths.append(2)
self.select_rfds_results.append([mock_stdin_fd, masters[0]])
self.select_rfds_lengths.append(2)
with self.assertRaises(IndexError):
pty._copy(masters[0])
# Test that the right data went to the right places.
rfds = select.select([read_from_stdout_fd, masters[1]], [], [], 0)[0]
self.assertEqual([read_from_stdout_fd, masters[1]], rfds)
self.assertEqual(os.read(read_from_stdout_fd, 20), b'from master')
self.assertEqual(os.read(masters[1], 20), b'from stdin')
def test__copy_eof_on_all(self):
"""Test the empty read EOF case on both master_fd and stdin."""
read_from_stdout_fd, mock_stdout_fd = self._pipe()
pty.STDOUT_FILENO = mock_stdout_fd
mock_stdin_fd, write_to_stdin_fd = self._pipe()
pty.STDIN_FILENO = mock_stdin_fd
socketpair = socket.socketpair()
masters = [s.fileno() for s in socketpair]
self.fds.extend(masters)
os.close(masters[1])
socketpair[1].close()
os.close(write_to_stdin_fd)
# Expect two select calls, the last one will cause IndexError
pty.select = self._mock_select
self.select_rfds_lengths.append(2)
self.select_rfds_results.append([mock_stdin_fd, masters[0]])
# We expect that both fds were removed from the fds list as they
# both encountered an EOF before the second select call.
self.select_rfds_lengths.append(0)
with self.assertRaises(IndexError):
pty._copy(masters[0])
def test_main(verbose=None):
    """regrtest entry point: run both test classes.

    reap_children() runs even on failure so stray children forked by
    PtyTest.test_fork never outlive the test run.
    """
    try:
        run_unittest(SmallPtyTests, PtyTest)
    finally:
        reap_children()
if __name__ == "__main__":
    test_main()
| |
from __future__ import division, print_function
import numpy as np
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Fit a linear SVC on half a toy dataset and predict the other half.

    If binary is True the problem is restricted to the first two classes
    so it becomes a binary classification task.  Returns the tuple
    (y_true, y_pred, probas_pred) for the held-out half; in the binary
    case probas_pred holds only the positive-class column.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # keep only the first two classes
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    # Deterministically shuffle the samples.
    order = np.arange(n_samples)
    shuffler = check_random_state(37)
    shuffler.shuffle(order)
    X, y = X[order], y[order]
    split = int(n_samples / 2)
    # Pad with noisy features so the classifier cannot be perfect.
    noise_rng = np.random.RandomState(0)
    X = np.c_[X, noise_rng.randn(n_samples, 200 * n_features)]
    # Train on the first half, evaluate on the second.
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:split], y[:split]).predict_proba(X[split:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[split:])
    y_true = y[split:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
    """Subset (exact-match) accuracy on multilabel inputs."""
    # Dense label indicator matrix format.
    a = np.array([[0, 1, 1], [1, 0, 1]])
    b = np.array([[0, 0, 1], [1, 0, 1]])
    assert_equal(accuracy_score(a, b), 0.5)
    assert_equal(accuracy_score(a, a), 1)
    assert_equal(accuracy_score(b, b), 1)
    assert_equal(accuracy_score(b, np.logical_not(b)), 0)
    assert_equal(accuracy_score(a, np.logical_not(a)), 0)
    assert_equal(accuracy_score(a, np.zeros(a.shape)), 0)
    assert_equal(accuracy_score(b, np.zeros(a.shape)), 0)
    with ignore_warnings():  # sequence of sequences is deprecated
        # Lists of label tuples.
        a = [(1, 2,), (0, 2,)]
        b = [(2,), (0, 2,)]
        assert_equal(accuracy_score(a, b), 0.5)
        assert_equal(accuracy_score(a, a), 1)
        assert_equal(accuracy_score(b, b), 1)
        assert_equal(accuracy_score(b, [(), ()]), 0)
        # normalize=False returns the raw count of exact matches.
        assert_equal(accuracy_score(a, b, normalize=False), 1)
        assert_equal(accuracy_score(a, a, normalize=False), 2)
        assert_equal(accuracy_score(b, b, normalize=False), 2)
        assert_equal(accuracy_score(b, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
    """Test Precision Recall and F1 Score for binary classification task"""
    y_true, y_pred, _ = make_prediction(binary=True)
    # Per-class measures.
    prec, rec, fscore, supp = precision_recall_fscore_support(
        y_true, y_pred, average=None)
    assert_array_almost_equal(prec, [0.73, 0.85], 2)
    assert_array_almost_equal(rec, [0.88, 0.68], 2)
    assert_array_almost_equal(fscore, [0.80, 0.76], 2)
    assert_array_equal(supp, [25, 25])
    # The individual scoring functions usable in grid search report the
    # positive class (label == 1) in the binary case.
    ps = precision_score(y_true, y_pred)
    assert_array_almost_equal(ps, 0.85, 2)
    rs = recall_score(y_true, y_pred)
    assert_array_almost_equal(rs, 0.68, 2)
    fs = f1_score(y_true, y_pred)
    assert_array_almost_equal(fs, 0.76, 2)
    # F-beta must agree with its closed-form definition.
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2),
                        (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
    """Precision, recall and F1 when only a single class is present.

    Such a case may occur with non-stratified cross-validation.
    """
    # All-positive labels: every metric is a perfect 1 ...
    for scorer in (precision_score, recall_score, f1_score):
        assert_equal(1., scorer([1, 1], [1, 1]))
    # ... while all-negative labels leave no positive class to score.
    for scorer in (precision_score, recall_score, f1_score):
        assert_equal(0., scorer([-1, -1], [-1, -1]))
def test_average_precision_score_score_non_binary_class():
    """average_precision_score must refuse multiclass targets."""
    random_state = check_random_state(404)
    y_score = random_state.rand(10)
    # Three distinct values in y_true make the problem multiclass.
    y_true = random_state.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, y_true, y_score)
def test_average_precision_score_duplicate_values():
    """A perfect ranking with tied scores must still yield an AP of 1.

    Duplicate values with precision-recall require a different processing
    than when computing the AUC of a ROC, because the precision-recall
    curve is a decreasing curve.  The situation below is a perfect test
    statistic, so average_precision_score should be exactly 1.
    """
    labels = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    scores = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(labels, scores), 1)
def test_average_precision_score_tied_values():
    """A tie straddling the class boundary must cost some average precision.

    Reading y_true left to right, the 0s and 1s look perfectly separated,
    but the first two samples share the score 0.5 and could be swapped,
    creating an imperfect sorting.  That imperfection must show in the
    final score, making it strictly less than one.
    """
    labels = [0, 1, 1]
    scores = [.5, .5, .6]
    assert_not_equal(average_precision_score(labels, scores), 1.)
def test_precision_recall_fscore_support_errors():
    """Invalid beta, pos_label and average arguments must raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)
    # beta must be strictly positive.
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, beta=0.0)
    # pos_label=2 does not exist in this binary problem.
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, pos_label=2, average='macro')
    # 'mega' is not a recognized averaging mode.
    assert_raises(ValueError, precision_recall_fscore_support,
                  [0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
    """Test confusion matrix - binary classification case"""
    y_true, y_pred, _ = make_prediction(binary=True)
    def check(truth, prediction):
        # The confusion matrix itself is pinned exactly ...
        cm = confusion_matrix(truth, prediction)
        assert_array_equal(cm, [[22, 3], [8, 17]])
        # ... and Matthews' correlation must match its closed form.
        tp, fp, fn, tn = cm.flatten()
        numerator = tp * tn - fp * fn
        denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
        expected_mcc = numerator / denominator if denominator != 0 else 0
        mcc = matthews_corrcoef(truth, prediction)
        assert_array_almost_equal(mcc, expected_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)
    # Integer labels and their string equivalents must behave the same.
    check(y_true, y_pred)
    check([str(v) for v in y_true],
          [str(v) for v in y_pred])
@ignore_warnings
def test_matthews_corrcoef_nan():
    # Degenerate confusion matrices must give 0.0, never NaN.
    assert_equal(matthews_corrcoef([0], [1]), 0.0)
    assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
    """Test Precision Recall and F1 Score for multiclass classification task"""
    y_true, y_pred, _ = make_prediction(binary=False)
    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
    assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
    assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
    assert_array_equal(s, [24, 31, 20])
    # averaging tests
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.53, 2)
    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.53, 2)
    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.60, 2)
    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.51, 2)
    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.51, 2)
    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.53, 2)
    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.47, 2)
    # 'samples' averaging is only defined for multilabel targets.
    assert_raises(ValueError, precision_score, y_true, y_pred,
                  average="samples")
    assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
                  beta=0.5)
    # same prediction but with an explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
    assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
    assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
    assert_array_equal(s, [24, 20, 31])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """Test Precision Recall and F1 Score for multiclass classification task

    GH Issue #1296
    """
    # initialize data
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
    # compute scores with default labels introspection; the results are
    # deliberately unchecked -- the regression test is that pos_label=None
    # combined with average='weighted' does not raise.
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Check that pathological cases do not bring NaNs"""
    # Promote numpy floating-point warnings to errors so any hidden NaN
    # production fails loudly.
    old_error_settings = np.seterr(all='raise')
    try:
        # The prediction is a derangement of the truth: every sample wrong.
        truth = np.array([0, 1, 2, 0, 1, 2])
        prediction = np.array([2, 0, 1, 1, 2, 0])
        for scorer in (precision_score, recall_score, f1_score):
            assert_almost_equal(scorer(truth, prediction,
                                       average='weighted'), 0.0, 2)
    finally:
        # Restore whatever error handling numpy had before.
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Test confusion matrix - multi-class case"""
    y_true, y_pred, _ = make_prediction(binary=False)
    def check(truth, prediction, use_strings=False):
        # Default label introspection.
        cm = confusion_matrix(truth, prediction)
        assert_array_equal(cm, [[19, 4, 1],
                                [4, 3, 24],
                                [0, 2, 18]])
        # Explicit (permuted) label ordering permutes rows and columns.
        labels = ['0', '2', '1'] if use_strings else [0, 2, 1]
        cm = confusion_matrix(truth,
                              prediction,
                              labels=labels)
        assert_array_equal(cm, [[19, 1, 4],
                                [0, 18, 2],
                                [4, 24, 3]])
    # Integer labels and their string equivalents must behave the same.
    check(y_true, y_pred)
    check([str(v) for v in y_true],
          [str(v) for v in y_pred],
          use_strings=True)
def test_confusion_matrix_multiclass_subset_labels():
    """Test confusion matrix - multi-class case with subset of labels"""
    y_true, y_pred, _ = make_prediction(binary=False)
    # Restricting labels to the first two classes keeps only their rows
    # and columns of the full matrix.
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    assert_array_equal(cm, [[19, 4],
                            [4, 3]])
    # A subset may also be given in an arbitrary (here descending) order.
    cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
    assert_array_equal(cm, [[18, 2],
                            [24, 3]])
def test_classification_report_multiclass():
    """Test performance report"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names
    # NOTE: the expected strings are compared byte-for-byte with the
    # formatted report output -- keep their spacing intact.
    expected_report = """\
             precision    recall  f1-score   support
     setosa       0.83      0.79      0.81        24
 versicolor       0.33      0.10      0.15        31
  virginica       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)
    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support
          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
    """Test performance report with added digits in floating point values"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # print classification report with class names and 5-digit precision
    # NOTE: the expected strings are compared byte-for-byte -- keep their
    # spacing intact.
    expected_report = """\
             precision    recall  f1-score   support
     setosa    0.82609   0.79167   0.80851        24
 versicolor    0.33333   0.09677   0.15000        31
  virginica    0.41860   0.90000   0.57143        20
avg / total    0.51375   0.53333   0.47310        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)
    # print classification report with label detection (default digits=2)
    expected_report = """\
             precision    recall  f1-score   support
          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
    """The report must render string class labels, with or without
    explicit target_names overriding them."""
    y_true, y_pred, _ = make_prediction(binary=False)
    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]
    # NOTE: expected strings are compared byte-for-byte -- keep spacing.
    expected_report = """\
             precision    recall  f1-score   support
       blue       0.83      0.79      0.81        24
      green       0.33      0.10      0.15        31
        red       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
    # target_names replaces the detected string labels in display order.
    expected_report = """\
             precision    recall  f1-score   support
          a       0.83      0.79      0.81        24
          b       0.33      0.10      0.15        31
          c       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred,
                                   target_names=["a", "b", "c"])
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
    """The report must handle non-ASCII class labels (here with a cent
    sign appended)."""
    y_true, y_pred, _ = make_prediction(binary=False)
    labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    # NOTE: expected string is compared byte-for-byte -- keep spacing.
    expected_report = u"""\
             precision    recall  f1-score   support
      blue\xa2       0.83      0.79      0.81        24
     green\xa2       0.33      0.10      0.15        31
       red\xa2       0.42      0.90      0.57        20
avg / total       0.51      0.53      0.47        75
"""
    # Old NumPy cannot searchsorted on unicode arrays, so the report is
    # expected to fail loudly there instead of producing garbage.
    if np_version[:3] < (1, 7, 0):
        expected_message = ("NumPy < 1.7.0 does not implement"
                            " searchsorted on unicode data correctly.")
        assert_raise_message(RuntimeError, expected_message,
                             classification_report, y_true, y_pred)
    else:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
@ignore_warnings  # sequence of sequences is deprecated
def test_multilabel_classification_report():
    """The report must be identical for list-of-label-lists and binary
    indicator representations of the same multilabel data."""
    n_classes = 4
    n_samples = 50
    make_ml = make_multilabel_classification
    # Two independent random draws give deliberately imperfect
    # "predictions" of the first draw.
    _, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
                           n_samples=n_samples)
    _, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
                           n_samples=n_samples)
    # NOTE: compared byte-for-byte with the report -- keep spacing.
    expected_report = """\
             precision    recall  f1-score   support
          0       0.50      0.67      0.57        24
          1       0.51      0.74      0.61        27
          2       0.29      0.08      0.12        26
          3       0.52      0.56      0.54        27
avg / total       0.45      0.51      0.46       104
"""
    lb = MultiLabelBinarizer()
    lb.fit([range(4)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
    """Subset zero-one loss (complement of subset accuracy) on multilabel."""
    # Dense label indicator matrix format.
    a = np.array([[0, 1, 1], [1, 0, 1]])
    b = np.array([[0, 0, 1], [1, 0, 1]])
    assert_equal(zero_one_loss(a, b), 0.5)
    assert_equal(zero_one_loss(a, a), 0)
    assert_equal(zero_one_loss(b, b), 0)
    assert_equal(zero_one_loss(b, np.logical_not(b)), 1)
    assert_equal(zero_one_loss(a, np.logical_not(a)), 1)
    assert_equal(zero_one_loss(a, np.zeros(a.shape)), 1)
    assert_equal(zero_one_loss(b, np.zeros(a.shape)), 1)
    with ignore_warnings():  # sequence of sequences is deprecated
        # Lists of label tuples.
        a = [(1, 2,), (0, 2,)]
        b = [(2,), (0, 2,)]
        assert_equal(zero_one_loss(a, b), 0.5)
        assert_equal(zero_one_loss(a, a), 0)
        assert_equal(zero_one_loss(b, b), 0)
        assert_equal(zero_one_loss(b, [(), ()]), 1)
        assert_equal(zero_one_loss(b, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
    """Per-label (Hamming) loss on multilabel inputs."""
    # Dense label indicator matrix format.
    a = np.array([[0, 1, 1], [1, 0, 1]])
    b = np.array([[0, 0, 1], [1, 0, 1]])
    assert_equal(hamming_loss(a, b), 1 / 6)
    assert_equal(hamming_loss(a, a), 0)
    assert_equal(hamming_loss(b, b), 0)
    assert_equal(hamming_loss(b, np.logical_not(b)), 1)
    assert_equal(hamming_loss(a, np.logical_not(a)), 1)
    assert_equal(hamming_loss(a, np.zeros(a.shape)), 4 / 6)
    assert_equal(hamming_loss(b, np.zeros(a.shape)), 0.5)
    with ignore_warnings():  # sequence of sequences is deprecated
        # Lists of label tuples.
        a = [(1, 2,), (0, 2,)]
        b = [(2,), (0, 2,)]
        assert_equal(hamming_loss(a, b), 1 / 6)
        assert_equal(hamming_loss(a, a), 0)
        assert_equal(hamming_loss(b, b), 0)
        assert_equal(hamming_loss(b, [(), ()]), 0.75)
        assert_equal(hamming_loss(a, [tuple(), (10, )]), 0.625)
        # An explicit, larger class set enlarges the denominator.
        assert_almost_equal(hamming_loss(b, [tuple(), (10, )],
                                         classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
    """Jaccard similarity on multilabel inputs."""
    # Dense label indicator matrix format.
    a = np.array([[0, 1, 1], [1, 0, 1]])
    b = np.array([[0, 0, 1], [1, 0, 1]])
    # size(a \inter b) = [1, 2], size(a \union b) = [2, 2]
    assert_equal(jaccard_similarity_score(a, b), 0.75)
    assert_equal(jaccard_similarity_score(a, a), 1)
    assert_equal(jaccard_similarity_score(b, b), 1)
    assert_equal(jaccard_similarity_score(b, np.logical_not(b)), 0)
    assert_equal(jaccard_similarity_score(a, np.logical_not(a)), 0)
    assert_equal(jaccard_similarity_score(a, np.zeros(a.shape)), 0)
    assert_equal(jaccard_similarity_score(b, np.zeros(a.shape)), 0)
    with ignore_warnings():  # sequence of sequences is deprecated
        # Lists of label tuples.
        a = [(1, 2,), (0, 2,)]
        b = [(2,), (0, 2,)]
        assert_equal(jaccard_similarity_score(a, b), 0.75)
        assert_equal(jaccard_similarity_score(a, a), 1)
        assert_equal(jaccard_similarity_score(b, b), 1)
        assert_equal(jaccard_similarity_score(b, [(), ()]), 0)
        # y3 and y4 share no label in any sample, so similarity is 0.
        y3 = [(0,), (1,), (3,)]
        y4 = [(4,), (4,), (5, 6)]
        assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
        # |y5 inter y6| = [0, 1, 1], |y5 union y6| = [2, 1, 3]
        y5 = [(0,), (1,), (2, 3)]
        y6 = [(1,), (1,), (2, 0)]
        assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
    """ Test precision_recall_f1_score on a crafted multilabel example
    """
    # First crafted example
    y_true_ll = [(0,), (1,), (2, 3)]
    y_pred_ll = [(1,), (1,), (2, 0)]
    lb = LabelBinarizer()
    lb.fit([range(4)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)
    # Run identical checks on the sequence-of-sequences representation
    # and on the equivalent binary indicator matrix.
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average=None)
        #tp = [0, 1, 1, 0]
        #fn = [1, 0, 0, 1]
        #fp = [1, 1, 0, 0]
        # Check per class
        assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
        assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
        assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
        assert_array_almost_equal(s, [1, 1, 1, 1], 2)
        f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
        support = s
        assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
        # Check macro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="macro")
        assert_almost_equal(p, 1.5 / 4)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 2.5 / 1.5 * 0.25)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="macro"),
                            np.mean(f2))
        # Check micro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="micro")
        assert_almost_equal(p, 0.5)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 0.5)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="micro"),
                            (1 + 4) * p * r / (4 * p + r))
        # Check weighted
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="weighted")
        assert_almost_equal(p, 1.5 / 4)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 2.5 / 1.5 * 0.25)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="weighted"),
                            np.average(f2, weights=support))
        # Check samples
        # |h(x_i) inter y_i | = [0, 1, 1]
        # |y_i| = [1, 1, 2]
        # |h(x_i)| = [1, 1, 2]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="samples")
        assert_almost_equal(p, 0.5)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 0.5)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="samples"),
                            0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
    """ Test precision_recall_f1_score on a crafted multilabel example 2
    """
    # Second crafted example
    y_true_ll = [(1,), (2,), (2, 3)]
    y_pred_ll = [(4,), (4,), (2, 1)]
    lb = LabelBinarizer()
    lb.fit([range(1, 5)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)
    # Run identical checks on the sequence-of-sequences representation
    # and on the equivalent binary indicator matrix.
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        # tp = [ 0.  1.  0.  0.]
        # fp = [ 1.  0.  0.  2.]
        # fn = [ 1.  1.  1.  0.]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average=None)
        assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
        assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
        assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
        assert_array_almost_equal(s, [1, 2, 1, 0], 2)
        f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
        support = s
        assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
        # Check micro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="micro")
        assert_almost_equal(p, 0.25)
        assert_almost_equal(r, 0.25)
        assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="micro"),
                            (1 + 4) * p * r / (4 * p + r))
        # Check macro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="macro")
        assert_almost_equal(p, 0.25)
        assert_almost_equal(r, 0.125)
        assert_almost_equal(f, 2 / 12)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="macro"),
                            np.mean(f2))
        # Check weighted
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="weighted")
        assert_almost_equal(p, 2 / 4)
        assert_almost_equal(r, 1 / 4)
        assert_almost_equal(f, 2 / 3 * 2 / 4)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="weighted"),
                            np.average(f2, weights=support))
        # Check samples
        # |h(x_i) inter y_i | = [0, 0, 1]
        # |y_i| = [1, 1, 2]
        # |h(x_i)| = [1, 1, 2]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="samples")
        assert_almost_equal(p, 1 / 6)
        assert_almost_equal(r, 1 / 6)
        assert_almost_equal(f, 2 / 4 * 1 / 3)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="samples"),
                            0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
    """One sample receives no predicted label at all; the per-sample
    precision there is ill-defined and the averages must stay finite."""
    y_true_ll = [(1,), (0,), (2, 1,)]
    y_pred_ll = [tuple(), (3,), (2, 1)]
    lb = LabelBinarizer()
    lb.fit([range(4)])
    y_true_bi = lb.transform(y_true_ll)
    y_pred_bi = lb.transform(y_pred_ll)
    # Run identical checks on the sequence-of-sequences representation
    # and on the equivalent binary indicator matrix.
    for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
        # true_pos = [ 0.  1.  1.  0.]
        # false_pos = [ 0.  0.  0.  1.]
        # false_neg = [ 1.  1.  0.  0.]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average=None)
        assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
        assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
        assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
        assert_array_almost_equal(s, [1, 2, 1, 0], 2)
        f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
        support = s
        assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
        # Check macro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="macro")
        assert_almost_equal(p, 0.5)
        assert_almost_equal(r, 1.5 / 4)
        assert_almost_equal(f, 2.5 / (4 * 1.5))
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="macro"),
                            np.mean(f2))
        # Check micro
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="micro")
        assert_almost_equal(p, 2 / 3)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="micro"),
                            (1 + 4) * p * r / (4 * p + r))
        # Check weighted
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="weighted")
        assert_almost_equal(p, 3 / 4)
        assert_almost_equal(r, 0.5)
        assert_almost_equal(f, (2 / 1.5 + 1) / 4)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="weighted"),
                            np.average(f2, weights=support))
        # Check samples
        # |h(x_i) inter y_i | = [0, 0, 2]
        # |y_i| = [1, 1, 2]
        # |h(x_i)| = [0, 1, 2]
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average="samples")
        assert_almost_equal(p, 1 / 3)
        assert_almost_equal(r, 1 / 3)
        assert_almost_equal(f, 1 / 3)
        assert_equal(s, None)
        assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                        average="samples"),
                            0.333, 2)
def test_precision_recall_f1_no_labels():
    """All-zero targets and predictions: every metric is ill-defined.

    With no true samples and no predicted samples, precision, recall and
    F-score must all be reported as 0.0 together with an
    UndefinedMetricWarning, both per-label (average=None) and for every
    averaging mode.
    """
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)
    # tp = fn = fp = support = [0, 0, 0] for every label, so each
    # per-label score is 0/0 and must be reported as 0.0 with a warning.
    beta = 1
    p, r, f, s = assert_warns(UndefinedMetricWarning,
                              precision_recall_fscore_support,
                              y_true, y_pred, average=None, beta=beta)
    assert_array_almost_equal(p, [0, 0, 0], 2)
    assert_array_almost_equal(r, [0, 0, 0], 2)
    assert_array_almost_equal(f, [0, 0, 0], 2)
    assert_array_almost_equal(s, [0, 0, 0], 2)
    fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                         y_true, y_pred, beta=beta, average=None)
    assert_array_almost_equal(fbeta, [0, 0, 0], 2)
    # Every averaging mode collapses the ill-defined scores to scalar 0,
    # and support is not reported for averaged results.
    for avg in ("macro", "micro", "weighted", "samples"):
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=avg,
                                  beta=beta)
        assert_almost_equal(p, 0)
        assert_almost_equal(r, 0)
        assert_almost_equal(f, 0)
        assert_equal(s, None)
        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred,
                             beta=beta, average=avg)
        assert_almost_equal(fbeta, 0)
def test_prf_warnings():
    """Check the exact UndefinedMetricWarning messages produced by
    precision_recall_fscore_support in every ill-defined situation:
    per-label averages, per-sample averages, micro-average, and the
    single-positive-label case."""
    # average of per-label scores
    f, w = precision_recall_fscore_support, UndefinedMetricWarning
    my_assert = assert_warns_message
    for average in [None, 'weighted', 'macro']:
        # Label 0 is never predicted -> precision/F ill-defined for it.
        msg = ('Precision and F-score are ill-defined and '
               'being set to 0.0 in labels with no predicted samples.')
        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
        # Label 0 never appears in y_true -> recall/F ill-defined for it.
        msg = ('Recall and F-score are ill-defined and '
               'being set to 0.0 in labels with no true samples.')
        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
    # average of per-sample scores (multilabel indicator input)
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 in samples with no predicted labels.')
    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
              np.array([[1, 0], [0, 0]]), average='samples')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 in samples with no true labels.')
    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
              np.array([[1, 0], [1, 0]]),
              average='samples')
    # single score: micro-average
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
              np.array([[0, 0], [0, 0]]), average='micro')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
              np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
    """Micro-averaged recall warns exactly when there are no true samples."""
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])
    # Predicting nothing is fine for recall: no warning expected.
    assert_no_warnings(recall_score, all_ones, all_zeros, average='micro')
    # No true samples at all -> recall is ill-defined and must warn.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        recall_score(all_zeros, all_ones, average='micro')
        assert_equal(str(caught.pop().message),
                     'Recall is ill-defined and '
                     'being set to 0.0 due to no true samples.')
def test_precision_warnings():
    """Micro-averaged precision warns exactly when nothing is predicted."""
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])
    # No predicted samples at all -> precision ill-defined, must warn.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        precision_score(all_ones, all_zeros, average='micro')
        assert_equal(str(caught.pop().message),
                     'Precision is ill-defined and '
                     'being set to 0.0 due to no predicted samples.')
    # An empty ground truth is fine for precision: no warning expected.
    assert_no_warnings(precision_score, all_zeros, all_ones,
                       average='micro')
def test_fscore_warnings():
    """F-score warns when either the true or the predicted side is empty.

    Checked for both f1_score and fbeta_score (beta=2).
    """
    all_ones = np.array([[1, 1], [1, 1]])
    all_zeros = np.array([[0, 0], [0, 0]])
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        for scorer in [f1_score, partial(fbeta_score, beta=2)]:
            # No predictions -> ill-defined, warns.
            scorer(all_ones, all_zeros, average='micro')
            assert_equal(str(caught.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            # No true samples -> ill-defined, warns.
            scorer(all_zeros, all_ones, average='micro')
            assert_equal(str(caught.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
@ignore_warnings  # sequence of sequences is deprecated
def test__check_targets():
    """Check that _check_targets correctly merges target types, squeezes
    output and fails if input lengths differ."""
    # Short aliases for the target-type strings used by type_of_target.
    IND = 'multilabel-indicator'
    SEQ = 'multilabel-sequences'
    MC = 'multiclass'
    BIN = 'binary'
    CNT = 'continuous'
    MMC = 'multiclass-multioutput'
    MCN = 'continuous-multioutput'
    # all of length 3
    EXAMPLES = [
        (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
        # must not be considered binary
        (IND, np.array([[0, 1], [1, 0], [1, 1]])),
        (SEQ, [[2, 3], [1], [3]]),
        (MC, [2, 3, 1]),
        (BIN, [0, 1, 1]),
        (CNT, [0., 1.5, 1.]),
        (MC, np.array([[2], [3], [1]])),
        (BIN, np.array([[0], [1], [1]])),
        (CNT, np.array([[0.], [1.5], [1.]])),
        (MMC, np.array([[0, 2], [1, 3], [2, 3]])),
        (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
    ]
    # expected type given input types, or None for error
    # (types will be tried in either order)
    EXPECTED = {
        (IND, IND): IND,
        (SEQ, SEQ): IND,
        (MC, MC): MC,
        (BIN, BIN): BIN,
        (IND, SEQ): None,
        (MC, SEQ): None,
        (BIN, SEQ): None,
        (MC, IND): None,
        (BIN, IND): None,
        (BIN, MC): MC,
        # Disallowed types
        (CNT, CNT): None,
        (MMC, MMC): None,
        (MCN, MCN): None,
        (IND, CNT): None,
        (SEQ, CNT): None,
        (MC, CNT): None,
        (BIN, CNT): None,
        (MMC, CNT): None,
        (MCN, CNT): None,
        (IND, MMC): None,
        (SEQ, MMC): None,
        (MC, MMC): None,
        (BIN, MMC): None,
        (MCN, MMC): None,
        (IND, MCN): None,
        (SEQ, MCN): None,
        (MC, MCN): None,
        (BIN, MCN): None,
    }
    # Try every ordered pair of examples; EXPECTED keys are unordered,
    # so look the pair up in both orders.
    for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
        try:
            expected = EXPECTED[type1, type2]
        except KeyError:
            expected = EXPECTED[type2, type1]
        if expected is None:
            assert_raises(ValueError, _check_targets, y1, y2)
            if type1 != type2:
                # Mixing two different types names both in the message.
                assert_raise_message(
                    ValueError,
                    "Can't handle mix of {0} and {1}".format(type1, type2),
                    _check_targets, y1, y2)
            else:
                # Same-but-unsupported type gets the simpler message.
                if type1 not in (BIN, MC, SEQ, IND):
                    assert_raise_message(ValueError,
                                         "{0} is not supported".format(type1),
                                         _check_targets, y1, y2)
        else:
            merged_type, y1out, y2out = _check_targets(y1, y2)
            assert_equal(merged_type, expected)
            if merged_type.startswith('multilabel'):
                # Multilabel targets come back as CSR sparse matrices.
                assert_equal(y1out.format, 'csr')
                assert_equal(y2out.format, 'csr')
            else:
                assert_array_equal(y1out, np.squeeze(y1))
                assert_array_equal(y2out, np.squeeze(y2))
            # Length mismatch must always raise.
            assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
    """Average hinge loss for two equivalent binary label encodings.

    The same decision values with labels encoded as {-1, 1} and as
    {0, 2} must yield the same average hinge loss.
    """
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    for y_true in (np.array([-1, 1, 1, -1]), np.array([0, 2, 2, 0])):
        assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_log_loss():
    """Sanity checks for log_loss: binary and multiclass inputs,
    normalization, eps clipping, and class-count mismatch errors."""
    # binary case with symbolic labels ("no" < "yes")
    y_true = ["no", "no", "no", "yes", "yes", "yes"]
    y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
                       [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.8817971)
    # multiclass case; adapted from http://bit.ly/RJJHWA
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
    loss = log_loss(y_true, y_pred, normalize=True)
    assert_almost_equal(loss, 0.6904911)
    # check that we got all the shapes and axes right
    # by doubling the length of y_true and y_pred
    # (in-place list repetition; both are plain lists at this point)
    y_true *= 2
    y_pred *= 2
    loss = log_loss(y_true, y_pred, normalize=False)
    assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
    # check eps and handling of absolute zero and one probabilities
    # (thresholding turns y_pred into a boolean 0/1 array)
    y_pred = np.asarray(y_pred) > .5
    loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
    assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
    # raise error if number of classes are not equal.
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
    assert_raises(ValueError, log_loss, y_true, y_pred)
    # case when y_true is a string array object
    y_true = ["ham", "spam", "spam", "ham"]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
    loss = log_loss(y_true, y_pred)
    assert_almost_equal(loss, 1.0383217, decimal=6)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# python-ev3dev documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 31 20:38:27 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import subprocess
# Make the package root importable so git_version (and the package itself,
# for autodoc) can be found when building the docs.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from git_version import git_version
# On Read the Docs the build environment is fresh, so install the
# docs-only dependencies at build time before importing them below.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sphinx_bootstrap_theme', 'recommonmark', 'evdev'])
import sphinx_bootstrap_theme
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Parse .md sources with recommonmark's CommonMark parser.
# NOTE(review): source_parsers is deprecated in Sphinx >= 1.8 (removed in
# 3.0); newer Sphinx expects recommonmark to be added as an extension.
source_parsers = {
    '.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'python-ev3dev'
copyright = '2015, Ralph Hempel et al'
author = 'Ralph Hempel et al'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = git_version()
# The full version, including alpha/beta/rc tags.
# Both values come from git_version(), so no manual version bumping is
# needed in this file.
release = git_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Bootstrap theme (installed via sphinx_bootstrap_theme) with the "yeti"
# bootswatch skin and a GitHub link in the navbar.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme = 'bootstrap'
html_theme_options = {
    'bootswatch_theme': 'yeti',
    'navbar_links' : [
        ("GitHub", "https://github.com/ev3dev/ev3dev-lang-python", True)
    ]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(review): Sphinx warns at build time if this directory is missing.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-ev3devdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX knobs are left at their Sphinx defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'python-ev3dev.tex', 'python-ev3dev Documentation',
     'Ralph Hempel et al', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One man page in section 1 (user commands), built from the master doc.
man_pages = [
    (master_doc, 'python-ev3dev', 'python-ev3dev Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Single Texinfo document built from the master doc.
texinfo_documents = [
    (master_doc, 'python-ev3dev', 'python-ev3dev Documentation',
     author, 'python-ev3dev', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Document class members in the order they appear in the source file.
autodoc_member_order = 'bysource'
# Cross-reference targets that nitpicky mode should not complain about:
# builtin/primitive names and internal classes without doc targets.
nitpick_ignore = [
    ('py:class', 'ev3dev2.display.FbMem'),
    ('py:class', 'ev3dev2.button.ButtonBase'),
    ('py:class', 'int'),
    ('py:class', 'float'),
    ('py:class', 'string'),
    ('py:class', 'iterable'),
    ('py:class', 'tuple'),
    ('py:class', 'list'),
    ('py:exc', 'ValueError')
]
def setup(app):
    """Sphinx extension hook: enable recommonmark's AutoStructify transform
    (with rst evaluation inside Markdown sources) on the given app."""
    recommonmark_settings = {
        'enable_eval_rst': True,
    }
    app.add_config_value('recommonmark_config', recommonmark_settings, True)
    app.add_transform(AutoStructify)
| |
#!/usr/bin/python
# A Basic SimpleCV interactive shell tutorial
#load required libraries
from SimpleCV import *
from subprocess import call
from code import InteractiveInterpreter
import platform
lb = "\n" #linebreak
tb = "\t" #tab
tutorial_interpreter = InteractiveInterpreter(globals())
logo = None
img = None
clone = None
thumb = None
eroded = None
cropped = None
# Command to clear the shell screen
def shellclear():
    """Clear the terminal via 'clear'; no-op on Windows, where the
    'clear' command does not exist."""
    if platform.system() == "Windows":
        return
    call("clear")
def attempt(variable_name, desired_class):
    """Prompt for one command, then check whether the user created
    `variable_name` as an instance of `desired_class`.

    Returns True on success. Empty Images are rejected so the tutorials
    always work with real picture data.
    """
    prompt_and_run()
    # The interpreter runs in this module's globals, so look the
    # user-created variable up there.
    variable = globals().get(variable_name)
    if isinstance(variable,desired_class):
        if desired_class == Image:
            if variable.isEmpty():
                print lb
                print "Although you can create empty Images on SimpleCV, let's not"
                print "play with that now!"
                print lb
                return False
        return True
    return False
def prompt_and_run():
    """Read one command from the user, execute it in the tutorial
    interpreter, and return the raw command text."""
    command = raw_input("SimpleCV:> ")
    tutorial_interpreter.runsource(command)
    return command
def request_show_command():
    """Keep prompting until the user runs a command ending in '.show()'."""
    while not prompt_and_run().endswith('.show()'):
        pass
def end_tutorial():
    """Ask whether to continue; return True if the user typed 'quit'."""
    print lb
    print "Type 'quit' to leave the tutorials, or press Enter to move on!"
    command = raw_input("SimpleCV:> ")
    return command.lower() == 'quit'
def end_of_tutorial():
    """Print the closing message shown after the last tutorial."""
    print lb
    print "This is the end of our tutorial!"
    print lb
    print "For more help, go to www.simplecv.org, and don't forget about the"
    print "help function!"
    print lb
def command_loop(command, desired_tuple):
    """Show the suggested `command` and reprompt until attempt() succeeds
    for the (variable_name, expected_class) pair in `desired_tuple`."""
    while True:
        print command
        print lb
        if attempt(desired_tuple[0], desired_tuple[1]):
            return
        print lb
        print "Oops! %s is still not %s" % (desired_tuple[0], str(desired_tuple[1]))
def tutorial_image():
    """Tutorial 1/6: loading and displaying images."""
    shellclear()
    print "SimpleCV Image tutorial"
    print "-----------------------"
    print lb
    print "Using images are simple, in SimpleCV."
    print lb
    print "First thing we are going to do is load an image. Try it yourself:"
    print lb
    cmd = "logo = Image(\"simplecv\")"
    desired_tuple = ('logo', Image)
    command_loop(cmd, desired_tuple)
    print lb
    print "Correct! You just loaded SimpleCV logo into memory."
    print "Let's try it to use one of your images. There are different ways to"
    print "do that. You can try, for example:"
    print lb
    print "img = Image(URL_TO_MY_PICTURE) or img = Image(PATH_TO_MY_PICTURE)"
    print lb
    cmd = "Example: img = Image('http://simplecv.org/logo.jpg')"
    desired_tuple = ('img', Image)
    command_loop(cmd, desired_tuple)
    print lb
    print "Perfect! Now we want to see it:"
    print lb
    cmd = "img.show()"
    print cmd
    print lb
    request_show_command()
    print lb
    print "Alright! This was tutorial 1/6."
    print "Next tutorial: Saving Images"
    # Chain into the next tutorial unless the user typed 'quit'.
    if not end_tutorial():
        tutorial_save()
    return
def tutorial_save():
    """Tutorial 2/6: saving images to disk."""
    shellclear()
    print "Saving Images"
    print lb
    print "Once you have an Image Object loaded in memory you can"
    print "now save it to disk."
    print lb
    raw_input("[Press enter to continue]")
    print lb
    print "Saving an image is very simple, pardon the pun. Once it's loaded"
    print "into memory, it's literally just:"
    print "img.save()"
    print lb
    print "This will save the image back to the location it was loaded from"
    print "so if you did img = Image('/tmp/test.jpg'), then it would save"
    print "it back there, otherwise you can do:"
    print "img.save('/any/path/you/want')"
    print lb
    print "So try it now and save an image somewhere on your system"
    print lb
    # Suggest a platform-appropriate example path.
    if platform.system() == "Windows":
        print "img.save('C:/myimg.jpg')"
    else:
        print "img.save('/tmp/new.jpg')"
    print lb
    # Only the command prefix is checked; the destination path is free-form.
    while True:
        if prompt_and_run().startswith('img.save'):
            break
        print "Please try to save img!"
    print lb
    print "Correct, you just saved a new copy of your image!"
    print "As you can see in SimpleCV most of the functions are intuitive."
    print lb
    print "Alright! This was tutorial 2/6."
    print "Next tutorial: Camera"
    if not end_tutorial():
        tutorial_camera()
    return
def tutorial_camera():
    """Tutorial 3/6: grabbing images from a camera (skippable)."""
    shellclear()
    print "Camera"
    print lb
    print "As long as your camera driver is supported then you shouldn't have a"
    print "problem. Type 'skip' to skip the camera tutorial, or press Enter to"
    print "continue."
    print lb
    command = raw_input("SimpleCV:> ")
    # Users without a camera can skip straight to the footer below.
    if command.lower() != 'skip':
        print lb
        print "To load the camera, just type:"
        print lb
        cmd = "cam = Camera()"
        desired_tuple = ('cam', Camera)
        command_loop(cmd, desired_tuple)
        print lb
        print "Next, to grab an image from the Camera we type:"
        cmd = "img = cam.getImage()"
        # Drop any img left over from earlier tutorials so attempt()
        # really checks the camera grab.
        tutorial_interpreter.runsource("del(img)")
        desired_tuple = ('img', Image)
        command_loop(cmd, desired_tuple)
        print "Just as before, if we want to display it, we just type:"
        print lb
        print "img.show()"
        print lb
        request_show_command()
    print lb
    print "Alright! This was tutorial 3/6."
    print "Next tutorial: Copying Images"
    if not end_tutorial():
        tutorial_copy()
    return
def tutorial_copy():
    """Tutorial 4/6: copying images vs. aliasing references."""
    shellclear()
    print "Copying Images"
    print lb
    print "If you need a copy of an image, this is also very simple:"
    print "Let's try to clone img, which we already have."
    global img
    # Earlier tutorials may have been skipped; make sure img exists.
    if not img:
        img = Image("lenna")
    print lb
    cmd = "clone = img.copy()"
    desired_tuple = ('clone', Image)
    while True:
        command_loop(cmd, desired_tuple)
        if clone != img: #Returns False if they have different addresses.
            break
        print "You have to use the copy() function!"
    print lb
    print "Correct, you just cloned an image into memory."
    print "You need to be careful when using this method though as using as a"
    print "reference vs. a copy. For instance, if you just typed:"
    print lb
    print "clone = img"
    print lb
    print "clone would actually point at the same thing in memory as img."
    print lb
    print "Alright! This was tutorial 4/6."
    print "Next tutorial: Manipulating Images"
    if not end_tutorial():
        tutorial_manipulation()
    return
def tutorial_manipulation():
    """Tutorial 5/6: scaling, eroding and cropping images."""
    shellclear()
    print "Manipulating Images"
    print lb
    print "Now we can easily load and save images. It's time to start doing some"
    print "image processing with them. Let's make img, which we already have, a"
    print "90x90 thumbnail:"
    global img
    # Earlier tutorials may have been skipped; make sure img exists.
    if not img:
        img = Image("lenna")
    print lb
    cmd = "thumb = img.scale(90,90)"
    desired_tuple = ('thumb', Image)
    # Keep asking until the thumbnail really is 90x90.
    while True:
        command_loop(cmd, desired_tuple)
        if thumb.size() == (90,90):
            break
        print "Your thumbnail's size isn't 90x90! Try again!"
    print lb
    print "Now display it with thumb.show()"
    print lb
    request_show_command()
    print lb
    print "Now let's erode the picture some:"
    print lb
    cmd = "eroded = img.erode()"
    desired_tuple = ('eroded', Image)
    command_loop(cmd, desired_tuple)
    print lb
    print "Display it with eroded.show(). It should look almost as if the image"
    print "was made if ink and had water spoiled on it."
    print lb
    request_show_command()
    print lb
    print "Last but not least, let's crop a section of the image out:"
    print lb
    cmd = "cropped = img.crop(100, 100, 50, 50)"
    desired_tuple = ('cropped', Image)
    command_loop(cmd, desired_tuple)
    print lb
    print "Use cropped.show() to display it."
    print lb
    request_show_command()
    print lb
    print "That went from the coordinate in (X,Y), which is (0,0) and is the"
    print "top left corner of the picture, to coordinates (100,100) in the"
    print "(X,Y) and cropped a picture from that which is 50 pixels by 50 pixels."
    print lb
    print "Alright! This was tutorial 5/6."
    print "Next tutorial: Features"
    if not end_tutorial():
        tutorial_features()
    return
def tutorial_slicing():
    """Extra tutorial: accessing image regions (ROIs) via slicing."""
    shellclear()
    print "Slicing Images"
    print lb
    print "Slicing is sort of a new paradigm to access parts of an image."
    print "Typically in vision a region of interest (ROI) is given. "
    print "In this case, slicing is a very powerful way to access parts"
    print "of an image, or basically any matrix in SimpleCV in general."
    print lb
    print "This is done by using:"
    print "section = img[1:10,1:10]"
    print lb
    print "What is returned is an image object with that window."
    print "the slicing basically acts like a ROI but returns an image"
    print "so if you wanted to say run edge detection on a 20x20 box"
    print "in the picture that started at x=5,y=10 you use:"
    print "foundedges = img[5:25,10:30].edges()"
    print lb
    raw_input("[Press enter to continue]")
    shellclear()
    in_text = ""
    shouldbe = "ROI = img[1:6,1:6]"
    print "Please type this now:"
    print shouldbe
    print lb
    # Loop until the user types the exact expected command.
    while (in_text != shouldbe):
        in_text = raw_input("SimpleCV:>")
        if(in_text != shouldbe):
            print "sorry, that is incorrect"
            print "please type:"
            print shouldbe
    shellclear()
    print "Correct, you just returned a 5 pixel by 5 pixel image object"
    print lb
    return
def tutorial_features():
    """Tutorial 6/6: finding features (corners, blobs) in an image."""
    shellclear()
    print "Features"
    print lb
    print "Features are things you are looking for in the picture. They can be"
    print "blobs, corners, lines, etc. Features are sometimes referred to as a"
    print "fiducial in computer vision. These features are something that is"
    print "measurable, and something that makes images unique. Features are"
    print "something like when comparing things like fruit. In this case the"
    print "features could be the shape and the color, amongst others."
    print lb
    print "What features are in SimpleCV is an abstract representation of that."
    print "You take your image, then perform a function on it, and get back"
    print "features or another image with them applied. The crop example is"
    print "a case where an image is returned after we perform something."
    print lb
    print "In a simple example we will use the famous 'lenna' image, and find"
    print "corners in the picture."
    print lb
    # Pre-load the image so the user starts from a known state.
    tutorial_interpreter.runsource("img = Image('lenna')")
    print "img = Image('lenna') (already done for you)"
    print lb
    print "Try it yourself:"
    print lb
    cmd = "corners = img.findCorners()"
    desired_tuple = ('corners', FeatureSet)
    command_loop(cmd, desired_tuple)
    print lb
    print "Correct, you just got a featureset object which contains"
    print "feature objects. These feature objects contain data from the"
    print "found corners"
    print lb
    print "Tip: If your are unsure what parameters to pass, you can always use"
    print "the built in help support by typing help(Image.findCorners). Keep in"
    print "mind that this help works for all of the functions available in"
    print "SimpleCV"
    print lb
    print "We can also do that with blobs. Try it:"
    print lb
    cmd = "blobs = img.findBlobs()"
    desired_tuple = ('blobs', FeatureSet)
    command_loop(cmd, desired_tuple)
    print lb
    print "Great, but..."
    print "When we show the image we won't notice anything different. This"
    print "is because we have to actually tell the blobs to draw themselves"
    print "on the image:"
    print lb
    print "blobs.draw()"
    print lb
    # Require an explicit .draw() call before showing the result.
    while True:
        if prompt_and_run().endswith('.draw()'):
            break
        print "No blobs have been drawn!"
    print lb
    print "Now use img.show() to see the changes!"
    print lb
    request_show_command()
    print lb
    raw_input("[Press enter to continue]")
    print lb
    print lb
    print "There's also a small trick built into SimpleCV to do this even faster"
    print lb
    # Reset img so the chained find-and-show starts clean.
    tutorial_interpreter.runsource("img = Image('lenna')")
    print "img = Image('lenna') (already done for you)"
    print lb
    while True:
        print "img.findBlobs().show()"
        print lb
        if prompt_and_run().endswith('.show()'):
            break
        print "Nothing has been shown!"
    print lb
    print lb
    print "Alright! This was tutorial 6/6."
    #print "Next tutorial: ..."
    return
def magic_tutorial(self,arg):
tutorials_dict = {'image': tutorial_image, 'save': tutorial_save,
'camera': tutorial_camera, 'manipulation': tutorial_manipulation,
'copy': tutorial_copy, 'features': tutorial_features}
if (arg == ""):
shellclear()
print "+--------------------------------+"
print " Welcome to the SimpleCV tutorial "
print "+--------------------------------+"
print lb
print "At anytime on the SimpleCV Interactive Shell you can type tutorial,"
print "then press the tab key and it will autocomplete any tutorial that"
print "is currently available."
print lb
print "Let's start off with Loading and Saving images!"
print lb
print lb
raw_input("[Press enter to continue]")
tutorial_image()
end_of_tutorial()
return
else:
if arg in tutorials_dict:
tutorials_dict[arg]()
else:
print "%s is not a tutorial!" % arg
| |
"""Functions for working with Amazon AWS Route 53 for managing domains.
:func:`create_cname` and :func:`delete_cname` create and delete CNAMES with
AWS Route 53.
"""
from __future__ import annotations
from pprint import pformat
from typing import TYPE_CHECKING, Any, Dict, Optional
import boto3
from structlog import get_logger
from keeper.exceptions import Route53Error
if TYPE_CHECKING:
import botocore.client.Route53
__all__ = ["create_cname", "delete_cname"]
def create_cname(
    cname_domain: str,
    origin_domain: str,
    aws_access_key_id: str,
    aws_secret_access_key: str,
) -> None:
    """Create a CNAME `cname_domain` that points to resources at
    `origin_domain`.

    This function technically performs an *upsert* so that an existing
    CNAME record for `cname_domain` will be updated to point to the new
    `origin_domain`.

    **Note:** This function creates a simple `cname_domain` and doesn't yet
    work with multiple geographically distributed CNAME records.

    Parameters
    ----------
    cname_domain : str
        The CNAME domain. It should be a fully qualified domain that ends
        in a dot (e.g., ``'my.domain.org.'``). A dot will be appended as a
        convenience.

        Note that a CNAME domain should be a *sub-domain*. This function
        does not configure the A (apex) record (e.g. ``'domain.org'``).
    origin_domain : str
        The original domain of the resource (e.g., ``'a-domain.org'``).
        This should not be a fully qualified domain, but any trailing dot
        will be stripped as a convenience.
    aws_access_key_id : str
        The access key for your AWS account. Also set `aws_secret_access_key`.
    aws_secret_access_key : str
        The secret key for your AWS account.

    Raises
    ------
    app.exceptions.Route53Error
        Any error with Route 53 usage.
    """
    logger = get_logger(__name__)
    logger.info(
        "create_cname", cname_domain=cname_domain, origin_domain=origin_domain
    )
    # Route 53 record names are fully qualified; normalize the CNAME.
    if not cname_domain.endswith("."):
        cname_domain = cname_domain + "."
    # BUG FIX: the original used lstrip("."), which strips *leading* dots
    # and left any trailing dot in place, contradicting the docstring.
    # rstrip removes trailing dots (and is a no-op when there are none).
    origin_domain = origin_domain.rstrip(".")
    session = boto3.session.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
    client = session.client("route53")
    zone_id = _get_zone_id(client, cname_domain)
    _upsert_cname_record(client, zone_id, cname_domain, origin_domain)
def delete_cname(
    cname_domain: str, aws_access_key_id: str, aws_secret_access_key: str
) -> None:
    """Delete a CNAME for `cname_domain`.

    **Note:** This function deletes the first matching CNAME record and
    doesn't yet work with multiple geographically distributed CNAME records.

    Parameters
    ----------
    cname_domain : str
        The CNAME domain. It should be a fully qualified domain that ends
        in a dot (e.g., ``'my.domain.org.'``). A dot will be appended as a
        convenience.

        Note that a CNAME domain should be a *sub-domain*. This function
        does not configure the A (apex) record (e.g. ``'domain.org'``).
    aws_access_key_id : str
        The access key for your AWS account. Also set `aws_secret_access_key`.
    aws_secret_access_key : str
        The secret key for your AWS account.

    Raises
    ------
    app.exceptions.Route53Error
        Any error with Route 53 usage.
    """
    logger = get_logger(__name__)
    logger.info("delete_cname", cname_domain=cname_domain)
    # Route 53 record names are fully qualified; ensure the trailing dot.
    if not cname_domain.endswith("."):
        cname_domain = cname_domain + "."
    session = boto3.session.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
    )
    client = session.client("route53")
    zone_id = _get_zone_id(client, cname_domain)
    record = _find_cname_record(client, zone_id, cname_domain)
    if record is None:
        logger.info(f"Did not delete {cname_domain} because it does not exist")
        return
    # Build the change set for change_resource_record_sets. A DELETE must
    # exactly match the existing record (including TTL and SetIdentifier when
    # present); we only handle single (non-geo) records here.
    # http://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html
    change: Any = {
        "Action": "DELETE",
        "ResourceRecordSet": {
            "Name": cname_domain,
            "Type": "CNAME",
            "ResourceRecords": record["ResourceRecords"],
        },
    }
    if "TTL" in record:
        change["ResourceRecordSet"]["TTL"] = record["TTL"]
    if "SetIdentifier" in record:
        change["ResourceRecordSet"]["SetIdentifier"] = record["SetIdentifier"]
    change_batch = {
        "Comment": "DELETE {0}".format(cname_domain),
        "Changes": [change],
    }
    logger.info(
        "Created change batch for cname delete", change_batch=change_batch
    )
    r = client.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch
    )
    logger.info("cname delete response", response=r, change_batch=change_batch)
    # Consistency fix: treat any non-200 response as a failure (the original
    # only checked for exactly 400), matching the checks in
    # _upsert_cname_record and _find_cname_record.
    if r["ResponseMetadata"]["HTTPStatusCode"] != 200:
        msg = "delete_cname failed with:\n" + pformat(change)
        raise Route53Error(msg)
def _get_zone_id(client: "botocore.client.Route53", domain: str) -> str:
    """Get the ID of the Hosted Zone that services this `domain`.

    Parameters
    ----------
    client :
        Boto3 Route 53 client.
    domain : str
        A fully specified domain (that ends in a dot, ``'.'``, e.g.
        ``'domain.org.'``). A sub-domain can also be provided
        (e.g. ``'my.domain.org.'``).

    Returns
    -------
    zone_id : str
        Route 53 Hosted Zone ID that services the domain.

    Raises
    ------
    keeper.exceptions.Route53Error
        If no hosted zone matches the domain.
    """
    # The annotation is quoted because botocore is imported only under
    # TYPE_CHECKING; an unquoted annotation is evaluated at import time and
    # would raise NameError.
    logger = get_logger(__name__)
    assert domain.endswith(".")
    # Reduce a sub-domain to its parent zone name, e.g.
    # 'my.domain.org.' -> 'domain.org.'; a bare 'domain.org.' passes through.
    fsd = ".".join(domain.split(".")[-3:])
    # Find the zone via the Route 53 API.
    # NOTE(review): list_hosted_zones is paginated (default max 100 zones);
    # accounts with more zones may need pagination here — confirm.
    zones = client.list_hosted_zones()
    zone_id = None
    for zone in zones["HostedZones"]:
        if fsd == zone["Name"]:
            zone_id = zone["Id"]
    if zone_id is None:
        msg = "Could not find hosted zone for fully specified domain"
        logger.error(msg, domain=fsd, zones=zones)
        logger.error(pformat(zones))
        raise Route53Error(msg)
    logger.info("Got HostedZoneId", zone_id=zone_id)
    return zone_id
def _find_cname_record(
    client: "botocore.client.Route53", zone_id: str, cname_domain: str
) -> Optional[Dict[str, Any]]:
    """Find an existing record for the `cname_domain`, or `None` if one does
    not exist.

    Parameters
    ----------
    client :
        Boto3 Route 53 client.
    zone_id : str
        The Hosted Zone ID for the CNAME.
    cname_domain : str
        The CNAME domain, which is a fully qualified domain ending in a dot.
        http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html#CNAMEFormat

    Returns
    -------
    record : dict
        The ``ResourceRecordSet`` with a ``Name`` corresponding to the
        `cname_domain`. E.g.,

        .. code-block:: python

           [{'Name': 'www.mydomain.org.',
             'ResourceRecords': [{'Value': 'origin_domain.com'}],
             'TTL': 14400,
             'Type': 'CNAME'}]

        `None` is returned if no ResourceRecordSet matching ``cname_domain``
        is found.

    Raises
    ------
    keeper.exceptions.Route53Error
        If the Route 53 API call fails.
    """
    # The annotation is quoted because botocore is imported only under
    # TYPE_CHECKING; an unquoted annotation is evaluated at import time and
    # would raise NameError.
    logger = get_logger(__name__)
    # turns out boto3 doesn't need StartRecordName in lexicographic order
    # despite their docs.
    if not cname_domain.endswith("."):
        cname_domain = cname_domain + "."
    r = client.list_resource_record_sets(
        HostedZoneId=zone_id,
        StartRecordName=cname_domain,
        StartRecordType="CNAME",
    )
    if r["ResponseMetadata"]["HTTPStatusCode"] != 200:
        msg = "list_resource_record_sets failed"
        logger.error(msg, response=r)
        raise Route53Error(msg)
    for record in r["ResourceRecordSets"]:
        if record["Name"] == cname_domain:
            logger.info("Got Resource Record Set", record=record)
            return record
    logger.info("No existing CNAME record found", cname_domain=cname_domain)
    return None
def _upsert_cname_record(
    client: "botocore.client.Route53",
    zone_id: str,
    cname_domain: str,
    origin_domain: str,
) -> None:
    """Upsert a CNAME record of `cname_domain` pointing to `origin_domain`.

    Parameters
    ----------
    client :
        Boto3 Route 53 client.
    zone_id : str
        The Hosted Zone ID for the CNAME.
    cname_domain : str
        The CNAME domain (fully qualified, ending in a dot).
    origin_domain : str
        The domain the CNAME points to.

    Raises
    ------
    keeper.exceptions.Route53Error
        If the Route 53 API call fails.
    """
    # The annotation is quoted because botocore is imported only under
    # TYPE_CHECKING; an unquoted annotation is evaluated at import time and
    # would raise NameError.
    logger = get_logger(__name__)
    change = {
        "Action": "UPSERT",
        "ResourceRecordSet": {
            "Name": cname_domain,
            "Type": "CNAME",
            "TTL": 900,
            "ResourceRecords": [{"Value": origin_domain}],
        },
    }
    change_batch = {
        "Comment": "Upsert {0} -> {1}".format(cname_domain, origin_domain),
        "Changes": [change],
    }
    logger.info("Created cname record change batch", change_batch=change_batch)
    r = client.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch
    )
    logger.info("Change resource record set", response=r)
    if r["ResponseMetadata"]["HTTPStatusCode"] != 200:
        # Bug fix: the original assigned a one-element tuple
        # ("change_resource_record_sets failed",) to `msg` because of a stray
        # trailing comma; raise with a plain string instead.
        msg = "change_resource_record_sets failed"
        logger.error(msg, change=change, response=r)
        raise Route53Error(msg)
| |
"""The tests for the Sun helpers."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch
from datetime import timedelta, datetime
import homeassistant.util.dt as dt_util
import homeassistant.helpers.sun as sun
from tests.common import get_test_home_assistant
# pylint: disable=invalid-name
class TestSun(unittest.TestCase):
    """Test the sun helpers."""

    def setUp(self):
        """Set up a test Home Assistant instance."""
        self.hass = get_test_home_assistant()

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    @staticmethod
    def _scan_next_event(event_fn, utc_now, utc_today, *coords):
        """Return the first event from ``event_fn`` strictly after ``utc_now``.

        Walks forward one day at a time, starting from the day before
        ``utc_today``, mirroring how the helper under test searches.
        """
        day_offset = -1
        while True:
            event = event_fn(utc_today + timedelta(days=day_offset), *coords)
            if event > utc_now:
                return event
            day_offset += 1

    def _compute_date_events(self, utc_today):
        """Compute the astral events for ``utc_today`` directly via astral."""
        from astral import Astral
        astral = Astral()
        lat = self.hass.config.latitude
        lon = self.hass.config.longitude
        return {
            'dawn': astral.dawn_utc(utc_today, lat, lon),
            'dusk': astral.dusk_utc(utc_today, lat, lon),
            'solar_midnight': astral.solar_midnight_utc(utc_today, lon),
            'solar_noon': astral.solar_noon_utc(utc_today, lon),
            'sunrise': astral.sunrise_utc(utc_today, lat, lon),
            'sunset': astral.sunset_utc(utc_today, lat, lon),
        }

    def test_next_events(self):
        """Test retrieving next sun events."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        from astral import Astral
        astral = Astral()
        utc_today = utc_now.date()
        lat = self.hass.config.latitude
        lon = self.hass.config.longitude
        expected = {
            'dawn': self._scan_next_event(
                astral.dawn_utc, utc_now, utc_today, lat, lon),
            'dusk': self._scan_next_event(
                astral.dusk_utc, utc_now, utc_today, lat, lon),
            'solar_midnight': self._scan_next_event(
                astral.solar_midnight_utc, utc_now, utc_today, lon),
            'solar_noon': self._scan_next_event(
                astral.solar_noon_utc, utc_now, utc_today, lon),
            'sunrise': self._scan_next_event(
                astral.sunrise_utc, utc_now, utc_today, lat, lon),
            'sunset': self._scan_next_event(
                astral.sunset_utc, utc_now, utc_today, lat, lon),
        }
        with patch('homeassistant.helpers.condition.dt_util.utcnow',
                   return_value=utc_now):
            for event, event_dt in expected.items():
                self.assertEqual(event_dt, sun.get_astral_event_next(
                    self.hass, event))

    def test_date_events(self):
        """Test retrieving sun events for an explicit date."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        utc_today = utc_now.date()
        for event, event_dt in self._compute_date_events(utc_today).items():
            self.assertEqual(event_dt, sun.get_astral_event_date(
                self.hass, event, utc_today))

    def test_date_events_default_date(self):
        """Test retrieving sun events while ``dt.now`` is patched.

        NOTE(review): despite the test name, the date is still passed
        explicitly here, matching the original test's behavior.
        """
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        utc_today = utc_now.date()
        expected = self._compute_date_events(utc_today)
        with patch('homeassistant.util.dt.now', return_value=utc_now):
            for event, event_dt in expected.items():
                self.assertEqual(event_dt, sun.get_astral_event_date(
                    self.hass, event, utc_today))

    def test_date_events_accepts_datetime(self):
        """Test that a datetime (not just a date) is accepted."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        utc_today = utc_now.date()
        for event, event_dt in self._compute_date_events(utc_today).items():
            self.assertEqual(event_dt, sun.get_astral_event_date(
                self.hass, event, utc_now))

    def test_is_up(self):
        """Test sun.is_up at two different times of day."""
        for utc_now, expected_up in (
                (datetime(2016, 11, 1, 12, 0, 0, tzinfo=dt_util.UTC), False),
                (datetime(2016, 11, 1, 18, 0, 0, tzinfo=dt_util.UTC), True)):
            with patch('homeassistant.helpers.condition.dt_util.utcnow',
                       return_value=utc_now):
                self.assertEqual(expected_up, sun.is_up(self.hass))

    def test_norway_in_june(self):
        """Test location in Norway where the sun doesn't set in summer."""
        self.hass.config.latitude = 69.6
        self.hass.config.longitude = 18.8
        june = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
        for day in (datetime(2017, 7, 25), datetime(2017, 7, 26)):
            print(sun.get_astral_event_date(self.hass, 'sunrise', day))
            print(sun.get_astral_event_date(self.hass, 'sunset', day))
        assert sun.get_astral_event_next(self.hass, 'sunrise', june) == \
            datetime(2016, 7, 25, 23, 23, 39, tzinfo=dt_util.UTC)
        assert sun.get_astral_event_next(self.hass, 'sunset', june) == \
            datetime(2016, 7, 26, 22, 19, 1, tzinfo=dt_util.UTC)
        assert sun.get_astral_event_date(self.hass, 'sunrise', june) is None
        assert sun.get_astral_event_date(self.hass, 'sunset', june) is None
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import warnings
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util.tf_export import tf_export
# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
# times, e.g. when you are distributing different elements to multiple
# devices in a single step. However, a common pitfall arises when
# users call `Iterator.get_next()` in each iteration of their training
# loop. `Iterator.get_next()` adds ops to the graph, and executing
# each op allocates resources (including threads); as a consequence,
# invoking it in every iteration of a training loop causes slowdown
# and eventual resource exhaustion. To guard against this outcome, we
# log a warning when the number of uses crosses a threshold of suspicion.
# Warn after this many `get_next()` calls on the same iterator; see the NOTE
# above for the rationale.
GET_NEXT_CALL_WARNING_THRESHOLD = 32
GET_NEXT_CALL_WARNING_MESSAGE = (
    "An unusually high number of `Iterator.get_next()` calls was detected. "
    "This often indicates that `Iterator.get_next()` is being called inside "
    "a training loop, which will cause gradual slowdown and eventual resource "
    "exhaustion. If this is the case, restructure your code to call "
    "`next_element = iterator.get_next()` once outside the loop, and use "
    "`next_element` as the input to some computation that is invoked inside "
    "the loop.")
@tf_export("data.Iterator")
class Iterator(object):
  """Represents the state of iterating through a `Dataset`."""

  def __init__(self, iterator_resource, initializer, output_types,
               output_shapes, output_classes):
    """Creates a new iterator from the given iterator resource.

    Note: Most users will not call this initializer directly, and will
    instead use `Dataset.make_initializable_iterator()` or
    `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset.
      output_classes: A nested structure of Python `type` object corresponding
        to each component of an element of this iterator.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer
    self._output_classes = output_classes
    self._output_types = output_types
    self._output_shapes = output_shapes
    # Eagerly create the string handle so that `string_handle()` with no name
    # can return a cached tensor instead of adding a new op per call.
    self._string_handle = gen_dataset_ops.iterator_to_string_handle(
        self._iterator_resource)
    # Counts `get_next()` calls so we can warn when the count suggests it is
    # being called inside a training loop (see GET_NEXT_CALL_WARNING_MESSAGE).
    self._get_next_call_count = 0

  @staticmethod
  def from_structure(output_types,
                     output_shapes=None,
                     shared_name=None,
                     output_classes=None):
    """Creates a new, uninitialized `Iterator` with the given structure.

    This iterator-constructing method can be used to create an iterator that
    is reusable with many different datasets.

    The returned iterator is not bound to a particular dataset, and it has
    no `initializer`. To initialize the iterator, run the operation returned by
    `Iterator.make_initializer(dataset)`.

    The following is an example

    ```python
    iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))

    dataset_range = Dataset.range(10)
    range_initializer = iterator.make_initializer(dataset_range)

    dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
    evens_initializer = iterator.make_initializer(dataset_evens)

    # Define a model based on the iterator; in this example, the model_fn
    # is expected to take scalar tf.int64 Tensors as input (see
    # the definition of 'iterator' above).
    prediction, loss = model_fn(iterator.get_next())

    # Train for `num_epochs`, where for each epoch, we first iterate over
    # dataset_range, and then iterate over dataset_evens.
    for _ in range(num_epochs):
      # Initialize the iterator to `dataset_range`
      sess.run(range_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break

      # Initialize the iterator to `dataset_evens`
      sess.run(evens_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
    ```

    Args:
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrainted shape.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.

    Raises:
      TypeError: If the structures of `output_shapes` and `output_types` are
        not the same.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    # Default: fully unknown shapes and plain `tf.Tensor` components when the
    # caller did not constrain them.
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    if shared_name is None:
      shared_name = ""
    iterator_resource = gen_dataset_ops.iterator(
        container="",
        shared_name=shared_name,
        output_types=nest.flatten(
            sparse.as_dense_types(output_types, output_classes)),
        output_shapes=nest.flatten(
            sparse.as_dense_shapes(output_shapes, output_classes)))
    # No initializer: callers must use `make_initializer(dataset)`.
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)

  @staticmethod
  def from_string_handle(string_handle,
                         output_types,
                         output_shapes=None,
                         output_classes=None):
    """Creates a new, uninitialized `Iterator` based on the given handle.

    This method allows you to define a "feedable" iterator where you can choose
    between concrete iterators by feeding a value in a @{tf.Session.run} call.
    In that case, `string_handle` would a @{tf.placeholder}, and you would feed
    it with the value of @{tf.data.Iterator.string_handle} in each step.

    For example, if you had two iterators that marked the current position in
    a training dataset and a test dataset, you could choose which to use in
    each step as follows:

    ```python
    train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    train_iterator_handle = sess.run(train_iterator.string_handle())

    test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    test_iterator_handle = sess.run(test_iterator.string_handle())

    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_iterator.output_types)

    next_element = iterator.get_next()
    loss = f(next_element)

    train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
    test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
    ```

    Args:
      string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates
        to a handle produced by the `Iterator.string_handle()` method.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrainted shape.
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    # Default: fully unknown shapes and plain `tf.Tensor` components when the
    # caller did not constrain them.
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(
          output_types, tensor_shape.as_shape, output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
    iterator_resource = gen_dataset_ops.iterator_from_string_handle(
        string_handle,
        output_types=nest.flatten(
            sparse.as_dense_types(output_types, output_classes)),
        output_shapes=nest.flatten(
            sparse.as_dense_shapes(output_shapes, output_classes)))
    # No initializer: the concrete iterator is selected by the fed handle.
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)

  @property
  def initializer(self):
    """A `tf.Operation` that should be run to initialize this iterator.

    Returns:
      A `tf.Operation` that should be run to initialize this iterator

    Raises:
      ValueError: If this iterator initializes itself automatically.
    """
    if self._initializer is not None:
      return self._initializer
    else:
      # TODO(mrry): Consider whether one-shot iterators should have
      # initializers that simply reset their state to the beginning.
      raise ValueError("Iterator does not have an initializer.")

  def make_initializer(self, dataset, name=None):
    """Returns a `tf.Operation` that initializes this iterator on `dataset`.

    Args:
      dataset: A `Dataset` with compatible structure to this iterator.
      name: (Optional.) A name for the created operation.

    Returns:
      A `tf.Operation` that can be run to initialize this iterator on the given
      `dataset`.

    Raises:
      TypeError: If `dataset` and this iterator do not have a compatible
        element structure.
    """
    with ops.name_scope(name, "make_initializer") as name:
      # Validate structure, classes, dtypes, and shape compatibility before
      # emitting the op, so mismatches fail at graph-construction time.
      nest.assert_same_structure(self._output_types, dataset.output_types)
      nest.assert_same_structure(self._output_shapes, dataset.output_shapes)
      for iterator_class, dataset_class in zip(
          nest.flatten(self._output_classes),
          nest.flatten(dataset.output_classes)):
        if iterator_class is not dataset_class:
          raise TypeError(
              "Expected output classes %r but got dataset with output class %r."
              % (self._output_classes, dataset.output_classes))
      for iterator_dtype, dataset_dtype in zip(
          nest.flatten(self._output_types), nest.flatten(dataset.output_types)):
        if iterator_dtype != dataset_dtype:
          raise TypeError(
              "Expected output types %r but got dataset with output types %r." %
              (self._output_types, dataset.output_types))
      for iterator_shape, dataset_shape in zip(
          nest.flatten(self._output_shapes), nest.flatten(
              dataset.output_shapes)):
        if not iterator_shape.is_compatible_with(dataset_shape):
          raise TypeError("Expected output shapes compatible with %r but got "
                          "dataset with output shapes %r." %
                          (self._output_shapes, dataset.output_shapes))
    # Place the initializer on the same device as the iterator resource.
    with ops.colocate_with(self._iterator_resource):
      return gen_dataset_ops.make_iterator(
          dataset._as_variant_tensor(), self._iterator_resource, name=name)  # pylint: disable=protected-access

  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s representing the next element.

    In graph mode, you should typically call this method *once* and use its
    result as the input to another computation. A typical loop will then call
    @{tf.Session.run} on the result of that computation. The loop will terminate
    when the `Iterator.get_next()` operation raises
    @{tf.errors.OutOfRangeError}. The following skeleton shows how to use
    this method when building a training loop:

    ```python
    dataset = ...  # A `tf.data.Dataset` object.
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()

    # Build a TensorFlow graph that does something with each element.
    loss = model_function(next_element)
    optimizer = ...  # A `tf.train.Optimizer` object.
    train_op = optimizer.minimize(loss)

    with tf.Session() as sess:
      try:
        while True:
          sess.run(train_op)
      except tf.errors.OutOfRangeError:
        pass
    ```

    NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
    when you are distributing different elements to multiple devices in a single
    step. However, a common pitfall arises when users call `Iterator.get_next()`
    in each iteration of their training loop. `Iterator.get_next()` adds ops to
    the graph, and executing each op allocates resources (including threads); as
    a consequence, invoking it in every iteration of a training loop causes
    slowdown and eventual resource exhaustion. To guard against this outcome, we
    log a warning when the number of uses crosses a fixed threshold of
    suspiciousness.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    # Each call adds a new op to the graph; warn after the threshold because
    # the usual cause is calling get_next() inside a training loop.
    self._get_next_call_count += 1
    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)
    # Re-pack the flat list of dense tensors into the dataset's nested
    # structure, restoring any serialized sparse tensors.
    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types,
                              gen_dataset_ops.iterator_get_next(
                                  self._iterator_resource,
                                  output_types=nest.flatten(
                                      sparse.as_dense_types(
                                          self._output_types,
                                          self._output_classes)),
                                  output_shapes=nest.flatten(
                                      sparse.as_dense_shapes(
                                          self._output_shapes,
                                          self._output_classes)),
                                  name=name)), self._output_types,
        self._output_shapes, self._output_classes)

  def string_handle(self, name=None):
    """Returns a string-valued `tf.Tensor` that represents this iterator.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A scalar `tf.Tensor` of type `tf.string`.
    """
    # Reuse the handle created in __init__ unless a custom op name is needed.
    if name is None:
      return self._string_handle
    else:
      return gen_dataset_ops.iterator_to_string_handle(
          self._iterator_resource, name=name)

  @property
  def output_classes(self):
    """Returns the class of each component of an element of this iterator.

    The expected values are `tf.Tensor` and `tf.SparseTensor`.

    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return self._output_classes

  @property
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return self._output_shapes

  @property
  def output_types(self):
    """Returns the type of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return self._output_types
# Module-level counter (and its guarding lock) used to mint process-unique
# resource names for eager iterators.
_uid_counter = 0
_uid_lock = threading.Lock()


def _generate_shared_name(prefix):
  """Returns `prefix` followed by a process-unique integer suffix."""
  global _uid_counter
  with _uid_lock:
    uid = _uid_counter
    _uid_counter += 1
  return "{}{}".format(prefix, uid)
class EagerIterator(object):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset."""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Tensors produced will be placed on the device on which this iterator object
was created.
Args:
dataset: A `tf.data.Dataset` object.
Raises:
RuntimeError: When invoked without eager execution enabled.
"""
if not context.executing_eagerly():
raise RuntimeError(
"{} objects can only be used when eager execution is enabled, use "
"tf.data.Dataset.make_initializable_iterator or "
"tf.data.Dataset.make_one_shot_iterator for graph construction".
format(type(self)))
with ops.device("/device:CPU:0"):
ds_variant = dataset._as_variant_tensor() # pylint: disable=protected-access
self._output_classes = dataset.output_classes
self._output_types = dataset.output_types
self._output_shapes = dataset.output_shapes
self._flat_output_types = nest.flatten(
sparse.as_dense_types(self._output_types, self._output_classes))
self._flat_output_shapes = nest.flatten(
sparse.as_dense_shapes(self._output_shapes, self._output_classes))
self._resource = gen_dataset_ops.iterator(
shared_name="",
container=_generate_shared_name("eageriterator"),
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
gen_dataset_ops.make_iterator(ds_variant, self._resource)
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device="/device:CPU:0")
self._device = context.context().device_name
def __iter__(self):
return self
def __next__(self): # For Python 3 compatibility
return self.next()
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
# This runs in sync mode as iterators use an error status to communicate
# that there is no more data to iterate over.
# TODO(b/77291417): Fix
with context.execution_mode(context.SYNC):
with ops.device(self._device):
# TODO(ashankar): Consider removing this ops.device() contextmanager
# and instead mimic ops placement in graphs: Operations on resource
# handles execute on the same device as where the resource is placed.
# NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
# because in eager mode this code will run synchronously on the calling
# thread. Therefore we do not need to make a defensive context switch
# to a background thread, and can achieve a small constant performance
# boost by invoking the iterator synchronously.
ret = gen_dataset_ops.iterator_get_next_sync(
self._resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types, ret), self._output_types,
self._output_shapes, self._output_classes)
def next(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
try:
return self._next_internal()
except errors.OutOfRangeError:
raise StopIteration
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._output_classes
@property
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return self._output_shapes
  @property
  def output_types(self):
    """Returns the type of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return self._output_types
  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.

    Args:
      name: (Optional.) A name for the created operation. Currently unused.

    Returns:
      A nested structure of `tf.Tensor` objects.

    Raises:
      `tf.errors.OutOfRangeError`: If the end of the dataset has been reached.
    """
    # The name argument exists only for API parity with the graph-mode
    # iterator; it is intentionally discarded here.
    del name
    return self._next_internal()
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MRUListEx Windows Registry plugin."""
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.parsers.winreg_plugins import mrulistex
from tests.parsers.winreg_plugins import test_lib
class TestMRUListExStringWindowsRegistryPlugin(test_lib.RegistryPluginTestCase):
  """Tests for the string MRUListEx plugin."""

  def _CreateTestKey(self, key_path, time_string):
    """Creates Registry keys and values for testing.

    Builds a fake 'MRUlist' key holding a binary MRUListEx value plus three
    numbered MRU entries ('0', '1', '2').

    Args:
      key_path (str): Windows Registry key path.
      time_string (str): key last written date and time.

    Returns:
      dfwinreg.WinRegistryKey: a Windows Registry key.
    """
    filetime = dfdatetime_filetime.Filetime()
    filetime.CopyFromDateTimeString(time_string)
    registry_key = dfwinreg_fake.FakeWinRegistryKey(
        'MRUlist', key_path=key_path,
        last_written_time=filetime.timestamp, offset=1456)

    # The order is: 201 (entries 2, 0, 1; 0xffffffff terminates the list).
    value_data = (
        b'\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\xff\xff\xff\xff')
    registry_value = dfwinreg_fake.FakeWinRegistryValue(
        'MRUListEx', data=value_data,
        data_type=dfwinreg_definitions.REG_BINARY, offset=123)
    registry_key.AddValue(registry_value)

    value_data = 'Some random text here'.encode('utf_16_le')
    registry_value = dfwinreg_fake.FakeWinRegistryValue(
        '0', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
        offset=1892)
    registry_key.AddValue(registry_value)

    # NOTE: deliberately stored as REG_BINARY (not REG_SZ) — presumably to
    # exercise the plugin's handling of non-string MRU entries.
    value_data = 'c:\\evil.exe\x00'.encode('utf_16_le')
    registry_value = dfwinreg_fake.FakeWinRegistryValue(
        '1', data=value_data, data_type=dfwinreg_definitions.REG_BINARY,
        offset=612)
    registry_key.AddValue(registry_value)

    value_data = 'C:\\looks_legit.exe'.encode('utf_16_le')
    registry_value = dfwinreg_fake.FakeWinRegistryValue(
        '2', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
        offset=1001)
    registry_key.AddValue(registry_value)

    return registry_key

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = mrulistex.MRUListExStringWindowsRegistryPlugin()

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Some Windows\\'
        'InterestingApp\\MRUlist')
    registry_key = dfwinreg_fake.FakeWinRegistryKey(
        'MRUlist', key_path=key_path)

    # A key without a MRUListEx value and a '0' entry must not match.
    result = self._CheckFiltersOnKeyPath(plugin, registry_key)
    self.assertFalse(result)

    registry_value = dfwinreg_fake.FakeWinRegistryValue('MRUListEx')
    registry_key.AddValue(registry_value)

    registry_value = dfwinreg_fake.FakeWinRegistryValue('0')
    registry_key.AddValue(registry_value)

    result = self._CheckFiltersOnKeyPath(plugin, registry_key)
    self.assertTrue(result)

    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

    # Paths claimed by the more specific shell item plugins must be excluded.
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\Shell\\BagMRU')
    self._AssertNotFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\ComDlg32\\OpenSavePidlMRU')
    self._AssertNotFiltersOnKeyPath(plugin, key_path)

  def testProcess(self):
    """Tests the Process function."""
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Some Windows\\'
        'InterestingApp\\MRUlist')
    time_string = '2012-08-28 09:23:49.002031'
    registry_key = self._CreateTestKey(key_path, time_string)

    plugin = mrulistex.MRUListExStringWindowsRegistryPlugin()
    storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)

    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 1)

    events = list(storage_writer.GetEvents())

    # A MRUListEx event.
    # Indices follow the MRUListEx order (2, 0, 1) set up in _CreateTestKey.
    expected_entries = (
        'Index: 1 [MRU Value 2]: C:\\looks_legit.exe '
        'Index: 2 [MRU Value 0]: Some random text here '
        'Index: 3 [MRU Value 1]: c:\\evil.exe')

    expected_event_values = {
        'data_type': 'windows:registry:mrulistex',
        'entries': expected_entries,
        'key_path': key_path,
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name,
        'timestamp': '2012-08-28 09:23:49.002031'}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)
class TestMRUListExShellItemListWindowsRegistryPlugin(
    test_lib.RegistryPluginTestCase):
  """Tests for the shell item list MRUListEx plugin."""

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = mrulistex.MRUListExShellItemListWindowsRegistryPlugin()

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\ComDlg32\\OpenSavePidlMRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\StreamMRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

  def testProcess(self):
    """Tests the Process function against the NTUSER-WIN7.DAT fixture."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\ComDlg32\\OpenSavePidlMRU')

    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)

    plugin = mrulistex.MRUListExShellItemListWindowsRegistryPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)

    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 65)

    events = list(storage_writer.GetEvents())

    # A MRUListEx event.
    expected_entries = (
        'Index: 1 [MRU Value 1]: Shell item path: <My Computer> '
        'P:\\Application Tools\\Firefox 6.0\\Firefox Setup 6.0.exe '
        'Index: 2 [MRU Value 0]: Shell item path: <Computers and '
        'Devices> <UNKNOWN: 0x00>\\\\controller\\WebDavShare\\Firefox '
        'Setup 3.6.12.exe')

    expected_event_values = {
        'data_type': 'windows:registry:mrulistex',
        'entries': expected_entries,
        'key_path': '{0:s}\\exe'.format(key_path),
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name,
        'timestamp': '2011-08-28 22:48:28.159309'}

    self.CheckEventValues(storage_writer, events[40], expected_event_values)

    # A shell item event.
    expected_event_values = {
        'data_type': 'windows:shell_item:file_entry',
        'name': 'ALLOYR~1',
        'long_name': 'Alloy Research',
        'file_reference': '44518-33',
        'origin': '{0:s}\\*'.format(key_path),
        'shell_item_path': (
            '<Shared Documents Folder (Users Files)> '
            '<UNKNOWN: 0x00>\\Alloy Research'),
        'timestamp': '2012-03-08 22:16:02.000000'}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)
class TestMRUListExStringAndShellItemWindowsRegistryPlugin(
    test_lib.RegistryPluginTestCase):
  """Tests for the string and shell item MRUListEx plugin."""

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = mrulistex.MRUListExStringAndShellItemWindowsRegistryPlugin()

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\RecentDocs')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

  def testProcess(self):
    """Tests the Process function against the NTUSER-WIN7.DAT fixture."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\RecentDocs')

    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)

    plugin = mrulistex.MRUListExStringAndShellItemWindowsRegistryPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)

    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 6)

    events = list(storage_writer.GetEvents())

    # A MRUListEx event.
    expected_entries = (
        'Index: 1 [MRU Value 17]: Path: The SHIELD, '
        'Shell item: [The SHIELD.lnk] '
        'Index: 2 [MRU Value 18]: '
        'Path: captain_america_shield_by_almogrem-d48x9x8.jpg, '
        'Shell item: [captain_america_shield_by_almogrem-d48x9x8.lnk] '
        'Index: 3 [MRU Value 16]: Path: captain-america-shield-front.jpg, '
        'Shell item: [captain-america-shield-front.lnk] '
        'Index: 4 [MRU Value 12]: Path: Leadership, '
        'Shell item: [Leadership.lnk] '
        'Index: 5 [MRU Value 15]: Path: followership.pdf, '
        'Shell item: [followership.lnk] '
        'Index: 6 [MRU Value 14]: Path: leaderqualities.pdf, '
        'Shell item: [leaderqualities.lnk] '
        'Index: 7 [MRU Value 13]: Path: htlhtl.pdf, '
        'Shell item: [htlhtl.lnk] '
        'Index: 8 [MRU Value 8]: Path: StarFury, '
        'Shell item: [StarFury (2).lnk] '
        'Index: 9 [MRU Value 7]: Path: Earth_SA-26_Thunderbolt.jpg, '
        'Shell item: [Earth_SA-26_Thunderbolt.lnk] '
        'Index: 10 [MRU Value 11]: Path: 5031RR_BalancedLeadership.pdf, '
        'Shell item: [5031RR_BalancedLeadership.lnk] '
        'Index: 11 [MRU Value 10]: '
        'Path: SA-23E Mitchell-Hyundyne Starfury.docx, '
        'Shell item: [SA-23E Mitchell-Hyundyne Starfury.lnk] '
        'Index: 12 [MRU Value 9]: Path: StarFury.docx, '
        'Shell item: [StarFury (3).lnk] '
        'Index: 13 [MRU Value 6]: Path: StarFury.zip, '
        'Shell item: [StarFury.lnk] '
        'Index: 14 [MRU Value 4]: Path: VIBRANIUM.docx, '
        'Shell item: [VIBRANIUM.lnk] '
        'Index: 15 [MRU Value 5]: Path: ADAMANTIUM-Background.docx, '
        'Shell item: [ADAMANTIUM-Background.lnk] '
        'Index: 16 [MRU Value 3]: Path: Pictures, '
        'Shell item: [Pictures.lnk] '
        'Index: 17 [MRU Value 2]: Path: nick_fury_77831.jpg, '
        'Shell item: [nick_fury_77831.lnk] '
        'Index: 18 [MRU Value 1]: Path: Downloads, '
        'Shell item: [Downloads.lnk] '
        'Index: 19 [MRU Value 0]: Path: wallpaper_medium.jpg, '
        'Shell item: [wallpaper_medium.lnk]')

    expected_event_values = {
        'data_type': 'windows:registry:mrulistex',
        'entries': expected_entries,
        'key_path': key_path,
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name,
        'timestamp': '2012-04-01 13:52:39.113742'}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)
class TestMRUListExStringAndShellItemListWindowsRegistryPlugin(
    test_lib.RegistryPluginTestCase):
  """Tests for the string and shell item list MRUListEx plugin."""

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = mrulistex.MRUListExStringAndShellItemListWindowsRegistryPlugin()

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\ComDlg32\\LastVisitedPidlMRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

  def testProcess(self):
    """Tests the Process function against the NTUSER-WIN7.DAT fixture."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\ComDlg32\\LastVisitedPidlMRU')

    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)

    plugin = mrulistex.MRUListExStringAndShellItemListWindowsRegistryPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)

    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 31)

    events = list(storage_writer.GetEvents())

    # A MRUListEx event.
    expected_entries = (
        'Index: 1 [MRU Value 1]: Path: chrome.exe, '
        'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
        '<UNKNOWN: 0x00> '
        'Index: 2 [MRU Value 7]: '
        'Path: {48E1ED6B-CF49-4609-B1C1-C082BFC3D0B4}, '
        'Shell item path: <Shared Documents Folder (Users Files)> '
        '<UNKNOWN: 0x00>\\Alloy Research '
        'Index: 3 [MRU Value 6]: '
        'Path: {427865A0-03AF-4F25-82EE-10B6CB1DED3E}, '
        'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
        'Index: 4 [MRU Value 5]: '
        'Path: {24B5C9BB-48B5-47FF-8343-40481DBA1E2B}, '
        'Shell item path: <My Computer> C:\\Users\\nfury\\Documents '
        'Index: 5 [MRU Value 4]: '
        'Path: {0B8CFE96-DB69-4D33-8E3C-36EAB4F709E0}, '
        'Shell item path: <My Computer> C:\\Users\\nfury\\Documents\\'
        'Alloy Research '
        'Index: 6 [MRU Value 3]: '
        'Path: {D4F85F66-003D-4127-BCE9-CAD7A57B2857}, '
        'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
        'Index: 7 [MRU Value 0]: Path: iexplore.exe, '
        'Shell item path: <My Computer> P:\\Application Tools\\Firefox 6.0 '
        'Index: 8 [MRU Value 2]: Path: Skype.exe, '
        'Shell item path: <Users Libraries> <UNKNOWN: 0x00>')

    expected_event_values = {
        'data_type': 'windows:registry:mrulistex',
        'entries': expected_entries,
        'key_path': key_path,
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name,
        'timestamp': '2012-04-01 13:52:38.966290'}

    self.CheckEventValues(storage_writer, events[30], expected_event_values)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| |
#!/usr/local/bin/env python
"""
Test iodrivers.py facility.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import numpy as np
try:
from openmm import unit
except ImportError: # OpenMM < 7.6
from simtk import unit
import contextlib
import tempfile
from nose import tools
from openmmtools.storage import NetCDFIODriver
from openmmtools.utils import temporary_directory
# =============================================================================================
# NETCDFIODRIVER TESTING FUNCTIONS
# =============================================================================================
def test_netcdf_driver_group_manipulation():
    """Test that the NetCDFIODriver can create groups, rebind to groups, and that they are on the file"""
    with temporary_directory() as tmp_dir:
        # Include the path separator so the file is created INSIDE the
        # temporary directory; 'tmp_dir + "test.nc"' created a sibling file
        # (e.g. /tmp/abcdtest.nc) that leaked past the directory cleanup.
        # This also matches every other test in this module.
        nc_io_driver = NetCDFIODriver(tmp_dir + '/test.nc')
        # Create the nested group first: intermediate groups are created on
        # demand, then we rebind to the already-existing parent.
        group2 = nc_io_driver.get_directory('group1/group2')
        group1 = nc_io_driver.get_directory('group1')
        ncfile = nc_io_driver.ncfile
        ncgroup1 = ncfile.groups['group1']
        ncgroup2 = ncfile.groups['group1'].groups['group2']
        # The driver must hand back the exact netCDF group objects.
        assert group1 is ncgroup1
        assert group2 is ncgroup2
def test_netcdf_driver_dimension_manipulation():
    """Test that the NetCDFIODriver can check and create dimensions"""
    with temporary_directory() as tmp_dir:
        driver = NetCDFIODriver(tmp_dir + '/test.nc')
        # Exercise each dimension-creating helper once.
        NetCDFIODriver.check_scalar_dimension(driver)
        NetCDFIODriver.check_iterable_dimension(driver, length=4)
        NetCDFIODriver.check_infinite_dimension(driver)
        # All three dimensions must now exist on the underlying netCDF file.
        created_dimensions = driver.ncfile.dimensions
        for expected_name in ('scalar', 'iterable4', 'iteration'):
            assert expected_name in created_dimensions
def test_netcdf_driver_metadata_creation():
    """Test that the NetCDFIODriver can create metadata on different objects"""
    with temporary_directory() as tmp_dir:
        driver = NetCDFIODriver(tmp_dir + '/test.nc')
        root_group = driver.get_directory('group1')
        # Attach metadata both to the file root and to a sub-group.
        driver.add_metadata('root_metadata', 'IAm(G)Root!')
        driver.add_metadata('group_metadata', 'group1_metadata', path='/group1')
        # Both attributes must be readable through the raw netCDF objects.
        assert driver.ncfile.getncattr('root_metadata') == 'IAm(G)Root!'
        assert root_group.getncattr('group_metadata') == 'group1_metadata'
# =============================================================================================
# NETCDF TYPE codec TESTING FUNCTIONS
# =============================================================================================
def generic_type_codec_check(input_data, with_append=True):
    """Generic type codec test to ensure all callable functions are working.

    Writes ``input_data`` twice (and, when ``with_append``, appends it twice),
    reads everything back both before and after reopening the file read-only,
    and compares the round-tripped values. Dict-like data is compared
    key-by-key; the ``AttributeError`` fallback handles non-dict types
    (no ``.keys`` method).
    """
    with temporary_directory() as tmp_dir:
        file_path = tmp_dir + '/test.nc'
        nc_io_driver = NetCDFIODriver(file_path)
        input_type = type(input_data)
        # Create a write and an append of the data
        write_path = 'data_write'
        data_write = nc_io_driver.create_storage_variable(write_path, input_type)
        if with_append:
            append_path = 'group1/data_append'
            data_append = nc_io_driver.create_storage_variable(append_path, input_type)
        # Store initial data (unbound write/append)
        data_write.write(input_data)
        if with_append:
            data_append.append(input_data)
        # Test that we can act on them again (bound write/append)
        data_write.write(input_data)
        if with_append:
            data_append.append(input_data)
        # Test bound read
        data_write_out = data_write.read()
        if with_append:
            data_append_out = data_append.read()
        try:  # Compound dictionary processing
            for key in data_write_out.keys():
                assert np.all(data_write_out[key] == input_data[key])
        except AttributeError:
            # Not dict-like: compare the whole value.
            assert np.all(data_write_out == input_data)
        if with_append:
            try:
                for key in data_write_out.keys():
                    assert np.all(data_append_out[0][key] == input_data[key])
                    assert np.all(data_append_out[1][key] == input_data[key])
            except AttributeError:
                assert np.all(data_append_out[0] == input_data)
                assert np.all(data_append_out[1] == input_data)
        # Delete the IO driver (and close the ncfile in the process)
        nc_io_driver.close()
        del data_write, data_write_out
        if with_append:
            del data_append, data_append_out
        # Reopen and test reading actions
        nc_io_driver = NetCDFIODriver(file_path, access_mode='r')
        data_write = nc_io_driver.get_storage_variable(write_path)
        if with_append:
            data_append = nc_io_driver.get_storage_variable(append_path)
        # Test unbound read
        data_write_out = data_write.read()
        if with_append:
            data_append_out = data_append.read()
        try:  # Compound dictionary processing
            for key in data_write_out.keys():
                assert np.all(data_write_out[key] == input_data[key])
        except AttributeError:
            assert np.all(data_write_out == input_data)
        if with_append:
            try:
                for key in data_write_out.keys():  # Must act on the data_write since it has the .keys method
                    assert np.all(data_append_out[0][key] == input_data[key])
                    assert np.all(data_append_out[1][key] == input_data[key])
            except AttributeError:
                assert np.all(data_append_out[0] == input_data)
                assert np.all(data_append_out[1] == input_data)
def generic_append_to_check(input_data, overwrite_data):
    """Generic function to test replacing data of appended dimension.

    Appends ``input_data`` three times, overwrites the middle entry with
    ``overwrite_data`` through ``write(..., at_index=1)``, then verifies that
    entries 0 and 2 are untouched and entry 1 holds the replacement. As in
    ``generic_type_codec_check``, dict-like data is compared key-by-key with
    an ``AttributeError`` fallback for plain values.
    """
    with temporary_directory() as tmp_dir:
        file_path = tmp_dir + '/test.nc'
        nc_io_driver = NetCDFIODriver(file_path)
        input_type = type(input_data)
        # Create a write and an append of the data
        append_path = 'data_append'
        data_append = nc_io_driver.create_storage_variable(append_path, input_type)
        # Append data 3 times
        for i in range(3):
            data_append.append(input_data)
        # Overwrite second entry
        data_append.write(overwrite_data, at_index=1)
        data_append_out = data_append.read()
        try:
            for key in input_data.keys():  # Must act on the data_write since it has the .keys method
                assert np.all(data_append_out[0][key] == input_data[key])
                assert np.all(data_append_out[2][key] == input_data[key])
                assert np.all(data_append_out[1][key] == overwrite_data[key])
            assert set(input_data.keys()) == set(data_append_out[0].keys())  # Assert keys match
            assert set(input_data.keys()) == set(data_append_out[2].keys())
        except AttributeError:
            assert np.all(data_append_out[0] == input_data)
            assert np.all(data_append_out[2] == input_data)
            assert np.all(data_append_out[1] == overwrite_data)
def test_netcdf_int_type_codec():
    """Test that the Int type codec can read/write/append"""
    # Round-trip an int, then verify an appended entry can be overwritten.
    generic_type_codec_check(4)
    generic_append_to_check(4, 5)
def test_netcdf_float_type_codec():
    """Test that the Float type codec can read/write/append"""
    # Round-trip a float, then verify an appended entry can be overwritten.
    generic_type_codec_check(4.0)
    generic_append_to_check(4.0, 5.0)
def test_netcdf_string_type_codec():
    """Test that the String type codec can read/write/append"""
    # Round-trip a string, then verify an appended entry can be overwritten.
    generic_type_codec_check('four point oh')
    generic_append_to_check('four point oh', 'five point not')
def test_netcdf_list_type_codec():
    """Test that the List type codec can read/write/append"""
    # Round-trip a list, then verify an appended entry can be overwritten.
    generic_type_codec_check([4, 4, 4])
    generic_append_to_check([4, 4, 4], [5, 5, 5])
def test_netcdf_tuple_type_codec():
    """Test that the tuple type codec can read/write/append"""
    # Round-trip a tuple, then verify an appended entry can be overwritten.
    generic_type_codec_check((4, 4, 4))
    generic_append_to_check((4, 4, 4), (5, 5, 5))
def test_netcdf_array_type_codec():
    """Test that the ndarray type codec can read/write/append"""
    # Round-trip an ndarray, then verify an appended entry can be overwritten.
    generic_type_codec_check(np.array([4, 4.0, 4]))
    generic_append_to_check(np.array([4, 4.0, 4]), np.array([5, 5.0, 5]))
def test_netcdf_quantity_type_codec():
    """Test that the openmm.unit.Quantity type codec can read/write/append with various unit and _value types"""
    # Scalar quantity: full round-trip plus overwrite-at-index.
    scalar_quantity = 4 * unit.kelvin
    generic_type_codec_check(scalar_quantity)
    generic_append_to_check(scalar_quantity, 5 * unit.kelvin)
    # List-valued and ndarray-valued quantities: round-trip only.
    generic_type_codec_check([4, 4, 4] * unit.kilojoules_per_mole)
    generic_type_codec_check(np.array([4, 4, 4]) / unit.nanosecond)
def test_netcdf_dictionary_type_codec():
    """Test that the dictionary type codec can read/write/append with various unit and _value types"""
    # A compound dict mixing int, float, str, list, and unit-bearing values.
    base_data = {
        'count': 4,
        'ratio': 0.4,
        'name': 'four',
        'repeated': [4, 4, 4],
        'temperature': 4 * unit.kelvin,
        'box_vectors': (np.eye(3) * 4.0) * unit.nanometer,
    }
    generic_type_codec_check(base_data)
    # Same schema with different values for the overwrite-at-index check.
    replacement_data = {
        'count': 5,
        'ratio': 0.5,
        'name': 'five',
        'repeated': [5, 5, 5],
        'temperature': 5 * unit.kelvin,
        'box_vectors': (np.eye(3) * 5.0) * unit.nanometer,
    }
    generic_append_to_check(base_data, replacement_data)
@tools.raises(Exception)
def test_write_at_index_must_exist():
    """Ensure that the write(data, at_index) must exist first"""
    with temporary_directory() as tmp_dir:
        driver = NetCDFIODriver(tmp_dir + '/test.nc')
        variable = driver.create_storage_variable('data_append', int)
        # Nothing has been appended yet, so writing at index 0 must raise.
        variable.write(4, at_index=0)
@tools.raises(Exception)
def test_write_at_index_is_bound():
    """Ensure that the write(data, at_index) cannot write to an index beyond"""
    with temporary_directory() as tmp_dir:
        driver = NetCDFIODriver(tmp_dir + '/test.nc')
        variable = driver.create_storage_variable('data_append', int)
        variable.append(4)  # Creates entry 0, the only valid index.
        variable.write(4, at_index=1)  # Out-of-bounds index must raise.
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import re
from lxml import etree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import arch
from nova.compute import vm_mode
from nova.i18n import _
from nova.i18n import _LI
from nova import utils
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt.volume import remotefs
from nova.virt import volumeutils
# Configuration options this module contributes to the 'libvirt' group.
libvirt_opts = [
    cfg.BoolOpt('snapshot_compression',
                default=False,
                help='Compress snapshot images when possible. This '
                     'currently applies exclusively to qcow2 images'),
    ]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
# Reuse the instances_path option defined by the compute manager.
CONF.import_opt('instances_path', 'nova.compute.manager')
# Module-level logger.
LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
    """Run a command, delegating to nova.utils.execute.

    Kept as a module-level indirection so callers/tests can stub it out.
    """
    return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
    """Return the host's iSCSI initiator name via nova.virt.volumeutils."""
    return volumeutils.get_iscsi_initiator()
def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    cmd = ('qemu-img', 'create', '-f', disk_format, path, size)
    execute(*cmd)
def create_cow_image(backing_file, path, size=None):
    """Create COW image

    Creates a COW image with the given backing file

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    :param size: Optional image size, passed through as a qemu-img option
    """
    options = []
    if backing_file:
        options.append('backing_file=%s' % backing_file)
        backing_info = images.qemu_img_info(backing_file)
    else:
        backing_info = None
    # Explicitly inherit the value of 'cluster_size' property of a qcow2
    # overlay image from its backing file. This can be useful in cases
    # when people create a base image with a non-default 'cluster_size'
    # value or cases when images were created with very old QEMU
    # versions which had a different default 'cluster_size'.
    if backing_info and backing_info.cluster_size is not None:
        options.append('cluster_size=%s' % backing_info.cluster_size)
    if size is not None:
        options.append('size=%s' % size)
    cmd = ['qemu-img', 'create', '-f', 'qcow2']
    if options:
        # qemu-img takes its image options as a single comma-separated -o
        # argument.
        cmd += ['-o', ','.join(options)]
    cmd.append(path)
    execute(*cmd)
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Pick the libvirt primary backend driver name

    If the hypervisor supports multiple backend drivers we have to tell libvirt
    which one should be used.

    Xen supports the following drivers: "tap", "tap2", "phy", "file", or
    "qemu", being "qemu" the preferred one. Qemu only supports "qemu".

    :param hypervisor_version: hypervisor version as an int
        (e.g. 4002000 == 4.2.0)
    :param is_block_dev: whether the backing storage is a block device
    :returns: driver_name or None
    """
    if CONF.libvirt.virt_type == "xen":
        if is_block_dev:
            return "phy"
        else:
            # 4002000 == 4.2.0
            if hypervisor_version >= 4002000:
                # Probe for a running xend; if it is absent or stopped,
                # libvirt falls back to the libxl toolstack, which wants
                # the 'qemu' driver.
                try:
                    execute('xend', 'status',
                            run_as_root=True, check_exit_code=True)
                except OSError as exc:
                    if exc.errno == errno.ENOENT:
                        LOG.debug("xend is not found")
                        # libvirt will try to use libxl toolstack
                        return 'qemu'
                    else:
                        raise
                except processutils.ProcessExecutionError:
                    LOG.debug("xend is not started")
                    # libvirt will try to use libxl toolstack
                    return 'qemu'
            # libvirt will use xend/xm toolstack
            try:
                out, err = execute('tap-ctl', 'check', check_exit_code=False)
                if out == 'ok\n':
                    # 4000000 == 4.0.0
                    if hypervisor_version > 4000000:
                        return "tap2"
                    else:
                        return "tap"
                else:
                    LOG.info(_LI("tap-ctl check: %s"), out)
            except OSError as exc:
                if exc.errno == errno.ENOENT:
                    LOG.debug("tap-ctl tool is not installed")
                else:
                    raise
            # No working blktap: fall back to the plain file driver.
            return "file"
    elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
        return "qemu"
    else:
        # UML doesn't want a driver_name set
        return None
def get_disk_size(path):
    """Get the (virtual) size of a disk image

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    return int(images.qemu_img_info(path).virtual_size)
def get_disk_backing_file(path, basename=True):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :param basename: Whether to reduce the backing file to its basename
    :returns: a path to the image's backing store
    """
    backing_file = images.qemu_img_info(path).backing_file
    if not backing_file:
        # No backing store (or qemu-img reported none): pass it through.
        return backing_file
    return os.path.basename(backing_file) if basename else backing_file
def copy_image(src, dest, host=None, receive=False,
               on_execute=None, on_completion=None,
               compression=True):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host
    :param receive: Reverse the rsync direction
    :param on_execute: Callback method to store pid of process in cache
    :param on_completion: Callback method to remove pid of process from cache
    :param compression: Allows to use rsync operation with or without
                        compression
    """
    if host:
        # Remote copy: qualify the remote side with 'host:path'.
        remote = utils.safe_ip_format(host)
        if receive:
            src = "%s:%s" % (remote, src)
        else:
            dest = "%s:%s" % (remote, dest)
        remote_fs = remotefs.RemoteFilesystem()
        remote_fs.copy_file(src, dest,
            on_execute=on_execute, on_completion=on_completion,
            compression=compression)
    else:
        # We shell out to cp because that will intelligently copy
        # sparse files. I.E. holes will not be written to DEST,
        # rather recreated efficiently. In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
def write_to_file(path, contents, umask=None):
    """Write the given contents to a file

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set when creating this file (will be reset)
    """
    # Compare against None explicitly: 0 is a legitimate umask value and the
    # previous truthiness check ('if umask:') silently ignored it.
    if umask is not None:
        saved_umask = os.umask(umask)
    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        # Always restore the process umask we changed above.
        if umask is not None:
            os.umask(saved_umask)
def chown(path, owner):
    """Change ownership of file or directory

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    # Shells out with root privileges rather than using os.chown directly.
    execute('chown', owner, path, run_as_root=True)
def _id_map_to_config(id_map):
    """Serialize an ID map object into the 'start:target:count' CLI form."""
    return '{0}:{1}:{2}'.format(id_map.start, id_map.target, id_map.count)
def chown_for_id_maps(path, id_maps):
    """Change ownership of file or directory for an id mapped
    environment

    :param path: File or directory whose ownership to change
    :param id_maps: List of type LibvirtConfigGuestIDMap
    """
    # Split the maps by kind and render each as 'start:target:count',
    # comma-joined, the way nova-idmapshift expects them.
    uid_maps = [id_map for id_map in id_maps
                if isinstance(id_map, vconfig.LibvirtConfigGuestUIDMap)]
    gid_maps = [id_map for id_map in id_maps
                if isinstance(id_map, vconfig.LibvirtConfigGuestGIDMap)]
    uid_maps_str = ','.join(_id_map_to_config(id_map) for id_map in uid_maps)
    gid_maps_str = ','.join(_id_map_to_config(id_map) for id_map in gid_maps)
    execute('nova-idmapshift', '-i', '-u', uid_maps_str,
            '-g', gid_maps_str, path, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Extract a snapshot from a disk image.

    Note that nobody should write to the disk image during this operation.

    :param disk_path: Path to disk image
    :param source_fmt: Format of the source disk image
    :param out_path: Desired path of extracted snapshot
    :param dest_fmt: Desired format of the extracted snapshot
    """
    # NOTE(markmc): ISO is just raw to qemu-img; ploop is spelled
    # 'parallels' there.
    format_aliases = {'iso': 'raw', 'ploop': 'parallels'}
    dest_fmt = format_aliases.get(dest_fmt, dest_fmt)
    cmd = ['qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt]
    # Conditionally enable compression of snapshots.
    if CONF.libvirt.snapshot_compression and dest_fmt == "qcow2":
        cmd.append('-c')
    cmd.extend([disk_path, out_path])
    execute(*cmd)
def load_file(path):
    """Read contents of file

    :param path: File to read
    :returns: the file's full contents as a string
    """
    with open(path, 'r') as source:
        return source.read()
def file_open(*args, **kwargs):
    """Open file

    see built-in open() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # Use the open() builtin rather than the Python 2-only file()
    # constructor, which no longer exists on Python 3.
    return open(*args, **kwargs)
def file_delete(path):
    """Delete (unlink) file

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    return os.unlink(path)
def path_exists(path):
    """Returns if path exists

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # Thin wrapper over os.path.exists so tests can stub filesystem checks.
    return os.path.exists(path)
def find_disk(virt_dom):
    """Find root device path for instance

    May be file or device
    """
    domain = etree.fromstring(virt_dom.XMLDesc(0))
    os_type = domain.find('os/type').text
    virt_type = CONF.libvirt.virt_type
    if virt_type == 'lxc':
        # LXC roots live under a 'rootfs' directory; the disk image sits
        # next to it.
        source = domain.find('devices/filesystem/source')
        base_dir = source.get('dir')
        base_dir = base_dir[0:base_dir.rfind('rootfs')]
        disk_path = os.path.join(base_dir, 'disk')
    elif virt_type == 'parallels' and os_type == vm_mode.EXE:
        source = domain.find('devices/filesystem/source')
        disk_path = source.get('file')
    else:
        source = domain.find('devices/disk/source')
        disk_path = source.get('file') or source.get('dev')
        if not disk_path and CONF.libvirt.images_type == 'rbd':
            disk_path = source.get('name')
            if disk_path:
                disk_path = 'rbd:' + disk_path

    if not disk_path:
        raise RuntimeError(_("Can't retrieve root device path "
                             "from instance libvirt configuration"))

    return disk_path
def get_disk_type(path):
    """Retrieve disk type (raw, qcow2, lvm, ploop) for given file."""
    if path.startswith('/dev'):
        return 'lvm'
    if path.startswith('rbd:'):
        return 'rbd'
    # A ploop image is a directory containing a DiskDescriptor.xml file.
    descriptor = os.path.join(path, "DiskDescriptor.xml")
    if os.path.isdir(path) and os.path.exists(descriptor):
        return 'ploop'
    # Fall back to asking qemu-img about the file format.
    return images.qemu_img_info(path).file_format
def get_fs_info(path):
    """Get free/used/total space info for a filesystem

    :param path: Any dirent on the filesystem
    :returns: A dict containing:
        :free: How much space is free (in bytes)
        :used: How much space is used (in bytes)
        :total: How big the filesystem is (in bytes)
    """
    stats = os.statvfs(path)
    block_size = stats.f_frsize
    # 'free' counts blocks available to unprivileged users (f_bavail),
    # while 'used' is derived from all free blocks (f_bfree).
    return {
        'total': block_size * stats.f_blocks,
        'free': block_size * stats.f_bavail,
        'used': block_size * (stats.f_blocks - stats.f_bfree),
    }
def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
    """Grab image."""
    # Delegates to the images helper, which fetches and converts to raw.
    # max_size is forwarded unchanged; presumably 0 means "no limit" —
    # confirm against images.fetch_to_raw.
    images.fetch_to_raw(context, image_id, target, user_id, project_id,
                        max_size=max_size)
def fetch_raw_image(context, target, image_id, user_id, project_id,
                    max_size=0):
    """Grab initrd or kernel image.

    This function does not attempt raw conversion, as these images will
    already be in raw format.
    """
    # Plain fetch, no conversion — contrast with fetch_image() above.
    images.fetch(context, image_id, target, user_id, project_id,
                 max_size=max_size)
def get_instance_path(instance, forceold=False, relative=False):
    """Determine the correct path for instance storage.

    This method determines the directory name for instance storage, while
    handling the fact that we changed the naming style to something more
    unique in the grizzly release.

    :param instance: the instance we want a path for
    :param forceold: force the use of the pre-grizzly format
    :param relative: if True, just the relative path is returned
    :returns: a path to store information about that instance
    """
    # Pre-grizzly layouts keyed storage on instance name; keep using that
    # layout whenever such a directory already exists (or when forced).
    old_style = os.path.join(CONF.instances_path, instance.name)
    if forceold or os.path.exists(old_style):
        return instance.name if relative else old_style
    # Grizzly and later: key on the instance UUID.
    if relative:
        return instance.uuid
    return os.path.join(CONF.instances_path, instance.uuid)
def get_instance_path_at_destination(instance, migrate_data=None):
    """Get the the instance path on destination node while live migration.

    This method determines the directory name for instance storage on
    destination node, while live migration.

    :param instance: the instance we want a path for
    :param migrate_data: if not None, it is a dict which holds data
                         required for live migration without shared
                         storage.
    :returns: a path to store information about that instance
    """
    relative_path = None
    if migrate_data:
        relative_path = migrate_data.get('instance_relative_path')
    # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
    # because we are ensuring that the same instance directory name
    # is used as was at the source
    if relative_path:
        return os.path.join(CONF.instances_path, relative_path)
    return get_instance_path(instance)
def get_arch(image_meta):
    """Determine the architecture of the guest (or host).

    This method determines the CPU architecture that must be supported by
    the hypervisor. The (guest) architecture is read from the image_meta
    properties when available; otherwise the nova-compute (host)
    architecture is used as a fallback.

    :param image_meta: the metadata associated with the instance image
    :returns: guest (or host) architecture
    """
    if image_meta:
        guest_arch = image_meta.properties.get('hw_architecture')
        if guest_arch is not None:
            return guest_arch
    return arch.from_host()
def is_mounted(mount_path, source=None):
    """Check if the given source is mounted at given destination point."""
    # findmnt exits non-zero (ProcessExecutionError) when no matching
    # mount is found.
    try:
        check_cmd = ['findmnt', '--target', mount_path]
        if source:
            check_cmd.extend(['--source', source])
        utils.execute(*check_cmd)
        return True
    except processutils.ProcessExecutionError:
        return False
    except OSError as exc:
        # info since it's not required to have this tool.
        if exc.errno == errno.ENOENT:
            LOG.info(_LI("findmnt tool is not installed"))
        # NOTE(review): any other OSError also yields False here, silently
        # — confirm that is intended rather than re-raising.
        return False
def is_valid_hostname(hostname):
    """Return a truthy match when hostname contains only word characters,
    hyphens, dots and colons (one or more of them).
    """
    allowed = r"^[\w\-\.:]+$"
    return re.match(allowed, hostname)
| |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import eventlet
import mock
import netaddr
import testtools
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.tests import base
from oslo_log import log as logging
class TestParseMappings(base.BaseTestCase):
    """Behavior of utils.parse_mappings for good and bad 'key:value' lists."""

    def parse(self, mapping_list, unique_values=True):
        return utils.parse_mappings(mapping_list, unique_values)

    def test_parse_mappings_fails_for_missing_separator(self):
        self.assertRaises(ValueError, self.parse, ['key'])

    def test_parse_mappings_fails_for_missing_key(self):
        self.assertRaises(ValueError, self.parse, [':val'])

    def test_parse_mappings_fails_for_missing_value(self):
        self.assertRaises(ValueError, self.parse, ['key:'])

    def test_parse_mappings_fails_for_extra_separator(self):
        self.assertRaises(ValueError, self.parse, ['key:val:junk'])

    def test_parse_mappings_fails_for_duplicate_key(self):
        self.assertRaises(ValueError, self.parse, ['key:val1', 'key:val2'])

    def test_parse_mappings_fails_for_duplicate_value(self):
        self.assertRaises(ValueError, self.parse, ['key1:val', 'key2:val'])

    def test_parse_mappings_succeeds_for_one_mapping(self):
        self.assertEqual({'key': 'val'}, self.parse(['key:val']))

    def test_parse_mappings_succeeds_for_n_mappings(self):
        self.assertEqual({'key1': 'val1', 'key2': 'val2'},
                         self.parse(['key1:val1', 'key2:val2']))

    def test_parse_mappings_succeeds_for_duplicate_value(self):
        # Duplicate values are allowed when unique_values is False.
        self.assertEqual({'key1': 'val', 'key2': 'val'},
                         self.parse(['key1:val', 'key2:val'], False))

    def test_parse_mappings_succeeds_for_no_mappings(self):
        self.assertEqual({}, self.parse(['']))
class TestParseTunnelRangesMixin(object):
    """Shared checks for tunnel-id range validation.

    Concrete subclasses set TUN_MIN/TUN_MAX/TYPE to the limits of a
    particular tunnel technology (GRE, VXLAN, ...).
    """

    TUN_MIN = None  # smallest valid tunnel id for the technology
    TUN_MAX = None  # largest valid tunnel id for the technology
    TYPE = None  # tunnel type name used in error messages

    # Fragments used to reconstruct the exact error messages raised by
    # plugin_utils.verify_tunnel_range.
    _err_prefix = "Invalid network Tunnel range: '%d:%d' - "
    _err_suffix = "%s is not a valid %s identifier"
    _err_range = "End of tunnel range is less than start of tunnel range"

    def _build_invalid_tunnel_range_msg(self, t_range_tuple, n):
        # n is 1-based: 1 -> start of the range, 2 -> end of the range.
        bad_id = t_range_tuple[n - 1]
        return (self._err_prefix % t_range_tuple) + (self._err_suffix
                                                     % (bad_id, self.TYPE))

    def _build_range_reversed_msg(self, t_range_tuple):
        return (self._err_prefix % t_range_tuple) + self._err_range

    def _verify_range(self, tunnel_range):
        return plugin_utils.verify_tunnel_range(tunnel_range, self.TYPE)

    def _check_range_valid_ranges(self, tunnel_range):
        # verify_tunnel_range returns None on success.
        self.assertIsNone(self._verify_range(tunnel_range))

    def _check_range_invalid_ranges(self, bad_range, which):
        expected_msg = self._build_invalid_tunnel_range_msg(bad_range, which)
        err = self.assertRaises(n_exc.NetworkTunnelRangeError,
                                self._verify_range, bad_range)
        self.assertEqual(expected_msg, str(err))

    def _check_range_reversed(self, bad_range):
        err = self.assertRaises(n_exc.NetworkTunnelRangeError,
                                self._verify_range, bad_range)
        expected_msg = self._build_range_reversed_msg(bad_range)
        self.assertEqual(expected_msg, str(err))

    def test_range_tunnel_id_valid(self):
        self._check_range_valid_ranges((self.TUN_MIN, self.TUN_MAX))

    def test_range_tunnel_id_invalid(self):
        self._check_range_invalid_ranges((-1, self.TUN_MAX), 1)
        self._check_range_invalid_ranges((self.TUN_MIN,
                                          self.TUN_MAX + 1), 2)
        self._check_range_invalid_ranges((self.TUN_MIN - 1,
                                          self.TUN_MAX + 1), 1)

    def test_range_tunnel_id_reversed(self):
        self._check_range_reversed((self.TUN_MAX, self.TUN_MIN))
class TestGreTunnelRangeVerifyValid(TestParseTunnelRangesMixin,
                                    base.BaseTestCase):
    """Run the shared tunnel-range checks with GRE id limits."""

    TUN_MIN = p_const.MIN_GRE_ID
    TUN_MAX = p_const.MAX_GRE_ID
    TYPE = p_const.TYPE_GRE
class TestVxlanTunnelRangeVerifyValid(TestParseTunnelRangesMixin,
                                      base.BaseTestCase):
    """Run the shared tunnel-range checks with VXLAN VNI limits."""

    TUN_MIN = p_const.MIN_VXLAN_VNI
    TUN_MAX = p_const.MAX_VXLAN_VNI
    TYPE = p_const.TYPE_VXLAN
class UtilTestParseVlanRanges(base.BaseTestCase):
    """Base class holding builders for the exact VLAN-range error texts.

    Subclasses compare these reconstructed messages against the str()
    of the NetworkVlanRangeError raised by the plugin utils.
    """

    # Fragments of the error messages produced by plugin_utils; the inner
    # quoted part comes from the underlying ValueError text.
    _err_prefix = "Invalid network VLAN range: '"
    _err_too_few = "' - 'need more than 2 values to unpack'"
    _err_too_many = "' - 'too many values to unpack'"
    _err_not_int = "' - 'invalid literal for int() with base 10: '%s''"
    _err_bad_vlan = "' - '%s is not a valid VLAN tag'"
    _err_range = "' - 'End of VLAN range is less than start of VLAN range'"

    def _range_too_few_err(self, nv_range):
        return self._err_prefix + nv_range + self._err_too_few

    def _range_too_many_err(self, nv_range):
        return self._err_prefix + nv_range + self._err_too_many

    def _vlan_not_int_err(self, nv_range, vlan):
        return self._err_prefix + nv_range + (self._err_not_int % vlan)

    def _nrange_invalid_vlan(self, nv_range, n):
        # nv_range is a "net:min:max" string; n is the 1-based index of
        # the offending VLAN within the colon-separated parts.
        vlan = nv_range.split(':')[n]
        v_range = ':'.join(nv_range.split(':')[1:])
        return self._err_prefix + v_range + (self._err_bad_vlan % vlan)

    def _vrange_invalid_vlan(self, v_range_tuple, n):
        # v_range_tuple is (min, max); n is 1-based (1 -> min, 2 -> max).
        vlan = v_range_tuple[n - 1]
        v_range_str = '%d:%d' % v_range_tuple
        return self._err_prefix + v_range_str + (self._err_bad_vlan % vlan)

    def _vrange_invalid(self, v_range_tuple):
        v_range_str = '%d:%d' % v_range_tuple
        return self._err_prefix + v_range_str + self._err_range
class TestVlanNetworkNameValid(base.BaseTestCase):
    """Physical network names in VLAN range config must be non-empty."""

    def parse_vlan_ranges(self, vlan_range):
        return plugin_utils.parse_network_vlan_ranges(vlan_range)

    def test_validate_provider_phynet_name_mixed(self):
        entries = ['', ':23:30', 'physnet1', 'tenant_net:100:200']
        self.assertRaises(n_exc.PhysicalNetworkNameError,
                          self.parse_vlan_ranges, entries)

    def test_validate_provider_phynet_name_bad(self):
        self.assertRaises(n_exc.PhysicalNetworkNameError,
                          self.parse_vlan_ranges, [':1:34'])
class TestVlanRangeVerifyValid(UtilTestParseVlanRanges):
    """Boundary behavior of plugin_utils.verify_vlan_range."""

    def verify_range(self, vlan_range):
        return plugin_utils.verify_vlan_range(vlan_range)

    def test_range_valid_ranges(self):
        # verify_vlan_range returns None for valid (min, max) tuples.
        self.assertIsNone(self.verify_range((1, 2)))
        self.assertIsNone(self.verify_range((1, 1999)))
        self.assertIsNone(self.verify_range((100, 100)))
        self.assertIsNone(self.verify_range((100, 200)))
        self.assertIsNone(self.verify_range((4001, 4094)))
        self.assertIsNone(self.verify_range((1, 4094)))

    def check_one_vlan_invalid(self, bad_range, which):
        # which is 1-based: 1 -> first VLAN of the range, 2 -> second.
        expected_msg = self._vrange_invalid_vlan(bad_range, which)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.verify_range, bad_range)
        self.assertEqual(str(err), expected_msg)

    def test_range_first_vlan_invalid_negative(self):
        self.check_one_vlan_invalid((-1, 199), 1)

    def test_range_first_vlan_invalid_zero(self):
        self.check_one_vlan_invalid((0, 199), 1)

    def test_range_first_vlan_invalid_limit_plus_one(self):
        self.check_one_vlan_invalid((4095, 199), 1)

    def test_range_first_vlan_invalid_too_big(self):
        self.check_one_vlan_invalid((9999, 199), 1)

    def test_range_second_vlan_invalid_negative(self):
        self.check_one_vlan_invalid((299, -1), 2)

    def test_range_second_vlan_invalid_zero(self):
        self.check_one_vlan_invalid((299, 0), 2)

    def test_range_second_vlan_invalid_limit_plus_one(self):
        self.check_one_vlan_invalid((299, 4095), 2)

    def test_range_second_vlan_invalid_too_big(self):
        self.check_one_vlan_invalid((299, 9999), 2)

    # When both ends are invalid, only the first is reported.
    def test_range_both_vlans_invalid_01(self):
        self.check_one_vlan_invalid((-1, 0), 1)

    def test_range_both_vlans_invalid_02(self):
        self.check_one_vlan_invalid((0, 4095), 1)

    def test_range_both_vlans_invalid_03(self):
        self.check_one_vlan_invalid((4095, 9999), 1)

    def test_range_both_vlans_invalid_04(self):
        self.check_one_vlan_invalid((9999, -1), 1)

    def test_range_reversed(self):
        bad_range = (95, 10)
        expected_msg = self._vrange_invalid(bad_range)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.verify_range, bad_range)
        self.assertEqual(str(err), expected_msg)
class TestParseOneVlanRange(UtilTestParseVlanRanges):
    """plugin_utils.parse_network_vlan_range on single config entries."""

    def parse_one(self, cfg_entry):
        return plugin_utils.parse_network_vlan_range(cfg_entry)

    def test_parse_one_net_no_vlan_range(self):
        # A bare network name maps to (name, None).
        config_str = "net1"
        expected_networks = ("net1", None)
        self.assertEqual(self.parse_one(config_str), expected_networks)

    def test_parse_one_net_and_vlan_range(self):
        config_str = "net1:100:199"
        expected_networks = ("net1", (100, 199))
        self.assertEqual(self.parse_one(config_str), expected_networks)

    def test_parse_one_net_incomplete_range(self):
        config_str = "net1:100"
        expected_msg = self._range_too_few_err(config_str)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(str(err), expected_msg)

    def test_parse_one_net_range_too_many(self):
        config_str = "net1:100:150:200"
        expected_msg = self._range_too_many_err(config_str)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(str(err), expected_msg)

    def test_parse_one_net_vlan1_not_int(self):
        config_str = "net1:foo:199"
        expected_msg = self._vlan_not_int_err(config_str, 'foo')
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(str(err), expected_msg)

    def test_parse_one_net_vlan2_not_int(self):
        config_str = "net1:100:bar"
        expected_msg = self._vlan_not_int_err(config_str, 'bar')
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(str(err), expected_msg)

    def test_parse_one_net_and_max_range(self):
        config_str = "net1:1:4094"
        expected_networks = ("net1", (1, 4094))
        self.assertEqual(self.parse_one(config_str), expected_networks)

    def test_parse_one_net_range_bad_vlan1(self):
        config_str = "net1:9000:150"
        expected_msg = self._nrange_invalid_vlan(config_str, 1)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(str(err), expected_msg)

    def test_parse_one_net_range_bad_vlan2(self):
        config_str = "net1:4000:4999"
        expected_msg = self._nrange_invalid_vlan(config_str, 2)
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_one, config_str)
        self.assertEqual(str(err), expected_msg)
class TestParseVlanRangeList(UtilTestParseVlanRanges):
    """plugin_utils.parse_network_vlan_ranges over lists of entries.

    Results accumulate into a dict mapping network name to a list of
    (vlan_min, vlan_max) tuples; entries without a range map to [].
    """

    def parse_list(self, cfg_entries):
        return plugin_utils.parse_network_vlan_ranges(cfg_entries)

    def test_parse_list_one_net_no_vlan_range(self):
        config_list = ["net1"]
        expected_networks = {"net1": []}
        self.assertEqual(self.parse_list(config_list), expected_networks)

    def test_parse_list_one_net_vlan_range(self):
        config_list = ["net1:100:199"]
        expected_networks = {"net1": [(100, 199)]}
        self.assertEqual(self.parse_list(config_list), expected_networks)

    def test_parse_two_nets_no_vlan_range(self):
        config_list = ["net1",
                       "net2"]
        expected_networks = {"net1": [],
                             "net2": []}
        self.assertEqual(self.parse_list(config_list), expected_networks)

    def test_parse_two_nets_range_and_no_range(self):
        config_list = ["net1:100:199",
                       "net2"]
        expected_networks = {"net1": [(100, 199)],
                             "net2": []}
        self.assertEqual(self.parse_list(config_list), expected_networks)

    def test_parse_two_nets_no_range_and_range(self):
        config_list = ["net1",
                       "net2:200:299"]
        expected_networks = {"net1": [],
                             "net2": [(200, 299)]}
        self.assertEqual(self.parse_list(config_list), expected_networks)

    def test_parse_two_nets_bad_vlan_range1(self):
        # One bad entry fails the whole list.
        config_list = ["net1:100",
                       "net2:200:299"]
        expected_msg = self._range_too_few_err(config_list[0])
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_list, config_list)
        self.assertEqual(str(err), expected_msg)

    def test_parse_two_nets_vlan_not_int2(self):
        config_list = ["net1:100:199",
                       "net2:200:0x200"]
        expected_msg = self._vlan_not_int_err(config_list[1], '0x200')
        err = self.assertRaises(n_exc.NetworkVlanRangeError,
                                self.parse_list, config_list)
        self.assertEqual(str(err), expected_msg)

    def test_parse_two_nets_and_append_1_2(self):
        # Multiple ranges for the same network accumulate in input order.
        config_list = ["net1:100:199",
                       "net1:1000:1099",
                       "net2:200:299"]
        expected_networks = {"net1": [(100, 199),
                                      (1000, 1099)],
                             "net2": [(200, 299)]}
        self.assertEqual(self.parse_list(config_list), expected_networks)

    def test_parse_two_nets_and_append_1_3(self):
        config_list = ["net1:100:199",
                       "net2:200:299",
                       "net1:1000:1099"]
        expected_networks = {"net1": [(100, 199),
                                      (1000, 1099)],
                             "net2": [(200, 299)]}
        self.assertEqual(self.parse_list(config_list), expected_networks)
class TestDictUtils(base.BaseTestCase):
    """Round-trip and diff helpers for dict <-> string conversion."""

    def test_dict2str(self):
        source = {"key1": "value1", "key2": "value2", "key3": "value3"}
        self.assertEqual("key1=value1,key2=value2,key3=value3",
                         utils.dict2str(source))

    def test_str2dict(self):
        encoded = "key1=value1,key2=value2,key3=value3"
        self.assertEqual({"key1": "value1", "key2": "value2",
                          "key3": "value3"},
                         utils.str2dict(encoded))

    def test_dict_str_conversion(self):
        # Encoding then decoding must be the identity.
        source = {"key1": "value1", "key2": "value2"}
        self.assertEqual(source, utils.str2dict(utils.dict2str(source)))

    def test_diff_list_of_dict(self):
        old_list = [{"key1": "value1"},
                    {"key2": "value2"},
                    {"key3": "value3"}]
        new_list = [{"key1": "value1"},
                    {"key2": "value2"},
                    {"key4": "value4"}]
        added, removed = utils.diff_list_of_dict(old_list, new_list)
        self.assertEqual([{"key4": "value4"}], added)
        self.assertEqual([{"key3": "value3"}], removed)
class _CachingDecorator(object):
    """Minimal target for exercising utils.cache_method_results.

    The cache is replaced with a mock so tests can observe get/set calls
    without a real cache backend.
    """

    def __init__(self):
        self.func_retval = 'bar'
        self._cache = mock.Mock()

    @utils.cache_method_results
    def func(self, *args, **kwargs):
        return self.func_retval
class TestCachingDecorator(base.BaseTestCase):
    """Behavior of utils.cache_method_results around a mocked cache."""

    def setUp(self):
        super(TestCachingDecorator, self).setUp()
        self.decor = _CachingDecorator()
        # Fully qualified name of the decorated method; it is part of the
        # cache key.
        self.func_name = '%(module)s._CachingDecorator.func' % {
            'module': self.__module__
        }
        # Sentinel the decorator uses to mean "no cached entry".
        self.not_cached = self.decor.func.func.__self__._not_cached

    def test_cache_miss(self):
        expected_key = (self.func_name, 1, 2, ('foo', 'bar'))
        args = (1, 2)
        kwargs = {'foo': 'bar'}
        self.decor._cache.get.return_value = self.not_cached
        retval = self.decor.func(*args, **kwargs)
        # On a miss the result is stored (timeout None) and returned.
        self.decor._cache.set.assert_called_once_with(
            expected_key, self.decor.func_retval, None)
        self.assertEqual(self.decor.func_retval, retval)

    def test_cache_hit(self):
        expected_key = (self.func_name, 1, 2, ('foo', 'bar'))
        args = (1, 2)
        kwargs = {'foo': 'bar'}
        retval = self.decor.func(*args, **kwargs)
        # On a hit nothing is stored and the cached value is returned
        # (the mock's get.return_value stands in for the cached value).
        self.assertFalse(self.decor._cache.set.called)
        self.assertEqual(self.decor._cache.get.return_value, retval)
        self.decor._cache.get.assert_called_once_with(expected_key,
                                                      self.not_cached)

    def test_get_unhashable(self):
        # Unhashable args (a list) cannot be used as a cache key; the
        # decorator must fall through to calling the function directly.
        expected_key = (self.func_name, [1], 2)
        self.decor._cache.get.side_effect = TypeError
        retval = self.decor.func([1], 2)
        self.assertFalse(self.decor._cache.set.called)
        self.assertEqual(self.decor.func_retval, retval)
        self.decor._cache.get.assert_called_once_with(expected_key,
                                                      self.not_cached)

    def test_missing_cache(self):
        # Without a _cache attribute the decorator refuses to work.
        delattr(self.decor, '_cache')
        self.assertRaises(NotImplementedError, self.decor.func, (1, 2))

    def test_no_cache(self):
        # A falsy _cache disables caching but the call still succeeds.
        self.decor._cache = False
        retval = self.decor.func((1, 2))
        self.assertEqual(self.decor.func_retval, retval)
class TestDict2Tuples(base.BaseTestCase):
    """utils.dict2tuple must produce a deterministically ordered tuple."""

    def test_dict(self):
        data = {'foo': 'bar', 42: 'baz', 'aaa': 'zzz'}
        self.assertEqual(((42, 'baz'), ('aaa', 'zzz'), ('foo', 'bar')),
                         utils.dict2tuple(data))
class TestExceptionLogger(base.BaseTestCase):
    """utils.exception_logger must log and re-raise, also under eventlet."""

    def test_normal_call(self):
        result = "Result"

        @utils.exception_logger()
        def func():
            return result

        self.assertEqual(result, func())

    def test_raise(self):
        result = "Result"

        @utils.exception_logger()
        def func():
            raise RuntimeError(result)

        self.assertRaises(RuntimeError, func)

    def test_spawn_normal(self):
        result = "Result"
        logger = mock.Mock()

        @utils.exception_logger(logger=logger)
        def func():
            return result

        gt = eventlet.spawn(func)
        self.assertEqual(result, gt.wait())
        # No exception -> the logger must not have been invoked.
        self.assertFalse(logger.called)

    def test_spawn_raise(self):
        result = "Result"
        logger = mock.Mock()

        @utils.exception_logger(logger=logger)
        def func():
            raise RuntimeError(result)

        gt = eventlet.spawn(func)
        self.assertRaises(RuntimeError, gt.wait)
        self.assertTrue(logger.called)

    def test_pool_spawn_normal(self):
        logger = mock.Mock()
        calls = mock.Mock()

        @utils.exception_logger(logger=logger)
        def func(i):
            calls(i)

        pool = eventlet.GreenPool(4)
        for i in range(0, 4):
            pool.spawn(func, i)
        pool.waitall()
        calls.assert_has_calls([mock.call(0), mock.call(1),
                                mock.call(2), mock.call(3)],
                               any_order=True)
        self.assertFalse(logger.called)

    def test_pool_spawn_raise(self):
        logger = mock.Mock()
        calls = mock.Mock()

        @utils.exception_logger(logger=logger)
        def func(i):
            if i == 2:
                raise RuntimeError(2)
            else:
                calls(i)

        pool = eventlet.GreenPool(4)
        for i in range(0, 4):
            pool.spawn(func, i)
        pool.waitall()
        # Worker 2 raised, so only 0, 1 and 3 reached calls(); the pool
        # swallows the exception but the logger must have recorded it.
        calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(3)],
                               any_order=True)
        self.assertTrue(logger.called)
class TestDvrServices(base.BaseTestCase):
    """Port device owners that DVR routers must service."""

    def _assert_serviced(self, device_owner, expected):
        self.assertEqual(expected, utils.is_dvr_serviced(device_owner))

    def test_is_dvr_serviced_with_lb_port(self):
        self._assert_serviced(constants.DEVICE_OWNER_LOADBALANCER, True)

    def test_is_dvr_serviced_with_dhcp_port(self):
        self._assert_serviced(constants.DEVICE_OWNER_DHCP, True)

    def test_is_dvr_serviced_with_vm_port(self):
        self._assert_serviced('compute:', True)
class TestIpToCidr(base.BaseTestCase):
    """utils.ip_to_cidr appends the right prefix length to an address."""

    def test_ip_to_cidr_ipv4_default(self):
        self.assertEqual('15.1.2.3/32', utils.ip_to_cidr('15.1.2.3'))

    def test_ip_to_cidr_ipv4_prefix(self):
        self.assertEqual('15.1.2.3/24', utils.ip_to_cidr('15.1.2.3', 24))

    def test_ip_to_cidr_ipv4_netaddr(self):
        # netaddr objects are accepted as well as strings.
        self.assertEqual('15.1.2.3/32',
                         utils.ip_to_cidr(netaddr.IPAddress('15.1.2.3')))

    def test_ip_to_cidr_ipv4_bad_prefix(self):
        self.assertRaises(netaddr.core.AddrFormatError,
                          utils.ip_to_cidr, '15.1.2.3', 33)

    def test_ip_to_cidr_ipv6_default(self):
        self.assertEqual('::1/128', utils.ip_to_cidr('::1'))

    def test_ip_to_cidr_ipv6_prefix(self):
        self.assertEqual('::1/64', utils.ip_to_cidr('::1', 64))

    def test_ip_to_cidr_ipv6_bad_prefix(self):
        self.assertRaises(netaddr.core.AddrFormatError,
                          utils.ip_to_cidr, '2000::1', 129)
class TestCidrIsHost(base.BaseTestCase):
    """utils.is_cidr_host: host routes vs networks; CIDR format enforced."""

    def test_is_cidr_host_ipv4(self):
        self.assertTrue(utils.is_cidr_host('15.1.2.3/32'))

    def test_is_cidr_host_ipv4_not_cidr(self):
        # Bare addresses (no prefix) are rejected.
        self.assertRaises(ValueError, utils.is_cidr_host, '15.1.2.3')

    def test_is_cidr_host_ipv6(self):
        self.assertTrue(utils.is_cidr_host('2000::1/128'))

    def test_is_cidr_host_ipv6_netaddr(self):
        self.assertTrue(utils.is_cidr_host(netaddr.IPNetwork("2000::1")))

    def test_is_cidr_host_ipv6_32(self):
        self.assertFalse(utils.is_cidr_host('2000::1/32'))

    def test_is_cidr_host_ipv6_not_cidr(self):
        self.assertRaises(ValueError, utils.is_cidr_host, '2000::1')

    def test_is_cidr_host_ipv6_not_cidr_netaddr(self):
        self.assertRaises(ValueError, utils.is_cidr_host,
                          netaddr.IPAddress("2000::3"))
class TestIpVersionFromInt(base.BaseTestCase):
    """utils.ip_version_from_int maps 4/6 to constants and rejects others."""

    def test_ip_version_from_int_ipv4(self):
        self.assertEqual(constants.IPv4, utils.ip_version_from_int(4))

    def test_ip_version_from_int_ipv6(self):
        self.assertEqual(constants.IPv6, utils.ip_version_from_int(6))

    def test_ip_version_from_int_illegal_int(self):
        self.assertRaises(ValueError, utils.ip_version_from_int, 8)
class TestDelayedStringRenderer(base.BaseTestCase):
    """DelayedStringRenderer defers the wrapped call until %-formatting."""

    def test_call_deferred_until_str(self):
        my_func = mock.MagicMock(return_value='Brie cheese!')
        delayed = utils.DelayedStringRenderer(my_func, 1, 2, key_arg=44)
        # Construction alone must not invoke the callable.
        self.assertFalse(my_func.called)
        string = "Type: %s" % delayed
        my_func.assert_called_once_with(1, 2, key_arg=44)
        self.assertEqual("Type: Brie cheese!", string)

    def test_not_called_with_low_log_level(self):
        LOG = logging.getLogger(__name__)
        # make sure we return logging to previous level
        current_log_level = LOG.logger.getEffectiveLevel()
        self.addCleanup(LOG.logger.setLevel, current_log_level)
        my_func = mock.MagicMock()
        delayed = utils.DelayedStringRenderer(my_func)
        # set to warning so we shouldn't be logging debug messages
        LOG.logger.setLevel(logging.logging.WARNING)
        LOG.debug("Hello %s", delayed)
        self.assertFalse(my_func.called)
        # but it should be called with the debug level
        LOG.logger.setLevel(logging.logging.DEBUG)
        LOG.debug("Hello %s", delayed)
        self.assertTrue(my_func.called)
class TestEnsureDir(base.BaseTestCase):
    """utils.ensure_dir must tolerate already-existing directories."""

    @mock.patch('os.makedirs')
    def test_ensure_dir_no_fail_if_exists(self, mock_makedirs):
        exists_err = OSError()
        exists_err.errno = errno.EEXIST
        mock_makedirs.side_effect = exists_err
        # Must not raise even though makedirs reports EEXIST.
        utils.ensure_dir("/etc/create/concurrently")

    @mock.patch('os.makedirs')
    def test_ensure_dir_calls_makedirs(self, mock_makedirs):
        utils.ensure_dir("/etc/create/directory")
        mock_makedirs.assert_called_once_with("/etc/create/directory", 0o755)
| |
#!/usr/bin/env python
"""System cron flows tests."""
import time
# pylint: disable=unused-import, g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import, g-bad-import-order
from grr.endtoend_tests import base
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.cron import system
from grr.lib.flows.general import endtoend_test
from grr.test_data import client_fixture
class SystemCronFlowTest(test_lib.FlowTestsBaseclass):
"""Test system cron flows."""
def setUp(self):
super(SystemCronFlowTest, self).setUp()
# Mock today's time to be 8 days after the fixture date.
self.old_time = time.time
self.now = test_lib.FIXTURE_TIME + 8 * 60 * 60 * 24
time.time = lambda: self.now
# We are only interested in the client object (path = "/" in client VFS)
fixture = test_lib.FilterFixture(regex="^/$")
# Make 10 windows clients
for i in range(0, 10):
test_lib.ClientFixture("C.0%015X" % i, token=self.token, fixture=fixture)
# Make 10 linux clients 12 hours apart.
for i in range(0, 10):
test_lib.ClientFixture("C.1%015X" % i, token=self.token,
fixture=client_fixture.LINUX_FIXTURE)
def tearDown(self):
time.time = self.old_time
def testGRRVersionBreakDown(self):
"""Check that all client stats cron jobs are run."""
for _ in test_lib.TestFlowHelper("GRRVersionBreakDown", token=self.token):
pass
fd = aff4.FACTORY.Open("aff4:/stats/ClientFleetStats", token=self.token)
histogram = fd.Get(fd.Schema.GRRVERSION_HISTOGRAM)
# There should be 0 instances in 1 day actives.
self.assertEqual(histogram[0].title, "1 day actives")
self.assertEqual(len(histogram[0]), 0)
# There should be 0 instances in 7 day actives.
self.assertEqual(histogram[1].title, "7 day actives")
self.assertEqual(len(histogram[1]), 0)
# There should be 10 of each (Linux, Windows) instances in 14 day actives.
self.assertEqual(histogram[2].title, "14 day actives")
self.assertEqual(histogram[2][0].label, "GRR Monitor 1")
self.assertEqual(histogram[2][0].y_value, 20)
# There should be 10 of each (Linux, Windows) instances in 30 day actives.
self.assertEqual(histogram[3].title, "30 day actives")
self.assertEqual(histogram[3][0].label, "GRR Monitor 1")
self.assertEqual(histogram[3][0].y_value, 20)
def testOSBreakdown(self):
"""Check that all client stats cron jobs are run."""
for _ in test_lib.TestFlowHelper("OSBreakDown", token=self.token):
pass
fd = aff4.FACTORY.Open("aff4:/stats/ClientFleetStats", token=self.token)
histogram = fd.Get(fd.Schema.OS_HISTOGRAM)
# There should be a 0 instances in 1 day actives.
self.assertEqual(histogram[0].title, "1 day actives")
self.assertEqual(len(histogram[0]), 0)
# There should be a 0 instances in 7 day actives.
self.assertEqual(histogram[1].title, "7 day actives")
self.assertEqual(len(histogram[1]), 0)
# There should be 10 of each (Linux, Windows) instances in 14 day actives.
self.assertEqual(histogram[2].title, "14 day actives")
self.assertEqual(histogram[2][0].label, "Linux")
self.assertEqual(histogram[2][0].y_value, 10)
self.assertEqual(histogram[2][1].label, "Windows")
self.assertEqual(histogram[2][1].y_value, 10)
# There should be 10 of each (Linux, Windows) instances in 30 day actives.
self.assertEqual(histogram[3].title, "30 day actives")
self.assertEqual(histogram[3][0].label, "Linux")
self.assertEqual(histogram[3][0].y_value, 10)
self.assertEqual(histogram[3][1].label, "Windows")
self.assertEqual(histogram[3][1].y_value, 10)
def testLastAccessStats(self):
"""Check that all client stats cron jobs are run."""
for _ in test_lib.TestFlowHelper("LastAccessStats", token=self.token):
pass
fd = aff4.FACTORY.Open("aff4:/stats/ClientFleetStats", token=self.token)
histogram = fd.Get(fd.Schema.LAST_CONTACTED_HISTOGRAM)
data = [(x.x_value, x.y_value) for x in histogram]
self.assertEqual(data, [
(86400000000L, 0L),
(172800000000L, 0L),
(259200000000L, 0L),
(604800000000L, 0L),
# All our clients appeared at the same time (and did not appear since).
(1209600000000L, 20L),
(2592000000000L, 20L),
(5184000000000L, 20L)])
def testPurgeClientStats(self):
max_age = system.PurgeClientStats.MAX_AGE
for t in [1 * max_age, 1.5 * max_age, 2 * max_age]:
with test_lib.FakeTime(t):
urn = self.client_id.Add("stats")
stats_fd = aff4.FACTORY.Create(urn, "ClientStats", token=self.token,
mode="rw")
st = rdfvalue.ClientStats(RSS_size=int(t))
stats_fd.AddAttribute(stats_fd.Schema.STATS(st))
stats_fd.Close()
stat_obj = aff4.FACTORY.Open(
urn, age=aff4.ALL_TIMES, token=self.token, ignore_cache=True)
stat_entries = list(stat_obj.GetValuesForAttribute(stat_obj.Schema.STATS))
self.assertEqual(len(stat_entries), 3)
self.assertTrue(max_age in [e.RSS_size for e in stat_entries])
with test_lib.FakeTime(2.5 * max_age):
for _ in test_lib.TestFlowHelper(
"PurgeClientStats", None, client_id=self.client_id, token=self.token):
pass
stat_obj = aff4.FACTORY.Open(
urn, age=aff4.ALL_TIMES, token=self.token, ignore_cache=True)
stat_entries = list(stat_obj.GetValuesForAttribute(stat_obj.Schema.STATS))
self.assertEqual(len(stat_entries), 1)
self.assertTrue(max_age not in [e.RSS_size for e in stat_entries])
def _SetSummaries(self, client_id):
client = aff4.FACTORY.Create(client_id, "VFSGRRClient", mode="rw",
token=self.token)
client.Set(client.Schema.HOSTNAME(client_id))
client.Set(client.Schema.SYSTEM("Darwin"))
client.Set(client.Schema.OS_RELEASE("OSX"))
client.Set(client.Schema.OS_VERSION("10.9.2"))
client.Set(client.Schema.KERNEL("13.1.0"))
client.Set(client.Schema.FQDN("%s.example.com" % client_id))
client.Set(client.Schema.ARCH("AMD64"))
client.Flush()
  def testEndToEndTests(self):
    """EndToEndTests should create exactly one hunt over the test clients."""
    self.client_ids = ["aff4:/C.6000000000000000",
                       "aff4:/C.6000000000000001",
                       "aff4:/C.6000000000000002"]
    for clientid in self.client_ids:
      self._SetSummaries(clientid)
    self.client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")
    config_lib.CONFIG.Set("Test.end_to_end_client_ids", self.client_ids)
    # Restrict the registered tests to the mock test and set the flow's
    # lifetime to 0 (presumably so it completes within the test run —
    # confirm against EndToEndTests.lifetime semantics).
    with utils.MultiStubber((base.AutomatedTest, "classes",
                             {"MockEndToEndTest":
                              endtoend_test.MockEndToEndTest}),
                            (system.EndToEndTests, "lifetime", 0)):
      # The test harness doesn't understand the callstate at a later time that
      # this flow is doing, so we need to disable check_flow_errors.
      for _ in test_lib.TestFlowHelper("EndToEndTests", self.client_mock,
                                       client_id=self.client_id,
                                       check_flow_errors=False,
                                       token=self.token):
        pass
    hunt_ids = list(aff4.FACTORY.Open("aff4:/hunts",
                                      token=self.token).ListChildren())
    # We have only created one hunt, and we should have started with clean aff4
    # space.
    self.assertEqual(len(hunt_ids), 1)
    hunt_obj = aff4.FACTORY.Open(hunt_ids[0], token=self.token,
                                 age=aff4.ALL_TIMES)
    self.assertItemsEqual(sorted(hunt_obj.GetClients()),
                          sorted(self.client_ids))
def _CreateResult(self, success, clientid):
success = rdfvalue.EndToEndTestResult(success=success)
return rdfvalue.GrrMessage(source=clientid,
payload=success)
  def testEndToEndTestsResultChecking(self):
    """_CheckForSuccess must raise unless every client reports success."""
    self.client_ids = ["aff4:/C.6000000000000000",
                       "aff4:/C.6000000000000001",
                       "aff4:/C.6000000000000002"]
    for clientid in self.client_ids:
      self._SetSummaries(clientid)
    self.client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")
    # Build the flow object directly (no flow runner) and seed the state
    # fields that _CheckForSuccess reads.
    endtoend = system.EndToEndTests(None, token=self.token)
    endtoend.state.Register("hunt_id", "aff4:/temphuntid")
    endtoend.state.Register("client_ids", set(self.client_ids))
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    # No results at all
    self.assertRaises(flow.FlowError, endtoend._CheckForSuccess, [])
    # Not enough client results
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    self.assertRaises(flow.FlowError,
                      endtoend._CheckForSuccess,
                      [self._CreateResult(True, "aff4:/C.6000000000000001")])
    # All clients succeeded
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    endtoend._CheckForSuccess(
        [self._CreateResult(True, "aff4:/C.6000000000000000"),
         self._CreateResult(True, "aff4:/C.6000000000000001"),
         self._CreateResult(True, "aff4:/C.6000000000000002")])
    # All clients complete, but some failures
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    self.assertRaises(flow.FlowError,
                      endtoend._CheckForSuccess,
                      [self._CreateResult(True, "aff4:/C.6000000000000000"),
                       self._CreateResult(False, "aff4:/C.6000000000000001"),
                       self._CreateResult(False, "aff4:/C.6000000000000002")])
def main(argv):
  """Program entry point: run the full GRR test suite."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  flags.StartMain(main)
| |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import operator
import warnings
import numpy as np
from pandas._libs import NaT, algos, iNaT, lib
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ, IncompatibleFrequency, Period)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import (
RoundTo, maybe_integer_op_deprecated, round_nsint64)
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import (
AbstractMethodError, NullFrequencyError, PerformanceWarning)
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal,
is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like,
is_object_dtype, is_offsetlike, is_period_dtype, is_string_dtype,
is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import isna
from pandas.core import missing, nanops
from pandas.core.algorithms import (
checked_add_with_arr, take, unique1d, value_counts)
import pandas.core.common as com
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
from .base import ExtensionArray, ExtensionOpsMixin
class AttributesMixin(object):
    """Mixin defining the attribute/scalar protocol for datelike arrays."""

    @property
    def _attributes(self):
        # Subclasses must provide the list of attribute names to capture.
        raise AbstractMethodError(self)

    @classmethod
    def _simple_new(cls, values, **kwargs):
        raise AbstractMethodError(cls)

    def _get_attributes_dict(self):
        """
        Return a mapping from each name in ``_attributes`` to its current
        value on this instance (``None`` when the attribute is unset).
        """
        attrs = {}
        for name in self._attributes:
            attrs[name] = getattr(self, name, None)
        return attrs

    @property
    def _scalar_type(self):
        # type: () -> Union[type, Tuple[type]]
        """The scalar type associated with this datelike array:

        * PeriodArray : Period
        * DatetimeArray : Timestamp
        * TimedeltaArray : Timedelta
        """
        raise AbstractMethodError(self)

    def _scalar_from_string(self, value):
        # type: (str) -> Union[Period, Timestamp, Timedelta, NaTType]
        """
        Parse ``value`` into an instance of ``self._scalar_type``.

        Parameters
        ----------
        value : str

        Returns
        -------
        Period, Timestamp, or Timedelta, or NaT

        Notes
        -----
        Implementations should call ``self._check_compatible_with`` before
        unboxing the result.
        """
        raise AbstractMethodError(self)

    def _unbox_scalar(self, value):
        # type: (Union[Period, Timestamp, Timedelta, NaTType]) -> int
        """
        Return the integer (i8) representation of the scalar ``value``.

        Parameters
        ----------
        value : Union[Period, Timestamp, Timedelta]

        Returns
        -------
        int

        Examples
        --------
        >>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
        10000000000
        """
        raise AbstractMethodError(self)

    def _check_compatible_with(self, other):
        # type: (Union[Period, Timestamp, Timedelta, NaTType]) -> None
        """
        Raise if ``other`` is incompatible with ``self``.

        * DatetimeArray verifies that the timezones (if any) match
        * PeriodArray verifies that the freq matches
        * Timedelta has no verification

        In each case, NaT is considered compatible.

        Raises
        ------
        Exception
        """
        raise AbstractMethodError(self)
class DatelikeOps(object):
    """
    Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
    """

    @Substitution(URL="https://docs.python.org/3/library/datetime.html"
                  "#strftime-and-strptime-behavior")
    def strftime(self, date_format):
        """
        Convert to Index using specified date_format.

        Return an Index of formatted strings specified by date_format, which
        supports the same string format as the python standard library.
        Details of the string format can be found in `python string format
        doc <%(URL)s>`__

        Parameters
        ----------
        date_format : str
            Date format string (e.g. "%%Y-%%m-%%d").

        Returns
        -------
        Index
            Index of formatted strings

        See Also
        --------
        to_datetime : Convert the given argument to datetime.
        DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
        DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
        DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.

        Examples
        --------
        >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
        ...                     periods=3, freq='s')
        >>> rng.strftime('%%B %%d, %%Y, %%r')
        Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
               'March 10, 2018, 09:00:02 AM'],
              dtype='object')
        """
        from pandas import Index
        formatted = self._format_native_types(date_format=date_format)
        return Index(formatted)
class TimelikeOps(object):
    """
    Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
    """

    # NOTE: the _round_doc/_*_example strings are runtime data consumed by
    # the @Appender decorators below and are reproduced verbatim.
    _round_doc = (
        """
        Perform {op} operation on the data to the specified `freq`.
        Parameters
        ----------
        freq : str or Offset
            The frequency level to {op} the index to. Must be a fixed
            frequency like 'S' (second) not 'ME' (month end). See
            :ref:`frequency aliases <timeseries.offset_aliases>` for
            a list of possible `freq` values.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            Only relevant for DatetimeIndex:
            - 'infer' will attempt to infer fall dst-transition hours based on
              order
            - bool-ndarray where True signifies a DST time, False designates
              a non-DST time (note that this flag is only applicable for
              ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
              times
            .. versionadded:: 0.24.0
        nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta,
        default 'raise'
            A nonexistent time does not exist in a particular timezone
            where clocks moved forward due to DST.
            - 'shift_forward' will shift the nonexistent time forward to the
              closest existing time
            - 'shift_backward' will shift the nonexistent time backward to the
              closest existing time
            - 'NaT' will return NaT where there are nonexistent times
            - timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise an NonExistentTimeError if there are
              nonexistent times
            .. versionadded:: 0.24.0
        Returns
        -------
        DatetimeIndex, TimedeltaIndex, or Series
            Index of the same type for a DatetimeIndex or TimedeltaIndex,
            or a Series with the same index for a Series.
        Raises
        ------
        ValueError if the `freq` cannot be converted.
        Examples
        --------
        **DatetimeIndex**
        >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
        >>> rng
        DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:01:00'],
                      dtype='datetime64[ns]', freq='T')
        """)

    _round_example = (
        """>>> rng.round('H')
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)
        **Series**
        >>> pd.Series(rng).dt.round("H")
        0   2018-01-01 12:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 12:00:00
        dtype: datetime64[ns]
        """)

    _floor_example = (
        """>>> rng.floor('H')
        DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 12:00:00'],
                      dtype='datetime64[ns]', freq=None)
        **Series**
        >>> pd.Series(rng).dt.floor("H")
        0   2018-01-01 11:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 12:00:00
        dtype: datetime64[ns]
        """
    )

    _ceil_example = (
        """>>> rng.ceil('H')
        DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
                       '2018-01-01 13:00:00'],
                      dtype='datetime64[ns]', freq=None)
        **Series**
        >>> pd.Series(rng).dt.ceil("H")
        0   2018-01-01 12:00:00
        1   2018-01-01 12:00:00
        2   2018-01-01 13:00:00
        dtype: datetime64[ns]
        """
    )

    def _round(self, freq, mode, ambiguous, nonexistent):
        """Shared implementation of round/floor/ceil."""
        # Round the local (wall-clock) times, then re-localize if needed.
        i8values = _ensure_datetimelike_to_i8(self)
        rounded = round_nsint64(i8values, mode, freq)
        rounded = self._maybe_mask_results(rounded, fill_value=NaT)
        # For tz-aware data, build a naive array first and re-localize below.
        out_dtype = None if is_datetime64tz_dtype(self) else self.dtype
        naive = self._simple_new(rounded, dtype=out_dtype)
        return self._ensure_localized(naive, ambiguous, nonexistent)

    @Appender((_round_doc + _round_example).format(op="round"))
    def round(self, freq, ambiguous='raise', nonexistent='raise'):
        return self._round(
            freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent
        )

    @Appender((_round_doc + _floor_example).format(op="floor"))
    def floor(self, freq, ambiguous='raise', nonexistent='raise'):
        return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)

    @Appender((_round_doc + _ceil_example).format(op="ceil"))
    def ceil(self, freq, ambiguous='raise', nonexistent='raise'):
        return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin,
AttributesMixin,
ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
    @property
    def _box_func(self):
        """
        box function to get object from internal representation
        """
        raise AbstractMethodError(self)
    def _box_values(self, values):
        """
        apply box func to passed values
        """
        # map_infer applies _box_func elementwise over an object array.
        return lib.map_infer(values, self._box_func)
    def __iter__(self):
        # Yield boxed scalars one at a time from the i8 view.
        return (self._box_func(v) for v in self.asi8)
    @property
    def asi8(self):
        # type: () -> ndarray
        """
        Integer representation of the values.
        Returns
        -------
        ndarray
            An ndarray with int64 dtype.
        """
        # do not cache or you'll create a memory leak
        return self._data.view('i8')
    @property
    def _ndarray_values(self):
        # Raw backing ndarray, without boxing.
        return self._data
    # ----------------------------------------------------------------
    # Rendering Methods
    def _format_native_types(self, na_rep='NaT', date_format=None):
        """
        Helper method for astype when converting to strings.
        Returns
        -------
        ndarray[str]
        """
        raise AbstractMethodError(self)
    def _formatter(self, boxed=False):
        # TODO: Remove Datetime & DatetimeTZ formatters.
        # Default scalar formatter: single-quote the str() of each element.
        return "'{}'".format
    # ----------------------------------------------------------------
    # Array-Like / EA-Interface Methods
    @property
    def nbytes(self):
        # Size in bytes of the backing ndarray.
        return self._data.nbytes
    def __array__(self, dtype=None):
        # used for Timedelta/DatetimeArray, overwritten by PeriodArray
        if is_object_dtype(dtype):
            # Object dtype: box every element (list(self) goes via __iter__).
            return np.array(list(self), dtype=object)
        return self._data
    @property
    def shape(self):
        # Always one-dimensional.
        return (len(self),)
    @property
    def size(self):
        # type: () -> int
        """The number of elements in this array."""
        return np.prod(self.shape)
    def __len__(self):
        return len(self._data)
    def __getitem__(self, key):
        """
        This getitem defers to the underlying array, which by-definition can
        only handle list-likes, slices, and integer scalars
        """
        is_int = lib.is_integer(key)
        if lib.is_scalar(key) and not is_int:
            # Mirror numpy's error message for unsupported scalar indexers.
            raise IndexError("only integers, slices (`:`), ellipsis (`...`), "
                             "numpy.newaxis (`None`) and integer or boolean "
                             "arrays are valid indices")
        getitem = self._data.__getitem__
        if is_int:
            # Integer access returns a boxed scalar, not an array.
            val = getitem(key)
            return self._box_func(val)
        if com.is_bool_indexer(key):
            key = np.asarray(key, dtype=bool)
            if key.all():
                # An all-True mask is equivalent to a full slice, which
                # lets the freq-preservation logic below keep `freq`.
                key = slice(0, None, None)
            else:
                key = lib.maybe_booleans_to_slice(key.view(np.uint8))
        is_period = is_period_dtype(self)
        if is_period:
            # Period dtype always carries its freq through indexing.
            freq = self.freq
        else:
            freq = None
            if isinstance(key, slice):
                if self.freq is not None and key.step is not None:
                    # A stepped slice scales the freq by the step.
                    freq = key.step * self.freq
                else:
                    freq = self.freq
            elif key is Ellipsis:
                # GH#21282 indexing with Ellipsis is similar to a full slice,
                # should preserve `freq` attribute
                freq = self.freq
        result = getitem(key)
        if result.ndim > 1:
            # To support MPL which performs slicing with 2 dim
            # even though it only has 1 dim by definition
            if is_period:
                return self._simple_new(result, dtype=self.dtype, freq=freq)
            return result
        return self._simple_new(result, dtype=self.dtype, freq=freq)
    def __setitem__(
            self,
            key,    # type: Union[int, Sequence[int], Sequence[bool], slice]
            value,  # type: Union[NaTType, Scalar, Sequence[Scalar]]
    ):
        # type: (...) -> None
        """Set one or more positions to `value`, coercing to this dtype."""
        # I'm fudging the types a bit here. The "Scalar" above really depends
        # on type(self). For PeriodArray, it's Period (or stuff coercible
        # to a period in from_sequence). For DatetimeArray, it's Timestamp...
        # I don't know if mypy can do that, possibly with Generics.
        # https://mypy.readthedocs.io/en/latest/generics.html
        if is_list_like(value):
            is_slice = isinstance(key, slice)
            if lib.is_scalar(key):
                # A sequence cannot be assigned to a single position.
                raise ValueError("setting an array element with a sequence.")
            if (not is_slice
                    and len(key) != len(value)
                    and not com.is_bool_indexer(key)):
                msg = ("shape mismatch: value array of length '{}' does not "
                       "match indexing result of length '{}'.")
                raise ValueError(msg.format(len(key), len(value)))
            if not is_slice and len(key) == 0:
                # Nothing to set; skip the _from_sequence round-trip.
                return
            # Coerce to our own type so dtype compatibility is checked.
            value = type(self)._from_sequence(value, dtype=self.dtype)
            self._check_compatible_with(value)
            value = value.asi8
        elif isinstance(value, self._scalar_type):
            self._check_compatible_with(value)
            value = self._unbox_scalar(value)
        elif isna(value) or value == iNaT:
            value = iNaT
        else:
            msg = (
                "'value' should be a '{scalar}', 'NaT', or array of those. "
                "Got '{typ}' instead."
            )
            raise TypeError(msg.format(scalar=self._scalar_type.__name__,
                                       typ=type(value).__name__))
        # Write the i8 representation directly into the backing array.
        self._data[key] = value
        self._maybe_clear_freq()
    def _maybe_clear_freq(self):
        # inplace operations like __setitem__ may invalidate the freq of
        # DatetimeArray and TimedeltaArray
        # No-op here; presumably overridden by subclasses that track freq.
        pass
    def astype(self, dtype, copy=True):
        """
        Cast the array to `dtype`; may return an ndarray or a Categorical.
        """
        # Some notes on cases we don't have to handle here in the base class:
        # 1. PeriodArray.astype handles period -> period
        # 2. DatetimeArray.astype handles conversion between tz.
        # 3. DatetimeArray.astype handles datetime -> period
        from pandas import Categorical
        dtype = pandas_dtype(dtype)
        if is_object_dtype(dtype):
            # Boxed scalars as an object ndarray.
            return self._box_values(self.asi8)
        elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
            return self._format_native_types()
        elif is_integer_dtype(dtype):
            # we deliberately ignore int32 vs. int64 here.
            # See https://github.com/pandas-dev/pandas/issues/24381 for more.
            values = self.asi8
            if is_unsigned_integer_dtype(dtype):
                # Again, we ignore int32 vs. int64
                values = values.view("uint64")
            if copy:
                values = values.copy()
            return values
        elif (is_datetime_or_timedelta_dtype(dtype) and
              not is_dtype_equal(self.dtype, dtype)) or is_float_dtype(dtype):
            # disallow conversion between datetime/timedelta,
            # and conversions for any datetimelike to float
            msg = 'Cannot cast {name} to dtype {dtype}'
            raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
        elif is_categorical_dtype(dtype):
            return Categorical(self, dtype=dtype)
        else:
            return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
"""
New view on this array with the same data.
Parameters
----------
dtype : numpy dtype, optional
Returns
-------
ndarray
With the specified `dtype`.
"""
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
raise AbstractMethodError(self)
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(self.asi8,
indices,
allow_fill=allow_fill,
fill_value=fill_value)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self, deep=False):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, compat.string_types):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self)))
or isna(value)):
raise ValueError("Unexpected type for 'value': {valtype}"
.format(valtype=type(value)))
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view('i8'), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(cls(result.index.view('i8'), dtype=self.dtype),
name=result.index.name)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return (self.asi8 == iNaT)
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : string/dtype or None
Returns
-------
result : ndarray with values replace by the fill_value
mask the result if needed, convert to the provided dtype if its not
None
This is an internal routine
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
    def fillna(self, value=None, method=None, limit=None):
        """
        Fill missing (NaT) positions using `value` or an interpolation
        `method` ('pad' or backfill), returning a new array.
        """
        # TODO(GH-20300): remove this
        # Just overriding to ensure that we avoid an astype(object).
        # Either 20300 or a `_values_for_fillna` would avoid this duplication.
        if isinstance(value, ABCSeries):
            value = value.array
        value, method = validate_fillna_kwargs(value, method)
        mask = self.isna()
        if is_array_like(value):
            if len(value) != len(self):
                raise ValueError("Length of 'value' does not match. Got ({}) "
                                 " expected {}".format(len(value), len(self)))
            # Keep only the fill values aligned with missing positions.
            value = value[mask]
        if mask.any():
            if method is not None:
                if method == 'pad':
                    func = missing.pad_1d
                else:
                    func = missing.backfill_1d
                values = self._data
                if not is_period_dtype(self):
                    # For PeriodArray self._data is i8, which gets copied
                    # by `func`. Otherwise we need to make a copy manually
                    # to avoid modifying `self` in-place.
                    values = values.copy()
                new_values = func(values, limit=limit,
                                  mask=mask)
                if is_datetime64tz_dtype(self):
                    # we need to pass int64 values to the constructor to avoid
                    # re-localizing incorrectly
                    new_values = new_values.view("i8")
                new_values = type(self)(new_values, dtype=self.dtype)
            else:
                # fill with value
                new_values = self.copy()
                new_values[mask] = value
        else:
            # No missing values: return an unmodified copy.
            new_values = self.copy()
        return new_values
    # ------------------------------------------------------------------
    # Frequency Properties/Methods
    @property
    def freq(self):
        """
        Return the frequency object if it is set, otherwise None.
        """
        return self._freq
    @freq.setter
    def freq(self, value):
        # Coerce and validate before mutating, so an invalid freq leaves
        # self unchanged.
        if value is not None:
            value = frequencies.to_offset(value)
            self._validate_frequency(self, value)
        self._freq = value
    @property
    def freqstr(self):
        """
        Return the frequency object as a string if it's set, otherwise None.
        """
        if self.freq is None:
            return None
        return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
Tryies to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
    @classmethod
    def _validate_frequency(cls, index, freq, **kwargs):
        """
        Validate that a frequency is compatible with the values of a given
        Datetime Array/Index or Timedelta Array/Index
        Parameters
        ----------
        index : DatetimeIndex or TimedeltaIndex
            The index on which to determine if the given frequency is valid
        freq : DateOffset
            The frequency to validate
        Raises
        ------
        ValueError
            If the values could not have been generated with `freq`.
        """
        if is_period_dtype(cls):
            # Frequency validation is not meaningful for Period Array/Index
            return None
        inferred = index.inferred_freq
        if index.size == 0 or inferred == freq.freqstr:
            # Empty index, or already conforming: nothing to check.
            return None
        try:
            # Regenerate a range from index[0]; it must match exactly.
            on_freq = cls._generate_range(start=index[0], end=None,
                                          periods=len(index), freq=freq,
                                          **kwargs)
            if not np.array_equal(index.asi8, on_freq.asi8):
                raise ValueError
        except ValueError as e:
            if "non-fixed" in str(e):
                # non-fixed frequencies are not meaningful for timedelta64;
                # we retain that error message
                raise e
            # GH#11587 the main way this is reached is if the `np.array_equal`
            # check above is False. This can also be reached if index[0]
            # is `NaT`, in which case the call to `cls._generate_range` will
            # raise a ValueError, which we re-raise with a more targeted
            # message.
            raise ValueError('Inferred frequency {infer} from passed values '
                             'does not conform to passed frequency {passed}'
                             .format(infer=inferred, passed=freq.freqstr))
# monotonicity/uniqueness properties are called via frequencies.infer_freq,
# see GH#23789
@property
def _is_monotonic_increasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[0]
@property
def _is_monotonic_decreasing(self):
return algos.is_monotonic(self.asi8, timelike=True)[1]
@property
def _is_unique(self):
return len(unique1d(self.asi8)) == len(self)
# ------------------------------------------------------------------
# Arithmetic Methods
def _add_datetimelike_scalar(self, other):
# Overriden by TimedeltaArray
raise TypeError("cannot add {cls} and {typ}"
.format(cls=type(self).__name__,
typ=type(other).__name__))
_add_datetime_arraylike = _add_datetimelike_scalar
def _sub_datetimelike_scalar(self, other):
# Overridden by DatetimeArray
assert other is not NaT
raise TypeError("cannot subtract a datelike from a {cls}"
.format(cls=type(self).__name__))
_sub_datetime_arraylike = _sub_datetimelike_scalar
def _sub_period(self, other):
# Overriden by PeriodArray
raise TypeError("cannot subtract Period from a {cls}"
.format(cls=type(self).__name__))
def _add_offset(self, offset):
raise AbstractMethodError(self)
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(len(self), dtype='i8')
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
new_values = self._maybe_mask_results(new_values)
return new_values.view('i8')
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas import TimedeltaIndex
other = TimedeltaIndex(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('i8')
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError('Cannot add {cls} and {typ}'
.format(cls=type(self).__name__,
typ=type(NaT).__name__))
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return result.view('timedelta64[ns]')
def _sub_period_array(self, other):
"""
Subtract a Period Array/Index from self. This is only valid if self
is itself a Period Array/Index, raises otherwise. Both objects must
have the same frequency.
Parameters
----------
other : PeriodIndex or PeriodArray
Returns
-------
result : np.ndarray[object]
Array of DateOffset objects; nulls represented by NaT
"""
if not is_period_dtype(self):
raise TypeError("cannot subtract {dtype}-dtype from {cls}"
.format(dtype=other.dtype,
cls=type(self).__name__))
if len(self) != len(other):
raise ValueError("cannot subtract arrays/indices of "
"unequal length")
if self.freq != other.freq:
msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
own_freq=self.freqstr,
other_freq=other.freqstr)
raise IncompatibleFrequency(msg)
new_values = checked_add_with_arr(self.asi8, -other.asi8,
arr_mask=self._isnan,
b_mask=other._isnan)
new_values = np.array([self.freq.base * x for x in new_values])
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = NaT
return new_values
def _addsub_int_array(self, other, op):
"""
Add or subtract array-like of integers equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : Index, ExtensionArray, np.ndarray
integer-dtype
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
# _addsub_int_array is overriden by PeriodArray
assert not is_period_dtype(self)
assert op in [operator.add, operator.sub]
if self.freq is None:
# GH#19123
raise NullFrequencyError("Cannot shift with no freq")
elif isinstance(self.freq, Tick):
# easy case where we can convert to timedelta64 operation
td = Timedelta(self.freq)
return op(self, td * other)
# We should only get here with DatetimeIndex; dispatch
# to _addsub_offset_array
assert not is_timedelta64_dtype(self)
return op(self, np.array(other) * self.freq)
def _addsub_offset_array(self, other, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : Index, np.ndarray
object-dtype containing pd.DateOffset objects
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn("Adding/subtracting array of DateOffsets to "
"{cls} not vectorized"
.format(cls=type(self).__name__), PerformanceWarning)
# For EA self.astype('O') returns a numpy array, not an Index
left = lib.values_from_object(self.astype('O'))
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs['freq'] = 'infer'
return self._from_sequence(res_values, **kwargs)
    def _time_shift(self, periods, freq=None):
        """
        Shift each value by `periods`.

        Note this is different from ExtensionArray.shift, which
        shifts the *position* of each element, padding the end with
        missing values.

        Parameters
        ----------
        periods : int
            Number of periods to shift by.
        freq : pandas.DateOffset, pandas.Timedelta, or string
            Frequency increment to shift by.

        Returns
        -------
        shifted : same type as self

        Raises
        ------
        NullFrequencyError
            If no usable frequency is available for the shift.
        """
        if freq is not None and freq != self.freq:
            # An explicit, different freq: shift by periods * freq via
            # ordinary addition.
            if isinstance(freq, compat.string_types):
                freq = frequencies.to_offset(freq)
            offset = periods * freq
            result = self + offset
            return result
        if periods == 0:
            # immutable so OK
            return self.copy()
        if self.freq is None:
            raise NullFrequencyError("Cannot shift with no freq")
        # Regenerate the range from shifted endpoints rather than shifting
        # each element individually.
        start = self[0] + periods * self.freq
        end = self[-1] + periods * self.freq
        # Note: in the DatetimeTZ case, _generate_range will infer the
        # appropriate timezone from `start` and `end`, so tz does not need
        # to be passed explicitly.
        return self._generate_range(start=start, end=end, periods=None,
                                    freq=self.freq)
    def __add__(self, other):
        """
        Add ``other`` to this datetime-like array, dispatching on the
        type/dtype of ``other``.  Branch order matters: scalar checks come
        before array-dtype checks, and timedelta checks before integer
        checks (``lib.is_integer`` is True for np.timedelta64).
        """
        other = lib.item_from_zerodim(other)
        if isinstance(other, (ABCSeries, ABCDataFrame)):
            # Let Series/DataFrame handle the op (alignment, boxing).
            return NotImplemented
        # scalar others
        elif other is NaT:
            result = self._add_nat()
        elif isinstance(other, (Tick, timedelta, np.timedelta64)):
            result = self._add_delta(other)
        elif isinstance(other, DateOffset):
            # specifically _not_ a Tick
            result = self._add_offset(other)
        elif isinstance(other, (datetime, np.datetime64)):
            result = self._add_datetimelike_scalar(other)
        elif lib.is_integer(other):
            # This check must come after the check for np.timedelta64
            # as is_integer returns True for these
            if not is_period_dtype(self):
                maybe_integer_op_deprecated(self)
            result = self._time_shift(other)
        # array-like others
        elif is_timedelta64_dtype(other):
            # TimedeltaIndex, ndarray[timedelta64]
            result = self._add_delta(other)
        elif is_offsetlike(other):
            # Array/Index of DateOffset objects
            result = self._addsub_offset_array(other, operator.add)
        elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
            # DatetimeIndex, ndarray[datetime64]
            return self._add_datetime_arraylike(other)
        elif is_integer_dtype(other):
            if not is_period_dtype(self):
                maybe_integer_op_deprecated(self)
            result = self._addsub_int_array(other, operator.add)
        elif is_float_dtype(other):
            # Explicitly catch invalid dtypes
            raise TypeError("cannot add {dtype}-dtype to {cls}"
                            .format(dtype=other.dtype,
                                    cls=type(self).__name__))
        elif is_period_dtype(other):
            # if self is a TimedeltaArray and other is a PeriodArray with
            # a timedelta-like (i.e. Tick) freq, this operation is valid.
            # Defer to the PeriodArray implementation.
            # In remaining cases, this will end up raising TypeError.
            return NotImplemented
        elif is_extension_array_dtype(other):
            # Categorical op will raise; defer explicitly
            return NotImplemented
        else:  # pragma: no cover
            return NotImplemented
        # Wrap a bare timedelta64 ndarray result back into a TimedeltaArray.
        if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
            from pandas.core.arrays import TimedeltaArray
            # TODO: infer freq?
            return TimedeltaArray(result)
        return result
    def __radd__(self, other):
        # alias for __add__; reflected addition is handled identically
        return self.__add__(other)
    def __sub__(self, other):
        """
        Subtract ``other`` from this datetime-like array, dispatching on
        the type/dtype of ``other``.  Mirrors ``__add__``: scalar checks
        precede array-dtype checks, and timedelta checks precede integer
        checks (``lib.is_integer`` is True for np.timedelta64).
        """
        other = lib.item_from_zerodim(other)
        if isinstance(other, (ABCSeries, ABCDataFrame)):
            # Let Series/DataFrame handle the op (alignment, boxing).
            return NotImplemented
        # scalar others
        elif other is NaT:
            result = self._sub_nat()
        elif isinstance(other, (Tick, timedelta, np.timedelta64)):
            # Subtraction of a timedelta is addition of its negation.
            result = self._add_delta(-other)
        elif isinstance(other, DateOffset):
            # specifically _not_ a Tick
            result = self._add_offset(-other)
        elif isinstance(other, (datetime, np.datetime64)):
            result = self._sub_datetimelike_scalar(other)
        elif lib.is_integer(other):
            # This check must come after the check for np.timedelta64
            # as is_integer returns True for these
            if not is_period_dtype(self):
                maybe_integer_op_deprecated(self)
            result = self._time_shift(-other)
        elif isinstance(other, Period):
            result = self._sub_period(other)
        # array-like others
        elif is_timedelta64_dtype(other):
            # TimedeltaIndex, ndarray[timedelta64]
            result = self._add_delta(-other)
        elif is_offsetlike(other):
            # Array/Index of DateOffset objects
            result = self._addsub_offset_array(other, operator.sub)
        elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other):
            # DatetimeIndex, ndarray[datetime64]
            result = self._sub_datetime_arraylike(other)
        elif is_period_dtype(other):
            # PeriodIndex
            result = self._sub_period_array(other)
        elif is_integer_dtype(other):
            if not is_period_dtype(self):
                maybe_integer_op_deprecated(self)
            result = self._addsub_int_array(other, operator.sub)
        elif isinstance(other, ABCIndexClass):
            raise TypeError("cannot subtract {cls} and {typ}"
                            .format(cls=type(self).__name__,
                                    typ=type(other).__name__))
        elif is_float_dtype(other):
            # Explicitly catch invalid dtypes
            raise TypeError("cannot subtract {dtype}-dtype from {cls}"
                            .format(dtype=other.dtype,
                                    cls=type(self).__name__))
        elif is_extension_array_dtype(other):
            # Categorical op will raise; defer explicitly
            return NotImplemented
        else:  # pragma: no cover
            return NotImplemented
        # Wrap a bare timedelta64 ndarray result back into a TimedeltaArray.
        if is_timedelta64_dtype(result) and isinstance(result, np.ndarray):
            from pandas.core.arrays import TimedeltaArray
            # TODO: infer freq?
            return TimedeltaArray(result)
        return result
    def __rsub__(self, other):
        """
        Reflected subtraction (``other - self``).  Handles the cases where
        the result type depends on which operand drives the operation;
        everything else is delegated via ``-(self - other)``.
        """
        if is_datetime64_dtype(other) and is_timedelta64_dtype(self):
            # ndarray[datetime64] cannot be subtracted from self, so
            # we need to wrap in DatetimeArray/Index and flip the operation
            if not isinstance(other, DatetimeLikeArrayMixin):
                # Avoid down-casting DatetimeIndex
                from pandas.core.arrays import DatetimeArray
                other = DatetimeArray(other)
            return other - self
        elif (is_datetime64_any_dtype(self) and hasattr(other, 'dtype') and
                not is_datetime64_any_dtype(other)):
            # GH#19959 datetime - datetime is well-defined as timedelta,
            # but any other type - datetime is not well-defined.
            raise TypeError("cannot subtract {cls} from {typ}"
                            .format(cls=type(self).__name__,
                                    typ=type(other).__name__))
        elif is_period_dtype(self) and is_timedelta64_dtype(other):
            # TODO: Can we simplify/generalize these cases at all?
            raise TypeError("cannot subtract {cls} from {dtype}"
                            .format(cls=type(self).__name__,
                                    dtype=other.dtype))
        # Generic fallback: other - self == -(self - other).
        return -(self - other)
# FIXME: DTA/TDA/PA inplace methods should actually be inplace, GH#24115
    def __iadd__(self, other):
        # alias for __add__; not a true in-place operation (GH#24115)
        return self.__add__(other)
    def __isub__(self, other):
        # alias for __sub__; not a true in-place operation (GH#24115)
        return self.__sub__(other)
# --------------------------------------------------------------
# Comparison Methods
    def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
                          from_utc=False):
        """
        Ensure that we are re-localized.

        This is for compat as we can then call this on all datetimelike
        arrays generally (ignored for Period/Timedelta)

        Parameters
        ----------
        arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray]
        ambiguous : str, bool, or bool-ndarray, default 'raise'
        nonexistent : str, default 'raise'
        from_utc : bool, default False
            If True, localize the i8 ndarray to UTC first before converting to
            the appropriate tz. If False, localize directly to the tz.

        Returns
        -------
        localized array
            ``arg`` unchanged when ``self`` has no ``tz`` attribute or it
            is None; otherwise a tz-localized/converted instance of
            ``type(self)``.
        """
        # reconvert to local tz
        tz = getattr(self, 'tz', None)
        if tz is not None:
            if not isinstance(arg, type(self)):
                # Wrap raw values so the tz_* methods are available.
                arg = self._simple_new(arg)
            if from_utc:
                arg = arg.tz_localize('UTC').tz_convert(self.tz)
            else:
                arg = arg.tz_localize(
                    self.tz, ambiguous=ambiguous, nonexistent=nonexistent
                )
        return arg
# --------------------------------------------------------------
# Reductions
def _reduce(self, name, axis=0, skipna=True, **kwargs):
op = getattr(self, name, None)
if op:
return op(axis=axis, skipna=skipna, **kwargs)
else:
return super(DatetimeLikeArrayMixin, self)._reduce(
name, skipna, **kwargs
)
    def min(self, axis=None, skipna=True, *args, **kwargs):
        """
        Return the minimum value of the Array or minimum along
        an axis.

        Returns NaT when the result is missing (e.g. all-NaT input with
        ``skipna=True``).

        See Also
        --------
        numpy.ndarray.min
        Index.min : Return the minimum value in an Index.
        Series.min : Return the minimum value in a Series.
        """
        # *args/**kwargs exist only for numpy compat; reject anything real.
        nv.validate_min(args, kwargs)
        nv.validate_minmax_axis(axis)
        # Reduce over the underlying i8 values, masking missing positions.
        result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
        if isna(result):
            # Period._from_ordinal does not handle np.nan gracefully
            return NaT
        return self._box_func(result)
    def max(self, axis=None, skipna=True, *args, **kwargs):
        """
        Return the maximum value of the Array or maximum along
        an axis.

        Returns NaT when the array is empty (after NaT removal with
        ``skipna=True``) or when any value is missing with
        ``skipna=False``.

        See Also
        --------
        numpy.ndarray.max
        Index.max : Return the maximum value in an Index.
        Series.max : Return the maximum value in a Series.
        """
        # TODO: skipna is broken with max.
        # See https://github.com/pandas-dev/pandas/issues/24265
        # *args/**kwargs exist only for numpy compat; reject anything real.
        nv.validate_max(args, kwargs)
        nv.validate_minmax_axis(axis)
        mask = self.isna()
        if skipna:
            # Drop missing values before reducing.
            values = self[~mask].asi8
        elif mask.any():
            # skipna=False with any missing value propagates NaT.
            return NaT
        else:
            values = self.asi8
        if not len(values):
            # short-circut for empty max / min
            return NaT
        result = nanops.nanmax(values, skipna=skipna)
        # Don't have to worry about NA `result`, since no NA went in.
        return self._box_func(result)
# -------------------------------------------------------------------
# Shared Constructor Helpers
def validate_periods(periods):
    """
    If a `periods` argument is passed to the Datetime/Timedelta Array/Index
    constructor, cast it to an integer.

    Parameters
    ----------
    periods : None, float, int

    Returns
    -------
    periods : None or int

    Raises
    ------
    TypeError
        If `periods` is not None and cannot be interpreted as a number.
    """
    if periods is None:
        return None
    if lib.is_float(periods):
        # Floats (e.g. from user input) are truncated to int.
        return int(periods)
    if not lib.is_integer(periods):
        raise TypeError('periods must be a number, got {periods}'
                        .format(periods=periods))
    return periods
def validate_endpoints(closed):
    """
    Check that the `closed` argument is among [None, "left", "right"].

    Parameters
    ----------
    closed : {None, "left", "right"}

    Returns
    -------
    left_closed : bool
    right_closed : bool

    Raises
    ------
    ValueError : if argument is not among valid values
    """
    # None means closed on both sides; otherwise exactly one side.
    if closed is None:
        return True, True
    if closed == "left":
        return True, False
    if closed == "right":
        return False, True
    raise ValueError("Closed has to be either 'left', 'right' or None")
def validate_inferred_freq(freq, inferred_freq, freq_infer):
    """
    If the user passes a freq and another freq is inferred from passed data,
    require that they match.

    Parameters
    ----------
    freq : DateOffset or None
    inferred_freq : DateOffset or None
    freq_infer : bool

    Returns
    -------
    freq : DateOffset or None
    freq_infer : bool

    Notes
    -----
    We assume at this point that `maybe_infer_freq` has been called, so
    `freq` is either a DateOffset object or None.
    """
    if inferred_freq is None:
        # Nothing was inferred; pass the inputs through untouched.
        return freq, freq_infer
    if freq is not None and freq != inferred_freq:
        raise ValueError('Inferred frequency {inferred} from passed '
                         'values does not conform to passed frequency '
                         '{passed}'
                         .format(inferred=inferred_freq,
                                 passed=freq.freqstr))
    if freq is None:
        # Adopt the inferred frequency.
        freq = inferred_freq
    # A concrete inferred freq means no further inference is needed.
    return freq, False
def maybe_infer_freq(freq):
    """
    Comparing a DateOffset to the string "infer" raises, so we need to
    be careful about comparisons. Make a dummy variable `freq_infer` to
    signify the case where the given freq is "infer" and set freq to None
    to avoid comparison trouble later on.

    Parameters
    ----------
    freq : {DateOffset, None, str}

    Returns
    -------
    freq : {DateOffset, None}
    freq_infer : bool
    """
    if isinstance(freq, DateOffset):
        # Already a concrete offset; nothing to do.
        return freq, False
    if freq != 'infer':
        # if a passed freq is None, don't infer automatically
        return frequencies.to_offset(freq), False
    # "infer" sentinel: clear freq and flag inference for later.
    return None, True
def _ensure_datetimelike_to_i8(other, to_utc=False):
    """
    Helper for coercing an input scalar or array to i8.

    Parameters
    ----------
    other : 1d array
    to_utc : bool, default False
        If True, convert the values to UTC before extracting the i8 values
        If False, extract the i8 values directly.

    Returns
    -------
    i8 1d array
        (or the scalar ``iNaT`` when ``other`` is an NA scalar)
    """
    from pandas import Index
    from pandas.core.arrays import PeriodArray
    if lib.is_scalar(other) and isna(other):
        # NA scalar maps to the integer NaT sentinel.
        return iNaT
    elif isinstance(other, (PeriodArray, ABCIndexClass,
                            DatetimeLikeArrayMixin)):
        # convert tz if needed
        if getattr(other, 'tz', None) is not None:
            if to_utc:
                other = other.tz_convert('UTC')
            else:
                # Drop the tz instead of converting.
                other = other.tz_localize(None)
    else:
        try:
            # Plain array-likes can be reinterpreted as i8 directly.
            return np.array(other, copy=False).view('i8')
        except TypeError:
            # period array cannot be coerced to int
            other = Index(other)
    return other.asi8
| |
from mock import patch
from random import choice, randint
from django.apps import apps
from django.test import TestCase
from corehq.apps.accounting.tests.generator import init_default_currency
from corehq.apps.sms.models import SMS, SQLMobileBackend
from corehq.apps.smsbillables.management.commands.bootstrap_usage_fees import bootstrap_usage_fees
from corehq.apps.smsbillables.models import (
add_twilio_gateway_fee,
SmsBillable,
SmsGatewayFee,
SmsGatewayFeeCriteria,
SmsUsageFee,
SmsUsageFeeCriteria
)
from corehq.apps.smsbillables.tests import generator
from corehq.apps.smsbillables.tests.utils import FakeTwilioMessageFactory
from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend
class TestGatewayFee(TestCase):
    """Verify that ``SmsBillable.create`` matches each message to the
    correct ``SmsGatewayFee``.

    Fees are registered at increasing levels of specificity — by
    direction/backend API, country code, backend instance, the
    combination of those, and finally number prefix — and each test
    checks that the most specific applicable fee is the one charged.
    """

    def setUp(self):
        super(TestGatewayFee, self).setUp()
        self.currency_usd = init_default_currency()
        self.backend_ids = generator.arbitrary_backend_ids()
        self.message_logs = generator.arbitrary_messages_by_backend_and_direction(self.backend_ids)
        self.least_specific_fees = generator.arbitrary_fees_by_direction_and_backend()
        self.country_code_fees = generator.arbitrary_fees_by_country()
        self.instance_fees = generator.arbitrary_fees_by_backend_instance(self.backend_ids)
        self.most_specific_fees = generator.arbitrary_fees_by_all(self.backend_ids)
        self.country_code_and_prefixes = generator.arbitrary_country_code_and_prefixes(3, 3)
        self.prefix_fees = generator.arbitrary_fees_by_prefix(self.backend_ids, self.country_code_and_prefixes)
        self.other_currency = generator.arbitrary_currency()
        # Must remove existing data populated in migrations
        SmsGatewayFee.objects.all().delete()
        SmsGatewayFeeCriteria.objects.all().delete()

    def create_least_specific_gateway_fees(self):
        """Register fees keyed only on direction + backend API id."""
        for direction, fees in self.least_specific_fees.items():
            for backend_api_id, amount in fees.items():
                SmsGatewayFee.create_new(backend_api_id, direction, amount)

    def create_other_currency_fees(self):
        """Register direction/backend fees denominated in a non-default currency."""
        for direction, fees in self.least_specific_fees.items():
            for backend_api_id, amount in fees.items():
                SmsGatewayFee.create_new(backend_api_id, direction, amount, currency=self.other_currency)

    def create_country_code_gateway_fees(self):
        """Register fees that additionally match on country code."""
        for direction, backend in self.country_code_fees.items():
            for backend_api_id, country in backend.items():
                for country_code, amount in country.items():
                    SmsGatewayFee.create_new(backend_api_id, direction, amount, country_code=country_code)

    def create_instance_gateway_fees(self):
        """Register fees that additionally match on a specific backend instance."""
        for direction, backend in self.instance_fees.items():
            for backend_api_id, (backend_instance, amount) in backend.items():
                SmsGatewayFee.create_new(backend_api_id, direction, amount, backend_instance=backend_instance)

    def create_most_specific_gateway_fees(self):
        """Register fees matching on both country code and backend instance."""
        for direction, backend in self.most_specific_fees.items():
            for backend_api_id, country in backend.items():
                for country_code, (backend_instance, amount) in country.items():
                    SmsGatewayFee.create_new(backend_api_id, direction, amount,
                                             country_code=country_code, backend_instance=backend_instance)

    def create_prefix_gateway_fees(self):
        """Register fees matching on country code, number prefix, and instance."""
        for direction, backend in self.prefix_fees.items():
            for backend_api_id, country in backend.items():
                for country_code, prfx in country.items():
                    for prefix, backend_instance_and_amount in prfx.items():
                        for backend_instance, amount in backend_instance_and_amount.items():
                            SmsGatewayFee.create_new(
                                backend_api_id,
                                direction,
                                amount,
                                country_code=country_code,
                                prefix=prefix,
                                backend_instance=backend_instance,
                            )

    def test_least_specific_fees(self):
        """A direction/backend-level fee is charged when nothing more specific exists."""
        self.create_least_specific_gateway_fees()
        for msg_log in self.message_logs:
            billable = SmsBillable.create(msg_log)
            self.assertIsNotNone(billable)
            self.assertEqual(
                billable.gateway_charge,
                self.least_specific_fees[billable.direction][billable.gateway_fee.criteria.backend_api_id]
            )

    def test_multipart_gateway_charge(self):
        """The gateway charge scales linearly with the multipart count."""
        self.create_least_specific_gateway_fees()
        for msg_log in self.message_logs:
            multipart_count = randint(1, 10)
            billable = SmsBillable.create(msg_log, multipart_count=multipart_count)
            self.assertIsNotNone(billable)
            self.assertEqual(
                billable.gateway_charge,
                self.least_specific_fees
                [billable.direction]
                [billable.gateway_fee.criteria.backend_api_id] * multipart_count
            )

    def test_other_currency_fees(self):
        """Fees in another currency are converted via rate_to_default."""
        self.create_other_currency_fees()
        for msg_log in self.message_logs:
            billable = SmsBillable.create(msg_log)
            self.assertIsNotNone(billable)
            self.assertEqual(
                billable.gateway_charge,
                self.least_specific_fees[billable.direction][billable.gateway_fee.criteria.backend_api_id]
                / self.other_currency.rate_to_default
            )

    def test_country_code_fees(self):
        """Country-code fees win over direction/backend-only fees."""
        self.create_least_specific_gateway_fees()
        self.create_country_code_gateway_fees()
        phone_numbers = [generator.arbitrary_phone_number() for i in range(10)]
        for phone_number in phone_numbers:
            messages = generator.arbitrary_messages_by_backend_and_direction(self.backend_ids,
                                                                             phone_number=phone_number)
            for msg_log in messages:
                billable = SmsBillable.create(msg_log)
                self.assertIsNotNone(billable)
                # The country code is whatever precedes the 10-digit local number.
                self.assertEqual(
                    billable.gateway_charge,
                    self.country_code_fees[billable.direction]
                    [billable.gateway_fee.criteria.backend_api_id]
                    [int(phone_number[:-10])]
                )

    def test_instance_fees(self):
        """Backend-instance fees win over country-code and backend-level fees."""
        self.create_least_specific_gateway_fees()
        self.create_country_code_gateway_fees()
        self.create_instance_gateway_fees()
        phone_numbers = [generator.arbitrary_phone_number() for i in range(10)]
        for phone_number in phone_numbers:
            messages = generator.arbitrary_messages_by_backend_and_direction(self.backend_ids,
                                                                             phone_number=phone_number)
            for msg_log in messages:
                billable = SmsBillable.create(msg_log)
                self.assertIsNotNone(billable)
                self.assertEqual(
                    billable.gateway_charge,
                    self.instance_fees[billable.direction]
                    [billable.gateway_fee.criteria.backend_api_id]
                    [1]
                )

    def test_specific_fees(self):
        """Fees matching both country code and instance beat all less specific fees."""
        self.create_least_specific_gateway_fees()
        self.create_country_code_gateway_fees()
        self.create_instance_gateway_fees()
        self.create_most_specific_gateway_fees()
        phone_numbers = [generator.arbitrary_phone_number() for i in range(10)]
        for phone_number in phone_numbers:
            messages = generator.arbitrary_messages_by_backend_and_direction(self.backend_ids,
                                                                             phone_number=phone_number)
            for msg_log in messages:
                billable = SmsBillable.create(msg_log)
                self.assertIsNotNone(billable)
                self.assertEqual(
                    billable.gateway_charge,
                    self.most_specific_fees[billable.direction]
                    [billable.gateway_fee.criteria.backend_api_id]
                    [int(phone_number[:-10])]
                    [1]
                )

    def test_prefix_fees(self):
        """Prefix-based fees are matched on the number's prefix after the country code."""
        self.create_prefix_gateway_fees()
        for phone_number, prefix in generator.arbitrary_phone_numbers_and_prefixes(
            self.country_code_and_prefixes
        ):
            # NOTE: list(...) around the dict keys keeps random.choice
            # working on Python 3, where dict views are not indexable;
            # on Python 2 it is equivalent to .keys().
            messages = generator.arbitrary_messages_by_backend_and_direction(
                {
                    random_key: self.backend_ids[random_key]
                    for random_key in [choice(list(self.backend_ids))]
                },
                phone_number=phone_number,
            )
            for msg_log in messages:
                billable = SmsBillable.create(msg_log)
                self.assertIsNotNone(billable)
                try:
                    self.assertEqual(
                        billable.gateway_charge,
                        self.prefix_fees
                        [billable.direction]
                        [billable.gateway_fee.criteria.backend_api_id]
                        [phone_number[:-10]]
                        [prefix]
                        [msg_log.backend_id]
                    )
                except AssertionError:
                    # Surface the prefix that actually matched to ease debugging.
                    raise Exception(
                        "Phone number: %s, " % phone_number
                        + "given prefix: %s, " % prefix
                        + "found prefix: %s" % billable.gateway_fee.criteria.prefix
                    )

    def test_no_matching_fee(self):
        """Messages with unknown directions produce a billable with no gateway fee."""
        self.create_least_specific_gateway_fees()
        self.create_country_code_gateway_fees()
        self.create_instance_gateway_fees()
        self.create_most_specific_gateway_fees()
        phone_numbers = [generator.arbitrary_phone_number() for i in range(10)]
        for phone_number in phone_numbers:
            messages = generator.arbitrary_messages_by_backend_and_direction(self.backend_ids,
                                                                             phone_number=phone_number,
                                                                             directions=['X', 'Y'])
            for msg_log in messages:
                billable = SmsBillable.create(msg_log)
                self.assertIsNotNone(billable)
                self.assertIsNone(billable.gateway_fee)

    @patch(
        'twilio.rest.resources.messages.Messages.get',
        lambda self, message_id: FakeTwilioMessageFactory.get_message(message_id)
    )
    def test_twilio_global_backend(self):
        """Global Twilio backends bill the price reported by the Twilio API,
        ignoring the multipart count."""
        add_twilio_gateway_fee(apps)
        twilio_backend = SQLTwilioBackend.objects.create(
            name='TWILIO',
            is_global=True,
            hq_api_id=SQLTwilioBackend.get_api_id(),
            couch_id='global_backend',
        )
        twilio_backend.set_extra_fields(
            account_sid='sid',
            auth_token='token',
        )
        twilio_backend.save()
        messages = [
            message
            for phone_number in [generator.arbitrary_phone_number() for _ in range(10)]
            for message in generator.arbitrary_messages_by_backend_and_direction(
                {twilio_backend.hq_api_id: twilio_backend.couch_id}, phone_number=phone_number
            )
        ]
        for msg_log in messages:
            FakeTwilioMessageFactory.add_price_for_message(msg_log.backend_message_id, generator.arbitrary_fee())
        for msg_log in messages:
            multipart_count = randint(1, 10)  # Should be ignored
            billable = SmsBillable.create(msg_log, multipart_count=multipart_count)
            self.assertIsNotNone(billable)
            self.assertIsNotNone(billable.gateway_fee)
            self.assertEqual(
                billable.gateway_charge,
                FakeTwilioMessageFactory.get_price_for_message(msg_log.backend_message_id)
            )

    @patch('corehq.apps.smsbillables.models.log_smsbillables_error')
    @patch(
        'twilio.rest.resources.messages.Messages.get',
        lambda self, message_id: FakeTwilioMessageFactory.get_message(message_id)
    )
    def test_twilio_domain_level_backend(self, mock_log_smsbillables_error):
        """Domain-level Twilio backends get no gateway fee and a zero charge,
        without logging any billing errors."""
        add_twilio_gateway_fee(apps)
        bootstrap_usage_fees(apps)
        twilio_backend = SQLTwilioBackend.objects.create(
            name='TWILIO',
            is_global=False,
            hq_api_id=SQLTwilioBackend.get_api_id(),
            couch_id='domain_backend',
        )
        twilio_backend.set_extra_fields(
            account_sid='sid',
            auth_token='token',
        )
        twilio_backend.save()
        messages = [
            message
            for phone_number in [generator.arbitrary_phone_number() for _ in range(10)]
            for message in generator.arbitrary_messages_by_backend_and_direction(
                {twilio_backend.hq_api_id: twilio_backend.couch_id}, phone_number=phone_number
            )
        ]
        for msg_log in messages:
            FakeTwilioMessageFactory.add_price_for_message(msg_log.backend_message_id, generator.arbitrary_fee())
        for msg_log in messages:
            multipart_count = randint(1, 10)  # Should be ignored
            billable = SmsBillable.create(msg_log, multipart_count=multipart_count)
            self.assertIsNotNone(billable)
            self.assertIsNone(billable.gateway_fee)
            self.assertEqual(billable.gateway_charge, 0)
            self.assertEqual(mock_log_smsbillables_error.call_count, 0)

    def tearDown(self):
        SmsBillable.objects.all().delete()
        SmsGatewayFee.objects.all().delete()
        SmsGatewayFeeCriteria.objects.all().delete()
        SmsUsageFee.objects.all().delete()
        SmsUsageFeeCriteria.objects.all().delete()
        self.currency_usd.delete()
        self.other_currency.delete()
        SMS.by_domain(generator.TEST_DOMAIN).delete()
        # .items() (rather than the Python 2-only .iteritems()) behaves
        # identically on Python 2 and also works on Python 3.
        for api_id, backend_id in self.backend_ids.items():
            SQLMobileBackend.load(backend_id, is_couch_id=True).delete()
        FakeTwilioMessageFactory.backend_message_id_to_price = {}
        super(TestGatewayFee, self).tearDown()
| |
import wx
# Overall menu styles
StyleDefault = 0
StyleXP = 1
Style2007 = 2
StyleVista = 3
# Menu shadows
RightShadow = 1 # Right side shadow
BottomShadow = 2 # Not full bottom shadow
BottomShadowFull = 4 # Full bottom shadow
# Button styles (bit flags; may be OR-ed together)
BU_EXT_XP_STYLE = 1
BU_EXT_2007_STYLE = 2
BU_EXT_LEFT_ALIGN_STYLE = 4
BU_EXT_CENTER_ALIGN_STYLE = 8
BU_EXT_RIGHT_ALIGN_STYLE = 16
BU_EXT_RIGHT_TO_LEFT_STYLE = 32
# Control state
ControlPressed = 0
ControlFocus = 1
ControlDisabled = 2
ControlNormal = 3
# FlatMenu styles (bit flags)
FM_OPT_IS_LCD = 1
""" Use this style if your computer uses a LCD screen. """
FM_OPT_MINIBAR = 2
""" Use this if you plan to use the toolbar only. """
FM_OPT_SHOW_CUSTOMIZE = 4
""" Show "customize link" in the `More` menu, you will need to write your own handler. See demo. """
FM_OPT_SHOW_TOOLBAR = 8
""" Set this option is you are planning to use the toolbar. """
# Control status
ControlStatusNoFocus = 0
ControlStatusFocus = 1
ControlStatusPressed = 2
# HitTest constants
NoWhere = 0
MenuItem = 1
ToolbarItem = 2
DropDownArrowButton = 3
# Toolbar item kinds
FTB_ITEM_TOOL = 0
FTB_ITEM_SEPARATOR = 1
FTB_ITEM_CHECK = 2
FTB_ITEM_RADIO = 3
FTB_ITEM_RADIO_MENU = 4
FTB_ITEM_CUSTOM = 5
# Icon sizes in pixels
LargeIcons = 32
SmallIcons = 16
# Menu hit-test results
MENU_HT_NONE = 0
MENU_HT_ITEM = 1
MENU_HT_SCROLL_UP = 2
MENU_HT_SCROLL_DOWN = 3
# Menu decoration sides
MENU_DEC_TOP = 0
MENU_DEC_BOTTOM = 1
MENU_DEC_LEFT = 2
MENU_DEC_RIGHT = 3
# Layout metrics (pixels)
DROP_DOWN_ARROW_WIDTH = 16
SPACER = 12
MARGIN = 3
TOOLBAR_SPACER = 4
TOOLBAR_MARGIN = 4
SEPARATOR_WIDTH = 12
SCROLL_BTN_HEIGHT = 20
# Win32 window class style enabling a drop shadow
CS_DROPSHADOW = 0x00020000
# LabelBook / ImageNotebook styles (bit flags)
INB_BOTTOM = 1
INB_LEFT = 2
INB_RIGHT = 4
INB_TOP = 8
INB_BORDER = 16
INB_SHOW_ONLY_TEXT = 32
INB_SHOW_ONLY_IMAGES = 64
INB_FIT_BUTTON = 128
INB_DRAW_SHADOW = 256
INB_USE_PIN_BUTTON = 512
INB_GRADIENT_BACKGROUND = 1024
INB_WEB_HILITE = 2048
INB_NO_RESIZE = 4096
INB_FIT_LABELTEXT = 8192
INB_BOLD_TAB_SELECTION = 16384
INB_DEFAULT_STYLE = INB_BORDER | INB_TOP | INB_USE_PIN_BUTTON
# LabelBook colour identifiers
INB_TAB_AREA_BACKGROUND_COLOUR = 100
INB_ACTIVE_TAB_COLOUR = 101
INB_TABS_BORDER_COLOUR = 102
INB_TEXT_COLOUR = 103
INB_ACTIVE_TEXT_COLOUR = 104
INB_HILITE_TAB_COLOUR = 105
INB_LABEL_BOOK_DEFAULT = INB_DRAW_SHADOW | INB_BORDER | INB_USE_PIN_BUTTON | INB_LEFT
# HitTest results
IMG_OVER_IMG = 0
IMG_OVER_PIN = 1
IMG_OVER_EW_BORDER = 2
IMG_NONE = 3
# Pin button states
INB_PIN_NONE = 0
INB_PIN_HOVER = 200
INB_PIN_PRESSED = 201
# Windows Vista Colours
rgbSelectOuter = wx.Colour(170, 200, 245)
rgbSelectInner = wx.Colour(230, 250, 250)
rgbSelectTop = wx.Colour(210, 240, 250)
rgbSelectBottom = wx.Colour(185, 215, 250)
# 16x16 XPM check mark drawn in black ('`') on white ('m').
check_mark_xpm = [" 16 16 16 1",
                  "` c #000000",
                  ". c #800000",
                  "# c #008000",
                  "a c #808000",
                  "b c #000080",
                  "c c #800080",
                  "d c #008080",
                  "e c #808080",
                  "f c #c0c0c0",
                  "g c #ff0000",
                  "h c #00ff00",
                  "i c #ffff00",
                  "j c #0000ff",
                  "k c #ff00ff",
                  "l c #00ffff",
                  "m c #ffffff",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmm`mmmmm",
                  "mmmmmmmmm``mmmmm",
                  "mmmm`mmm```mmmmm",
                  "mmmm``m```mmmmmm",
                  "mmmm`````mmmmmmm",
                  "mmmmm```mmmmmmmm",
                  "mmmmmm`mmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm"
                  ]
# 16x16 XPM filled dot used for checked radio menu items.
radio_item_xpm = [" 16 16 16 1",
                  "` c #000000",
                  ". c #800000",
                  "# c #008000",
                  "a c #808000",
                  "b c #000080",
                  "c c #800080",
                  "d c #008080",
                  "e c #808080",
                  "f c #c0c0c0",
                  "g c #ff0000",
                  "h c #00ff00",
                  "i c #ffff00",
                  "j c #0000ff",
                  "k c #ff00ff",
                  "l c #00ffff",
                  "m c #ffffff",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmm```mmmmmmm",
                  "mmmmm`````mmmmmm",
                  "mmmmm`````mmmmmm",
                  "mmmmmm```mmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm",
                  "mmmmmmmmmmmmmmmm"]
# 16x16 XPM right-pointing arrow shown on submenu items.
menu_right_arrow_xpm = [
    " 16 16 8 1",
    "` c #ffffff",
    ". c #000000",
    "# c #000000",
    "a c #000000",
    "b c #000000",
    "c c #000000",
    "d c #000000",
    "e c #000000",
    "````````````````",
    "````````````````",
    "````````````````",
    "````````````````",
    "````````````````",
    "``````.`````````",
    "``````..````````",
    "``````...```````",
    "``````....``````",
    "``````...```````",
    "``````..````````",
    "``````.`````````",
    "````````````````",
    "````````````````",
    "````````````````",
    "````````````````"
    ]
#----------------------------------
# Shadow images
#----------------------------------
shadow_right_xpm = ["5 5 1 1"," c Black"," "," "," "," "," "]
# shadow_right.xpm 5x5
shadow_right_alpha = [168, 145, 115, 76, 46, 168, 145, 115, 76, 46, 168, 145, 115, 76, 46,
168, 145, 115, 76, 46, 168, 145, 115, 76, 46]
shadow_right_top_xpm = ["5 10 1 1"," c Black"," "," "," "," ",
" "," "," "," "," "," "]
shadow_right_top_alpha = [40, 35, 28, 18, 11, 67, 58, 46, 31, 18, 101, 87, 69, 46, 28,
128, 110, 87, 58, 35, 148, 128, 101, 67, 40, 161, 139, 110, 73, 44,
168, 145, 115, 76, 46, 168, 145, 115, 76, 46, 168, 145, 115, 76, 46,
168, 145, 115, 76, 46]
# shadow_buttom.xpm 5x5
shadow_bottom_alpha = [184, 184, 184, 184, 184, 168, 168, 168, 168, 168, 145, 145, 145, 145, 145,
115, 115, 115, 115, 115, 76, 76, 76, 76, 76]
shadow_bottom_left_xpm = ["10 5 1 1"," c Black"," "," ",
" "," "," "]
shadow_bottom_left_alpha = [22, 44, 73, 110, 139, 161, 176, 184, 184, 184,
20, 40, 67, 101, 128, 148, 161, 168, 168, 168,
17, 35, 58, 87, 110, 128, 139, 145, 145, 145,
13, 28, 46, 69, 87, 101, 110, 115, 115, 115,
9, 18, 31, 46, 58, 67, 73, 76, 76, 76]
shadow_center_xpm = ["5 5 1 1"," c Black"," "," "," "," "," "]
shadow_center_alpha = [161, 139, 110, 73, 44, 148, 128, 101, 67, 40,
128, 110, 87, 58, 35, 101, 87, 69, 46, 28,
67, 58, 46, 31, 18]
shadow_bottom_xpm = ["5 5 1 1"," c Black"," "," "," "," "," "]
arrow_down_xpm = ["16 16 3 1",
". c Black",
"X c #FFFFFF",
" c #008080",
" ",
" ",
" ",
" ",
" ....... ",
" XXXXXXX ",
" ",
" ....... ",
" X.....X ",
" X...X ",
" X.X ",
" X ",
" ",
" ",
" ",
" "]
#---------------------------------------------
# Pin images
#---------------------------------------------
pin_left_xpm = [" 16 16 8 1",
"` c #ffffff",
". c #000000",
"# c #808080",
"a c #000000",
"b c #000000",
"c c #000000",
"d c #000000",
"e c #000000",
"````````````````",
"````````````````",
"```````.````````",
"```````.````````",
"```````.......``",
"```````.`````.``",
"`````...`````.``",
"``......#####.``",
"`````...#####.``",
"```````.......``",
"```````.......``",
"```````.````````",
"```````.````````",
"````````````````",
"````````````````",
"````````````````"]
pin_down_xpm = [" 16 16 8 1",
"` c #ffffff",
". c #000000",
"# c #808080",
"a c #000000",
"b c #000000",
"c c #000000",
"d c #000000",
"e c #000000",
"````````````````",
"````````````````",
"````.......`````",
"````.``##..`````",
"````.``##..`````",
"````.``##..`````",
"````.``##..`````",
"````.``##..`````",
"``...........```",
"``````...```````",
"``````...```````",
"```````.````````",
"```````.````````",
"```````.````````",
"````````````````",
"````````````````"]
# Raw 16x16 4-bit BMP file data ('BM' header) for the up arrow; the
# trailing backslashes are line continuations inside one string literal.
arrow_up = 'BM\xf6\x00\x00\x00\x00\x00\x00\x00v\x00\x00\x00(\x00\x00\x00\x10\x00\x00\
\x00\x10\x00\x00\x00\x01\x00\x04\x00\x00\x00\x00\x00\x80\x00\x00\x00\x12\x0b\x00\x00\x12\
\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x80\x80\x00\
\x00w\xfcM\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00""""""""""""""""""""""""""""""""""\x00\x00\x00\x02""""\x11\x11\
\x11\x12""""""""""""\x00\x00\x00\x02""""\x10\x00\x00\x12""""!\x00\x01""""""\x10\x12""""""!\
""""""""""""""""""""""""""""""""""""'
# Raw 16x16 4-bit BMP file data for the matching down arrow.
arrow_down = 'BM\xf6\x00\x00\x00\x00\x00\x00\x00v\x00\x00\x00(\x00\x00\x00\x10\x00\x00\x00\
\x10\x00\x00\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x0b\x00\x00\x12\x0b\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x80\x80\x00\x00w\
\xfcM\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00"""""""""""""""""""""""""""""""""""!"""""""\x10\x12"""""!\x00\x01\
"""""\x10\x00\x00\x12""""\x00\x00\x00\x02""""""""""""\x11\x11\x11\x12""""\x00\x00\x00\x02\
""""""""""""""""""""""""""""""""""'
menu_up_arrow_xpm = ["16 16 2 1",
". c Black",
" c White",
" ",
" ",
" ",
" ",
" ",
" ",
" . ",
" ... ",
" ..... ",
" ",
" ",
" ",
" ",
" ",
" ",
" "]
menu_down_arrow_xpm = ["16 16 2 1",
". c Black",
" c White",
" ",
" ",
" ",
" ",
" ",
" ",
" ..... ",
" ... ",
" . ",
" ",
" ",
" ",
" ",
" ",
" ",
" "]
def getMenuUpArrowBitmap():
    """Return the menu scroll-up arrow bitmap with white masked out."""
    bitmap = wx.BitmapFromXPMData(menu_up_arrow_xpm)
    mask = wx.Mask(bitmap, wx.WHITE)
    bitmap.SetMask(mask)
    return bitmap
def getMenuDownArrowBitmap():
    """Return the menu scroll-down arrow bitmap with white masked out."""
    bitmap = wx.BitmapFromXPMData(menu_down_arrow_xpm)
    mask = wx.Mask(bitmap, wx.WHITE)
    bitmap.SetMask(mask)
    return bitmap
| |
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Generator, Iterable, Iterator, List, Mapping,
Optional, Sized, Tuple, Union, IO)
from django.core.urlresolvers import LocaleRegexURLResolver
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.http import HttpResponse
from django.db.utils import IntegrityError
from django.utils.translation import ugettext as _
from zerver.lib.avatar import avatar_url
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib import cache
from zerver.tornado import event_queue
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions
)
from zerver.models import (
get_stream,
get_user_profile_by_email,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
import collections
import base64
import mock
import os
import re
import sys
import time
import ujson
import unittest
from six.moves import urllib
from six import binary_type
from typing import Text
from zerver.lib.str_utils import NonBinaryStr
from contextlib import contextmanager
import six
import fakeldap
import ldap
class MockLDAP(fakeldap.MockLDAP):
    """fakeldap.MockLDAP extended with nested subclasses of python-ldap's
    exception types, so test code can reference them as attributes of the
    mock the same way the real `ldap` module exposes them."""
    class LDAPError(ldap.LDAPError):
        pass
    class INVALID_CREDENTIALS(ldap.INVALID_CREDENTIALS):
        pass
    class NO_SUCH_OBJECT(ldap.NO_SUCH_OBJECT):
        pass
    class ALREADY_EXISTS(ldap.ALREADY_EXISTS):
        pass
@contextmanager
def simulated_queue_client(client):
    # type: (type) -> Iterator[None]
    """Temporarily install *client* as queue_processors.SimpleQueueClient.

    The real class is restored in a finally block; the original version
    skipped the restore when the with-body raised, leaking the mock into
    subsequent tests.
    """
    real_SimpleQueueClient = queue_processors.SimpleQueueClient
    queue_processors.SimpleQueueClient = client  # type: ignore # https://github.com/JukkaL/mypy/issues/1152
    try:
        yield
    finally:
        queue_processors.SimpleQueueClient = real_SimpleQueueClient  # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst):
    # type: (List[Mapping[str, Any]]) -> Iterator[None]
    """Redirect Tornado event-queue notifications into *lst*.

    The real process_notification hook is restored in a finally block; the
    original version skipped the restore when the with-body raised.
    """
    real_event_queue_process_notification = event_queue.process_notification
    event_queue.process_notification = lst.append
    try:
        yield
    finally:
        event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
    # type: () -> Generator[List[Tuple[str, Union[Text, List[Text]], Text]], None, None]
    """Patch zerver.lib.cache so every lookup misses, recording each query
    as a (verb, key(s), cache_name) tuple yielded to the caller.

    The real cache functions are restored in a finally block; the original
    version skipped the restore when the with-body raised.
    """
    cache_queries = []  # type: List[Tuple[str, Union[Text, List[Text]], Text]]

    def my_cache_get(key, cache_name=None):
        # type: (Text, Optional[str]) -> Any
        cache_queries.append(('get', key, cache_name))
        return None

    def my_cache_get_many(keys, cache_name=None):
        # type: (List[Text], Optional[str]) -> Dict[Text, Any]
        cache_queries.append(('getmany', keys, cache_name))
        # NOTE(review): declared to return a Dict but returns None; any
        # caller that consumes the result would crash — confirm this path
        # is only used for recording.
        return None

    old_get = cache.cache_get
    old_get_many = cache.cache_get_many
    cache.cache_get = my_cache_get
    cache.cache_get_many = my_cache_get_many
    try:
        yield cache_queries
    finally:
        cache.cache_get = old_get
        cache.cache_get_many = old_get_many
@contextmanager
def queries_captured(include_savepoints=False):
    # type: (Optional[bool]) -> Generator[List[Dict[str, Union[str, binary_type]]], None, None]
    '''
    Allow a user to capture just the queries executed during
    the with statement.

    The real cursor methods are restored in a finally block; the original
    version skipped the restore when the with-body raised, leaving the
    instrumentation installed for later tests.
    '''
    queries = []  # type: List[Dict[str, Union[str, binary_type]]]

    def wrapper_execute(self, action, sql, params=()):
        # type: (TimeTrackingCursor, Callable, NonBinaryStr, Iterable[Any]) -> None
        # Time the underlying cursor call and record it (savepoint
        # bookkeeping queries are skipped unless explicitly requested).
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            if include_savepoints or ('SAVEPOINT' not in sql):
                queries.append({
                    'sql': self.mogrify(sql, params).decode('utf-8'),
                    'time': "%.3f" % duration,
                })

    old_execute = TimeTrackingCursor.execute
    old_executemany = TimeTrackingCursor.executemany

    def cursor_execute(self, sql, params=()):
        # type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
        return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
    TimeTrackingCursor.execute = cursor_execute  # type: ignore # https://github.com/JukkaL/mypy/issues/1167

    def cursor_executemany(self, sql, params=()):
        # type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
        return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
    TimeTrackingCursor.executemany = cursor_executemany  # type: ignore # https://github.com/JukkaL/mypy/issues/1167

    try:
        yield queries
    finally:
        TimeTrackingCursor.execute = old_execute  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
        TimeTrackingCursor.executemany = old_executemany  # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def get_test_image_file(filename):
    # type: (str) -> IO[Any]
    """Open the fixture image *filename* from ../tests/images in binary mode."""
    images_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '../tests/images'))
    return open(os.path.join(images_dir, filename), 'rb')
def avatar_disk_path(user_profile, medium=False):
    # type: (UserProfile, bool) -> str
    """Map a user's avatar URL to its path under LOCAL_UPLOADS_DIR/avatars."""
    url_path = avatar_url(user_profile, medium)
    # The on-disk name is the last URL segment, minus any query string.
    filename = url_path.split("/")[-1].split("?")[0]
    return os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", filename)
def make_client(name):
    # type: (str) -> Client
    """Fetch-or-create the Client row with the given name."""
    return Client.objects.get_or_create(name=name)[0]
def find_key_by_email(address):
    # type: (Text) -> Text
    """Scan Django's test outbox (newest first) for a confirmation email to
    *address* and return the 40-hex-digit confirmation key from its body."""
    from django.core.mail import outbox
    pattern = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
    for message in reversed(outbox):
        if address not in message.to:
            continue
        return pattern.search(message.body).groups()[0]
def message_ids(result):
    # type: (Dict[str, Any]) -> Set[int]
    """Return the set of ids of the messages in a fetch-messages result."""
    return {message['id'] for message in result['messages']}
def message_stream_count(user_profile):
    # type: (UserProfile) -> int
    """Count all UserMessage rows belonging to *user_profile*."""
    query = UserMessage.objects.select_related("message")
    return query.filter(user_profile=user_profile).count()
def most_recent_usermessage(user_profile):
    # type: (UserProfile) -> UserMessage
    """Return the UserMessage with the highest message id for this user."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('-message'))
    # Indexing makes Django apply LIMIT 1 to the query.
    return rows[0]
def most_recent_message(user_profile):
    # type: (UserProfile) -> Message
    """Return the newest Message delivered to *user_profile*."""
    return most_recent_usermessage(user_profile).message
def get_user_messages(user_profile):
    # type: (UserProfile) -> List[Message]
    """Return every Message delivered to *user_profile*, oldest first."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('message'))
    return [row.message for row in rows]
class DummyHandler(object):
    """Stub Tornado handler: its only job is to register itself and obtain
    a handler id via allocate_handler_id()."""
    def __init__(self):
        # type: () -> None
        allocate_handler_id(self)  # type: ignore # this is a testing mock
class POSTRequestMock(object):
    """Minimal stand-in for a Django POST request, wired to a DummyHandler
    so code that touches request._tornado_handler keeps working."""
    method = "POST"

    def __init__(self, post_data, user_profile):
        # type: (Dict[str, Any], UserProfile) -> None
        self.user = user_profile
        self.POST = post_data
        self.GET = {}  # type: Dict[str, Any]
        self.META = {'PATH_INFO': 'test'}
        self._log_data = {}  # type: Dict[str, Any]
        self._tornado_handler = DummyHandler()
class HostRequestMock(object):
    """A mock request object where get_host() works. Useful for testing
    routes that use Zulip's subdomains feature"""
    # NOTE: the default host is evaluated once, at class-definition time.
    def __init__(self, host=settings.EXTERNAL_HOST):
        # type: (Text) -> None
        self.host = host

    def get_host(self):
        # type: () -> Text
        return self.host
class MockPythonResponse(object):
    """Tiny imitation of a `requests`-style response: just .text,
    .status_code and the derived .ok flag."""

    def __init__(self, text, status_code):
        # type: (Text, int) -> None
        self.status_code = status_code
        self.text = text

    @property
    def ok(self):
        # type: () -> bool
        # Only an exact 200 counts as success.
        return self.status_code == 200
# URL-coverage instrumentation is opt-in via the TEST_INSTRUMENT_URL_COVERAGE
# environment variable; instrumented client calls accumulate in
# INSTRUMENTED_CALLS and are analyzed by write_instrumentation_reports().
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS = []  # type: List[Dict[str, Any]]

UrlFuncT = Callable[..., HttpResponse]  # TODO: make more specific
def instrument_url(f):
    # type: (UrlFuncT) -> UrlFuncT
    """If URL-coverage instrumentation is enabled, wrap the client method
    *f* so every call is recorded in INSTRUMENTED_CALLS; otherwise return
    *f* untouched."""
    if not INSTRUMENTING:
        return f

    # NOTE(review): the shared mutable default for `info` is kept to
    # preserve the original wrapper signature exactly.
    def wrapper(self, url, info={}, **kwargs):
        # type: (Any, Text, Dict[str, Any], **Any) -> HttpResponse
        started = time.time()
        result = f(self, url, info, **kwargs)
        elapsed = time.time() - started
        # Split off the query string so coverage matching sees bare paths.
        if '?' in url:
            url, extra_info = url.split('?', 1)
        else:
            extra_info = ''
        INSTRUMENTED_CALLS.append(dict(
            url=url,
            status_code=result.status_code,
            method=f.__name__,
            delay=elapsed,
            extra_info=extra_info,
            info=info,
            test_name=self.id(),
            kwargs=kwargs))
        return result
    return wrapper
def write_instrumentation_reports(full_suite):
    # type: (bool) -> None
    """Match INSTRUMENTED_CALLS against the project's URL patterns, dump the
    raw call log to var/url_coverage.txt, and — on a full-suite run — exit
    nonzero if any non-exempt URL pattern was never exercised."""
    if INSTRUMENTING:
        calls = INSTRUMENTED_CALLS
        from zproject.urls import urlpatterns, v1_api_and_json_patterns
        # Find our untested urls.
        pattern_cnt = collections.defaultdict(int)  # type: Dict[str, int]
        def re_strip(r):
            # type: (Any) -> str
            # Drop the regex anchors so a URL prefix can be glued on.
            return str(r).lstrip('^').rstrip('$')
        def find_patterns(patterns, prefixes):
            # type: (List[Any], List[str]) -> None
            for pattern in patterns:
                find_pattern(pattern, prefixes)
        def cleanup_url(url):
            # type: (str) -> str
            # Normalize recorded URLs to be relative to the server root.
            if url.startswith('/'):
                url = url[1:]
            if url.startswith('http://testserver/'):
                url = url[len('http://testserver/'):]
            if url.startswith('http://zulip.testserver/'):
                url = url[len('http://zulip.testserver/'):]
            if url.startswith('http://testserver:9080/'):
                url = url[len('http://testserver:9080/'):]
            return url
        def find_pattern(pattern, prefixes):
            # type: (Any, List[str]) -> None
            # NOTE(review): type(LocaleRegexURLResolver) is the metaclass, so
            # this isinstance() check looks like it meant
            # isinstance(pattern, LocaleRegexURLResolver) — confirm.
            if isinstance(pattern, type(LocaleRegexURLResolver)):
                return
            # Resolvers (which nest further patterns) are skipped.
            if hasattr(pattern, 'url_patterns'):
                return
            canon_pattern = prefixes[0] + re_strip(pattern.regex.pattern)
            cnt = 0
            for call in calls:
                # Each call is attributed to at most one pattern.
                if 'pattern' in call:
                    continue
                url = cleanup_url(call['url'])
                for prefix in prefixes:
                    if url.startswith(prefix):
                        match_url = url[len(prefix):]
                        if pattern.regex.match(match_url):
                            # Only successful/redirect responses count as coverage.
                            if call['status_code'] in [200, 204, 301, 302]:
                                cnt += 1
                            call['pattern'] = canon_pattern
            pattern_cnt[canon_pattern] += cnt
        find_patterns(urlpatterns, ['', 'en/', 'de/'])
        find_patterns(v1_api_and_json_patterns, ['api/v1/', 'json/'])
        # Sanity check that pattern discovery actually found the URL table.
        assert len(pattern_cnt) > 100
        untested_patterns = set([p for p in pattern_cnt if pattern_cnt[p] == 0])
        # We exempt some patterns that are called via Tornado.
        exempt_patterns = set([
            'api/v1/events',
            'api/v1/register',
        ])
        untested_patterns -= exempt_patterns
        var_dir = 'var'  # TODO make sure path is robust here
        fn = os.path.join(var_dir, 'url_coverage.txt')
        with open(fn, 'w') as f:
            for call in calls:
                try:
                    line = ujson.dumps(call)
                    f.write(line + '\n')
                except OverflowError:
                    print('''
A JSON overflow error was encountered while
producing the URL coverage report. Sometimes
this indicates that a test is passing objects
into methods like client_post(), which is
unnecessary and leads to false positives.
''')
                    print(call)
        if full_suite:
            print('INFO: URL coverage report is in %s' % (fn,))
            print('INFO: Try running: ./tools/create-test-api-docs')
        if full_suite and len(untested_patterns):
            print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
            for untested_pattern in sorted(untested_patterns):
                print(" %s" % (untested_pattern,))
            sys.exit(1)
def get_all_templates():
    # type: () -> List[str]
    """Collect the relative paths of all renderable templates from every
    configured template engine, skipping hidden files, __init__ files and
    Markdown sources."""
    templates = []

    def is_valid_template(path, name):
        # type: (Text, Text) -> bool
        return (not name.startswith('.') and
                not name.startswith('__init__') and
                not name.endswith(".md") and
                os.path.isfile(path))

    def process(template_dir, dirname, fnames):
        # type: (str, str, Iterable[str]) -> None
        for name in fnames:
            full_path = os.path.join(dirname, name)
            if is_valid_template(full_path, name):
                templates.append(os.path.relpath(full_path, template_dir))

    for engine in loader.engines.all():
        for template_dir in engine.template_dirs:
            if not os.path.exists(template_dir):
                continue
            template_dir = os.path.normpath(template_dir)
            for dirpath, dirnames, fnames in os.walk(template_dir):
                process(template_dir, dirpath, fnames)
    return templates
| |
# SMOP compiler -- Simple Matlab/Octave to Python compiler
# Copyright 2011-2013 Victor Leikehman
from collections import namedtuple
from recipes import recordtype
import copy,sys,inspect
import options
# def preorder(u):
# if isinstance(u,traversable):
# yield u
# for n in u:
# for t in preorder(n):
# yield t
def decode(self):
    """Decode self.name mangled by encode(): an underscore followed by a
    character maps to that character uppercased; any other character maps
    to its lowercase form."""
    result = ""
    remaining = self.name
    while remaining:
        if len(remaining) >= 2 and remaining[0] == "_":
            result += remaining[1].upper()
            remaining = remaining[2:]
        else:
            result += remaining[0].lower()
            remaining = remaining[1:]
    return result
def encode(s):
    """Mangle a name: uppercase letters and underscores keep themselves and
    gain a trailing "_"; everything else is uppercased."""
    pieces = []
    for c in s:
        if c.isupper() or c == "_":
            pieces.append(c + "_")
        else:
            pieces.append(c.upper())
    return "".join(pieces)
def postorder(u):
    """Yield every traversable node in the tree rooted at *u*, children
    before parents; non-node objects produce nothing."""
    if not isinstance(u, node):
        return
    for child in u:
        for descendant in postorder(child):
            yield descendant
    yield u
def extend(cls):
    """Decorator factory: @extend(cls) attaches the decorated function to
    *cls* as a method and returns the function unchanged."""
    def attach(f):
        setattr(cls, f.__name__, f)
        return f
    return attach
def exceptions(f):
    """Decorator: if *f* raises, report "ClassName.method()" to stdout and
    re-raise the exception unchanged.  Helps locate the failing visitor
    method during compilation."""
    def wrapper(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except:
            # FIX: the py2-only `print "..."` statement is replaced with the
            # parenthesized call form, which behaves identically on Python 2
            # (single argument) and is valid on Python 3.
            print("%s.%s()" % (self.__class__.__name__, f.__name__))
            raise
    wrapper.__name__ = f.__name__
    return wrapper
class node(object):
    """Base class for all AST nodes; subclasses are iterable over children."""
    def become(self, other):
        """Turn this node into a transparent proxy for *other*: a Wrapper
        class is created on the fly and installed as self.__class__, so
        attribute access, assignment, iteration, copy(), repr() and len()
        are all forwarded to *other*.  Used to replace a node in place
        during tree rewriting without updating parent references."""
        class Wrapper(self.__class__):
            def __copy__(self):
                # object.__getattribute__ bypasses the forwarding below.
                other = object.__getattribute__(self, "other")
                return copy.copy(other)
            def __getattribute__(self, name):
                other = object.__getattribute__(self, "other")
                return getattr(other, name)
            def __setattr__(self, name, value):
                other = object.__getattribute__(self, "other")
                return setattr(other, name, value)
            def __iter__(self):
                other = object.__getattribute__(self, "other")
                return iter(other)
            #def __hash__(self):
            #    other = object.__getattribute__(self, "other")
            #    return id(other)
            def __repr__(self):
                other = object.__getattribute__(self, "other")
                return repr(other)
            def __len__(self):
                other = object.__getattribute__(self, "other")
                return len(other)
        # A node must not proxy itself.
        assert self != other
        self.other = other
        self.__class__ = Wrapper
    def _type(self):
        # Subclasses that carry a type override this.
        raise AttributeError("_type")
######### LISTS
class concat_list(node, list):
    # List node; presumably the rows of a bracket concatenation — confirm.
    pass

class global_list(node, list):
    """space-separated list of variables used in GLOBAL statement"""
    pass

class expr_list(node, list):
    """List of expressions; prints comma-separated."""
    def __str__(self):
        return ",".join([str(t) for t in self])
    def __repr__(self):
        return "expr_list(%s)" % list.__repr__(self)

class stmt_list(node, list):
    """List of statements; prints one statement per line."""
    def __str__(self):
        return "\n".join([str(t) for t in self])
    def __repr__(self):
        return "stmt_list(%s)" % list.__repr__(self)
#####################
#
# ATOMS
# Base class for leaf nodes (literals and identifiers).
class atom(node): pass

class string(atom, recordtype("string", "value lineno lexpos", default=None)):
    """String literal; prints single-quoted."""
    def __str__(self):
        return "'%s'" % self.value

class logical(atom, recordtype("logical", "value lineno lexpos", default=None)):
    # Boolean literal.
    pass

class number(atom, recordtype("number", "value lineno lexpos", default=None)):
    """Numeric literal."""
    def __str__(self):
        return str(self.value)

class ident(atom, recordtype("ident", "name lineno column lexpos defs props",
                             default=None)):
    """Identifier; defs/props default to None (presumably filled in by
    later analysis passes — confirm)."""
    def __str__(self):
        return self.name

class param(ident):
    # Identifier appearing as a formal parameter.
    pass
###########################
#
# STATEMENTS
#
# Base class for all statement nodes.
class stmt(node): pass
# class call_stmt(stmt,recordtype("call_stmt","func_expr args ret")):
# """Sometimes called multiple assignment, call statements represent
# something like [x,y]=foo(a,b,c); Note the square brackets around
# the lhs.
# SEE ALSO: funcall,let
# """
# def __str__(self):
# return "%s=%s(%s)" % (str(self.ret),
# str(self.func_expr),
# str(self.args))
class let(stmt, recordtype("let",
                           "ret args lineno lexpos nargout",
                           default=None)):
    """Assignment statement, except [x,y]=foo(x,y,z),
    which is handled by call_stmt."""
    def __str__(self):
        return "%s=%s" % (str(self.ret), str(self.args))

class func_decl(stmt, recordtype("func_decl", "ident ret args decl_list use_nargin", default=None)):
    # Function signature: name, return values, argument list.
    pass

class lambda_expr(func_decl):
    pass

class function(stmt, recordtype("function", "head body")):
    # A func_decl head plus a stmt_list body.
    pass

class for_stmt(stmt, recordtype("for_stmt", "ident expr stmt_list")):
    pass

class DO_STMT(stmt, recordtype("DO_STMT", "ident start stop stmt_list")):
    # Counted loop with explicit start/stop bounds.
    pass

# We generate where_stmt to implement A(B==C) = D
class where_stmt(stmt, recordtype("where_stmt", "cond_expr stmt_list")):
    pass

class if_stmt(stmt, recordtype("if_stmt", "cond_expr then_stmt else_stmt")):
    pass

class global_stmt(stmt, recordtype("global_stmt", "global_list")):
    def __str__(self):
        return "global %s" % str(self.global_list)

class return_stmt(stmt, namedtuple("return_stmt", "ret")):
    def __str__(self):
        return "return"

class end_stmt(stmt, namedtuple("end_stmt", "dummy")):
    def __str__(self):
        return "end"

class continue_stmt(stmt, namedtuple("continue_stmt", "dummy")):
    def __str__(self):
        return "continue"

class break_stmt(stmt, namedtuple("break_stmt", "dummy")):
    def __str__(self):
        return "break"

class expr_stmt(stmt, node, recordtype("expr_stmt", "expr")):
    # Bare expression used as a statement.
    def __str__(self):
        return str(self.expr)

class while_stmt(stmt, node, recordtype("while_stmt", "cond_expr stmt_list")):
    pass

class try_catch(stmt, recordtype("try_catch", "try_stmt catch_stmt finally_stmt")):
    pass

class allocate_stmt(stmt, recordtype("allocate_stmt",
                                     "ident args")):
    # Synthetic statement for pre-allocating an array.
    pass
#######################################333
#
# FUNCALL
class funcall(node, recordtype("funcall", "func_expr args nargout", default=None)):
    """Funcall instances represent
    (a) Array references, both lhs and rhs
    (b) Function call expressions
    """
    def __str__(self):
        return "%s(%s)" % (str(self.func_expr),
                           str(self.args))
class builtins(funcall):
    """
    Builtin functions are represented as subclasses
    of class builtins. Application of a function to
    specific arguments is represented as its instance.
    For example, builtin function foo is represented
    as class foo(builtins), and foo(x) is represented
    as foo(x).
    """
    def __init__(self, *args, **kwargs):
        """
        If a built-in function _foo takes three arguments
        a, b, and c, we can just say _foo(a,b,c) and let
        the constructor take care of proper structuring of
        the node (like expr_list around the arguments, etc.
        """
        # func_expr stays None: the function is identified by the subclass.
        funcall.__init__(self,
                         func_expr=None,
                         args=expr_list(args),
                         **kwargs)
        #import pdb; pdb.set_trace()
        #self.func_expr.defs = set(self.func_expr)
    def __repr__(self):
        # Interpolates the class object itself (not its name) — intentional?
        return "np.%s%s" % (self.__class__, repr(self.args))
    def __str__(self):
        return "np.%s(%s)" % (self.__class__.__name__,
                              str(self.args))
class arrayref(funcall):
    """Funcall known to be an array reference; repr uses brackets."""
    def __repr__(self):
        return "%s%s[%s]" % (self.__class__,
                             self.func_expr,
                             self.args)
########################## EXPR
class expr(node, recordtype("expr", "op args")):
    """Operator application: *op* applied to the expressions in *args*."""
    def __str__(self):
        if self.op == ".":
            # Field access: a.b
            return "%s%s" % (str(self.args[0]), self.args[1])
        if self.op == "parens":
            return "(%s)" % str(self.args[0])
        if not self.args:
            return str(self.op)
        if len(self.args) == 1:
            # Unary operator.
            return "%s%s" % (self.op, self.args[0])
        if len(self.args) == 2:
            # Binary operator.
            return "%s%s%s" % (self.args[0]._backend(),
                               self.op,
                               self.args[1]._backend())
        # FIX: recordtype("expr", "op args") defines no "ret" field, so the
        # original unconditional `self.ret` raised AttributeError whenever an
        # operator with three or more arguments was printed.  getattr() keeps
        # the "ret=" prefix when a ret attribute does exist, and degrades
        # gracefully when it does not.
        ret = "%s=" % str(self.ret) if getattr(self, "ret", None) else ""
        return ret + "%s(%s)" % (self.op,
                                 ",".join([str(t) for t in self.args]))
# names in caps correspond to fortran funcs
# Catalog of recognized builtin names.  Uppercase entries map to Fortran
# intrinsics; commented-out entries are recognized elsewhere or disabled.
builtins_list = [
    "ABS",
    "ALL",
    "ANY",
    "CEILING",
    "FIND",
    "ISNAN",
    "MAXVAL",
    "MINVAL",
    "MODULO",
    "RAND",
    "RESHAPE",
    "SHAPE",
    "SIGN",
    "SIZE",
    "SUM",
    #"abs",
    "add",  # synthetic opcode
    #"all",
    #"any",
    "cellfun",
    #"ceil",
    "clazz",
    #"cumprod",
    #"cumsum",
    #"diff",
    "dot",  # Exists in numpy. Implements matlab .*
    #"exist",
    "false",
    #"fclose",
    #"find",
    #"findone", # same as find, but returns ONE result
    #"floor",
    #"fopen",
    "getfield",
    "inf", "inf0",
    #"isempty",
    #"isequal",
    "isinf",
    "isnan",
    #"length",
    #"load",
    #"lower",
    #"max",
    #"min",
    #"mod",
    #"nnz",
    #"numel",
    #"ones",
    #"rand",
    #"range_", # synthetic opcode
    "ravel",  # synthetic opcode
    #"rem",
    #"save",
    "setfield",
    #"sign",
    #"size",
    #"sort",
    #"strcmp",
    #"strcmpi",
    "sub",  # synthetic opcode for subtract
    #"sum",
    "transpose",
    #"true",
    #"zeros",
]

# Dynamically create one subclass of `builtins` per listed name (e.g. a
# module-level class ABS), so the parser can instantiate them by name.
for name in builtins_list:
    globals()[name] = type(name, (builtins,), {})
#class cellarrayref(node,recordtype("cellarrayref","ident args")):
class cellarrayref(funcall):
    # Indexing into a cell array (brace indexing) — confirm exact semantics.
    pass

class cellarray(expr):
    # Cell-array literal expression.
    pass
class matrix(builtins):
    """
    Anything enclosed in square brackets counts as matrix
    >>> print matrix([1,2,3])
    [1,2,3]
    >>> print matrix()
    []
    """
    # NOTE(review): the doctest above matches the commented-out
    # __init__/__str__ below, not the inherited builtins.__str__ — confirm
    # whether the docstring is stale.
    # def __init__(self,args=expr_list()):
    #     expr.__init__(self,op="[]",args=args)
    # def __str__(self):
    #     return "[%s]" % ",".join([str(t) for t in self.args])
# is_const() is attached to the node classes via @extend: it reports whether
# a subtree is a compile-time constant.
@extend(node)
def is_const(self):
    # Default: an arbitrary node is not constant.
    return False

@extend(number)
@extend(string)
def is_const(self):
    # Literals are constants.
    return True

@extend(expr_list)
def is_const(self):
    # An expression list is constant iff all of its elements are.
    return all(t.is_const() for t in self)

@extend(matrix)
def is_const(self):
    # An empty matrix is constant; otherwise defer to the first argument.
    return not self.args or self.args[0].is_const()
# vim: ts=8:sw=4:et
| |
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import unittest
from gruvi import http
from gruvi.http import HttpServer, HttpClient, HttpMessage, HttpProtocol, ParsedUrl
from gruvi.http import parse_content_type, parse_te, parse_trailer, parse_url
from gruvi.http import get_header, remove_headers
from gruvi.stream import Stream, StreamClient
from gruvi.sync import Queue
from support import UnitTest, MockTransport
# Short alias: these tests construct expected ParsedUrl tuples constantly.
URL = ParsedUrl
class TestParseContentType(UnitTest):
    """Tests for gruvi.http.parse_content_type()."""

    def test_simple(self):
        self.assertEqual(parse_content_type('text/plain'), ('text/plain', {}))

    def test_params(self):
        ctype, params = parse_content_type('text/plain; charset=foo')
        self.assertEqual(ctype, 'text/plain')
        self.assertEqual(params, {'charset': 'foo'})

    def test_iso8859_1(self):
        self.assertEqual(parse_content_type('text/plain; foo="bar\xfe"'),
                         ('text/plain', {'foo': 'bar\xfe'}))

    def test_param_whitespace(self):
        self.assertEqual(parse_content_type('text/plain; charset=foo '),
                         ('text/plain', {'charset': 'foo'}))

    def test_param_quoted(self):
        self.assertEqual(parse_content_type('text/plain; charset="foo bar"'),
                         ('text/plain', {'charset': 'foo bar'}))

    def test_param_quoted_pair(self):
        self.assertEqual(parse_content_type('text/plain; charset="foo\\"bar"'),
                         ('text/plain', {'charset': 'foo"bar'}))

    def test_param_empty(self):
        self.assertEqual(parse_content_type('text/plain; charset=""'),
                         ('text/plain', {'charset': ''}))

    def test_param_multiple(self):
        self.assertEqual(parse_content_type('text/plain; foo=bar; baz=qux'),
                         ('text/plain', {'foo': 'bar', 'baz': 'qux'}))

    def test_param_multiple_missing_semi(self):
        # Without the ';' separator the second parameter is dropped.
        self.assertEqual(parse_content_type('text/plain; foo=bar baz=qux'),
                         ('text/plain', {'foo': 'bar'}))
class TestParseTE(UnitTest):
    """Tests for gruvi.http.parse_te() ("TE" header parsing)."""

    def test_simple(self):
        self.assertEqual(parse_te('chunked'), [('chunked', None)])

    def test_multiple(self):
        self.assertEqual(parse_te('chunked, deflate'),
                         [('chunked', None), ('deflate', None)])

    def test_qvalue(self):
        self.assertEqual(parse_te('deflate; q=0.5'), [('deflate', '0.5')])

    def test_case_insensitive(self):
        # The coding and the "q" parameter name parse regardless of case,
        # and the coding's original spelling is preserved.
        self.assertEqual(parse_te('dEfLaTe; Q=0.5'), [('dEfLaTe', '0.5')])

    def test_illegal_qvalue(self):
        # An out-of-range qvalue is discarded rather than kept.
        self.assertEqual(parse_te('deflate; q=2.5'), [('deflate', None)])

    def test_multiple_qvalue(self):
        self.assertEqual(parse_te('deflate; q=0.5, zlib; q=0.8'),
                         [('deflate', '0.5'), ('zlib', '0.8')])
class TestParseTrailer(UnitTest):
    """Tests for gruvi.http.parse_trailer() ("Trailer" header parsing)."""

    def test_simple(self):
        self.assertEqual(parse_trailer('foo'), ['foo'])

    def test_multiple(self):
        self.assertEqual(parse_trailer('foo, bar'), ['foo', 'bar'])

    def test_spacing(self):
        self.assertEqual(parse_trailer('foo , bar '), ['foo', 'bar'])

    def test_wrong_separator(self):
        # ';' is not a list separator here; parsing stops at it.
        self.assertEqual(parse_trailer('foo; bar'), ['foo'])
class TestParseUrl(UnitTest):
    """Tests for gruvi.http.parse_url() and the ParsedUrl tuple itself."""

    def test_test(self):
        # ParsedUrl is a 7-tuple:
        # (scheme, host, path, query, fragment, port, userinfo)
        self.assertEqual(URL(), ('', '', '', '', '', '', ''))
        self.assertEqual(URL(scheme='http'), ('http', '', '', '', '', '', ''))
        self.assertEqual(URL(host='foo'), ('', 'foo', '', '', '', '', ''))
        self.assertEqual(URL(path='/path'), ('', '', '/path', '', '', '', ''))
        self.assertEqual(URL(query='foo=bar'), ('', '', '', 'foo=bar', '', '', ''))
        self.assertEqual(URL(fragment='baz'), ('', '', '', '', 'baz', '', ''))
        self.assertEqual(URL(port='80'), ('', '', '', '', '', '80', ''))
        self.assertEqual(URL(userinfo='user:pass'), ('', '', '', '', '', '', 'user:pass'))

    def test_origin(self):
        self.assertEqual(parse_url('/path'), URL(path='/path'))

    def test_absolute(self):
        self.assertEqual(parse_url('http://example.com/path'),
                         URL('http', 'example.com', '/path'))

    def test_authority(self):
        self.assertEqual(parse_url('example.com:80', is_connect=True),
                         URL('', 'example.com', port='80'))

    def test_authority_error(self):
        # CONNECT targets must be in authority form and nothing else.
        self.assertRaises(ValueError, parse_url, '/path', is_connect=True)
        self.assertRaises(ValueError, parse_url, 'http://example.com:80', is_connect=True)
        self.assertRaises(ValueError, parse_url, '*', is_connect=True)

    def test_asterisk(self):
        self.assertEqual(parse_url('*'), URL(path='*'))

    def test_userinfo(self):
        self.assertEqual(parse_url('http://user:pass@example.com'),
                         URL('http', 'example.com', userinfo='user:pass'))

    def test_port(self):
        self.assertEqual(parse_url('http://example.com:80'),
                         URL('http', 'example.com', port='80'))

    def test_userinfo_port(self):
        self.assertEqual(parse_url('http://user:pass@example.com:80'),
                         URL('http', 'example.com', port='80', userinfo='user:pass'))

    def test_default_scheme(self):
        self.assertEqual(parse_url('www.example.com'),
                         URL('http', 'www.example.com'))
        self.assertEqual(parse_url('http://www.example.com'),
                         URL('http', 'www.example.com'))
        self.assertEqual(parse_url('www.example.com', default_scheme='https'),
                         URL('https', 'www.example.com'))
        self.assertEqual(parse_url('https://www.example.com', default_scheme='https'),
                         URL('https', 'www.example.com'))

    def test_addr(self):
        # .addr supplies the scheme's default port when none is given.
        self.assertEqual(parse_url('www.example.com').addr, ('www.example.com', 80))
        self.assertEqual(parse_url('https://www.example.com').addr,
                         ('www.example.com', 443))

    def test_ssl(self):
        self.assertFalse(parse_url('www.example.com').ssl)
        self.assertFalse(parse_url('http://www.example.com').ssl)
        self.assertTrue(parse_url('https://www.example.com').ssl)

    def test_target(self):
        # .target is the request target: path plus optional query.
        self.assertEqual(parse_url('www.example.com').target, '/')
        self.assertEqual(parse_url('www.example.com/foo').target, '/foo')
        self.assertEqual(parse_url('www.example.com?bar').target, '/?bar')
        self.assertEqual(parse_url('www.example.com/foo?bar').target, '/foo?bar')
class TestGetHeader(UnitTest):
    """Tests for gruvi.http.get_header()."""

    headers = [('foo', 'fooval'),
               ('bar', 'barval'),
               ('baz', 'bazval')]

    def test_simple(self):
        for name, value in (('foo', 'fooval'), ('bar', 'barval'), ('baz', 'bazval')):
            self.assertEqual(get_header(self.headers, name), value)

    def test_case_insensitive(self):
        for name in ('Foo', 'FOO'):
            self.assertEqual(get_header(self.headers, name), 'fooval')

    def test_not_present(self):
        self.assertIsNone(get_header(self.headers, 'qux'))

    def test_default_value(self):
        self.assertEqual(get_header(self.headers, 'qux', 'quxval'), 'quxval')
class TestRemoveHeaders(UnitTest):
    """Tests for gruvi.http.remove_headers()."""

    headers = [('foo', 'fooval1'),
               ('bar', 'barval1'),
               ('foo', 'fooval2'),
               ('baz', 'bazval'),
               ('bar', 'barval2')]

    def test_simple(self):
        without_foo = remove_headers(self.headers[:], 'foo')
        self.assertEqual(without_foo,
                         [('bar', 'barval1'), ('baz', 'bazval'), ('bar', 'barval2')])
        without_bar = remove_headers(self.headers[:], 'bar')
        self.assertEqual(without_bar,
                         [('foo', 'fooval1'), ('foo', 'fooval2'), ('baz', 'bazval')])

    def test_in_place(self):
        # The same list object is mutated and returned.
        header_list = self.headers[:]
        self.assertIs(header_list, remove_headers(header_list, 'foo'))

    def test_non_quadratic(self):
        # Ensure remove_headers() doesn't take quadratic time.
        names = ('foo', 'bar', 'baz', 'qux')
        headers = [(names[i % 4], names[i % 4] + 'val') for i in range(100000)]
        self.assertEqual(len(remove_headers(headers, 'foo')), 75000)
class TestHttpProtocol(UnitTest):
    def setUp(self):
        # Fresh queue per test; store_request() feeds parsed requests into it.
        super(TestHttpProtocol, self).setUp()
        self.requests = Queue()
    def store_request(self, message, transport, protocol):
        # Server-side message handler: record the request and write a
        # minimal 200 response so the protocol can continue.
        self.requests.put(message)
        protocol.writer.write(b'HTTP/1.1 200 OK\r\n\r\n')
    def parse_request(self, *chunks):
        # Parse the HTTP request made up of *chunks through a server-side
        # protocol attached to a mock transport; fail if a parse error occurs.
        transport = MockTransport()
        protocol = HttpProtocol(self.store_request, server_side=True)
        transport.start(protocol)
        for chunk in chunks:
            protocol.data_received(chunk)
        self.assertIsNone(protocol._error)
        self.transport = transport
        self.protocol = protocol
    def get_request(self):
        # Get a parsed request from the queue filled by store_request().
        m = self.requests.get(timeout=1.0)
        self.assertIsInstance(m, HttpMessage)
        self.assertEqual(m.message_type, http.REQUEST)
        return m
# Tests that parse a request
    def test_simple_request(self):
        # Minimal GET: no body, EOF immediate, keep-alive under HTTP/1.1.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
        self.parse_request(r)
        m = self.get_request()
        self.assertEqual(m.version, '1.1')
        self.assertIsNone(m.status_code)
        self.assertEqual(m.method, 'GET')
        self.assertEqual(m.url, '/')
        self.assertTrue(m._should_keep_alive)
        self.assertEqual(m.parsed_url, URL(path='/'))
        self.assertEqual(m.headers, [('Host', 'example.com')])
        self.assertIsInstance(m.body, Stream)
        self.assertTrue(m.body.buffer.eof)
    def test_request_with_body(self):
        # Content-Length framed body, delivered in a single chunk.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Content-Length: 3\r\n\r\nFoo'
        self.parse_request(r)
        m = self.get_request()
        self.assertEqual(m.url, '/')
        self.assertEqual(m.version, '1.1')
        self.assertEqual(m.headers, [('Host', 'example.com'), ('Content-Length', '3')])
        self.assertFalse(m.body.buffer.eof)
        self.assertEqual(m.body.read(), b'Foo')
        self.assertTrue(m.body.buffer.eof)
    def test_request_with_body_incremental(self):
        # Same request, fed one byte at a time to exercise the parser's
        # incremental state machine.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Content-Length: 3\r\n\r\nFoo'
        self.parse_request(*[r[i:i+1] for i in range(len(r))])
        m = self.get_request()
        self.assertEqual(m.url, '/')
        self.assertEqual(m.version, '1.1')
        self.assertEqual(m.headers, [('Host', 'example.com'), ('Content-Length', '3')])
        self.assertFalse(m.body.buffer.eof)
        self.assertEqual(m.body.read(), b'Foo')
        self.assertTrue(m.body.buffer.eof)
    def test_request_with_chunked_body(self):
        # chunked framing: one 3-byte chunk, then the 0-size terminator.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Transfer-Encoding: chunked\r\n\r\n' \
            b'3\r\nFoo\r\n0\r\n\r\n'
        self.parse_request(r)
        m = self.get_request()
        self.assertEqual(m.url, '/')
        self.assertEqual(m.version, '1.1')
        self.assertEqual(m.headers, [('Host', 'example.com'),
                                     ('Transfer-Encoding', 'chunked')])
        self.assertFalse(m.body.buffer.eof)
        self.assertEqual(m.body.read(), b'Foo')
        self.assertTrue(m.body.buffer.eof)
    def test_request_with_chunked_body_incremental(self):
        # chunked framing, fed one byte at a time.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Transfer-Encoding: chunked\r\n\r\n' \
            b'3\r\nFoo\r\n0\r\n\r\n'
        self.parse_request(*[r[i:i+1] for i in range(len(r))])
        m = self.get_request()
        self.assertEqual(m.url, '/')
        self.assertEqual(m.version, '1.1')
        self.assertEqual(m.headers, [('Host', 'example.com'),
                                     ('Transfer-Encoding', 'chunked')])
        self.assertFalse(m.body.buffer.eof)
        self.assertEqual(m.body.read(), b'Foo')
        self.assertTrue(m.body.buffer.eof)
    def test_request_with_chunked_body_and_trailers(self):
        # Trailer headers after the last chunk end up appended to m.headers.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Transfer-Encoding: chunked\r\n\r\n' \
            b'3\r\nFoo\r\n0\r\nETag: foo\r\n\r\n'
        self.parse_request(r)
        m = self.get_request()
        self.assertEqual(m.url, '/')
        self.assertEqual(m.version, '1.1')
        self.assertEqual(m.headers, [('Host', 'example.com'),
                                     ('Transfer-Encoding', 'chunked'),
                                     ('ETag', 'foo')])
        self.assertFalse(m.body.buffer.eof)
        self.assertEqual(m.body.read(), b'Foo')
        self.assertTrue(m.body.buffer.eof)
    def test_pipelined_requests(self):
        # Two back-to-back requests on one connection are both parsed.
        r = b'GET /0 HTTP/1.1\r\nHost: example0.com\r\n\r\n' \
            b'GET /1 HTTP/1.1\r\nHost: example1.com\r\n\r\n'
        self.parse_request(r)
        for i in range(2):
            m = self.get_request()
            self.assertEqual(m.url, '/{0}'.format(i))
            self.assertEqual(m.version, '1.1')
            self.assertEqual(m.headers, [('Host', 'example{}.com'.format(i))])
            self.assertTrue(m.body.buffer.eof)
    def test_pipelined_requests_with_body(self):
        # Pipelining with bodies: Content-Length must delimit each message.
        r = b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Content-Length: 4\r\n\r\nFoo0' \
            b'GET / HTTP/1.1\r\nHost: example.com\r\n' \
            b'Content-Length: 4\r\n\r\nFoo1'
        self.parse_request(r)
        for i in range(2):
            m = self.get_request()
            self.assertEqual(m.url, '/')
            self.assertEqual(m.version, '1.1')
            self.assertEqual(m.headers, [('Host', 'example.com'), ('Content-Length', '4')])
            self.assertFalse(m.body.buffer.eof)
            self.assertEqual(m.body.read(), 'Foo{}'.format(i).encode('ascii'))
            self.assertTrue(m.body.buffer.eof)
def test_request_url(self):
r = b'GET /foo/bar HTTP/1.1\r\n' \
b'Host: example.com\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.parsed_url, URL(path='/foo/bar'))
def test_long_request_url(self):
r = b'GET http://user:pass@example.com:80/foo/bar?baz=qux#quux HTTP/1.1\r\n' \
b'Host: example.com\r\n\r\n'
self.parse_request(r)
m = self.get_request()
self.assertEqual(m.parsed_url, URL('http', 'example.com', '/foo/bar', 'baz=qux', 'quux',
port='80', userinfo='user:pass'))
# Tests that parse a response
def parse_response(self, *chunks, **kwargs):
# Parse the HTTP resposne made up of *chunks.
transport = MockTransport()
protocol = HttpProtocol()
transport.start(protocol)
methods = kwargs.get('methods', [])
if methods:
protocol._requests = methods
for chunk in chunks:
protocol.data_received(chunk)
self.assertIsNone(protocol._error)
self.transport = transport
self.protocol = protocol
def get_response(self):
# Get a parsed resposne.
m = self.protocol.getresponse()
self.assertIsInstance(m, HttpMessage)
self.assertEqual(m.message_type, http.RESPONSE)
return m
def test_simple_response(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '0')])
self.assertEqual(m.get_header('Content-Length'), '0')
self.assertEqual(m.body.read(), b'')
self.assertTrue(m.body.buffer.eof)
def test_response_with_body(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nFoo'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '3')])
self.assertEqual(m.get_header('Content-Length'), '3')
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_body_incremental(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nFoo'
self.parse_response([r[i:i+1] for i in range(len(r))])
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '3')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_chunked_body(self):
r = b'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\n\r\n'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Transfer-Encoding', 'chunked')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_chunked_body_incremental(self):
r = b'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\n\r\n'
self.parse_response([r[i:i+1] for i in range(len(r))])
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Transfer-Encoding', 'chunked')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_response_with_chunked_body_and_trailers(self):
r = b'HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n' \
b'3\r\nFoo\r\n0\r\nETag: foo\r\n\r\n'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Transfer-Encoding', 'chunked'), ('ETag', 'foo')])
self.assertEqual(m.body.read(), b'Foo')
self.assertEqual(m.body.read(), b'')
def test_pipelined_responses(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\nSet-Cookie: foo=0\r\n\r\n' \
b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\nSet-Cookie: foo=1\r\n\r\n'
self.parse_response(r)
for i in range(2):
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
cookie = 'foo={0}'.format(i)
self.assertEqual(m.headers, [('Content-Length', '0'), ('Set-Cookie', cookie)])
self.assertEqual(m.body.read(), b'')
def test_pipelined_responses_with_body(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nFoo0' \
b'HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nFoo1'
self.parse_response(r)
for i in range(2):
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '4')])
self.assertEqual(m.body.read(), 'Foo{0}'.format(i).encode('ascii'))
self.assertEqual(m.body.read(), b'')
def test_pipelined_head_responses(self):
r = b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\n' \
b'HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\n'
self.parse_response(r, methods=['HEAD', 'HEAD'])
for i in range(2):
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.status_code, 200)
self.assertEqual(m.headers, [('Content-Length', '3')])
self.assertEqual(m.body.read(), b'')
def test_pipelined_empty_responses(self):
# These status codes take no body. The parser should know this.
r = b'HTTP/1.1 100 OK\r\nCookie: foo0\r\n\r\n' \
b'HTTP/1.1 204 OK\r\nCookie: foo1\r\n\r\n' \
b'HTTP/1.1 304 OK\r\nCookie: foo2\r\n\r\n'
self.parse_response(r)
for i in range(3):
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Cookie', 'foo{0}'.format(i))])
self.assertEqual(m.body.read(), b'')
def test_pipelined_200_response_eof(self):
# No content-length so 200 requires and EOF to indicate the end of
# message. The second request is therefore interpreted as a the body of
# the first.
r = b'HTTP/1.1 200 OK\r\nCookie: foo0\r\n\r\nfoo'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Cookie', 'foo0')])
self.assertEqual(m.body.read(3), b'foo')
self.assertFalse(m.body.buffer.eof)
self.protocol.connection_lost(None)
self.assertTrue(m.body.buffer.eof)
self.assertEqual(m.body.read(), b'')
def test_pipelined_204_response_eof(self):
# A 204 never has a body so the absence of a Content-Length header
# still does not require it to see an EOF.
r = b'HTTP/1.1 204 OK\r\nCookie: foo0\r\n\r\n'
self.parse_response(r)
m = self.get_response()
self.assertEqual(m.version, '1.1')
self.assertEqual(m.headers, [('Cookie', 'foo0')])
self.assertEqual(m.body.read(), b'')
def hello_app(environ, start_response):
    """Minimal WSGI app: always replies 200 with a plain-text greeting."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello!']
def echo_app(environ, start_response):
    """WSGI app echoing the request body and any X-* request headers."""
    headers = [('Content-Type', 'text/plain')]
    # Reflect incoming X-* headers: HTTP_X_FOO -> ('X-Foo', value).
    headers.extend((name[5:].replace('_', '-').title(), value)
                   for name, value in environ.items()
                   if name.startswith('HTTP_X'))
    headers.sort()  # deterministic order for easier comparison
    body = environ['wsgi.input'].read()
    start_response('200 OK', headers)
    return [body]
class TestHttp(UnitTest):
    """End-to-end tests: a real HttpServer and HttpClient talking over
    TCP, named pipes and SSL."""
    def test_simple(self):
        """Basic GET over TCP; checks status line and standard headers."""
        server = HttpServer(hello_app)
        server.listen(('localhost', 0))
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr)
        client.request('GET', '/')
        resp = client.getresponse()
        self.assertEqual(resp.version, '1.1')
        self.assertEqual(resp.status_code, 200)
        serv = resp.get_header('Server')
        self.assertTrue(serv.startswith('gruvi'))
        ctype = resp.get_header('Content-Type')
        self.assertEqual(ctype, 'text/plain')
        self.assertEqual(resp.body.read(), b'Hello!')
        server.close()
        client.close()
    def test_simple_pipe(self):
        """Same GET round-trip over a named pipe / local transport."""
        server = HttpServer(hello_app)
        server.listen(self.pipename())
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr)
        client.request('GET', '/')
        resp = client.getresponse()
        self.assertEqual(resp.body.read(), b'Hello!')
        server.close()
        client.close()
    def test_simple_ssl(self):
        """Same GET round-trip over an SSL-wrapped TCP connection."""
        server = HttpServer(hello_app)
        context = self.get_ssl_context()
        server.listen(('localhost', 0), ssl=context)
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr, ssl=context)
        client.request('GET', '/')
        resp = client.getresponse()
        self.assertEqual(resp.body.read(), b'Hello!')
        server.close()
        client.close()
    def test_request_headers(self):
        """Custom X-* request headers are echoed back by echo_app."""
        server = HttpServer(echo_app)
        server.listen(('localhost', 0))
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr)
        client.request('GET', '/', headers=[('X-Echo', 'Bar')])
        resp = client.getresponse()
        self.assertEqual(resp.get_header('X-Echo'), 'Bar')
        body = resp.body.read()
        self.assertEqual(body, b'')
        server.close()
        client.close()
    def test_request_body(self):
        """A bytes request body arrives intact at the application."""
        server = HttpServer(echo_app)
        server.listen(('localhost', 0))
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr)
        client.request('POST', '/', body=b'foo')
        resp = client.getresponse()
        body = resp.body.read()
        self.assertEqual(body, b'foo')
        server.close()
        client.close()
    def test_request_body_sequence(self):
        """A sequence body is concatenated on the wire."""
        server = HttpServer(echo_app)
        server.listen(('localhost', 0))
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr)
        client.request('POST', '/', body=[b'foo', b'bar'])
        resp = client.getresponse()
        body = resp.body.read()
        self.assertEqual(body, b'foobar')
        server.close()
        client.close()
    def test_request_trailers(self):
        """Headers appended while a generator body runs are sent as
        trailers (announced up front via the 'Trailer' header)."""
        server = HttpServer(echo_app)
        server.listen(('localhost', 0))
        addr = server.addresses[0]
        client = HttpClient()
        client.connect(addr)
        headers = [('X-Echo', 'Bar'), ('Trailer', 'X-Length')]
        def genbody():
            yield b'FooBody'
            # Appended after the body is generated -> goes out as a trailer.
            headers.append(('X-Length', '7'))
        client.request('GET', '/', body=genbody(), headers=headers)
        resp = client.getresponse()
        self.assertEqual(resp.get_header('X-Echo'), 'Bar')
        self.assertEqual(resp.get_header('X-Length'), '7')
        body = resp.body.read()
        self.assertEqual(body, b'FooBody')
        server.close()
        client.close()
    def test_illegal_request(self):
        """A malformed request line makes the server close the connection
        without sending a response."""
        server = HttpServer(hello_app)
        server.listen(('localhost', 0))
        addr = server.addresses[0]
        client = StreamClient()
        client.connect(addr)
        client.write(b'foo\r\n')
        client.write_eof()
        buf = client.read()
        self.assertEqual(buf, b'')
        server.close()
        client.close()
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| |
"""Test config flow."""
from collections import namedtuple
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components.esphome import config_flow
from tests.common import mock_coro, MockConfigEntry
# Stand-in for aioesphomeapi's DeviceInfo with just the fields these
# tests exercise.
MockDeviceInfo = namedtuple("DeviceInfo", ["uses_password", "name"])
@pytest.fixture(autouse=True)
def aioesphomeapi_mock():
    """Mock aioesphomeapi."""
    # Stub the dependency in sys.modules so the config flow can import it
    # without the real package being installed.
    fake_modules = {'aioesphomeapi': MagicMock()}
    with patch.dict('sys.modules', fake_modules):
        yield
@pytest.fixture
def mock_client():
    """Mock APIClient, recording constructor args on the mock itself."""
    with patch('aioesphomeapi.APIClient') as client:
        def fake_constructor(loop, host, port, password):
            """Stand-in constructor: capture args, return the shared mock."""
            client.host = host
            client.port = port
            client.password = password
            return client
        client.side_effect = fake_constructor
        client.connect.return_value = mock_coro()
        client.disconnect.return_value = mock_coro()
        yield client
@pytest.fixture(autouse=True)
def mock_api_connection_error():
    """Mock out the try login method."""
    # Replace the library's APIConnectionError with plain OSError so tests
    # can raise and catch it without the real package.
    patcher = patch('aioesphomeapi.APIConnectionError',
                    new_callable=lambda: OSError)
    with patcher as mock_error:
        yield mock_error
async def test_user_connection_works(hass, mock_client):
    """Test we can finish a config flow."""
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    # First call without input just shows the form.
    result = await flow.async_step_user(user_input=None)
    assert result['type'] == 'form'
    mock_client.device_info.return_value = mock_coro(
        MockDeviceInfo(False, "test"))
    result = await flow.async_step_user(user_input={
        'host': '127.0.0.1',
        'port': 80,
    })
    # Device reports no password -> entry is created immediately.
    assert result['type'] == 'create_entry'
    assert result['data'] == {
        'host': '127.0.0.1',
        'port': 80,
        'password': ''
    }
    assert result['title'] == 'test'
    # The flow connects once to probe the device, then disconnects.
    assert len(mock_client.connect.mock_calls) == 1
    assert len(mock_client.device_info.mock_calls) == 1
    assert len(mock_client.disconnect.mock_calls) == 1
    assert mock_client.host == '127.0.0.1'
    assert mock_client.port == 80
    assert mock_client.password == ''
async def test_user_resolve_error(hass, mock_api_connection_error,
                                  mock_client):
    """Test user step with IP resolve error."""
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    await flow.async_step_user(user_input=None)
    class MockResolveError(mock_api_connection_error):
        """Create an exception with a specific error message."""
        def __init__(self):
            """Initialize."""
            super().__init__("Error resolving IP address")
    # Raising an error carrying this message yields the 'resolve_error'
    # form error (presumably the flow inspects the message text).
    with patch('aioesphomeapi.APIConnectionError',
               new_callable=lambda: MockResolveError,
               ) as exc:
        mock_client.device_info.side_effect = exc
        result = await flow.async_step_user(user_input={
            'host': '127.0.0.1',
            'port': 6053,
        })
    assert result['type'] == 'form'
    assert result['step_id'] == 'user'
    assert result['errors'] == {
        'base': 'resolve_error'
    }
    # Even on failure the client is disconnected again.
    assert len(mock_client.connect.mock_calls) == 1
    assert len(mock_client.device_info.mock_calls) == 1
    assert len(mock_client.disconnect.mock_calls) == 1
async def test_user_connection_error(hass, mock_api_connection_error,
                                     mock_client):
    """Test user step with connection error."""
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    await flow.async_step_user(user_input=None)
    # A generic APIConnectionError during the probe surfaces as
    # 'connection_error' and re-shows the user form.
    mock_client.device_info.side_effect = mock_api_connection_error
    result = await flow.async_step_user(user_input={
        'host': '127.0.0.1',
        'port': 6053,
    })
    assert result['type'] == 'form'
    assert result['step_id'] == 'user'
    assert result['errors'] == {
        'base': 'connection_error'
    }
    # The client is still cleanly disconnected after the failure.
    assert len(mock_client.connect.mock_calls) == 1
    assert len(mock_client.device_info.mock_calls) == 1
    assert len(mock_client.disconnect.mock_calls) == 1
async def test_user_with_password(hass, mock_client):
    """Test user step with password."""
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    await flow.async_step_user(user_input=None)
    # Device reports it is password protected (uses_password=True).
    mock_client.device_info.return_value = mock_coro(
        MockDeviceInfo(True, "test"))
    result = await flow.async_step_user(user_input={
        'host': '127.0.0.1',
        'port': 6053,
    })
    # A password-protected device sends the user to the authenticate step.
    assert result['type'] == 'form'
    assert result['step_id'] == 'authenticate'
    result = await flow.async_step_authenticate(user_input={
        'password': 'password1'
    })
    assert result['type'] == 'create_entry'
    assert result['data'] == {
        'host': '127.0.0.1',
        'port': 6053,
        'password': 'password1'
    }
    assert mock_client.password == 'password1'
async def test_user_invalid_password(hass, mock_api_connection_error,
                                     mock_client):
    """Test user step with invalid password."""
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    await flow.async_step_user(user_input=None)
    mock_client.device_info.return_value = mock_coro(
        MockDeviceInfo(True, "test"))
    await flow.async_step_user(user_input={
        'host': '127.0.0.1',
        'port': 6053,
    })
    # Reconnecting with the supplied password fails -> invalid_password,
    # and the authenticate form is shown again.
    mock_client.connect.side_effect = mock_api_connection_error
    result = await flow.async_step_authenticate(user_input={
        'password': 'invalid'
    })
    assert result['type'] == 'form'
    assert result['step_id'] == 'authenticate'
    assert result['errors'] == {
        'base': 'invalid_password'
    }
async def test_discovery_initiation(hass, mock_client):
    """Test discovery importing works."""
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    # Zeroconf-style service info as delivered by the discovery component.
    service_info = {
        'host': '192.168.43.183',
        'port': 6053,
        'hostname': 'test8266.local.',
        'properties': {}
    }
    mock_client.device_info.return_value = mock_coro(
        MockDeviceInfo(False, "test8266"))
    result = await flow.async_step_discovery(user_input=service_info)
    assert result['type'] == 'create_entry'
    assert result['title'] == 'test8266'
    # The mDNS hostname (minus the trailing dot) is stored, not the IP.
    assert result['data']['host'] == 'test8266.local'
    assert result['data']['port'] == 6053
async def test_discovery_already_configured_hostname(hass, mock_client):
    """Test discovery aborts if already configured via hostname."""
    # An entry with the same mDNS hostname already exists.
    MockConfigEntry(
        domain='esphome',
        data={'host': 'test8266.local', 'port': 6053, 'password': ''}
    ).add_to_hass(hass)
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    service_info = {
        'host': '192.168.43.183',
        'port': 6053,
        'hostname': 'test8266.local.',
        'properties': {}
    }
    result = await flow.async_step_discovery(user_input=service_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
async def test_discovery_already_configured_ip(hass, mock_client):
    """Test discovery aborts if already configured via static IP."""
    # An entry configured with the device's raw IP already exists; the
    # discovered 'host' IP must match it.
    MockConfigEntry(
        domain='esphome',
        data={'host': '192.168.43.183', 'port': 6053, 'password': ''}
    ).add_to_hass(hass)
    flow = config_flow.EsphomeFlowHandler()
    flow.hass = hass
    service_info = {
        'host': '192.168.43.183',
        'port': 6053,
        'hostname': 'test8266.local.',
        'properties': {}
    }
    result = await flow.async_step_discovery(user_input=service_info)
    assert result['type'] == 'abort'
    assert result['reason'] == 'already_configured'
| |
from rdflib import BNode, Graph, Literal, Namespace, RDFS, OWL, XSD
from rdflib.namespace import NamespaceManager
from rdflib.util import first
from rdflib.extras.infixowl import (
BooleanClass,
Class,
Collection,
DeepClassClear,
Individual,
some,
EnumeratedClass,
Property,
Restriction,
max,
)
def test_infix_owl_example1():
    """Walk through the basic infixowl API: class construction, the
    subClassOf/equivalentClass descriptors, boolean (|, &, +=, -=)
    operators, enumerated classes and restrictions."""
    exNs = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", exNs, override=False)
    namespace_manager.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = namespace_manager
    Individual.factoryGraph = g
    classD = Class(exNs.D)
    anonClass = exNs.someProp << some >> classD
    assert str(anonClass) == "( ex:someProp SOME ex:D )"
    a = Class(exNs.Opera, graph=g)
    # Now we can assert rdfs:subClassOf and owl:equivalentClass relationships
    # (in the underlying graph) with other classes using the 'subClassOf'
    # and 'equivalentClass' descriptors which can be set to a list
    # of objects for the corresponding predicates.
    a.subClassOf = [exNs.MusicalWork]
    # We can then access the rdfs:subClassOf relationships
    assert str(list(a.subClassOf)) == "[Class: ex:MusicalWork ]"
    # [Class: ex:MusicalWork ]
    # This can also be used against already populated graphs:
    owlGraph = Graph().parse(str(OWL))
    namespace_manager.bind("owl", OWL, override=False)
    owlGraph.namespace_manager = namespace_manager
    assert (
        str(list(Class(OWL.Class, graph=owlGraph).subClassOf))
        == "[Class: rdfs:Class ]"
    )
    # Operators are also available. For instance we can add ex:Opera to the extension
    # of the ex:CreativeWork class via the '+=' operator
    assert str(a) == "Class: ex:Opera SubClassOf: ex:MusicalWork"
    b = Class(exNs.CreativeWork, graph=g)
    b += a
    assert (
        str(sorted(a.subClassOf, key=lambda c: c.identifier))
        == "[Class: ex:CreativeWork , Class: ex:MusicalWork ]"
    )
    # And we can then remove it from the extension as well
    b -= a
    assert str(a) == "Class: ex:Opera SubClassOf: ex:MusicalWork"
    # Boolean class constructions can also be created with Python operators.
    # For example, The | operator can be used to construct a class consisting of a
    # owl:unionOf the operands:
    c = a | b | Class(exNs.Work, graph=g)
    assert str(c) == "( ex:Opera OR ex:CreativeWork OR ex:Work )"
    # Boolean class expressions can also be operated as lists (using python list
    # operators)
    del c[c.index(Class(exNs.Work, graph=g))]
    assert str(c) == "( ex:Opera OR ex:CreativeWork )"
    # The '&' operator can be used to construct class intersection:
    woman = Class(exNs.Female, graph=g) & Class(exNs.Human, graph=g)
    woman.identifier = exNs.Woman
    assert str(woman) == "( ex:Female AND ex:Human )"
    assert len(woman) == 2
    # Enumerated classes can also be manipulated
    contList = [Class(exNs.Africa, graph=g), Class(exNs.NorthAmerica, graph=g)]
    assert (
        str(EnumeratedClass(members=contList, graph=g))
        == "{ ex:Africa ex:NorthAmerica }"
    )
    # owl:Restrictions can also be instantiated:
    assert (
        str(Restriction(exNs.hasParent, graph=g, allValuesFrom=exNs.Human))
        == "( ex:hasParent ONLY ex:Human )"
    )
    # Restrictions can also be created using Manchester OWL syntax in 'colloquial'
    # Python
    assert (
        str(exNs.hasParent << some >> Class(exNs.Physician, graph=g))
        == "( ex:hasParent SOME ex:Physician )"
    )
    Property(exNs.hasParent, graph=g) << max >> Literal(1)
    assert (
        str(Property(exNs.hasParent, graph=g) << max >> Literal(1))
        == "( ex:hasParent MAX 1 )"
    )
def test_infixowl_deepclassclear():
    """DeepClassClear empties a boolean class expression in place and
    detaches it from classes that referenced it, without touching
    unrelated assertions (classB keeps its own subClassOf)."""
    EX = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", EX, override=False)
    namespace_manager.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = namespace_manager
    Individual.factoryGraph = g
    classB = Class(EX.B)
    classC = Class(EX.C)
    classD = Class(EX.D)
    classE = Class(EX.E)
    classF = Class(EX.F)
    anonClass = EX.someProp << some >> classD
    assert str(anonClass) == "( ex:someProp SOME ex:D )"
    assert str(classF) == "Class: ex:F "
    classF += anonClass
    assert str(list(anonClass.subClassOf)) == "[Class: ex:F ]"
    classA = classE | classF | anonClass
    classB += classA
    classA.equivalentClass = [Class()]
    classB.subClassOf = [EX.someProp << some >> classC]
    assert str(classA) == "( ex:E OR ex:F OR ( ex:someProp SOME ex:D ) )"
    DeepClassClear(classA)
    assert str(classA) == "( )"
    assert list(anonClass.subClassOf) == []
    assert str(classB) == "Class: ex:B SubClassOf: ( ex:someProp SOME ex:C )"
    otherClass = classD | anonClass
    assert str(otherClass) == "( ex:D OR ( ex:someProp SOME ex:D ) )"
    DeepClassClear(otherClass)
    assert str(otherClass) == "( )"
    # delete() removes every triple about the class from the graph.
    otherClass.delete()
    assert list(g.triples((otherClass.identifier, None, None))) == []
def test_infixowl_individual_type():
    """'type' can be assigned to and deleted from an Individual."""
    graph = Graph()
    ind = Individual(OWL.Restriction, graph)
    ind.type = RDFS.Resource
    assert len(list(ind.type)) == 1
    del ind.type
    assert len(list(ind.type)) == 0
def test_infixowl_individual_label():
    """'label' can be assigned to and deleted from an Individual."""
    graph = Graph()
    ind = Individual(OWL.Restriction, graph)
    ind.label = Literal("boo")
    assert len(list(ind.label)) == 3
    del ind.label
    assert hasattr(ind, "label") is False
def test_infixowl_class_hash():
    """Two Class proxies for the same URI hash (and compare) equal."""
    first = Class(OWL.Restriction)
    second = Class(OWL.Restriction)
    assert len({first, second}) == 1
def test_infixowl_class_and():
    """'&' builds an anonymous intersection (BooleanClass) of classes."""
    exNs = Namespace("http://example.com/")
    nsm = NamespaceManager(Graph())
    nsm.bind("ex", exNs, override=False)
    nsm.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = nsm
    # Chain three intersections together.
    female = Class(exNs.Female, graph=g)
    human = Class(exNs.Human, graph=g)
    young = Class(exNs.YoungPerson, graph=g)
    young_woman = female & human & young
    assert str(young_woman) == "ex:YoungPerson THAT ( ex:Female AND ex:Human )"
    assert isinstance(young_woman, BooleanClass) is True
    assert isinstance(young_woman.identifier, BNode) is True
def test_infix_owl_class_getparents():
    """The 'parents' attribute yields taxonomic parents reached via
    disjunction, conjunction, and subsumption."""
    # computed attributes that returns a generator over taxonomic 'parents'
    # by disjunction, conjunction, and subsumption
    exNs = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", exNs, override=False)
    namespace_manager.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = namespace_manager
    Individual.factoryGraph = g
    brother = Class(exNs.Brother)
    sister = Class(exNs.Sister)
    sibling = brother | sister
    sibling.identifier = exNs.Sibling
    assert len(sibling) == 2
    assert str(sibling) == "( ex:Brother OR ex:Sister )"
    # A member of a named union is a child of that union class.
    assert (
        str(first(brother.parents))
        == "Class: ex:Sibling EquivalentTo: ( ex:Brother OR ex:Sister )"
    )
    parent = Class(exNs.Parent)
    male = Class(exNs.Male)
    father = parent & male
    father.identifier = exNs.Father
    # An intersection's parents are each of its conjuncts.
    assert len(list(father.parents)) == 2
    assert str(list(father.parents)) == "[Class: ex:Parent , Class: ex:Male ]"
def test_infixowl_enumeratedclass():
    """EnumeratedClass produces an owl:oneOf collection and serializes
    to the expected n3."""
    exNs = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", exNs, override=False)
    namespace_manager.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = namespace_manager
    Individual.factoryGraph = g
    ogbujiBros = EnumeratedClass(
        exNs.ogbujicBros, members=[exNs.chime, exNs.uche, exNs.ejike]
    )
    assert str(ogbujiBros) == "{ ex:chime ex:uche ex:ejike }"
    # The members live in an RDF collection hanging off owl:oneOf.
    col = Collection(
        g, first(g.objects(predicate=OWL.oneOf, subject=ogbujiBros.identifier))
    )
    assert (
        str(sorted([g.qname(item) for item in col]))
        == "['ex:chime', 'ex:ejike', 'ex:uche']"
    )
    # logger.debug(g.serialize(format="pretty-xml"))
    assert str(g.serialize(format="n3")) == str(
        "@prefix ex: <http://example.com/> .\n"
        "@prefix owl: <http://www.w3.org/2002/07/owl#> .\n"
        "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n"
        "\n"
        "ex:ogbujicBros a owl:Class ;\n"
        "    owl:oneOf ( ex:chime ex:uche ex:ejike ) .\n"
        "\nex:chime a owl:Class .\n"
        "\nex:ejike a owl:Class .\n"
        "\nex:uche a owl:Class .\n"
        "\n"
    )
def test_infixowl_booleanclassextenthelper():
    """BooleanClass defaults to intersectionOf; unionOf renders with OR."""
    graph = Graph()
    Individual.factoryGraph = graph
    EX = Namespace("http://example.com/")
    nsm = NamespaceManager(Graph())
    nsm.bind("ex", EX, override=False)
    graph.namespace_manager = nsm
    fire = Class(EX.Fire)
    water = Class(EX.Water)
    conjunction = BooleanClass(members=[fire, water])
    assert str(conjunction) == "( ex:Fire AND ex:Water )"
    disjunction = BooleanClass(operator=OWL.unionOf, members=[fire, water])
    assert str(disjunction) == "( ex:Fire OR ex:Water )"
def test_infixowl_changeoperator():
    """changeOperator converts AND <-> OR and rejects a no-op conversion."""
    # Converts a unionOf / intersectionOf class expression into one
    # that instead uses the given operator.
    testGraph = Graph()
    Individual.factoryGraph = testGraph
    EX = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", EX, override=False)
    testGraph.namespace_manager = namespace_manager
    fire = Class(EX.Fire)
    water = Class(EX.Water)
    testClass = BooleanClass(members=[fire, water])
    assert str(testClass) == "( ex:Fire AND ex:Water )"
    testClass.changeOperator(OWL.unionOf)
    assert repr(testClass) == "( ex:Fire OR ex:Water )"
    # BUG FIX: the original try/except silently passed if no exception was
    # raised; assert that the redundant conversion really does fail.
    try:
        testClass.changeOperator(OWL.unionOf)
    except Exception as e:
        assert str(e) == "The new operator is already being used!"
    else:
        raise AssertionError(
            "changeOperator should reject converting to the same operator")
def test_infixowl_serialization():
    """A Property-based restriction renders in Manchester syntax and the
    property carries the requested baseType."""
    g1 = Graph()
    g2 = Graph()
    EX = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(g1)
    namespace_manager.bind("ex", EX, override=False)
    # NOTE(review): this rebinds the local name; both managers have bound
    # "ex" on their graphs by now — presumably intentional, but g2 is
    # otherwise unused here. Verify against the original intent.
    namespace_manager = NamespaceManager(g2)
    namespace_manager.bind("ex", EX, override=False)
    # Individual.factoryGraph = g1
    prop = Property(EX.someProp, baseType=OWL.DatatypeProperty)
    restr1 = prop << some >> (Class(EX.Foo))
    assert str(restr1) == "( ex:someProp SOME ex:Foo )"
    assert (
        str(list(Property(EX.someProp, baseType=None).type))
        == "[rdflib.term.URIRef('http://www.w3.org/2002/07/owl#DatatypeProperty')]"
    )
def test_cardinality_zero():
    """A Restriction with cardinality 0 can be constructed without error."""
    graph = Graph()
    EX = Namespace("http://example.com/")
    nsm = NamespaceManager(graph)
    nsm.bind("ex", EX, override=False)
    prop = Property(EX.someProp, baseType=OWL.DatatypeProperty)
    zero = Literal(0, datatype=XSD.nonNegativeInteger)
    Restriction(prop, graph=graph, cardinality=zero)
def test_lshift_rlshift_delimiters():
    """Exercise the '<< some >>' (lshift/rshift) restriction syntax in a
    compound class expression."""
    EX = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", EX, override=False)
    namespace_manager.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = namespace_manager
    Individual.factoryGraph = g
    classB = Class(EX.B)
    classC = Class(EX.C)
    classD = Class(EX.D)
    classE = Class(EX.E)
    classF = Class(EX.F)
    anonClass = EX.someProp << some >> classD
    classF += anonClass
    assert str(list(anonClass.subClassOf)) == "[Class: ex:F ]"
    classA = classE | classF | anonClass
    classB += classA
    classA.equivalentClass = [Class()]
    classB.subClassOf = [EX.someProp << some >> classC]
    assert str(classA) == "( ex:E OR ex:F OR ( ex:someProp SOME ex:D ) )"
def test_matmul_rmatmul_delimiters():
    """Same scenario as test_lshift_rlshift_delimiters, but using the
    '@ some @' (matmul) form of the restriction syntax."""
    EX = Namespace("http://example.com/")
    namespace_manager = NamespaceManager(Graph())
    namespace_manager.bind("ex", EX, override=False)
    namespace_manager.bind("owl", OWL, override=False)
    g = Graph()
    g.namespace_manager = namespace_manager
    Individual.factoryGraph = g
    classB = Class(EX.B)
    classC = Class(EX.C)
    classD = Class(EX.D)
    classE = Class(EX.E)
    classF = Class(EX.F)
    anonClass = EX.someProp @ some @ classD
    classF += anonClass
    assert str(list(anonClass.subClassOf)) == "[Class: ex:F ]"
    classA = classE | classF | anonClass
    classB += classA
    classA.equivalentClass = [Class()]
    classB.subClassOf = [EX.someProp @ some @ classC]
    assert str(classA) == "( ex:E OR ex:F OR ( ex:someProp SOME ex:D ) )"
| |
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to be driven by the Qt mainloop.
In order to use this support, simply do the following::
| app = QApplication(sys.argv) # your code to init Qt
| import qt4reactor
| qt4reactor.install()
alternatively:
| from twisted.application import reactors
| reactors.installReactor('qt4')
Then use twisted.internet APIs as usual. The other methods here are not
intended to be called directly.
If you don't instantiate a QApplication or QCoreApplication prior to
installing the reactor, a QCoreApplication will be constructed
by the reactor. QCoreApplication does not require a GUI so trial testing
can occur normally.
Twisted can be initialized after QApplication.exec_() with a call to
reactor.runReturn(). calling reactor.stop() will unhook twisted but
leave your Qt application running
API Stability: stable
Maintainer: U{Glenn H Tarbox, PhD<mailto:glenn@tarbox.org>}
Previous maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
Original port to QT4: U{Gabe Rudy<mailto:rudy@goldenhelix.com>}
Subsequent port by therve
"""
__all__ = ['install']
import sys
import time
from zope.interface import implements
from PyQt4.QtCore import QSocketNotifier, QObject, SIGNAL, QTimer, QCoreApplication
from PyQt4.QtCore import QEventLoop
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import log
from twisted.internet.posixbase import PosixReactorBase
class TwistedSocketNotifier(QSocketNotifier):
    """
    Connection between an fd event and reader/writer callbacks.
    """
    def __init__(self, reactor, watcher, type):
        # 'type' selects whether Qt watches the fd for readability or
        # writability; the matching callback is wired to Qt's
        # 'activated(int)' signal.
        QSocketNotifier.__init__(self, watcher.fileno(), type)
        self.reactor = reactor
        self.watcher = watcher
        self.fn = None
        if type == QSocketNotifier.Read:
            self.fn = self.read
        elif type == QSocketNotifier.Write:
            self.fn = self.write
        QObject.connect(self, SIGNAL("activated(int)"), self.fn)
    def shutdown(self):
        # Disconnect from Qt, drop references, and schedule Qt-side
        # deletion of this notifier object.
        QObject.disconnect(self, SIGNAL("activated(int)"), self.fn)
        self.setEnabled(False)
        self.fn = self.watcher = None
        self.deleteLater()
    def read(self, sock):
        # Dispatch a readable event to the Twisted selectable. If doRead
        # reports a reason (or raises), the selectable is disconnected.
        w = self.watcher
        #self.setEnabled(False)    # ??? do I need this?
        def _read():
            why = None
            try:
                why = w.doRead()
            except:
                log.err()
                why = sys.exc_info()[1]
            if why:
                self.reactor._disconnectSelectable(w, why, True)
            elif self.watcher:
                pass
                #self.setEnabled(True)
        log.callWithLogger(w, _read)
        # Nudge the reactor so queued calls run promptly.
        self.reactor.reactorInvocation()
    def write(self, sock):
        # Same as read() but for writable events; the notifier is disabled
        # while the write callback runs and re-enabled only on success.
        w = self.watcher
        self.setEnabled(False)
        def _write():
            why = None
            try:
                why = w.doWrite()
            except:
                log.err()
                why = sys.exc_info()[1]
            if why:
                self.reactor._disconnectSelectable(w, why, False)
            elif self.watcher:
                self.setEnabled(True)
        log.callWithLogger(w, _write)
        self.reactor.reactorInvocation()
class fakeApplication(QEventLoop):
    """Minimal application stand-in: a bare Qt event loop with exec_()."""
    def __init__(self):
        super(fakeApplication, self).__init__()
    def exec_(self):
        super(fakeApplication, self).exec_()
class QTReactor(PosixReactorBase):
    """
    Qt based reactor.

    File descriptors are watched with TwistedSocketNotifier; timed and
    queued calls are driven by a single-shot QTimer whose callback is
    reactorInvokePrivate().
    """
    implements(IReactorFDSet)  # zope.interface class advice (Python 2 style)
    _timer = None
    def __init__(self):
        self._reads = {}    # reader selectable -> TwistedSocketNotifier
        self._writes = {}   # writer selectable -> TwistedSocketNotifier
        self._timer = QTimer()
        self._timer.setSingleShot(True)
        if QCoreApplication.startingUp():
            # No Q(Core)Application yet: create one so trial/non-GUI use works.
            self.qApp = QCoreApplication([])
            self._ownApp = True
        else:
            self.qApp = QCoreApplication.instance()
            self._ownApp = False
        self._blockApp = None
        self._readWriteQ = []
        """ some debugging instrumentation """
        self._doSomethingCount = 0
        PosixReactorBase.__init__(self)
    def addReader(self, reader):
        if not reader in self._reads:
            self._reads[reader] = TwistedSocketNotifier(self, reader,
                                                        QSocketNotifier.Read)
    def addWriter(self, writer):
        if not writer in self._writes:
            self._writes[writer] = TwistedSocketNotifier(self, writer,
                                                         QSocketNotifier.Write)
    def removeReader(self, reader):
        if reader in self._reads:
            #self._reads[reader].shutdown()
            #del self._reads[reader]
            self._reads.pop(reader).shutdown()
    def removeWriter(self, writer):
        if writer in self._writes:
            self._writes[writer].shutdown()
            #del self._writes[writer]
            self._writes.pop(writer)
    def removeAll(self):
        # Remove all selectables and return them (used at shutdown).
        return self._removeAll(self._reads, self._writes)
    def getReaders(self):
        return self._reads.keys()
    def getWriters(self):
        return self._writes.keys()
    def callLater(self, howlong, *args, **kargs):
        # Schedule via the base class, then poke the Qt timer so the new
        # delayed call is taken into account immediately.
        rval = super(QTReactor, self).callLater(howlong, *args, **kargs)
        self.reactorInvocation()
        return rval
    def crash(self):
        super(QTReactor, self).crash()
    def iterate(self, delay=0.0):
        # Run one reactor iteration, pumping Qt events for up to 'delay'
        # seconds; restores the previous 'running' flag on exit.
        t = self.running    # not sure I entirely get the state of running
        self.running = True
        self._timer.stop()  # in case its not (rare?)
        try:
            if delay == 0.0:
                self.reactorInvokePrivate()
                self._timer.stop()  # supports multiple invocations
            else:
                endTime = delay + time.time()
                self.reactorInvokePrivate()
                while True:
                    t = endTime - time.time()
                    if t <= 0.0:
                        return
                    # NOTE(review): 1010 looks like a seconds->ms conversion
                    # (expected 1000); confirm whether the extra 1% slack is
                    # intentional.
                    self.qApp.processEvents(QEventLoop.AllEvents |
                                            QEventLoop.WaitForMoreEvents, t * 1010)
        finally:
            # 't' was re-used above; on the delay==0.0 path it still holds
            # the saved 'running' flag.
            self.running = t
    def addReadWrite(self, t):
        self._readWriteQ.append(t)
    def runReturn(self, installSignalHandlers=True):
        # Hook twisted into an already-running Qt loop and return; the
        # QTimer drives subsequent reactor iterations.
        QObject.connect(self._timer, SIGNAL("timeout()"),
                        self.reactorInvokePrivate)
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self._timer.start(0)
    def run(self, installSignalHandlers=True):
        # Blocking run: spin the owned QApplication, or a bare QEventLoop
        # when an application object already exists.
        try:
            if self._ownApp:
                self._blockApp = self.qApp
            else:
                self._blockApp = fakeApplication()
            self.runReturn(installSignalHandlers)
            self._blockApp.exec_()
        finally:
            self._timer.stop()  # should already be stopped
    def reactorInvocation(self):
        # Fire the (already started) timer as soon as control returns to Qt.
        self._timer.setInterval(0)
    def reactorInvokePrivate(self):
        # One reactor "tick": run due calls, pump Qt events, re-arm timer.
        if not self.running:
            self._blockApp.quit()
        self._doSomethingCount += 1
        self.runUntilCurrent()
        t = self.timeout()
        if t is None:
            t = 0.1
        else:
            t = min(t, 0.1)
        # NOTE(review): same suspicious 1010 factor as iterate() — looks
        # like a seconds->milliseconds conversion; confirm.
        self._timer.setInterval(t * 1010)
        self.qApp.processEvents()   # could change interval
        self._timer.start()
    def doIteration(self):
        # PosixReactorBase hook; this reactor iterates via Qt instead.
        assert False, "doiteration is invalid call"
def install():
    """
    Configure the twisted mainloop to be run inside the qt mainloop.
    """
    from twisted.internet import main
    main.installReactor(QTReactor())
| |
import sublime, sublime_plugin
from simplenote import Simplenote
import functools
import time
import copy
from collections import deque
from os import path, makedirs, remove, listdir
from datetime import datetime
from threading import Semaphore, Lock
from operations import NoteCreator, MultipleNoteContentDownloader, GetNotesDelta, NoteDeleter, NoteUpdater
def cmp_to_key(mycmp):
    """Wrap an old-style cmp(a, b) comparator as a key= callable."""
    class _Key(object):
        def __init__(self, obj, *args):
            self.obj = obj
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
    return _Key
def sort_notes(a_note, b_note):
    """Comparator: pinned notes rank highest, then newest modifydate."""
    if 'pinned' in a_note['systemtags']:
        return 1
    if 'pinned' in b_note['systemtags']:
        return -1
    stamp_a = datetime.fromtimestamp(float(a_note['modifydate']))
    stamp_b = datetime.fromtimestamp(float(b_note['modifydate']))
    return cmp(stamp_a, stamp_b)
def show_message(message):
    """Show `message` in the status bar of every open view ('' clears)."""
    text = message or ''
    for window in sublime.windows():
        for view in window.views():
            view.set_status('QuickSimplenote', text)
def remove_status():
    """Clear the QuickSimplenote status message from all views."""
    show_message(None)
def write_note_to_path(note, filepath):
    """Write the note's content to `filepath` (binary, truncating).

    A note resume without a downloaded 'content' field still truncates the
    file but writes nothing, matching the previous behaviour.  The file is
    now closed even when an unexpected exception escapes (the original
    leaked the handle in that case).
    """
    with open(filepath, 'wb') as f:
        try:
            f.write(note['content'])
        except KeyError:
            # Content not downloaded yet: leave the file empty.
            pass
def open_note(note, window=None):
    """Dump the note to its temp file and open it; returns the new view."""
    target_window = window or sublime.active_window()
    filepath = get_path_for_note(note)
    write_note_to_path(note, filepath)
    return target_window.open_file(filepath)
def get_filename_for_note(note):
    """Build '<sanitized title> (<key>)<ext>' for the note's temp file."""
    import re
    import string
    note_name = get_note_name(note)
    # Strip characters that are not safe in file names.
    allowed = "-_.() %s%s" % (string.ascii_letters, string.digits)
    base = ''.join(ch for ch in note_name if ch in allowed)
    # The extension is chosen by the first title regex that matches.
    extension = ''
    extension_map = settings.get('title_extension_map')
    if extension_map:
        for item in extension_map:
            pattern = re.compile(item['title_regex'], re.UNICODE)
            if re.search(pattern, note_name):
                extension = '.' + item['extension']
                break
    return base + ' (' + note['key'] + ')' + extension
def get_path_for_note(note):
    """Absolute path of the note's backing file inside the temp folder."""
    return path.join(temp_path, get_filename_for_note(note))
def get_note_from_path(view_filepath):
    """Map a temp-folder file path back to its note resume, or None.

    First tries an exact filename match; if the title changed since the
    file was written, falls back to extracting the note key from the last
    parenthesised group in the filename.
    """
    note = None
    if view_filepath:
        # Only files living in our temp folder can be notes.
        if path.dirname(view_filepath) == temp_path:
            note_filename = path.split(view_filepath)[1]
            note = [note for note in notes if get_filename_for_note(note) == note_filename]
            if not note:
                # Filename no longer matches the current title: recover the
                # key from the trailing "(<key>)" marker instead.
                import re
                pattern = re.compile(ur'\((.*?)\)')
                results = re.findall(pattern, note_filename)
                if results:
                    noteKey = results[ len(results) - 1]
                    note = [note for note in notes if note['key'] == noteKey]
            if note:
                note = note[0]
    return note
def get_note_name(note):
    """Title of a note: its first content line, or 'untitled' if empty.

    Notes whose content was never downloaded also yield 'untitled'.
    """
    try:
        content = note['content']
    except Exception, e:
        return 'untitled'
    # The first line of the content is the title.
    index = content.find('\n');
    if index > -1:
        title = content[:index]
    else:
        if content:
            title = content
        else:
            title = 'untitled'
    # Content arrives as UTF-8 bytes; titles are handled as unicode.
    title = title.decode('utf-8')
    return title
def handle_open_filename_change(old_file_path, updated_note):
    """Reopen an open note view when the note's filename has changed.

    Returns True when a rename was detected (the old file is removed and,
    if the note was open, its view is replaced), False otherwise.
    """
    new_file_path = get_path_for_note(updated_note)
    old_note_view = None
    new_view = None
    # If name changed
    if old_file_path != new_file_path:
        # Save the current active view because we might lose the focus
        old_active_view = sublime.active_window().active_view()
        # Search for the view of the open note
        for view_list in [window.views() for window in sublime.windows()]:
            for view in view_list:
                if view.file_name() == old_file_path:
                    old_note_view = view
                    break
        # If found
        if old_note_view:
            # Open the note in a new view
            new_view = open_note(updated_note, old_note_view.window())
            # Close the old dirty note
            old_note_view_id = old_note_view.id()
            old_active_view_id = old_active_view.id()
            if old_note_view.window():
                old_note_window_id = old_note_view.window().id()
            else:
                old_note_window_id = sublime.active_window() # Sometimes this happens on Sublime 2...
            close_view(old_note_view)
            # Focus on the new view or on the previous one depending
            # on where we were
            if old_note_view_id == old_active_view_id:
                old_note_window = [window for window in sublime.windows() if window.id() == old_note_window_id]
                if old_note_window:
                    old_note_window[0].focus_view(new_view)
            else:
                sublime.active_window().focus_view(old_active_view)
        try:
            # Best effort: the old backing file may already be gone.
            remove(old_file_path)
        except OSError as e:
            pass
        return True
    return False
def close_view(view):
    """Close `view` without a save prompt (mark it scratch first)."""
    view.set_scratch(True)
    window = view.window() or sublime.active_window()
    window.focus_view(view)
    window.run_command("close_file")
def synch_note_resume(existing_note_entry, updated_note_resume):
    """Copy every field of the resume into the existing entry, in place."""
    existing_note_entry.update(updated_note_resume)
def update_note(existing_note, updated_note):
    """Merge server fields into the note and refresh local bookkeeping."""
    synch_note_resume(existing_note, updated_note)
    existing_note['needs_update'] = False
    existing_note['local_modifydate'] = time.time()
    # The filename depends on the (possibly new) title and key.
    existing_note['filename'] = get_filename_for_note(existing_note)
def load_notes():
    """Read cached notes from disk; a missing or empty cache yields []."""
    cached = []
    try:
        with open(path.join(package_path, 'note_cache'), 'rb') as cache_file:
            cached = pickle.load(cache_file)
    except (EOFError, IOError) as e:
        pass
    return cached
def save_notes(notes):
    """Persist the note resume list to the on-disk cache."""
    cache_path = path.join(package_path, 'note_cache')
    with open(cache_path, 'w+b') as cache_file:
        pickle.dump(notes, cache_file)
class OperationManager:
    """Runs queued background operations one at a time.

    Process-wide singleton (use OperationManager.instance()).  While an
    operation is alive, a 1-second poll refreshes the status-bar text;
    when it finishes, the next queued operation is started.
    """
    _instance = None
    _lock = Lock()

    @classmethod
    def instance(cls):
        # Double-checked locking: the common path avoids taking the lock.
        if not cls._instance:
            with cls._lock:
                if not cls._instance:
                    cls._instance = OperationManager()
        return cls._instance

    def __init__(self):
        self.operations = deque([])
        self.running = False
        self.current_operation = None

    def is_running(self):
        return self.running

    def add_operation(self, operation):
        self.operations.append(operation)
        if not self.running:
            self.run()

    def run(self):
        self.start_next_operation()
        sublime.set_timeout(self.check_operations, 1000)
        self.running = True

    def start_next_operation(self):
        self.current_operation = self.operations.popleft()
        self.current_operation.start()

    def check_operations(self):
        if self.current_operation == None:
            return
        if self.current_operation.is_alive():
            # Still working: refresh the progress text.
            status_text = self.current_operation.get_update_run_text()
        else:
            # Finished: show the result, deliver callbacks via join(),
            # then move on to the next queued operation (if any).
            status_text = self.current_operation.get_run_finished_text()
            self.current_operation.join()
            if len(self.operations) > 0:
                self.start_next_operation()
            else:
                self.running = False
                sublime.set_timeout(remove_status, 1000)
        show_message(status_text)
        if self.running:
            sublime.set_timeout(self.check_operations, 1000)
class HandleNoteViewCommand(sublime_plugin.EventListener):
    """View events for note files: debounced autosave, syntax, upload."""

    # Pending autosave entries, one per note key: a counter of outstanding
    # modification events plus a lock guarding it.  Class-level: shared by
    # every view.
    waiting_to_save = []
    def on_modified(self, view):
        def flush_saves():
            # Defer the save while a sync operation is in flight.
            if OperationManager.instance().is_running():
                sublime.set_timeout(flush_saves, 1000)
                return
            # NOTE: 'note' is captured from the enclosing on_modified call.
            for entry in HandleNoteViewCommand.waiting_to_save:
                if entry['note_key'] == note['key']:
                    with entry['lock']:
                        entry['count'] = entry['count'] - 1
                        # Only the last pending flush performs the save.
                        if entry['count'] == 0:
                            view.run_command("save")
                    break
        view_filepath = view.file_name()
        note = get_note_from_path(view_filepath)
        if note:
            debounce_time = settings.get('autosave_debounce_time')
            if not debounce_time:
                return
            debounce_time = debounce_time * 1000
            # Bump (or create) this note's pending-save counter, then
            # schedule a flush after the debounce window.
            found = False
            for entry in HandleNoteViewCommand.waiting_to_save:
                if entry['note_key'] == note['key']:
                    with entry['lock']:
                        entry['count'] = entry['count'] + 1
                    found = True
                    break
            if not found:
                new_entry = {}
                new_entry['note_key'] = note['key']
                new_entry['lock'] = Lock()
                new_entry['count'] = 1
                HandleNoteViewCommand.waiting_to_save.append(new_entry)
            sublime.set_timeout(flush_saves, debounce_time)
    def on_load(self, view):
        # Apply the configured syntax to note views as they open.
        view_filepath = view.file_name()
        note = get_note_from_path(view_filepath)
        syntax = settings.get('note_syntax')
        if note and syntax:
            view.set_syntax_file(syntax)
    def get_current_content(self, view):
        """Return the on-disk content of the view's file ('' on error)."""
        note_file_content = ""
        try:
            with open(view.file_name(),'r') as content_file:
                note_file_content = content_file.read()
        except (EOFError, IOError) as e:
            pass
        return note_file_content
    def handle_note_changed(self, modified_note_resume, content, old_file_path, open_view):
        """Callback after an upload: merge the server's resume back in."""
        global notes
        # We get all the resume data back. We have to merge it
        # with our data (extended fields and content)
        for note in notes:
            if note['key'] == modified_note_resume['key']:
                # Set content to the updated one
                # or to the view's content if we don't have any update
                updated_from_server = False
                if not 'content' in modified_note_resume:
                    modified_note_resume['content'] = content
                else:
                    updated_from_server = True
                update_note(note, modified_note_resume) # Update all fields
                name_changed = handle_open_filename_change(old_file_path, note)
                # If we didn't reopen the view with the name changed, but the content has changed
                # we have to update the view anyway
                if updated_from_server and not name_changed:
                    filepath = get_path_for_note(note)
                    write_note_to_path(note, filepath)
                    sublime.set_timeout(functools.partial(open_view.run_command, 'revert'), 0)
                break
        notes.sort(key=cmp_to_key(sort_notes), reverse=True)
        save_notes(notes)
    def on_post_save(self, view):
        view_filepath = view.file_name()
        note = get_note_from_path(view_filepath)
        if note:
            # Update with new content
            updated_note = copy.deepcopy(note)
            # Handle when the note changes elsewhere and the user goes to that tab:
            # sublime reloads the view, it's handled as changed and sent here
            if 'content' in updated_note and updated_note['content'] == self.get_current_content(view):
                return
            updated_note['content'] = self.get_current_content(view)
            # Send update
            update_op = NoteUpdater(note=updated_note, simplenote_instance=simplenote_instance)
            update_op.set_callback(self.handle_note_changed,
                                   {'content': updated_note['content'],
                                    'old_file_path': view_filepath,
                                    'open_view': view})
            OperationManager.instance().add_operation(update_op)
class ShowQuickSimplenoteNotesCommand(sublime_plugin.ApplicationCommand):
    """Command: list all notes in the quick panel; open the chosen one."""

    def handle_selected(self, selected_index):
        # selected_index is -1 when the panel was dismissed.
        if not selected_index > -1:
            return
        selected_note = notes[selected_index]
        open_note(selected_note)
    def run(self):
        # Lazily start the plugin (credentials, first sync) if needed.
        if not started:
            if not start():
                return
        # NOTE(review): 'i' is incremented but never used.
        i = 0
        keys = []
        for note in notes:
            i += 1
            title = get_note_name(note)
            keys.append(title)
        sublime.active_window().show_quick_panel(keys, self.handle_selected)
import pickle
class StartQuickSimplenoteSyncCommand(sublime_plugin.ApplicationCommand):
    """Full sync pipeline: fetch delta, download content, merge, persist."""

    def set_result(self, new_notes):
        # Publish the merged list as the module-level note index.
        global notes
        notes = new_notes
        notes.sort(key=cmp_to_key(sort_notes), reverse=True)
    def merge_delta(self, updated_note_resume, existing_notes):
        """Merge the server's resume list into the cached entries."""
        # Here we create the note_resume we use on the rest of the app.
        # The note_resume we store consists of:
        #   The note resume as it comes from the simplenote api.
        #   The title, filename and last modified date of the local cache entry
        # Look at the new resume and find existing entries
        for current_updated_note_resume in updated_note_resume:
            existing_note_entry = None
            for existing_note in existing_notes:
                if existing_note['key'] == current_updated_note_resume['key']:
                    existing_note_entry = existing_note
                    break
            # If we have it already
            if existing_note_entry:
                # Mark for update if needed
                try:
                    # Note with old content
                    if existing_note_entry['local_modifydate'] < float(current_updated_note_resume['modifydate']):
                        synch_note_resume(existing_note_entry, current_updated_note_resume)
                        existing_note_entry['needs_update'] = True
                    else:
                        # Up to date note
                        existing_note_entry['needs_update'] = False
                except KeyError as e:
                    # Note that never got the content downloaded:
                    existing_note_entry['needs_update'] = True
            # New note
            else:
                new_note_entry = {'needs_update': True}
                synch_note_resume(new_note_entry, current_updated_note_resume)
                existing_notes.append(new_note_entry)
        # Look at the existing notes to find deletions
        updated_note_resume_keys = [note['key'] for note in updated_note_resume]
        deleted_notes = [deleted_note for deleted_note in existing_notes if deleted_note['key'] not in updated_note_resume_keys]
        for deleted_note in deleted_notes:
            existing_notes.remove(deleted_note)
        save_notes(existing_notes)
        self.notes_synch(existing_notes)
    def notes_synch(self, notes):
        """Queue content downloads for notes flagged needs_update."""
        # Here we synch updated notes in order of priority.
        # Open notes:
        #   Locally unsaved
        #   Locally saved
        # Other notes in order of modifydate and priority
        open_files_dirty = []
        open_files_ok = []
        for view_list in [window.views() for window in sublime.windows()]:
            for view in view_list:
                if view.file_name() == None:
                    continue
                if view.is_dirty():
                    open_files_dirty.append(path.split(view.file_name())[1])
                else:
                    open_files_ok.append(path.split(view.file_name())[1])
        # Classify notes
        lu = []
        ls = []
        others = []
        for note in notes:
            if not note['needs_update']:
                continue
            try:
                filename = note['filename']
            except KeyError as e:
                # Note without a local file yet: plain-priority bucket.
                others.append(note)
                continue
            if filename in open_files_dirty:
                lu.append(note)
            elif filename in open_files_ok:
                ls.append(note)
            else:
                others.append(note)
        # Sorted by priority/importance
        lu.sort(key=cmp_to_key(sort_notes), reverse=True)
        ls.sort(key=cmp_to_key(sort_notes), reverse=True)
        others.sort(key=cmp_to_key(sort_notes), reverse=True)
        # Start updates
        sem = Semaphore(3)
        show_message('QuickSimplenote: Downloading content')
        if lu:
            down_op = MultipleNoteContentDownloader(sem, simplenote_instance=simplenote_instance, notes=lu)
            down_op.set_callback(self.merge_open, {'existing_notes':notes, 'dirty':True})
            OperationManager.instance().add_operation(down_op)
        if ls:
            down_op = MultipleNoteContentDownloader(sem, simplenote_instance=simplenote_instance, notes=ls)
            down_op.set_callback(self.merge_open, {'existing_notes':notes})
            OperationManager.instance().add_operation(down_op)
        if others:
            down_op = MultipleNoteContentDownloader(sem, simplenote_instance=simplenote_instance, notes=others)
            down_op.set_callback(self.merge_notes, {'existing_notes':notes})
            OperationManager.instance().add_operation(down_op)
    def merge_open(self, updated_notes, existing_notes, dirty=False):
        """Merge downloaded content into notes that are open in views."""
        global settings
        auto_overwrite_on_conflict = settings.get('on_conflict_use_server')
        do_nothing_on_conflict = settings.get('on_conflict_leave_alone')
        update = False
        # If it's not a conflict or it's a conflict we can resolve
        if ( not dirty ) or ( dirty and not do_nothing_on_conflict ):
            # If we don't have an overwrite policy, ask the user
            if ( not auto_overwrite_on_conflict ) and dirty and len( updated_notes ) > 0:
                note_names = '\n'.join([get_note_name(updated_note) for updated_note in updated_notes])
                update = sublime.ok_cancel_dialog('Note(s):\n%s\nAre in conflict. Overwrite?' % note_names, 'Overwrite')
            if ( not dirty ) or update or auto_overwrite_on_conflict:
                # Update notes if the change is clean, or we were asked to update
                for note in existing_notes:
                    for updated_note in updated_notes:
                        # If we find the updated note
                        if note['key'] == updated_note['key']:
                            old_file_path = get_path_for_note(note)
                            new_file_path = get_path_for_note(updated_note)
                            # Update contents
                            write_note_to_path(updated_note, new_file_path)
                            # Handle filename change (note has the old filename value)
                            handle_open_filename_change(old_file_path, updated_note)
                            # Reload view of the note if it's selected
                            for view in [window.active_view() for window in sublime.windows()]:
                                if view.file_name() == new_file_path:
                                    sublime.set_timeout(functools.partial(view.run_command, 'revert'), 0)
                            break
        # Merge
        self.merge_notes(updated_notes, existing_notes)
    def merge_notes(self, updated_notes, existing_notes):
        """Fold downloaded note data into the cache and publish it."""
        # Merge
        for note in existing_notes:
            if not note['needs_update']:
                continue
            for updated_note in updated_notes:
                if note['key'] == updated_note['key']:
                    update_note(note, updated_note)
        save_notes(existing_notes)
        self.set_result(existing_notes)
    def run(self):
        show_message('QuickSimplenote: Synching')
        get_delta_op = GetNotesDelta(simplenote_instance=simplenote_instance)
        get_delta_op.set_callback(self.merge_delta, {'existing_notes':notes})
        OperationManager.instance().add_operation(get_delta_op)
class CreateQuickSimplenoteNoteCommand(sublime_plugin.ApplicationCommand):
    """Command: create a new note on the server, then open it locally."""

    def handle_new_note(self, result):
        # result is the freshly created note resume (falsy on failure).
        if result:
            global notes
            update_note(result, result)
            notes.append(result)
            notes.sort(key=cmp_to_key(sort_notes), reverse=True)
            save_notes(notes)
            open_note(result)
    def run(self):
        creation_op = NoteCreator(simplenote_instance=simplenote_instance)
        creation_op.set_callback(self.handle_new_note)
        OperationManager.instance().add_operation(creation_op)
class DeleteQuickSimplenoteNoteCommand(sublime_plugin.ApplicationCommand):
    """Command: delete the note shown in the active view."""

    def handle_deletion(self, result):
        # Server delete succeeded: drop the cache entry, the temp file
        # (best effort) and the open view.
        global notes
        notes.remove(self.note)
        save_notes(notes)
        try:
            remove(get_path_for_note(self.note))
        except OSError as e:
            pass
        close_view(self.note_view)
    def run(self):
        self.note_view = sublime.active_window().active_view()
        self.note = get_note_from_path(self.note_view.file_name())
        if self.note:
            deletion_op = NoteDeleter(note=self.note, simplenote_instance=simplenote_instance)
            deletion_op.set_callback(self.handle_deletion)
            OperationManager.instance().add_operation(deletion_op)
def sync():
    """Kick off a sync unless one is in flight; reschedule per settings."""
    manager = OperationManager.instance()
    if manager.is_running():
        print('QuickSimplenote: Sync ommited %s' % time.time())
    else:
        print('QuickSimplenote: Syncing: %s' % time.time())
        sublime.run_command('start_quick_simplenote_sync')
    sync_every = settings.get('sync_every')
    if sync_every > 0:
        sublime.set_timeout(sync, sync_every * 1000)
def start():
    """Initialise the Simplenote session; returns whether it started."""
    global started, simplenote_instance, settings
    username = settings.get('username')
    password = settings.get('password')
    if not (username and password):
        # Missing credentials: open the settings file and tell the user.
        filepath = path.join(package_path, 'quick_simplenote.sublime-settings')
        sublime.active_window().open_file(filepath)
        show_message('QuickSimplenote: Please configure username/password')
        sublime.set_timeout(remove_status, 2000)
        started = False
        return started
    simplenote_instance = Simplenote(username, password)
    sync()
    started = True
    return started
def reload_if_needed():
    """Autostart on the second of Sublime's duplicated settings callbacks."""
    global settings, started, reload_calls
    reload_calls += 1
    # Sublime calls this twice for some reason :( — act on even calls only.
    if reload_calls % 2 != 0:
        return
    if settings.get('autostart'):
        sublime.set_timeout(start, 2000)  # I know...
        print('QuickSimplenote: Autostarting')
# --- Module initialisation (runs when Sublime loads the plugin) ---------
reload_calls = -1
simplenote_instance = None
started = False
notes = []
package_path = path.join(sublime.packages_path(), "QuickSimplenote")
temp_path = path.join(package_path, "temp")
notes = load_notes()
# Temp files not owned by any cached note are stale and get removed.
# NOTE(review): assumes every cached note has a 'filename' key; entries
# cached before their content/filename was set may not — confirm.
note_files = [note['filename'] for note in notes]
if not path.exists(temp_path):
    makedirs(temp_path)
for f in listdir(temp_path):
    if f not in note_files:
        remove(path.join(temp_path, f))
settings = sublime.load_settings('quick_simplenote.sublime-settings')
# Reset observers before re-adding them, so plugin reloads don't stack.
settings.clear_on_change('username')
settings.clear_on_change('password')
settings.add_on_change('username', reload_if_needed)
settings.add_on_change('password', reload_if_needed)
reload_if_needed()
| |
# Copyright (c) 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume api for Huawei SDSHypervisor systems.
"""
import uuid
from oslo.utils import units
from oslo_config import cfg
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.huaweistorhyper import utils as storhyper_utils
from cinder.volume.drivers.huaweistorhyper import vbs_client
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Volume-type extra-spec keys recognised as QoS levels.
QOS_KEY = ["Qos-high", "Qos-normal", "Qos-low"]
# Supported clone strategies.
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
# Polling intervals (seconds) for asynchronous backend operations.
CHECK_VOLUME_DATA_FINISHED_INTERVAL = 10
CHECK_VOLUME_DELETE_FINISHED_INTERVAL = 2
CHECK_SNAPSHOT_DELETE_FINISHED_INTERVAL = 2
# Driver-specific oslo.config options, registered globally below.
huawei_storhyper_opts = [
    cfg.StrOpt('cinder_huawei_sds_conf_file',
               default='/etc/cinder/cinder_huawei_storac_conf.xml',
               help='huawei storagehyper driver config file path'),
]
CONF.register_opts(huawei_storhyper_opts)
class StorACDriver(driver.VolumeDriver):
    """Cinder volume driver for Huawei SDSHypervisor (StorAC) backends."""
    VERSION = '1.0.0'
    # Backend return code meaning the object to delete is already gone.
    del_complete_code = '-900079'
    def __init__(self, *args, **kwargs):
        super(StorACDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(huawei_storhyper_opts)
        # XML config file describing the backend (pools, policies, ...).
        self._conf_file = self.configuration.cinder_huawei_sds_conf_file
        LOG.debug('Conf_file is: ' + self._conf_file)
        self._vbs_client = vbs_client.VbsClient(self._conf_file)
        # Stats start from config-derived defaults until a live query runs.
        self._volume_stats = self._get_default_volume_stats()
    def check_for_setup_error(self):
        """No extra setup validation is performed for this backend."""
        pass
def initialize_connection(self, volume, connector):
LOG.debug('Initialize connection.')
properties = {}
properties['volume_id'] = volume['name']
return {'driver_volume_type': 'HUAWEISDSHYPERVISOR',
'data': properties}
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate the map."""
        # Nothing to do driver-side; the hypervisor connector handles it.
        pass
def create_volume(self, volume):
"""Create a new volume."""
volume_name = volume['name']
LOG.debug('Create volume, volume name: %s.' % volume_name)
volume_size = self._size_translate(volume['size'])
volume_info = self._create_storage_info('volume_info')
volume_info['vol_name'] = volume_name
volume_info['vol_size'] = volume_size
volume_info['pool_id'] = self._get_volume_pool_id(volume['host'])
self._update_volume_info_from_volume(volume_info, volume)
self._send_request('CREATE_VOLUME_REQ',
volume_info,
'create volume error.')
return {'provider_location': volume['name']}
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
tgt_vol_name = volume['name']
src_snapshot_name = snapshot['name']
LOG.debug('Create volume from snapshot: '
'tgt_vol_name: %(tgt_vol_name)s, '
'src_snapshot_name: %(src_snapshot_name)s, '
'vol_size: %(vol_size)s.'
% {'tgt_vol_name': tgt_vol_name,
'src_snapshot_name': src_snapshot_name,
'vol_size': volume['size']})
self._create_linked_volume_from_snap(src_snapshot_name,
tgt_vol_name,
volume['size'])
return {'provider_location': volume['name']}
def create_cloned_volume(self, tgt_volume, src_volume):
"""Create a clone volume."""
src_vol_name = src_volume['name']
tgt_vol_name = tgt_volume['name']
LOG.debug('Create cloned volume: src volume: %(src)s, '
'tgt volume: %(tgt)s.' % {'src': src_vol_name,
'tgt': tgt_vol_name})
src_vol_id = src_volume.get('provider_location')
if not src_vol_id:
err_msg = (_LE('Source volume %(name)s does not exist.')
% {'name': src_vol_name})
LOG.error(err_msg)
raise exception.VolumeNotFound(volume_id=src_vol_name)
volume_info = self._create_target_volume(src_volume,
tgt_vol_name,
tgt_volume)
tgt_vol_id = volume_info['vol_name']
self.copy_volume_data(context.get_admin_context(), src_volume,
tgt_volume, remote=None)
return {'provider_location': tgt_vol_id}
def delete_volume(self, volume):
"""Delete a volume."""
req_paras = {}
req_paras['vol_name'] = volume['name']
self._send_request('DELETE_VOLUME_REQ',
req_paras,
'Delete volume failed.')
self._wait_for_volume_delete(volume['name'])
def extend_volume(self, volume, new_size):
"""Extend the size of an existing volume."""
LOG.debug('Extend volume: %s.' % volume['name'])
volume_name = volume['name']
new_volume_size = self._size_translate(new_size)
volume_info = {"vol_name": volume_name,
"vol_size": new_volume_size}
self._send_request('EXTEND_VOLUME_REQ',
volume_info,
'extend volume failed.')
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
if refresh:
try:
self._get_volume_stats()
except Exception as ex:
self._volume_stats = self._get_default_volume_stats()
msg = (_LE('Error from get volume stats: '
'%s, using default stats.') % ex)
LOG.error(msg)
return self._volume_stats
def create_snapshot(self, snapshot):
create_snapshot_req = {}
create_snapshot_req['snap_name'] = snapshot['name']
create_snapshot_req['vol_name'] = snapshot['volume_name']
create_snapshot_req['smartflag'] = '1'
self._send_request('CREATE_SNAPSHOT_REQ',
create_snapshot_req,
'create snapshot failed.')
return {'provider_location': snapshot['name']}
    def delete_snapshot(self, snapshot):
        """Delete a snapshot."""
        # NOTE(review): the next line is a stray string statement, not part
        # of the docstring; kept as-is to preserve behaviour byte-for-byte.
        """Delete SDS snapshot,ensure source volume is attached """
        source_volume_id = snapshot['volume_id']
        if not source_volume_id:
            self._delete_snapshot(snapshot)
            return
        is_volume_attached = self._is_volume_attached(source_volume_id)
        if is_volume_attached:
            LOG.debug('Volume is attached')
            self._delete_snapshot(snapshot)
        else:
            LOG.debug('Volume is not attached')
            # The backend needs the source volume mapped: attach it
            # temporarily, delete the snapshot, then detach again.
            source_volume = {'name': 'volume-' + source_volume_id,
                             'id': source_volume_id}
            properties = utils.brick_get_connector_properties()
            source_volume_attach_info = self._attach_volume(
                None, source_volume, properties, False)
            try:
                self._delete_snapshot(snapshot)
            except Exception as ex:
                err_msg = (_LE('Delete snapshot failed: '
                               '%s.') % ex)
                LOG.error(err_msg)
            # NOTE(review): failures above are logged but not re-raised;
            # the detach below always runs.
            self._detach_volume(
                None, source_volume_attach_info, source_volume,
                properties, False, False)
    def create_export(self, context, volume):
        """Export the volume."""
        # No export step needed; mapping happens at attach time.
        pass
    def ensure_export(self, context, volume):
        """Synchronously recreate an export for a volume."""
        pass
    def remove_export(self, context, volume):
        """Remove an export for a volume."""
        pass
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Upload `volume` to the image service via a temporary clone.

        A snapshot plus linked clone are created so the source volume need
        not be attached; both temporaries are removed in the finally block.
        """
        err_msg = ''
        temp_snapshot, temp_volume = self._create_temp_snap_and_volume(volume)
        try:
            self.create_snapshot(temp_snapshot)
            self._create_linked_volume_from_snap(temp_snapshot['name'],
                                                 temp_volume['name'],
                                                 temp_volume['size'])
            # Mirror the source's status so the base implementation's
            # checks operate on the clone as if it were the source.
            temp_volume['status'] = volume['status']
            super(StorACDriver, self).copy_volume_to_image(context,
                                                           temp_volume,
                                                           image_service,
                                                           image_meta)
        except Exception as ex:
            err_msg = (_LE('Copy volume to image failed: %s.') % ex)
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        finally:
            self._clean_copy_volume_data(temp_volume,
                                         temp_snapshot,
                                         'copy_volume_to_image')
    def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
        """Copy `src_vol`'s data to `dest_vol` via a temporary clone.

        Same snapshot+linked-clone technique as copy_volume_to_image, so
        the source does not need to be attached during the copy.
        """
        err_msg = ''
        temp_snapshot, temp_volume = self._create_temp_snap_and_volume(src_vol)
        try:
            self.create_snapshot(temp_snapshot)
            self._create_linked_volume_from_snap(temp_snapshot['name'],
                                                 temp_volume['name'],
                                                 temp_volume['size'])
            # Present the clone with the source's status to the base copy.
            temp_volume['status'] = src_vol['status']
            super(StorACDriver, self).copy_volume_data(context,
                                                       temp_volume,
                                                       dest_vol,
                                                       remote)
        except Exception as ex:
            err_msg = (_LE('Copy volume data failed: %s.') % ex)
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        finally:
            self._clean_copy_volume_data(temp_volume,
                                         temp_snapshot,
                                         'copy_volume_data')
def _create_temp_snap_and_volume(self, src_vol):
temp_snapshot = {'name': 'snapshot-' + six.text_type(uuid.uuid1()),
'volume_name': src_vol['name'],
'smartflag': '1',
'volume_id': src_vol['id']}
temp_volume_id = six.text_type(uuid.uuid1())
temp_volume = {'id': temp_volume_id,
'name': 'volume-' + temp_volume_id,
'size': src_vol['size']}
return temp_snapshot, temp_volume
def _clean_copy_volume_data(self, temp_volume, temp_snapshot, method):
try:
self.delete_volume(temp_volume)
except Exception as ex:
err_msg = (_LE('Delete temp volume failed '
'after %(method)s: %(ex)s.')
% {'ex': ex, 'method': method})
LOG.error(err_msg)
try:
self.delete_snapshot(temp_snapshot)
except Exception as ex:
err_msg = (_LE('Delete temp snapshot failed '
'after %(method)s: %(ex)s.')
% {'ex': ex, 'method': method})
LOG.error(err_msg)
def _is_volume_attached(self, volume_id):
if not volume_id:
return False
conn = {'driver_volume_type': 'HUAWEISDSHYPERVISOR',
'data': {'volume_id': 'volume-' + volume_id}}
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(protocol,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
conn=conn)
is_volume_attached = connector.is_volume_connected(
conn['data']['volume_id'])
return is_volume_attached
def _create_target_volume(self, src_volume, tgt_vol_name, tgt_volume):
if int(tgt_volume['size']) == 0:
tgt_vol_size = self._size_translate(src_volume['size'])
else:
tgt_vol_size = self._size_translate(tgt_volume['size'])
volume_info = self._create_storage_info('volume_info')
volume_info['vol_name'] = tgt_vol_name
volume_info['vol_size'] = tgt_vol_size
volume_info['pool_id'] = self._get_volume_pool_id(tgt_volume['host'])
self._update_volume_info_from_volume_type(volume_info,
tgt_volume['volume_type_id'])
self._send_request('CREATE_VOLUME_REQ',
volume_info,
'create volume failed.')
return volume_info
def _create_linked_volume_from_snap(self, src_snapshot_name,
tgt_vol_name, volume_size):
vol_size = self._size_translate(volume_size)
req_paras = {'vol_name': tgt_vol_name,
'vol_size': vol_size,
'snap_name_src': src_snapshot_name,
'vol_num': '1'}
self._send_request('CREATE_VOLUME_FROM_SNAPSHOT_REQ',
req_paras,
'Create volume from snapshot failed.')
def _get_volume_stats(self):
"""Retrieve stats info from volume group."""
capacity = self._get_capacity()
self._volume_stats['pools'] = capacity
if len(capacity) == 1:
for key, value in capacity[0].items():
self._volume_stats[key] = value
def _get_all_pool_capacity(self):
pool_info = {}
poolnum = len(self._volume_stats['pools_id'])
pool_info['pool_num'] = six.text_type(poolnum)
pool_info['pool_id'] = self._volume_stats['pools_id']
result = self._send_request('QUERY_POOLS_CAPABILITY_REQ',
pool_info,
'Get storage capacity failed')
return self._extract_pool_capacity_mapping_from_result(result)
def _get_capacity(self):
storage_capacity = []
try:
all_pool_policy = self._extract_pool_policy_mapping_from_config(
self._conf_file)
all_pool_capacity = self._get_all_pool_capacity()
self._update_all_pool_capacity_from_policy(all_pool_capacity,
all_pool_policy)
storage_capacity = all_pool_capacity.values()
except exception.VolumeBackendAPIException as ex:
msg = (_LE('Error from get block storage capacity: '
'%s.') % ex)
LOG.error(msg)
raise exception.VolumeBackendAPIException(msg)
return storage_capacity
def _delete_snapshot(self, snapshot):
req_paras = {}
req_paras['snap_name'] = snapshot['name']
self._send_request('DELETE_SNAPSHOT_REQ',
req_paras,
'Delete snapshot error.')
self._wait_for_snapshot_delete(snapshot['name'])
def _create_default_volume_stats(self):
default_volume_stats = {'tolerance_disk_failure': ['1', '2', '3'],
'tolerance_cache_failure': ['0', '1'],
'free_capacity_gb': 0,
'total_capacity_gb': 0,
'reserved_percentage': 0,
'vendor_name': 'Huawei',
'driver_version': self.VERSION,
'storage_protocol': 'StorageHypervisor',
'pools_id': []}
backend_name = self.configuration.safe_get('volume_backend_name')
default_volume_stats['volume_backend_name'] = (
backend_name or self.__class__.__name__)
return default_volume_stats
def _get_default_volume_stats(self):
default_volume_stats = self._create_default_volume_stats()
self._update_default_volume_stats_from_config(default_volume_stats,
self._conf_file)
return default_volume_stats
def _wait_for_volume_delete(self, volume_name):
"""Wait for volume delete to complete."""
timer = loopingcall.FixedIntervalLoopingCall(
self._check_volume_delete_finished, volume_name)
LOG.debug('Calling _wait_for_volume_delete: volume_name %s.'
% volume_name)
ret = timer.start(
interval=CHECK_VOLUME_DELETE_FINISHED_INTERVAL).wait()
timer.stop()
if not ret:
msg = (_LE('Delete volume failed,volume_name: %s.')
% volume_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(message=msg)
LOG.debug('Finish _wait_for_volume_delete: volume_name %s.'
% volume_name)
def _wait_for_snapshot_delete(self, snapshot_name):
"""Wait for snapshot delete to complete."""
timer = loopingcall.FixedIntervalLoopingCall(
self._check_snapshot_delete_finished, snapshot_name)
LOG.debug('Calling _wait_for_snapshot_delete: snapshot_name %s.'
% snapshot_name)
ret = timer.start(
interval=CHECK_SNAPSHOT_DELETE_FINISHED_INTERVAL).wait()
timer.stop()
if not ret:
msg = (_LE('Delete snapshot failed,snapshot_name: %s.')
% snapshot_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(message=msg)
LOG.debug('Finish _wait_for_snapshot_delete: snapshot_name %s.'
% snapshot_name)
def _check_volume_delete_finished(self, volume_name):
try:
is_volume_exist = self._is_volume_exist(volume_name)
except Exception as ex:
msg = (_LE('Check volume_name delete finished failed: '
'%s.') % ex)
LOG.error(msg)
raise loopingcall.LoopingCallDone(retvalue=False)
if not is_volume_exist:
raise loopingcall.LoopingCallDone(retvalue=True)
def _check_snapshot_delete_finished(self, snapshot_name):
try:
is_snapshot_exist = self._is_snapshot_exist(snapshot_name)
except Exception as ex:
msg = (_LE('Check snapshot delete finished failed: '
'%s.') % ex)
LOG.error(msg)
raise loopingcall.LoopingCallDone(retvalue=False)
if not is_snapshot_exist:
raise loopingcall.LoopingCallDone(retvalue=True)
def _query_volume(self, volume_name):
request_info = {'vol_name': volume_name}
request_type = 'QUERY_VOLUME_REQ'
rsp_str = self._vbs_client.send_message(
storhyper_utils.serialize(request_type,
request_info)
)
LOG.debug('%s received:%s.' % (request_type, repr(rsp_str)))
result = storhyper_utils.deserialize(six.text_type(rsp_str),
delimiter='\n')
storhyper_utils.log_dict(result)
return result
    def _is_volume_exist(self, volume_name):
        """Return True if the volume exists and is in a usable state.

        Returns False when the backend reports the delete-complete code.
        Raises VolumeBackendAPIException for any other unexpected reply.
        """
        query_volume_result = self._query_volume(volume_name)
        # Only retcode '0' (found) or the delete-complete code are valid.
        if ((not query_volume_result) or
                ('retcode' not in query_volume_result) or
                (query_volume_result['retcode']
                 not in ('0', self.del_complete_code))):
            msg = _('%(err)s\n') % {'err': 'Query volume failed!'
                                    ' Invalid result code'}
            raise exception.VolumeBackendAPIException(data=msg)
        if query_volume_result['retcode'] == self.del_complete_code:
            return False
        if query_volume_result['retcode'] == '0':
            if 'volume0' not in query_volume_result:
                msg = _('%(err)s\n') % {'err': 'Query volume failed! '
                                        'Volume0 not exist!'}
                raise exception.VolumeBackendAPIException(data=msg)
            # Expand the packed 'volume0' field into a dict for inspection.
            query_volume_result['volume0'] = \
                storhyper_utils.generate_dict_from_result(
                    query_volume_result['volume0'])
            # Statuses '1', '2', '10' are treated as healthy -- presumably
            # the backend's normal/in-use states; TODO confirm semantics.
            if (('status' not in query_volume_result['volume0']) or
                    (query_volume_result['volume0']['status'] not in
                     ('1', '2', '10'))):
                msg = _('%(err)s\n') % {'err': 'Query volume failed!'
                                        ' Invalid volume status'}
                raise exception.VolumeBackendAPIException(data=msg)
        return True
def _query_snapshot(self, snapshot_name):
request_info = {'snap_name': snapshot_name}
request_type = 'QUERY_SNAPSHOT_REQ'
rsp_str = self._vbs_client.send_message(
storhyper_utils.serialize(request_type,
request_info)
)
LOG.debug('%s received:%s.' % (request_type, repr(rsp_str)))
result = storhyper_utils.deserialize(six.text_type(rsp_str),
delimiter='\n')
storhyper_utils.log_dict(result)
return result
    def _is_snapshot_exist(self, snapshot_name):
        """Return True if the snapshot exists and is in a usable state.

        Returns False when the backend reports the delete-complete code.
        Raises VolumeBackendAPIException for any other unexpected reply.
        """
        query_snapshot_result = self._query_snapshot(snapshot_name)
        # Only retcode '0' (found) or the delete-complete code are valid.
        if ((not query_snapshot_result) or
                ('retcode' not in query_snapshot_result) or
                (query_snapshot_result['retcode']
                 not in ('0', self.del_complete_code))):
            msg = _('%(err)s\n') % {'err': 'Query snapshot failed!'}
            raise exception.VolumeBackendAPIException(data=msg)
        if query_snapshot_result['retcode'] == self.del_complete_code:
            return False
        if query_snapshot_result['retcode'] == '0':
            if 'snapshot0' not in query_snapshot_result:
                msg = _('%(err)s\n') % {'err': 'Query snapshot failed!'}
                raise exception.VolumeBackendAPIException(data=msg)
            # Expand the packed 'snapshot0' field into a dict.
            query_snapshot_result['snapshot0'] =\
                storhyper_utils.generate_dict_from_result(
                    query_snapshot_result['snapshot0'])
            # Statuses '1' and '2' are treated as healthy -- presumably
            # normal/in-use; TODO confirm backend semantics.
            if (('status' not in query_snapshot_result['snapshot0']) or
                    (query_snapshot_result['snapshot0']['status'] not in
                     ('1', '2'))):
                msg = _('%(err)s\n') % {'err': 'Query snapshot failed!'}
                raise exception.VolumeBackendAPIException(data=msg)
        return True
def _get_volume_pool_id(self, volume_host):
if volume_host:
if len(volume_host.split('#', 1)) == 2:
return volume_host.split('#')[1]
if len(self._volume_stats['pools_id']) == 1:
return self._volume_stats['pools_id'][0]
else:
msg = (_LE("Get pool id failed, invalid pool id."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _send_request(self, request_type, request_info, error_message):
rsp_str = self._vbs_client.send_message(
storhyper_utils.serialize(request_type, request_info))
LOG.debug('%s received:%s.' % (request_type, repr(rsp_str)))
result = storhyper_utils.deserialize(six.text_type(rsp_str),
delimiter='\n')
storhyper_utils.log_dict(result)
if (len(result) < 0 or 'retcode' not in result
or result['retcode'] != '0'):
msg = _('%(err)s\n') % {'err': error_message}
raise exception.VolumeBackendAPIException(data=msg)
return result
def _update_default_volume_stats_from_config(self,
default_volume_stats,
config_file):
root = storhyper_utils.parse_xml_file(config_file)
for child in root.find('policy').findall('*'):
if child.tag == 'QoS_support':
if child.text.strip() == '0':
default_volume_stats[child.tag] = False
else:
default_volume_stats[child.tag] = True
else:
default_volume_stats[child.tag] = child.text.strip()
for child in root.find('capability').findall('*'):
default_volume_stats[child.tag] = child.text.strip()
pools = root.find('pools').findall('*')
for pool in pools:
for child in pool.findall('*'):
childtext = child.text.strip()
if child.tag == 'pool_id' and len(childtext) > 0:
default_volume_stats['pools_id'].append(childtext)
def _update_all_pool_capacity_from_policy(self,
all_pool_capacity,
all_pool_policy):
for pool_name in all_pool_capacity.keys():
if pool_name in all_pool_policy:
for pool_key, pool_value in all_pool_policy[pool_name].items():
all_pool_capacity[pool_name][pool_key] = pool_value
def _extract_pool_policy_mapping_from_config(self, conf_file):
pools_policy_mapping = {}
root = storhyper_utils.parse_xml_file(conf_file)
pools = root.find('pools').findall('*')
for pool in pools:
policy = {}
pool_id = ''
for child in pool.findall('*'):
if child.tag == 'pool_id':
pool_id = child.text.strip()
else:
policy[child.tag] = child.text.strip()
pools_policy_mapping[pool_id] = policy
return pools_policy_mapping
def _extract_pool_capacity_mapping_from_result(self, result):
pool_capacity_mapping = {}
for key, value in result.items():
if 'pool' in key and value:
pool_capacity = {}
pool_name = ''
pool_str = value.replace('[', '').replace(']', '')
paras = pool_str.split(',')
for para in paras:
key = para.split('=')[0]
value = para.split('=')[1]
if key == 'stor_id':
pool_capacity['pool_name'] = six.text_type(value)
pool_name = six.text_type(value)
elif key == 'total_capacity':
pool_capacity['total_capacity_gb'] = int(value)
elif key == 'usable_capacity':
pool_capacity['free_capacity_gb'] = int(value)
elif key == 'raid_level':
pool_capacity['raid_level'] = int(value)
elif key == 'iops':
pool_capacity['iops'] = int(value)
pool_capacity['allocated_capacity_gb'] = \
pool_capacity['total_capacity_gb'] \
- pool_capacity['free_capacity_gb']
pool_capacity['reserved_percentage'] = 0
pool_capacity_mapping[pool_name] = pool_capacity
return pool_capacity_mapping
def _size_translate(self, size):
volume_size = '%s' % (size * units.Ki)
return volume_size
def _update_volume_info_from_volume_extra_specs(self, volume_info,
extra_specs):
if not extra_specs:
return
for x in extra_specs:
key = x['key']
value = x['value']
LOG.debug('Volume type: key=%(key)s value=%(value)s.'
% {'key': key, 'value': value})
if key in volume_info.keys():
words = value.strip().split()
volume_info[key] = words.pop()
def _update_volume_info_from_volume(self, volume_info, volume):
if not volume['volume_type_id']:
return
else:
spec = volume['volume_type']['extra_specs']
self._update_volume_info_from_volume_extra_specs(volume_info,
spec)
self._update_volume_info_from_qos_specs(volume_info,
volume['volume_type'])
def _update_volume_info_from_extra_specs(self,
volume_info,
extra_specs):
if not extra_specs:
return
for key, value in extra_specs.items():
LOG.debug('key=%(key)s value=%(value)s.'
% {'key': key, 'value': value})
if key in volume_info.keys():
words = value.strip().split()
volume_info[key] = words.pop()
def _update_volume_info_from_qos_specs(self,
volume_info,
qos_specs):
if not qos_specs:
return
if qos_specs.get('qos_specs'):
if qos_specs['qos_specs'].get('specs'):
qos_spec = qos_specs['qos_specs'].get('specs')
for key, value in qos_spec.items():
LOG.debug('key=%(key)s value=%(value)s.'
% {'key': key, 'value': value})
if key in QOS_KEY:
volume_info['IOClASSID'] = value.strip()
qos_level = key
if qos_level == 'Qos-high':
volume_info['IOPRIORITY'] = "3"
elif qos_level == 'Qos-normal':
volume_info['IOPRIORITY'] = "2"
elif qos_level == 'Qos-low':
volume_info['IOPRIORITY'] = "1"
else:
volume_info['IOPRIORITY'] = "2"
def _update_volume_info_from_volume_type(self,
volume_info,
volume_type_id):
if not volume_type_id:
return
else:
volume_type = volume_types.get_volume_type(
context.get_admin_context(), volume_type_id)
extra_specs = volume_type.get('extra_specs')
self._update_volume_info_from_extra_specs(volume_info, extra_specs)
qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)
self._update_volume_info_from_qos_specs(volume_info, qos_specs)
def _create_storage_info(self, info_type):
if info_type == 'volume_info':
volume_info = {'vol_name': '',
'vol_size': '',
'pool_id': '0',
'thin_flag': '0',
'reserved': '0',
'volume_space_reserved': '0',
'force_provision_size': '0',
'iops': '100',
'max_iops': '100',
'min_iops': '0',
'cache_size': '0',
'repicate_num': '1',
'repicate_tolerant_num': '1',
'encrypt_algorithm': '0',
'consistency': '0',
'stor_space_level': '1',
'compress_algorithm': '0',
'deduplication': '0',
'snapshot': '0',
'backup_cycle': '0',
'tolerance_disk_failure': '0',
'tolerance_cache_failure': '1'}
return volume_info
else:
LOG.error(_LE('Invalid info type.'))
return None
| |
# Visual Interfaces Spring 2015 Assignment 3
# Roberto Amorim - rja2139
import cv2, cv
import time, math
import numpy as np
# Array that holds characteristics for the campus buildings
charact = []
# The labeled image encodes each building as a distinct pixel value (index);
# the campus image is what is displayed to the user.
image = cv2.imread("ass3-labeled.pgm", cv.CV_LOAD_IMAGE_UNCHANGED)
display = cv2.imread("ass3-campus.pgm", cv.CV_LOAD_IMAGE_UNCHANGED)
# External contours only; CHAIN_APPROX_NONE keeps every boundary point.
contours = cv2.findContours(image.copy(), cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE)
# Here we open the building names table
try:
    with open("ass3-table.txt", 'rb') as f:
        buildings = f.readlines()
    f.close()  # NOTE(review): redundant -- the with-block already closed f.
except IOError:
    print "ERROR: The file containing building names can not be read"
    exit()
# We create an array "names" that can associate building names with their index colors
# (index 0 is reserved for "no building").
names = []
names.append("None")
for line in buildings:
    line = line.rstrip('\r\n')
    toks = line.split('=')
    names.append(toks[1].replace('"', ''))
def northernmost(shapes):
    """Return the color of the building whose MBR top edge is northernmost.

    Image y grows downward, so the minimal MBR y coordinate wins.
    Returns 0 for an empty list.
    """
    best_y = float("inf")
    building = 0
    for entry in shapes:
        top = entry[1][1]
        if top < best_y:
            best_y = top
            building = entry[0]
    return building
def southernmost(shapes):
    """Return the color of the building whose MBR bottom edge (y + h) is
    southernmost.  Returns 0 for an empty list."""
    best_bottom = float("-inf")
    building = 0
    for entry in shapes:
        bottom = entry[1][1] + entry[1][3]
        if bottom > best_bottom:
            best_bottom = bottom
            building = entry[0]
    return building
def westernmost(shapes):
    """Return the color of the building whose MBR left edge is westernmost.

    Returns 0 for an empty list.
    """
    best_x = float("inf")
    building = 0
    for entry in shapes:
        left = entry[1][0]
        if left < best_x:
            best_x = left
            building = entry[0]
    return building
def easternmost(shapes):
    """Return the color of the building whose MBR right edge (x + w) is
    easternmost.  Returns 0 for an empty list."""
    best_right = float("-inf")
    building = 0
    for entry in shapes:
        right = entry[1][0] + entry[1][2]
        if right > best_right:
            best_right = right
            building = entry[0]
    return building
def squarest(shapes):
    """Return the color of the near-rectangular building closest to square.

    Only entries flagged approximately rectangular (entry[4]) compete; the
    smallest long-side/short-side ratio wins (must beat 10.0).
    """
    best_ratio = 10.0
    building = 0
    for entry in shapes:
        if not entry[4]:
            continue
        w, h = entry[1][2], entry[1][3]
        ratio = float(max(w, h)) / float(min(w, h))
        if ratio < best_ratio:
            best_ratio = ratio
            building = entry[0]
    return building
def mostrectangular(shapes):
    """Return the color of the near-rectangular building with the largest
    long-side/short-side ratio (the most elongated rectangle)."""
    best_ratio = 0.0
    building = 0
    for entry in shapes:
        # Only buildings that approximate a rectangle compete.
        if not entry[4]:
            continue
        w, h = entry[1][2], entry[1][3]
        ratio = float(max(w, h)) / float(min(w, h))
        if ratio > best_ratio:
            best_ratio = ratio
            building = entry[0]
    return building
def largest(shapes):
    """Return the color of the building with the largest contour area."""
    best_area = float("-inf")
    building = 0
    for entry in shapes:
        if entry[2] > best_area:
            best_area = entry[2]
            building = entry[0]
    return building
def smallest(shapes):
    """Return the color of the building with the smallest contour area."""
    best_area = float("inf")
    building = 0
    for entry in shapes:
        if entry[2] < best_area:
            best_area = entry[2]
            building = entry[0]
    return building
def longest(shapes):
    """Return the color of the building with the largest MBR dimension.

    Width and height compete on equal footing (the original checked both
    sequentially, which is equivalent to taking the max of the two).
    """
    best = 0
    building = 0
    for entry in shapes:
        extent = max(entry[1][2], entry[1][3])
        if extent > best:
            best = extent
            building = entry[0]
    return building
def thinnest(shapes):
    """Return the color of the building with the smallest MBR dimension.

    Width and height compete on equal footing (the original checked both
    sequentially, which is equivalent to taking the min of the two).
    """
    best = float("inf")
    building = 0
    for entry in shapes:
        extent = min(entry[1][2], entry[1][3])
        if extent < best:
            best = extent
            building = entry[0]
    return building
def lettershape(shape):
    """Classify a contour as an I- or C-shaped building, or return None.

    Combines the number of polygon sides (approxPolyDP) with the number
    of convexity defects of the contour's hull.
    """
    poly = cv2.approxPolyDP(shape, 0.009 * cv2.arcLength(shape, True), True)
    hull = cv2.convexHull(shape, returnPoints=False)
    defects = cv2.convexityDefects(shape, hull)
    # convexityDefects returns None when there are no defects.  The
    # original's bare-except loop-and-increment also miscounted an empty
    # defect array as 1 defect; use the row count directly instead.
    point = defects.shape[0] if defects is not None else 0
    # If the shape has 12 sides and two convexity points, it looks like an I
    if point == 2 and len(poly) == 12:
        return "I-shaped building"
    # If the shape has eight sides and one convexity point, it looks like a C
    if point == 1 and len(poly) == 8:
        return "C-shaped building"
    return None
def corners(shape):
    """Detect buildings whose corners are cut ("chewed"), else None."""
    poly = cv2.approxPolyDP(shape, 0.009 * cv2.arcLength(shape, True), True)
    hull = cv2.convexHull(shape, returnPoints=False)
    defects = cv2.convexityDefects(shape, hull)
    # See lettershape(): count defect rows directly instead of the
    # original's bare-except loop, which miscounted 0 defects as 1.
    point = defects.shape[0] if defects is not None else 0
    # 4 convexity defects on a 12- or 16-sided polygon indicate cut corners.
    if point == 4 and len(poly) in (12, 16):
        return "'chewed' corners building"
    return None
# Figures the geographical position of the building within the campus
def geoposition(c):
    """Name the campus region (3x3 grid cell) containing point ``c``.

    ``c`` is an (x, y) pixel coordinate; the grid is derived from the
    global ``image`` dimensions.  Returns None for points lying exactly
    on a grid line.
    """
    # NOTE(review): len(image) is the row count (height) and len(image[0])
    # the column count (width); the original variable names are kept.
    width = len(image)
    height = len(image[0])
    ythird = width / 3
    xthird = height / 3
    if c[0] < xthird and c[1] < ythird:
        return "building position: northwest"
    elif c[0] > xthird and c[0] < xthird*2 and c[1] < ythird:
        return "building position: north"
    elif c[0] > xthird*2 and c[1] < ythird:
        return "building position: northeast"
    elif c[0] < xthird and c[1] > ythird and c[1] < ythird*2:
        return "building position: west"
    elif c[0] > xthird and c[0] < xthird*2 and c[1] > ythird and c[1] < ythird*2:
        return "building position: center"
    elif c[0] > xthird*2 and c[1] > ythird and c[1] < ythird*2:
        return "building position: east"
    elif c[0] < xthird and c[1] > ythird*2:
        return "building position: southwest"
    elif c[0] > xthird and c[0] < xthird*2 and c[1] > ythird*2:
        return "building position: south"
    # Bug fix: the original tested c[1] < ythird*2 here, so points in the
    # southeast corner fell through and returned None.
    elif c[0] > xthird*2 and c[1] > ythird*2:
        return "building position: southeast"
    else:
        return None
def orientation(dimen):
    """Describe MBR orientation, or None when too close to a square.

    ``dimen`` is a (width, height) pair; a long/short ratio under 1.25 is
    considered visually meaningless.
    """
    longer = float(max(dimen[0], dimen[1]))
    shorter = float(min(dimen[0], dimen[1]))
    if longer / shorter < 1.25:
        return None
    if dimen[0] > dimen[1]:
        return "oriented east-west (horizontal)"
    return "oriented north-south (vertical)"
def areaclust(shapes):
    """k-means cluster shape areas into small/medium/large buildings.

    Appends one [color, label] entry per shape to the global ``charact``.
    """
    # kmeans wants a float32 column vector; use the actual shape count
    # (reshape(-1, 1)) instead of the original's hard-coded 27, which
    # broke for any other number of buildings.
    areas = np.array([entry[2] for entry in shapes])
    areas = np.float32(areas.reshape((-1, 1)))
    # cluster criteria: either 10 max iterations or epsilon = 1.0
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    # calculate 3 clusters (small, medium, large) from random centers
    compactness, labels, centers = cv2.kmeans(areas, 3, criteria, 10,
                                              cv2.KMEANS_RANDOM_CENTERS)
    sizenames = {0: "small building",
                 1: "medium building",
                 2: "large building"}
    for i, label in enumerate(labels):
        charact.append([shapes[i][0], sizenames[int(label)]])
# Here we detect if there is horizontal symmetry
def hsymmetry(roi):
    """Return a label when the ROI mirrors about its horizontal midline.

    Integer division (Python 2 ``/`` on ints) floors, so for odd heights
    the middle row is excluded from the comparison.
    NOTE(review): cv2.flip writes into ``sndhalf``, which is a view of
    ``roi`` -- this mutates the caller's array; confirm that is intended.
    """
    if len(roi) % 2 == 0:
        fsthalf = roi[0:len(roi)/2, :]
        sndhalf = roi[len(roi)/2:, :]
        # Flip the bottom half vertically, then compare element-wise.
        cv2.flip(sndhalf.copy(), 0, sndhalf)
        if fsthalf.__eq__(sndhalf).all():
            return "horizontally symmetrical"
    else:
        fsthalf = roi[0:len(roi)/2, :]
        sndhalf = roi[(len(roi)/2)+1:, :]
        cv2.flip(sndhalf.copy(), 0, sndhalf)
        if fsthalf.__eq__(sndhalf).all():
            return "horizontally symmetrical"
    return None
# Here we detect if there is vertical symmetry
def vsymmetry(roi):
    """Return a label when the ROI mirrors about its vertical midline.

    Integer division (Python 2 ``/`` on ints) floors, so for odd widths
    the middle column is excluded from the comparison.
    NOTE(review): cv2.flip writes into ``sndhalf``, which is a view of
    ``roi`` -- this mutates the caller's array; confirm that is intended.
    """
    if len(roi[0]) % 2 == 0:
        fsthalf = roi[:, 0:len(roi[0])/2]
        sndhalf = roi[:, len(roi[0])/2:]
        # Flip the right half horizontally, then compare element-wise.
        cv2.flip(sndhalf.copy(), 1, sndhalf)
        if fsthalf.__eq__(sndhalf).all():
            return "vertically symmetrical"
    else:
        fsthalf = roi[:, 0:len(roi[0])/2]
        sndhalf = roi[:, (len(roi[0])/2)+1:]
        cv2.flip(sndhalf.copy(), 1, sndhalf)
        if fsthalf.__eq__(sndhalf).all():
            return "vertically symmetrical"
    return None
def quadrilateral(mbr, shape):
    """Return True when the contour nearly fills its bounding rectangle.

    ``mbr`` is the (width, height) of the bounding box.  A box/contour
    area ratio of at most 1.25 is taken as "approximately rectangular";
    larger ratios mean substantial negative space inside the box.
    """
    area_shape = cv2.contourArea(shape)
    # Shrink the box by one pixel per dimension so it hugs the contour.
    area_mbr = (mbr[0] - 1) * (mbr[1] - 1)
    return (area_mbr / area_shape) <= 1.25
shapes = []
# Here we analyze each shape individually, obtaining basic information such as area and MBR
for shape in contours[0]:
    # Here we get the color (index): the pixel value at the contour's first point.
    color = image[shape[0][0][1]][shape[0][0][0]]
    # Here we obtain the minimum bounding rectangle
    x, y, w, h = cv2.boundingRect(shape)
    # We use the MBR to extract the shape as a roi
    roi = image[y:y+h, x:x+w]
    # Sometimes a part of a building gets into the MBR we're working with. This line removes this intrusion:
    # NOTE(review): roi is a view into `image`, so this also blanks those pixels in the source.
    roi[roi != color] = 0
    # Does the shape approach a quadrilateral or nah?
    quadr = quadrilateral((w, h), shape)
    if quadr:
        # NOTE(review): orientation() may return None, which is appended as a label.
        charact.append([color, orientation((w, h))])
    # Is the shape horizontally symmetric? And Vertically?
    hsymm = hsymmetry(roi)
    if hsymm:
        charact.append([color, hsymm])
    vsymm = vsymmetry(roi)
    if vsymm:
        charact.append([color, vsymm])
    # Here we find the area of the shape
    area = int(cv2.contourArea(shape))
    # And here the center of mass
    moments = cv2.moments(shape)
    if moments['m00'] != 0:
        cx = int(moments['m10']/moments['m00'])  # cx = M10/M00
        cy = int(moments['m01']/moments['m00'])  # cy = M01/M00
        center = (cx, cy)
    # NOTE(review): if m00 == 0, `center` silently keeps its value from the
    # previous iteration (or is undefined on the first) -- confirm this
    # cannot happen for these contours.
    # Does it look like a letter?
    letter = lettershape(shape)
    if letter:
        charact.append([color, letter])
        charact.append([color, "sharp corners building"])
    corner = corners(shape)
    if corner:
        charact.append([color, corner])
    position = geoposition(center)
    if position:
        charact.append([color, position])
    # All values are added to an array that will be used elsewhere in the program
    shapes.append([color, (x, y, w, h), area, center, quadr, shape])
# Global superlatives computed over all buildings at once:
charact.append([northernmost(shapes), "northernmost building"])
charact.append([southernmost(shapes), "southernmost building"])
charact.append([westernmost(shapes), "westernmost building"])
charact.append([easternmost(shapes), "easternmost building"])
charact.append([squarest(shapes), "squarest building"])
charact.append([mostrectangular(shapes), "most rectangular building"])
charact.append([largest(shapes), "largest building"])
charact.append([smallest(shapes), "smallest building"])
charact.append([longest(shapes), "longest building"])
charact.append([thinnest(shapes), "thinnest building"])
areaclust(shapes)
'''
shapes.reverse()
for shape in shapes:
    print names[shape[0]] + " characteristics: "
    print "Center of mass: " + str(shape[3])
    print "Area: " + str(shape[2])
    print "MBR coordinates: " + str(shape[1][0]) + ", " + str(shape[1][1]) + " - " + str(shape[1][0] + shape[1][2]) + ", " + str(shape[1][1] + shape[1][3])
    for item in [p for p in charact if p[0] == shape[0]]:
        print item[1]
    print " "
'''
# Evaluates building spatial relations based on the angle between centers of mass
def spatialrelation(ptS, ptT):
    """Return the compass bucket ('N'/'S'/'E'/'W') of target vs source.

    Measures the angle between a fixed reference ray through the source
    and the source->target direction, then buckets it into 90-degree
    quadrants (45..135 -> N, 135..225 -> W, 225..315 -> S, rest -> E).
    """
    reference = np.array([ptS[0] + 10, ptS[1]])
    source = np.array([ptS[0], ptS[1]])
    target = np.array([ptT[0], ptT[1]])
    # Angles via arctan2 over the (dx, dy) pairs, normalized to [0, 2*pi).
    start = np.arctan2(*(reference - source))
    if start < 0:
        start += 2 * np.pi
    end = np.arctan2(*(source - target))
    if end < 0:
        end += 2 * np.pi
    deg = np.rad2deg(end - start)
    if 45 < deg <= 135:
        return "N"
    if 135 < deg <= 225:
        return "W"
    if 225 < deg <= 315:
        return "S"
    if deg <= 45 or deg > 315:
        return "E"
    return False
# Calculates the minimal distance between two shapes
def shapedistance(shapeS, shapeT, area):
    """Return True when two contours come "near" each other.

    Samples every 4th contour point of each shape and compares the
    smallest pairwise distance against sqrt(area) * 3, a heuristic
    threshold for "near" vs "far".

    Bug fix: the original re-initialized the running minimum inside the
    outer loop, so only the last sampled point of ``shapeS`` was ever
    considered (it also shadowed the ``min`` builtin).
    """
    best = float("inf")
    for i in shapeS[::4]:
        for j in shapeT[::4]:
            dx = i[0][0] - j[0][0]
            dy = i[0][1] - j[0][1]
            dist = math.hypot(dx, dy)
            if dist < best:
                best = dist
                if best == 0:
                    break  # You can't get a closer distance than 0
        if best == 0:
            break
    threshold = math.sqrt(area) * 3
    return best < threshold
# Relations are stored as [source_color, target_color, True] triples, one
# list per compass direction plus one for "near" (D = distance).
relationsN = []
relationsS = []
relationsE = []
relationsW = []
relationsD = []
# Here I evaluate spatial relations between buildings
for shape in shapes:
    for i in shapes:
        # no point comparing a building to itself...
        if shape == i:
            continue
        result = spatialrelation(shape[3], i[3])
        if result == "N":
            relationsN.append([shape[0], i[0], True])
        if result == "S":
            relationsS.append([shape[0], i[0], True])
        if result == "E":
            relationsE.append([shape[0], i[0], True])
        if result == "W":
            relationsW.append([shape[0], i[0], True])
        # "Near" uses the source building's own area to scale the threshold.
        result = shapedistance(shape[5], i[5], shape[2])
        if result:
            relationsD.append([shape[0], i[0], result])
def transitivefiltering(relations):
    """Drop relations implied by transitivity (a->b and b->c imply a->c).

    Bug fix: the original popped entries from ``relations`` while
    iterating it in three nested loops, which can skip elements; here the
    redundant entries are collected first and removed afterwards.  The
    list is filtered in place and also returned.
    """
    redundant = []
    for i in relations:
        for j in relations:
            for k in relations:
                if (i[1] == j[0] and i[0] == k[0] and j[1] == k[1]
                        and i[2] == j[2] == k[2]):
                    if k not in redundant:
                        redundant.append(k)
    for k in redundant:
        relations.remove(k)
    return relations
# And now we filter the relations, dropping transitively implied entries
relationsN = transitivefiltering(relationsN)
relationsS = transitivefiltering(relationsS)
relationsE = transitivefiltering(relationsE)
relationsW = transitivefiltering(relationsW)
relationsD = transitivefiltering(relationsD)
# User interface code
cv2.imshow('campus', display)
# Info panel: a dark 700x495 canvas that update() redraws on mouse moves.
frame = np.zeros((495, 700, 3), np.uint8)
frame[:] = (20, 20, 20)
def update(x, y):
    """Redraw the "information" panel for the pixel under the mouse."""
    line = 16  # vertical spacing between text rows, in pixels
    frame[:] = (20, 20, 20)
    font = cv2.FONT_HERSHEY_PLAIN
    txtcolor = (255, 255, 255)
    # The labeled image doubles as a lookup table: pixel value = building index.
    index = image[y][x]
    if index == 0:
        # Print only mouse position if the mouse is hovering over the empty spaces
        cv2.putText(frame, 'Mouse pos: ' + str(x) + ", " + str(y), (500, line*30), font, 1, txtcolor, 1, cv2.CV_AA)
    else:
        # Print the building characteristics when the mouse is over one
        cv2.putText(frame, 'Hovering over: ' + names[index], (10, line), font, 1, txtcolor, 1, cv2.CV_AA)
        pos = shapes.index([p for p in shapes if p[0] == index][0])
        ctr = "X: " + str(shapes[pos][3][0]) + ", Y: " + str(shapes[pos][3][1])
        cv2.putText(frame, 'Center of mass: ' + ctr, (10, line*2), font, 1, txtcolor, 1, cv2.CV_AA)
        cv2.putText(frame, 'Area: ' + str(shapes[pos][2]), (10, line*3), font, 1, txtcolor, 1, cv2.CV_AA)
        mbru = "Upper left X: " + str(shapes[pos][1][0]) + " Y: " + str(shapes[pos][1][1])
        mbrl = "Lower right X: " + str(shapes[pos][1][0] + shapes[pos][1][2]) + " Y: " + str(shapes[pos][1][1] + shapes[pos][1][3])
        cv2.putText(frame, 'Minimum bounding rectangle: ', (10, line*4), font, 1, txtcolor, 1, cv2.CV_AA)
        cv2.putText(frame, mbru, (30, line*5), font, 1, txtcolor, 1, cv2.CV_AA)
        cv2.putText(frame, mbrl, (30, line*6), font, 1, txtcolor, 1, cv2.CV_AA)
        cv2.putText(frame, "Building characteristics: ", (10, line*9), font, 1, txtcolor, 1, cv2.CV_AA)
        # Characteristics start at row 10; `i` tracks the current text row.
        i = 10
        for item in [p for p in charact if p[0] == index]:
            cv2.putText(frame, item[1], (30, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
            i += 1
        i += 2
        cv2.putText(frame, "Building relations: ", (10, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
        for item in [p for p in relationsN if p[0] == index]:
            i += 1
            relation = "located north of " + names[item[1]]
            cv2.putText(frame, relation, (30, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
        for item in [p for p in relationsS if p[0] == index]:
            i += 1
            relation = "located south of " + names[item[1]]
            cv2.putText(frame, relation, (30, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
        for item in [p for p in relationsE if p[0] == index]:
            i += 1
            relation = "located east of " + names[item[1]]
            cv2.putText(frame, relation, (30, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
        for item in [p for p in relationsW if p[0] == index]:
            i += 1
            relation = "located west of " + names[item[1]]
            cv2.putText(frame, relation, (30, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
        for item in [p for p in relationsD if p[0] == index]:
            i += 1
            relation = "near " + names[item[1]]
            cv2.putText(frame, relation, (30, line*i), font, 1, txtcolor, 1, cv2.CV_AA)
    #cv2.putText(frame, 'Mouse pos: ' + str(x) + ", " + str(y), (500, line*30), font, 1, txtcolor, 1, cv2.CV_AA)
    cv2.imshow("information", frame)
def pointdistance(point, shape):
    """Return True when ``point`` lies within 100 pixels of the contour.

    Samples every 4th contour point; 100 px is an arbitrary "near"
    threshold.
    """
    closest = float("inf")
    for vertex in shape[::4]:
        offset_x = point[0] - vertex[0][0]
        offset_y = point[1] - vertex[0][1]
        separation = math.hypot(offset_x, offset_y)
        if separation < closest:
            closest = separation
            if separation == 0:
                break  # You can't get a closer distance than 0
    return closest < 100
# Attribute signature of the clicked pixel (filled by cloud()).
pixelattrib = []
# Pixels found to share that signature (filled by computecloud()).
cloudpixels = []
def computecloud(x, y):
    """Flood-fill outward from (x, y) over pixels whose spatial-relation
    signature matches ``pixelattrib``, marking them 128 on ``display``.

    Bug fix: rewritten iteratively -- the original recursed once per
    pixel and could exceed Python's recursion limit on large clouds.
    """
    pending = [(x, y)]
    matched = False
    while pending:
        px, py = pending.pop()
        # Signature of this pixel with respect to every building.
        pointattrib = [[spatialrelation((px, py), shape[3]),
                        pointdistance([px, py], shape[5])]
                       for shape in shapes]
        if pointattrib != pixelattrib:
            continue
        matched = True
        # Only unpainted (0) or background (255) pixels are claimed.
        if display[py][px] == 0 or display[py][px] == 255:
            cloudpixels.append([px, py])
            display[py][px] = 128
            if px > 0:
                pending.append((px - 1, py))
            if px < len(image[py]) - 1:
                pending.append((px + 1, py))
            if py > 0:
                pending.append((px, py - 1))
            if py < len(image) - 1:
                pending.append((px, py + 1))
    # Refresh and persist the display once, after the fill completes.
    if matched:
        cv2.imshow('campus', display)
        cv2.imwrite("campus.png", display)
def cloud(x, y):
    """Build the attribute signature of (x, y), print it, then flood-fill
    the matching region via computecloud()."""
    global cloudpixels
    cloudpixels = []
    global pixelattrib
    pixelattrib = []
    print "X: " + str(x) + ", Y: " + str(y)
    for shape in shapes:
        # One signature entry per building: [compass relation, near-flag].
        result = [spatialrelation((x, y), shape[3]), pointdistance([x, y], shape[5])]
        pixelattrib.append(result)
        if result[0] == "N":
            print "North of " + names[shape[0]]
        if result[0] == "S":
            print "South of " + names[shape[0]]
        if result[0] == "E":
            print "East of " + names[shape[0]]
        if result[0] == "W":
            print "West of " + names[shape[0]]
        if result[1] == True:
            print "Near " + names[shape[0]]
    computecloud(x, y)
    # Report how many pixels share the clicked pixel's signature.
    print len(cloudpixels)
def onmouse(event, x, y, flags, param):
    """Mouse callback: refresh the info panel; left button grows a cloud."""
    # Small sleep keeps rapid mouse events from saturating redraws.
    time.sleep(0.01)
    update(x-1, y-1)
    if flags & cv2.EVENT_FLAG_LBUTTON:
        cloud(x, y)
# Wire up the mouse callback and lay the two windows side by side.
cv2.setMouseCallback("campus", onmouse)
cv2.imshow("information", frame)
cv2.moveWindow("information", 50, 50)
cv2.moveWindow("campus", 765, 50)
# Block until a key is pressed.
cv2.waitKey(0)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name):  # pylint: disable=invalid-name
    """Import and return module ``name``; warn and return None if absent."""
    try:
        return importlib.import_module(name)
    except ImportError as e:
        tf_logging.warning("Could not import %s: %s" % (name, str(e)))
        return None
# Optional SciPy backends: None when SciPy is unavailable, in which case
# the tests below skip their closed-form value comparisons.
special = try_import("scipy.special")
stats = try_import("scipy.stats")
@test_util.run_all_in_graph_and_eager_modes
class GammaTest(test.TestCase):
  """Tests for the Gamma distribution: shapes, pdf/cdf, statistics, sampling,
  reparameterization gradients, and Gamma-Gamma KL divergence.
  Numerical comparisons against scipy are skipped when scipy is unavailable.
  """
  def testGammaShape(self):
    """Batch shape follows the broadcast parameters; event shape is scalar."""
    alpha = constant_op.constant([3.0] * 5)
    beta = constant_op.constant(11.0)
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    self.assertEqual(self.evaluate(gamma.batch_shape_tensor()), (5,))
    self.assertEqual(gamma.batch_shape, tensor_shape.TensorShape([5]))
    self.assertAllEqual(self.evaluate(gamma.event_shape_tensor()), [])
    self.assertEqual(gamma.event_shape, tensor_shape.TensorShape([]))
  def testGammaLogPDF(self):
    """log_prob/prob have batch shape (6,) and match scipy's gamma.logpdf."""
    batch_size = 6
    alpha = constant_op.constant([2.0] * batch_size)
    beta = constant_op.constant([3.0] * batch_size)
    alpha_v = 2.0
    beta_v = 3.0
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    log_pdf = gamma.log_prob(x)
    self.assertEqual(log_pdf.get_shape(), (6,))
    pdf = gamma.prob(x)
    self.assertEqual(pdf.get_shape(), (6,))
    if not stats:
      return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
    self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
  def testGammaLogPDFBoundary(self):
    """log_prob(0) is finite (= log(rate)) when concentration == 1."""
    # When concentration = 1, we have an exponential distribution. Check that at
    # 0 we have finite log prob.
    rate = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
    gamma = gamma_lib.Gamma(concentration=1., rate=rate)
    log_pdf = gamma.log_prob(0.)
    self.assertAllClose(np.log(rate), self.evaluate(log_pdf))
  def testGammaLogPDFMultidimensional(self):
    """log_prob/prob broadcast over a (6, 2) parameter batch and match scipy."""
    batch_size = 6
    alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
    beta = constant_op.constant([[3.0, 4.0]] * batch_size)
    alpha_v = np.array([2.0, 4.0])
    beta_v = np.array([3.0, 4.0])
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    log_pdf = gamma.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))
    pdf = gamma.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
      return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
  def testGammaLogPDFMultidimensionalBroadcasting(self):
    """log_prob/prob broadcast a scalar rate against a (6, 2) concentration."""
    batch_size = 6
    alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
    beta = constant_op.constant(3.0)
    alpha_v = np.array([2.0, 4.0])
    beta_v = 3.0
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    log_pdf = gamma.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))
    pdf = gamma.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
      return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
  def testGammaCDF(self):
    """cdf has batch shape (6,) and matches scipy's gamma.cdf."""
    batch_size = 6
    alpha = constant_op.constant([2.0] * batch_size)
    beta = constant_op.constant([3.0] * batch_size)
    alpha_v = 2.0
    beta_v = 3.0
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    cdf = gamma.cdf(x)
    self.assertEqual(cdf.get_shape(), (6,))
    if not stats:
      return
    expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(cdf), expected_cdf)
  def testGammaMean(self):
    """mean() matches scipy's gamma.mean."""
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.mean().get_shape(), (3,))
    if not stats:
      return
    expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(gamma.mean()), expected_means)
  def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
    """mode() = (alpha - 1) / beta when defined for every batch member."""
    alpha_v = np.array([5.5, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    expected_modes = (alpha_v - 1) / beta_v
    self.assertEqual(gamma.mode().get_shape(), (3,))
    self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
  def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
    """mode() raises when allow_nan_stats=False and concentration <= 1."""
    # Mode will not be defined for the first entry.
    alpha_v = np.array([0.5, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(
        concentration=alpha_v, rate=beta_v, allow_nan_stats=False)
    with self.assertRaisesOpError("x < y"):
      self.evaluate(gamma.mode())
  def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
    """mode() yields NaN for undefined members when allow_nan_stats=True."""
    # Mode will not be defined for the first entry.
    alpha_v = np.array([0.5, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(
        concentration=alpha_v, rate=beta_v, allow_nan_stats=True)
    expected_modes = (alpha_v - 1) / beta_v
    expected_modes[0] = np.nan
    self.assertEqual(gamma.mode().get_shape(), (3,))
    self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)
  def testGammaVariance(self):
    """variance() matches scipy's gamma.var."""
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.variance().get_shape(), (3,))
    if not stats:
      return
    expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(gamma.variance()), expected_variances)
  def testGammaStd(self):
    """stddev() matches scipy's gamma.std."""
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.stddev().get_shape(), (3,))
    if not stats:
      return
    expected_stddev = stats.gamma.std(alpha_v, scale=1. / beta_v)
    self.assertAllClose(self.evaluate(gamma.stddev()), expected_stddev)
  def testGammaEntropy(self):
    """entropy() matches scipy's gamma.entropy."""
    alpha_v = np.array([1.0, 3.0, 2.5])
    beta_v = np.array([1.0, 4.0, 5.0])
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    self.assertEqual(gamma.entropy().get_shape(), (3,))
    if not stats:
      return
    expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
    self.assertAllClose(self.evaluate(gamma.entropy()), expected_entropy)
  def testGammaSampleSmallAlpha(self):
    """Sampling with small concentration passes a KS test and moment checks."""
    alpha_v = 0.05
    beta_v = 1.0
    alpha = constant_op.constant(alpha_v)
    beta = constant_op.constant(beta_v)
    n = 100000
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    samples = gamma.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n,))
    self.assertEqual(sample_values.shape, (n,))
    self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(),
        stats.gamma.mean(alpha_v, scale=1 / beta_v),
        atol=.01)
    self.assertAllClose(
        sample_values.var(),
        stats.gamma.var(alpha_v, scale=1 / beta_v),
        atol=.15)
  def testGammaSample(self):
    """Sampling passes a KS test; sample mean/variance match scipy."""
    alpha_v = 4.0
    beta_v = 3.0
    alpha = constant_op.constant(alpha_v)
    beta = constant_op.constant(beta_v)
    n = 100000
    gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
    samples = gamma.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n,))
    self.assertEqual(sample_values.shape, (n,))
    self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(),
        stats.gamma.mean(alpha_v, scale=1 / beta_v),
        atol=.01)
    self.assertAllClose(
        sample_values.var(),
        stats.gamma.var(alpha_v, scale=1 / beta_v),
        atol=.15)
  def testGammaFullyReparameterized(self):
    """Gradients of samples w.r.t. both parameters exist (reparameterized)."""
    alpha = constant_op.constant(4.0)
    beta = constant_op.constant(3.0)
    with backprop.GradientTape() as tape:
      tape.watch(alpha)
      tape.watch(beta)
      gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
      samples = gamma.sample(100)
    grad_alpha, grad_beta = tape.gradient(samples, [alpha, beta])
    self.assertIsNotNone(grad_alpha)
    self.assertIsNotNone(grad_beta)
  def testGammaSampleMultiDimensional(self):
    """Broadcast (1x100) x (10x1) parameter sampling: moments and KS tests."""
    alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
    beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
    gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
    n = 10000
    samples = gamma.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n, 10, 100))
    self.assertEqual(sample_values.shape, (n, 10, 100))
    zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
    alpha_bc = alpha_v + zeros
    beta_bc = beta_v + zeros
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(axis=0),
        stats.gamma.mean(alpha_bc, scale=1 / beta_bc),
        atol=0.,
        rtol=.05)
    self.assertAllClose(
        sample_values.var(axis=0),
        stats.gamma.var(alpha_bc, scale=1 / beta_bc),
        atol=10.0,
        rtol=0.)
    fails = 0
    trials = 0
    for ai, a in enumerate(np.reshape(alpha_v, [-1])):
      for bi, b in enumerate(np.reshape(beta_v, [-1])):
        s = sample_values[:, bi, ai]
        trials += 1
        fails += 0 if self._kstest(a, b, s) else 1
    self.assertLess(fails, trials * 0.03)
  def _kstest(self, alpha, beta, samples):
    """Return True if `samples` pass a KS goodness-of-fit test (ks < 0.02)."""
    # Uses the Kolmogorov-Smirnov test for goodness of fit.
    if not stats:
      return True # If we can't test, return that the test passes.
    ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
    # Return True when the test passes.
    return ks < 0.02
  def testGammaPdfOfSampleMultiDims(self):
    """prob() evaluated at samples integrates to ~1 per batch member."""
    gamma = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
    num = 50000
    samples = gamma.sample(num, seed=137)
    pdfs = gamma.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual(samples.get_shape(), (num, 2, 2))
    self.assertEqual(pdfs.get_shape(), (num, 2, 2))
    self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
    if not stats:
      return
    self.assertAllClose(
        stats.gamma.mean([[7., 11.], [7., 11.]],
                         scale=1 / np.array([[5., 5.], [6., 6.]])),
        sample_vals.mean(axis=0),
        atol=.1)
    self.assertAllClose(
        stats.gamma.var([[7., 11.], [7., 11.]],
                        scale=1 / np.array([[5., 5.], [6., 6.]])),
        sample_vals.var(axis=0),
        atol=.1)
  def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
    """Trapezoid-rule check that the sampled pdf integrates to ~1."""
    s_p = zip(sample_vals, pdf_vals)
    prev = (0, 0)
    total = 0
    for k in sorted(s_p, key=lambda x: x[0]):
      pair_pdf = (k[1] + prev[1]) / 2
      total += (k[0] - prev[0]) * pair_pdf
      prev = k
    self.assertNear(1., total, err=err)
  def testGammaNonPositiveInitializationParamsRaises(self):
    """validate_args=True rejects non-positive concentration or rate."""
    alpha_v = constant_op.constant(0.0, name="alpha")
    beta_v = constant_op.constant(1.0, name="beta")
    with self.assertRaisesOpError("x > 0"):
      gamma = gamma_lib.Gamma(
          concentration=alpha_v, rate=beta_v, validate_args=True)
      self.evaluate(gamma.mean())
    alpha_v = constant_op.constant(1.0, name="alpha")
    beta_v = constant_op.constant(0.0, name="beta")
    with self.assertRaisesOpError("x > 0"):
      gamma = gamma_lib.Gamma(
          concentration=alpha_v, rate=beta_v, validate_args=True)
      self.evaluate(gamma.mean())
  def testGammaWithSoftplusConcentrationRate(self):
    """The softplus variant transforms both raw parameters with softplus."""
    alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
    beta_v = constant_op.constant([1.0, -3.6], name="beta")
    gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
        concentration=alpha_v, rate=beta_v)
    self.assertAllEqual(
        self.evaluate(nn_ops.softplus(alpha_v)),
        self.evaluate(gamma.concentration))
    self.assertAllEqual(
        self.evaluate(nn_ops.softplus(beta_v)), self.evaluate(gamma.rate))
  def testGammaGammaKL(self):
    """Analytic KL matches the closed form and a Monte Carlo estimate."""
    alpha0 = np.array([3.])
    beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
    alpha1 = np.array([0.4])
    beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
    # Build graph.
    g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
    g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
    x = g0.sample(int(1e4), seed=0)
    kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
    kl_actual = kullback_leibler.kl_divergence(g0, g1)
    # Execute graph.
    [kl_sample_, kl_actual_] = self.evaluate([kl_sample, kl_actual])
    self.assertEqual(beta0.shape, kl_actual.get_shape())
    if not special:
      return
    kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
                   + special.gammaln(alpha1)
                   - special.gammaln(alpha0)
                   + alpha1 * np.log(beta0)
                   - alpha1 * np.log(beta1)
                   + alpha0 * (beta1 / beta0 - 1.))
    self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
    self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-1)
if __name__ == "__main__":
test.main()
| |
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import timedelta
from django import forms
from django.db.models import Q
from django.db.models.sql.constants import QUERY_TERMS
from django.utils import six
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from .fields import (
RangeField, LookupTypeField, Lookup, DateRangeField, TimeRangeField)
__all__ = [
'Filter', 'CharFilter', 'BooleanFilter', 'ChoiceFilter',
'TypedChoiceFilter', 'MultipleChoiceFilter', 'DateFilter',
'DateTimeFilter', 'TimeFilter', 'ModelChoiceFilter',
'ModelMultipleChoiceFilter', 'NumberFilter', 'NumericRangeFilter', 'RangeFilter',
'DateRangeFilter', 'DateFromToRangeFilter', 'TimeRangeFilter',
'AllValuesFilter', 'MethodFilter'
]
LOOKUP_TYPES = sorted(QUERY_TERMS)
class Filter(object):
    """
    Base filter: renders a form field and filters a queryset via a
    ``<name>__<lookup_type>`` keyword lookup.
    """
    # Class-wide counter so FilterSets can keep filters in declaration order.
    creation_counter = 0
    field_class = forms.Field
    def __init__(self, name=None, label=None, widget=None, action=None,
            lookup_type='exact', required=False, distinct=False, exclude=False, **kwargs):
        self.name = name
        self.label = label
        # A truthy `action` callable replaces the default `filter` method
        # (the instance attribute shadows the method defined on the class).
        if action:
            self.filter = action
        self.lookup_type = lookup_type
        self.widget = widget
        self.required = required
        # Remaining kwargs are forwarded to the form field constructor.
        self.extra = kwargs
        self.distinct = distinct
        self.exclude = exclude
        self.creation_counter = Filter.creation_counter
        Filter.creation_counter += 1
    @property
    def field(self):
        """
        Lazily build and cache the form field. When `lookup_type` is None or a
        list/tuple, a LookupTypeField is used so the lookup can be chosen too.
        """
        if not hasattr(self, '_field'):
            help_text = self.extra.pop('help_text', None)
            if help_text is None:
                help_text = _('This is an exclusion filter') if self.exclude else _('Filter')
            if (self.lookup_type is None or
                isinstance(self.lookup_type, (list, tuple))):
                if self.lookup_type is None:
                    lookup = [(x, x) for x in LOOKUP_TYPES]
                else:
                    lookup = [
                        (x, x) for x in LOOKUP_TYPES if x in self.lookup_type]
                self._field = LookupTypeField(self.field_class(
                    required=self.required, widget=self.widget, **self.extra),
                    lookup, required=self.required, label=self.label, help_text=help_text)
            else:
                self._field = self.field_class(required=self.required,
                    label=self.label, widget=self.widget,
                    help_text=help_text, **self.extra)
        return self._field
    def filter(self, qs, value):
        """
        Filter `qs` by `value`. Empty values are a no-op; honors `exclude`
        and applies `.distinct()` when requested.
        """
        if isinstance(value, Lookup):
            lookup = six.text_type(value.lookup_type)
            value = value.value
        else:
            lookup = self.lookup_type
        if value in ([], (), {}, None, ''):
            return qs
        method = qs.exclude if self.exclude else qs.filter
        qs = method(**{'%s__%s' % (self.name, lookup): value})
        if self.distinct:
            qs = qs.distinct()
        return qs
class CharFilter(Filter):
    """Filter whose form field is a ``forms.CharField``."""
    field_class = forms.CharField
class BooleanFilter(Filter):
    """Filter on a nullable boolean; a value of None leaves the queryset untouched."""
    field_class = forms.NullBooleanField
    def filter(self, qs, value):
        """Apply an exact-match filter unless the value is None."""
        if value is None:
            return qs
        return qs.filter(**{self.name: value})
class ChoiceFilter(Filter):
    """Filter whose form field is a ``forms.ChoiceField``."""
    field_class = forms.ChoiceField
class TypedChoiceFilter(Filter):
    """Filter whose form field is a ``forms.TypedChoiceField``."""
    field_class = forms.TypedChoiceField
class MultipleChoiceFilter(Filter):
    """
    This filter performs OR(by default) or AND(using conjoined=True) query
    on the selected options.
    Advanced Use
    ------------
    Depending on your application logic, when all or no choices are selected, filtering may be a noop. In this case you may wish to avoid the filtering overhead, particularly of the `distinct` call.
    Set `always_filter` to False after instantiation to enable the default `is_noop` test.
    Override `is_noop` if you require a different test for your application.
    """
    field_class = forms.MultipleChoiceField
    always_filter = True
    def __init__(self, *args, **kwargs):
        # `conjoined=True` switches the combination mode from OR to AND.
        conjoined = kwargs.pop('conjoined', False)
        self.conjoined = conjoined
        super(MultipleChoiceFilter, self).__init__(*args, **kwargs)
    def is_noop(self, qs, value):
        """
        Return True to short-circuit unnecessary and potentially slow filtering.
        """
        if self.always_filter:
            return False
        # A reasonable default for being a noop...
        if self.required and len(value) == len(self.field.choices):
            return True
        return False
    def filter(self, qs, value):
        """OR (default) or AND (`conjoined`) the selected values together."""
        value = value or () # Make sure we have an iterable
        if self.is_noop(qs, value):
            return qs
        # Even though not a noop, no point filtering if empty
        if not value:
            return qs
        if self.conjoined:
            # AND: chain one filter() call per selected value.
            for v in value:
                qs = qs.filter(**{self.name: v})
            return qs
        # OR: build one Q object and de-duplicate rows from joins.
        q = Q()
        for v in value:
            q |= Q(**{self.name: v})
        return qs.filter(q).distinct()
class DateFilter(Filter):
    """Filter whose form field is a ``forms.DateField``."""
    field_class = forms.DateField
class DateTimeFilter(Filter):
    """Filter whose form field is a ``forms.DateTimeField``."""
    field_class = forms.DateTimeField
class TimeFilter(Filter):
    """Filter whose form field is a ``forms.TimeField``."""
    field_class = forms.TimeField
class ModelChoiceFilter(Filter):
    """Filter whose form field is a ``forms.ModelChoiceField``."""
    field_class = forms.ModelChoiceField
class ModelMultipleChoiceFilter(MultipleChoiceFilter):
    """MultipleChoiceFilter backed by a ``forms.ModelMultipleChoiceField``."""
    field_class = forms.ModelMultipleChoiceField
class NumberFilter(Filter):
    """Filter whose form field is a ``forms.DecimalField``."""
    field_class = forms.DecimalField
class NumericRangeFilter(Filter):
    """
    Filter for range values. With both bounds present a single
    ``<name>__<lookup_type>`` lookup is applied with the (start, stop) tuple;
    with only one bound a ``startswith``/``endswith`` lookup is used.
    NOTE(review): ``startswith``/``endswith`` are presumably the range-field
    bound lookups of the backend in use -- confirm against the target database.
    """
    field_class = RangeField
    def filter(self, qs, value):
        if value:
            if value.start is not None and value.stop is not None:
                lookup = '%s__%s' % (self.name, self.lookup_type)
                return qs.filter(**{lookup: (value.start, value.stop)})
            else:
                if value.start is not None:
                    qs = qs.filter(**{'%s__startswith' % self.name: value.start})
                if value.stop is not None:
                    qs = qs.filter(**{'%s__endswith' % self.name: value.stop})
        return qs
class RangeFilter(Filter):
    """Filter a field between two values given as a slice-like (start, stop)."""
    field_class = RangeField
    def filter(self, qs, value):
        """Apply ``__range`` when both bounds are given, else ``__gte``/``__lte``."""
        if not value:
            return qs
        start, stop = value.start, value.stop
        if start is not None and stop is not None:
            return qs.filter(**{'%s__range' % self.name: (start, stop)})
        if start is not None:
            qs = qs.filter(**{'%s__gte' % self.name: start})
        if stop is not None:
            qs = qs.filter(**{'%s__lte' % self.name: stop})
        return qs
_truncate = lambda dt: dt.replace(hour=0, minute=0, second=0)
class DateRangeFilter(ChoiceFilter):
    """Choice filter offering common relative date ranges (today, past 7 days, ...)."""
    # Maps choice value -> (label, callable(qs, field_name)) applying the range.
    # NOTE(review): each lambda calls now() several times; the components could
    # straddle a midnight boundary -- presumably acceptable, but confirm.
    options = {
        '': (_('Any date'), lambda qs, name: qs.all()),
        1: (_('Today'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month,
            '%s__day' % name: now().day
        })),
        2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
            '%s__gte' % name: _truncate(now() - timedelta(days=7)),
            '%s__lt' % name: _truncate(now() + timedelta(days=1)),
        })),
        3: (_('This month'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month
        })),
        4: (_('This year'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
        })),
        5: (_('Yesterday'), lambda qs, name: qs.filter(**{
            '%s__year' % name: now().year,
            '%s__month' % name: now().month,
            '%s__day' % name: (now() - timedelta(days=1)).day,
        })),
    }
    def __init__(self, *args, **kwargs):
        # Derive the form field's choices from the options table above.
        kwargs['choices'] = [
            (key, value[0]) for key, value in six.iteritems(self.options)]
        super(DateRangeFilter, self).__init__(*args, **kwargs)
    def filter(self, qs, value):
        """Apply the chosen option; non-numeric input falls back to 'Any date'."""
        try:
            value = int(value)
        except (ValueError, TypeError):
            value = ''
        # NOTE(review): an integer outside `options` raises KeyError here --
        # presumably the form's choice validation prevents that; confirm.
        return self.options[value][1](qs, self.name)
class DateFromToRangeFilter(RangeFilter):
    """RangeFilter whose form field is a ``DateRangeField``."""
    field_class = DateRangeField
class TimeRangeFilter(RangeFilter):
    """RangeFilter whose form field is a ``TimeRangeField``."""
    field_class = TimeRangeField
class AllValuesFilter(ChoiceFilter):
    """ChoiceFilter whose choices are the distinct values currently in the column."""
    @property
    def field(self):
        # NOTE(review): `self.model` is presumably attached by the owning
        # FilterSet -- confirm before reusing this filter standalone.
        qs = self.model._default_manager.distinct()
        qs = qs.order_by(self.name).values_list(self.name, flat=True)
        self.extra['choices'] = [(o, o) for o in qs]
        return super(AllValuesFilter, self).field
class MethodFilter(Filter):
    """
    This filter will allow you to run a method that exists on the filterset class
    """
    def __init__(self, *args, **kwargs):
        """
        Accepts the same arguments as ``Filter``. A string ``action`` names a
        method on the parent filterset instead of being a callable.
        """
        # Get the action out of the kwargs
        action = kwargs.get('action', None)
        # If the action is a string store the action and set the action to our own
        # filter method so it can be backwards compatible and work as expected, the
        # parent will still treat it as a filter that has an action.
        self.parent_action = ''
        text_types = (str, six.text_type)
        # isinstance() instead of an exact type() membership test, so str
        # subclasses are accepted as action names as well.
        if isinstance(action, text_types):
            self.parent_action = str(action)
            kwargs.update({
                'action': self.filter
            })
        # Call the parent
        super(MethodFilter, self).__init__(*args, **kwargs)
    def filter(self, qs, value):
        """
        This filter method will act as a proxy for the actual method we want to
        call.
        It will try to find the method on the parent filterset,
        if not it attempts to search for the method `filter_{{attribute_name}}`.
        Otherwise it defaults to just returning the queryset.
        """
        parent = getattr(self, 'parent', None)
        parent_filter_method = getattr(parent, self.parent_action, None)
        if not parent_filter_method:
            func_str = 'filter_{0}'.format(self.name)
            parent_filter_method = getattr(parent, func_str, None)
        if parent_filter_method is not None:
            return parent_filter_method(qs, value)
        return qs
| |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wraps ml.exe or ml64.exe and postprocesses the output to be deterministic.
Sets timestamp in .obj file to 0, hence incompatible with link.exe /incremental.
Use by prefixing the ml(64).exe invocation with this script:
python ml.py ml.exe [args...]"""
import array
import collections
import struct
import subprocess
import sys
class Struct(object):
  """Thin wrapper around the struct module whose unpack results are namedtuples."""
  def __init__(self, name, *args):
    """Pass the name of the return type, then an interleaved list of struct
    format strings and their corresponding field names."""
    formats = args[0::2]
    fields = args[1::2]
    self.fmt = '<%s' % ''.join(formats)  # Little-endian, as COFF requires.
    self.type = collections.namedtuple(name, fields)
  def pack_into(self, buffer, offset, data):
    """Pack the tuple `data` into `buffer` at `offset`."""
    return struct.pack_into(self.fmt, buffer, offset, *data)
  def unpack_from(self, buffer, offset=0):
    """Read one record from `buffer` at `offset` and return it as a namedtuple."""
    values = struct.unpack_from(self.fmt, buffer, offset)
    return self.type(*values)
  def size(self):
    """Size in bytes of one packed record."""
    return struct.calcsize(self.fmt)
def Subtract(nt, **kwargs):
  """Subtract(nt, f=2) returns a new namedtuple with 2 subtracted from nt.f"""
  deltas = {}
  for field, amount in kwargs.items():
    deltas[field] = getattr(nt, field) - amount
  return nt._replace(**deltas)
def MakeDeterministic(objdata):
  """Return the bytes of an ml(64).exe-produced COFF object rewritten to be
  deterministic: the timestamp is zeroed and the .debug$S section (which holds
  an absolute path), its section header, and its symbol-table entry (plus aux
  entry) are removed, with all affected offsets and indices fixed up."""
  # Takes data produced by ml(64).exe (without any special flags) and
  # 1. Sets the timestamp to 0
  # 2. Strips the .debug$S section (which contains an unwanted absolute path)
  # This makes several assumptions about ml's output:
  # - Section data is in the same order as the corresponding section headers:
  #   section headers preceding the .debug$S section header have their data
  #   preceding the .debug$S section data; likewise for section headers
  #   following the .debug$S section.
  # - The .debug$S section contains only the absolute path to the obj file and
  #   nothing else, in particular there's only a single entry in the symbol
  #   table referring to the .debug$S section.
  # - There are no COFF line number entries.
  # - There's no IMAGE_SYM_CLASS_CLR_TOKEN symbol.
  # These seem to hold in practice; if they stop holding this script needs to
  # become smarter.
  objdata = array.array('b', objdata) # Writable, e.g. via struct.pack_into.
  # Read coff header.
  COFFHEADER = Struct('COFFHEADER',
                      'H', 'Machine',
                      'H', 'NumberOfSections',
                      'I', 'TimeDateStamp',
                      'I', 'PointerToSymbolTable',
                      'I', 'NumberOfSymbols',
                      'H', 'SizeOfOptionalHeader',
                      'H', 'Characteristics')
  coff_header = COFFHEADER.unpack_from(objdata)
  assert coff_header.SizeOfOptionalHeader == 0 # Only set for binaries.
  # Read section headers following coff header.
  SECTIONHEADER = Struct('SECTIONHEADER',
                         '8s', 'Name',
                         'I', 'VirtualSize',
                         'I', 'VirtualAddress',
                         'I', 'SizeOfRawData',
                         'I', 'PointerToRawData',
                         'I', 'PointerToRelocations',
                         'I', 'PointerToLineNumbers',
                         'H', 'NumberOfRelocations',
                         'H', 'NumberOfLineNumbers',
                         'I', 'Characteristics')
  section_headers = []
  debug_section_index = -1
  for i in range(0, coff_header.NumberOfSections):
    section_header = SECTIONHEADER.unpack_from(
        objdata, offset=COFFHEADER.size() + i * SECTIONHEADER.size())
    assert not section_header[0].startswith(b'/') # Support short names only.
    section_headers.append(section_header)
    if section_header.Name == b'.debug$S':
      assert debug_section_index == -1
      debug_section_index = i
  assert debug_section_index != -1
  data_start = COFFHEADER.size() + len(section_headers) * SECTIONHEADER.size()
  # Verify the .debug$S section looks like we expect.
  assert section_headers[debug_section_index].Name == b'.debug$S'
  assert section_headers[debug_section_index].VirtualSize == 0
  assert section_headers[debug_section_index].VirtualAddress == 0
  debug_size = section_headers[debug_section_index].SizeOfRawData
  debug_offset = section_headers[debug_section_index].PointerToRawData
  assert section_headers[debug_section_index].PointerToRelocations == 0
  assert section_headers[debug_section_index].PointerToLineNumbers == 0
  assert section_headers[debug_section_index].NumberOfRelocations == 0
  assert section_headers[debug_section_index].NumberOfLineNumbers == 0
  # Make sure sections in front of .debug$S have their data preceding it.
  for header in section_headers[:debug_section_index]:
    assert header.PointerToRawData < debug_offset
    assert header.PointerToRelocations < debug_offset
    assert header.PointerToLineNumbers < debug_offset
  # Make sure sections after of .debug$S have their data following it.
  for header in section_headers[debug_section_index + 1:]:
    # Make sure the .debug$S data is at the very end of section data:
    assert header.PointerToRawData > debug_offset
    assert header.PointerToRelocations == 0
    assert header.PointerToLineNumbers == 0
  # Make sure the first non-empty section's data starts right after the section
  # headers.
  for section_header in section_headers:
    if section_header.PointerToRawData == 0:
      assert section_header.PointerToRelocations == 0
      assert section_header.PointerToLineNumbers == 0
      continue
    assert section_header.PointerToRawData == data_start
    break
  # Make sure the symbol table (and hence, string table) appear after the last
  # section:
  assert (coff_header.PointerToSymbolTable >=
      section_headers[-1].PointerToRawData + section_headers[-1].SizeOfRawData)
  # The symbol table contains a symbol for the no-longer-present .debug$S
  # section. If we leave it there, lld-link will complain:
  #
  #    lld-link: error: .debug$S should not refer to non-existent section 5
  #
  # so we need to remove that symbol table entry as well. This shifts symbol
  # entries around and we need to update symbol table indices in:
  # - relocations
  # - line number records (never present)
  # - one aux symbol entry (IMAGE_SYM_CLASS_CLR_TOKEN; not present in ml output)
  SYM = Struct('SYM',
               '8s', 'Name',
               'I', 'Value',
               'h', 'SectionNumber', # Note: Signed!
               'H', 'Type',
               'B', 'StorageClass',
               'B', 'NumberOfAuxSymbols')
  i = 0
  debug_sym = -1
  while i < coff_header.NumberOfSymbols:
    sym_offset = coff_header.PointerToSymbolTable + i * SYM.size()
    sym = SYM.unpack_from(objdata, sym_offset)
    # 107 is IMAGE_SYM_CLASS_CLR_TOKEN, which has aux entry "CLR Token
    # Definition", which contains a symbol index. Check it's never present.
    assert sym.StorageClass != 107
    # Note: sym.SectionNumber is 1-based, debug_section_index is 0-based.
    if sym.SectionNumber - 1 == debug_section_index:
      assert debug_sym == -1, 'more than one .debug$S symbol found'
      debug_sym = i
      # Make sure the .debug$S symbol looks like we expect.
      # In particular, it should have exactly one aux symbol.
      assert sym.Name == b'.debug$S'
      assert sym.Value == 0
      assert sym.Type == 0
      assert sym.StorageClass == 3
      assert sym.NumberOfAuxSymbols == 1
    elif sym.SectionNumber > debug_section_index:
      sym = Subtract(sym, SectionNumber=1)
      SYM.pack_into(objdata, sym_offset, sym)
    i += 1 + sym.NumberOfAuxSymbols
  assert debug_sym != -1, '.debug$S symbol not found'
  # Note: Usually the .debug$S section is the last, but for files saying
  # `includelib foo.lib`, like safe_terminate_process.asm in 32-bit builds,
  # this isn't true: .drectve is after .debug$S.
  # Update symbol table indices in relocations.
  # There are a few processor types that have one or two relocation types
  # where SymbolTableIndex has a different meaning, but not for x86.
  REL = Struct('REL',
               'I', 'VirtualAddress',
               'I', 'SymbolTableIndex',
               'H', 'Type')
  for header in section_headers[0:debug_section_index]:
    for j in range(0, header.NumberOfRelocations):
      rel_offset = header.PointerToRelocations + j * REL.size()
      rel = REL.unpack_from(objdata, rel_offset)
      assert rel.SymbolTableIndex != debug_sym
      if rel.SymbolTableIndex > debug_sym:
        # Two symbol entries vanish: the .debug$S symbol and its aux entry.
        rel = Subtract(rel, SymbolTableIndex=2)
        REL.pack_into(objdata, rel_offset, rel)
  # Update symbol table indices in line numbers -- just check they don't exist.
  for header in section_headers:
    assert header.NumberOfLineNumbers == 0
  # Now that all indices are updated, remove the symbol table entry referring to
  # .debug$S and its aux entry.
  del objdata[coff_header.PointerToSymbolTable + debug_sym * SYM.size():
              coff_header.PointerToSymbolTable + (debug_sym + 2) * SYM.size()]
  # Now we know that it's safe to write out the input data, with just the
  # timestamp overwritten to 0, the last section header cut out (and the
  # offsets of all other section headers decremented by the size of that
  # one section header), and the last section's data cut out. The symbol
  # table offset needs to be reduced by one section header and the size of
  # the missing section.
  # (The COFF spec only requires on-disk sections to be aligned in image files,
  # for obj files it's not required. If that wasn't the case, deleting slices
  # if data would not generally be safe.)
  # Update section offsets and remove .debug$S section data.
  for i in range(0, debug_section_index):
    header = section_headers[i]
    if header.SizeOfRawData:
      header = Subtract(header, PointerToRawData=SECTIONHEADER.size())
    if header.NumberOfRelocations:
      header = Subtract(header, PointerToRelocations=SECTIONHEADER.size())
    if header.NumberOfLineNumbers:
      header = Subtract(header, PointerToLineNumbers=SECTIONHEADER.size())
    SECTIONHEADER.pack_into(
        objdata, COFFHEADER.size() + i * SECTIONHEADER.size(), header)
  for i in range(debug_section_index + 1, len(section_headers)):
    header = section_headers[i]
    shift = SECTIONHEADER.size() + debug_size
    if header.SizeOfRawData:
      header = Subtract(header, PointerToRawData=shift)
    if header.NumberOfRelocations:
      header = Subtract(header, PointerToRelocations=shift)
    if header.NumberOfLineNumbers:
      header = Subtract(header, PointerToLineNumbers=shift)
    SECTIONHEADER.pack_into(
        objdata, COFFHEADER.size() + i * SECTIONHEADER.size(), header)
  del objdata[debug_offset:debug_offset + debug_size]
  # Finally, remove .debug$S section header and update coff header.
  coff_header = coff_header._replace(TimeDateStamp=0)
  coff_header = Subtract(coff_header,
      NumberOfSections=1,
      PointerToSymbolTable=SECTIONHEADER.size() + debug_size,
      NumberOfSymbols=2)
  COFFHEADER.pack_into(objdata, 0, coff_header)
  del objdata[
      COFFHEADER.size() + debug_section_index * SECTIONHEADER.size():
      COFFHEADER.size() + (debug_section_index + 1) * SECTIONHEADER.size()]
  # All done!
  if sys.version_info.major == 2:
    return objdata.tostring()
  else:
    return objdata.tobytes()
def main():
  """Invoke ml(64).exe with the given args, then rewrite its .obj in place."""
  exit_code = subprocess.call(sys.argv[1:])
  if exit_code != 0:
    return exit_code
  # The last /Fo<path> argument names the output object file.
  objfile = None
  for arg in sys.argv[1:]:
    if arg.startswith('/Fo'):
      objfile = arg[len('/Fo'):]
  assert objfile, 'failed to find ml output'
  with open(objfile, 'rb') as f:
    objdata = f.read()
  objdata = MakeDeterministic(objdata)
  with open(objfile, 'wb') as f:
    f.write(objdata)
if __name__ == '__main__':
sys.exit(main())
| |
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model for the backend.
"""
import re
from django.conf import settings
from django.contrib.gis import gdal
class BaseSpatialOperations(object):
    """
    Base class for spatial backend database operations; each spatial database
    backend instantiates a subclass that advertises (via the flags below) the
    features it supports.
    """
    distance_functions = {}
    geometry_functions = {}
    geometry_operators = {}
    geography_operators = {}
    geography_functions = {}
    gis_terms = {}
    truncate_params = {}
    # Quick booleans for the type of this spatial backend, and
    # an attribute for the spatial database version tuple (if applicable)
    postgis = False
    spatialite = False
    mysql = False
    oracle = False
    spatial_version = None
    # How the geometry column should be selected.
    select = None
    # Does the spatial database have a geography type?
    geography = False
    # Per-function capability flags; backends flip these on as supported.
    area = False
    centroid = False
    difference = False
    distance = False
    distance_sphere = False
    distance_spheroid = False
    envelope = False
    force_rhr = False
    mem_size = False
    bounding_circle = False
    num_geom = False
    num_points = False
    perimeter = False
    perimeter3d = False
    point_on_surface = False
    polygonize = False
    reverse = False
    scale = False
    snap_to_grid = False
    sym_difference = False
    transform = False
    translate = False
    union = False
    # Aggregates
    collect = False
    extent = False
    extent3d = False
    make_line = False
    unionagg = False
    # Serialization
    geohash = False
    geojson = False
    gml = False
    kml = False
    svg = False
    # Constructors
    from_text = False
    from_wkb = False
    # Default conversion functions for aggregates; will be overridden if implemented
    # for the spatial backend.
    def convert_extent(self, box):
        raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
    def convert_extent3d(self, box):
        raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
    def convert_geom(self, geom_val, geom_field):
        raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
    # For quoting column values, rather than columns.
    def geo_quote_name(self, name):
        # NOTE: `unicode` is the Python 2 builtin; this module targets Python 2.
        if isinstance(name, unicode):
            name = name.encode('ascii')
        return "'%s'" % name
    # GeometryField operations
    def geo_db_type(self, f):
        """
        Returns the database column type for the geometry field on
        the spatial backend.
        """
        raise NotImplementedError
    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters for the given geometry field,
        lookup value, and lookup type.
        """
        raise NotImplementedError('Distance operations not available on this spatial backend.')
    def get_geom_placeholder(self, f, value):
        """
        Returns the placeholder for the given geometry field with the given
        value. Depending on the spatial backend, the placeholder may contain a
        stored procedure call to the transformation function of the spatial
        backend.
        """
        raise NotImplementedError
    # Spatial SQL Construction
    def spatial_aggregate_sql(self, agg):
        raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
    def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
        # Bug fix: previously raised the misspelled name `NotImplmentedError`,
        # which produced a NameError instead of the intended NotImplementedError.
        raise NotImplementedError
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        raise NotImplementedError
    def spatial_ref_sys(self):
        raise NotImplementedError
class SpatialRefSysMixin(object):
    """
    The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
    """
    # For pulling out the spheroid from the spatial reference string. This
    # regular expression is used only if the user does not have GDAL installed.
    # TODO: Flattening not used in all ellipsoids, could also be a minor axis,
    # or 'b' parameter.
    spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
    # For pulling out the units on platforms w/o GDAL installed.
    # TODO: Figure out how to pull out angular units of projected coordinate system and
    # fix for LOCAL_CS types. GDAL should be highly recommended for performing
    # distance queries.
    units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
    @property
    def srs(self):
        """
        Returns a GDAL SpatialReference object, if GDAL is installed.

        Raises Exception if GDAL is unavailable or neither `self.wkt` nor
        `self.proj4text` yields a valid SpatialReference.
        """
        if gdal.HAS_GDAL:
            # TODO: Is caching really necessary here? Is complexity worth it?
            if hasattr(self, '_srs'):
                # Returning a clone of the cached SpatialReference object.
                return self._srs.clone()
            else:
                # Attempting to cache a SpatialReference object.
                # Trying to get from WKT first.
                try:
                    self._srs = gdal.SpatialReference(self.wkt)
                    return self.srs
                except Exception, msg:
                    pass
                # WKT failed; fall back to the PROJ.4 representation.
                try:
                    self._srs = gdal.SpatialReference(self.proj4text)
                    return self.srs
                except Exception, msg:
                    pass
                # Only reached when both attempts above failed, so `msg` is
                # always bound here (to the most recent error).
                raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
        else:
            raise Exception('GDAL is not installed.')
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening).

        NOTE: without GDAL the regex fallback returns only a 2-tuple of
        (semimajor axis, inverse flattening) -- no semiminor axis -- or
        None when the WKT does not match.
        """
        if gdal.HAS_GDAL:
            return self.srs.ellipsoid
        else:
            m = self.spheroid_regex.match(self.wkt)
            if m: return (float(m.group('major')), float(m.group('flattening')))
            else: return None
    @property
    def name(self):
        "Returns the projection name."
        return self.srs.name
    @property
    def spheroid(self):
        "Returns the spheroid name for this spatial reference."
        return self.srs['spheroid']
    @property
    def datum(self):
        "Returns the datum for this spatial reference."
        return self.srs['datum']
    @property
    def projected(self):
        "Is this Spatial Reference projected?"
        if gdal.HAS_GDAL:
            return self.srs.projected
        else:
            # Without GDAL, fall back to inspecting the WKT prefix.
            return self.wkt.startswith('PROJCS')
    @property
    def local(self):
        "Is this Spatial Reference local?"
        if gdal.HAS_GDAL:
            return self.srs.local
        else:
            return self.wkt.startswith('LOCAL_CS')
    @property
    def geographic(self):
        "Is this Spatial Reference geographic?"
        if gdal.HAS_GDAL:
            return self.srs.geographic
        else:
            return self.wkt.startswith('GEOGCS')
    @property
    def linear_name(self):
        "Returns the linear units name."
        if gdal.HAS_GDAL:
            return self.srs.linear_name
        elif self.geographic:
            # Geographic systems use angular, not linear, units.
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit_name')
    @property
    def linear_units(self):
        "Returns the linear units."
        if gdal.HAS_GDAL:
            return self.srs.linear_units
        elif self.geographic:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit')
    @property
    def angular_name(self):
        "Returns the name of the angular units."
        if gdal.HAS_GDAL:
            return self.srs.angular_name
        elif self.projected:
            # Projected systems use linear, not angular, units.
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit_name')
    @property
    def angular_units(self):
        "Returns the angular units."
        if gdal.HAS_GDAL:
            return self.srs.angular_units
        elif self.projected:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit')
    @property
    def units(self):
        "Returns a tuple of the units and the name."
        if self.projected or self.local:
            return (self.linear_units, self.linear_name)
        elif self.geographic:
            return (self.angular_units, self.angular_name)
        else:
            return (None, None)
    @classmethod
    def get_units(cls, wkt):
        """
        Class method used by GeometryField on initialization to
        retrieve the units on the given WKT, without having to use
        any of the database fields.
        """
        if gdal.HAS_GDAL:
            return gdal.SpatialReference(wkt).units
        else:
            m = cls.units_regex.match(wkt)
            return m.group('unit'), m.group('unit_name')
    @classmethod
    def get_spheroid(cls, wkt, string=True):
        """
        Class method used by GeometryField on initialization to
        retrieve the `SPHEROID[..]` parameters from the given WKT.

        When `string` is True, the result is formatted as a PostGIS-style
        'SPHEROID["name",radius,flattening]' string; otherwise a
        (name, params) tuple is returned.
        """
        if gdal.HAS_GDAL:
            srs = gdal.SpatialReference(wkt)
            sphere_params = srs.ellipsoid
            sphere_name = srs['spheroid']
        else:
            m = cls.spheroid_regex.match(wkt)
            if m:
                sphere_params = (float(m.group('major')), float(m.group('flattening')))
                sphere_name = m.group('name')
            else:
                return None
        if not string:
            return sphere_name, sphere_params
        else:
            # `string` parameter used to place in format acceptable by PostGIS
            # GDAL yields (major, minor, flattening); the regex fallback
            # yields only (major, flattening).
            if len(sphere_params) == 3:
                radius, flattening = sphere_params[0], sphere_params[2]
            else:
                radius, flattening = sphere_params
            return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
    def __unicode__(self):
        """
        Returns the string representation. If GDAL is installed,
        it will be 'pretty' OGC WKT.
        """
        try:
            return unicode(self.srs)
        except:
            # The `srs` property raises when GDAL is unavailable or the
            # reference is unparsable; fall back to the raw WKT.
            return unicode(self.wkt)
| |
"""Test cases for traceback module"""
from collections import namedtuple
from io import StringIO
import linecache
import sys
import unittest
import re
from test import support
from test.support import TESTFN, Error, captured_output, unlink, cpython_only
from test.support.script_helper import assert_python_ok
import textwrap
import traceback
# Lightweight stand-ins for real code/frame/traceback objects, used to feed
# synthetic frames into the traceback APIs in the tests below.
test_code = namedtuple('code', ['co_filename', 'co_name'])
test_frame = namedtuple('frame', ['f_code', 'f_globals', 'f_locals'])
test_tb = namedtuple('tb', ['tb_frame', 'tb_lineno', 'tb_next'])
class TracebackCases(unittest.TestCase):
    # For now, a very minimal set of tests. I want to be sure that
    # formatting of SyntaxErrors works based on changes for 2.1.
    def get_exception_format(self, func, exc):
        # Call *func*, which must raise *exc*, and return the
        # format_exception_only() lines for the raised exception.
        try:
            func()
        except exc as value:
            return traceback.format_exception_only(exc, value)
        else:
            raise ValueError("call did not raise exception")
    # Helpers that compile deliberately broken source, producing the various
    # SyntaxError / IndentationError shapes checked by the tests below.
    def syntax_error_with_caret(self):
        compile("def fact(x):\n\treturn x!\n", "?", "exec")
    def syntax_error_with_caret_2(self):
        compile("1 +\n", "?", "exec")
    def syntax_error_bad_indentation(self):
        compile("def spam():\n print(1)\n print(2)", "?", "exec")
    def syntax_error_with_caret_non_ascii(self):
        compile('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', "?", "exec")
    def syntax_error_bad_indentation2(self):
        compile(" print(2)", "?", "exec")
    def test_caret(self):
        # The caret line ("^") must point at the offending token.
        err = self.get_exception_format(self.syntax_error_with_caret,
                                        SyntaxError)
        self.assertEqual(len(err), 4)
        self.assertTrue(err[1].strip() == "return x!")
        self.assertIn("^", err[2]) # third line has caret
        self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place
        err = self.get_exception_format(self.syntax_error_with_caret_2,
                                        SyntaxError)
        self.assertIn("^", err[2]) # third line has caret
        self.assertEqual(err[2].count('\n'), 1) # and no additional newline
        self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
        err = self.get_exception_format(self.syntax_error_with_caret_non_ascii,
                                        SyntaxError)
        self.assertIn("^", err[2]) # third line has caret
        self.assertEqual(err[2].count('\n'), 1) # and no additional newline
        self.assertEqual(err[1].find("+"), err[2].find("^")) # in the right place
    def test_nocaret(self):
        # No offset in the error tuple -> no caret line in the output.
        exc = SyntaxError("error", ("x.py", 23, None, "bad syntax"))
        err = traceback.format_exception_only(SyntaxError, exc)
        self.assertEqual(len(err), 3)
        self.assertEqual(err[1].strip(), "bad syntax")
    def test_bad_indentation(self):
        err = self.get_exception_format(self.syntax_error_bad_indentation,
                                        IndentationError)
        self.assertEqual(len(err), 4)
        self.assertEqual(err[1].strip(), "print(2)")
        self.assertIn("^", err[2])
        self.assertEqual(err[1].find(")"), err[2].find("^"))
        err = self.get_exception_format(self.syntax_error_bad_indentation2,
                                        IndentationError)
        self.assertEqual(len(err), 4)
        self.assertEqual(err[1].strip(), "print(2)")
        self.assertIn("^", err[2])
        self.assertEqual(err[1].find("p"), err[2].find("^"))
    def test_base_exception(self):
        # Test that exceptions derived from BaseException are formatted right
        e = KeyboardInterrupt()
        lst = traceback.format_exception_only(e.__class__, e)
        self.assertEqual(lst, ['KeyboardInterrupt\n'])
    def test_format_exception_only_bad__str__(self):
        # An exception whose __str__ itself raises must still be formatted,
        # with an "<unprintable ...>" placeholder for the message.
        class X(Exception):
            def __str__(self):
                1/0
        err = traceback.format_exception_only(X, X())
        self.assertEqual(len(err), 1)
        str_value = '<unprintable %s object>' % X.__name__
        if X.__module__ in ('__main__', 'builtins'):
            str_name = X.__qualname__
        else:
            str_name = '.'.join([X.__module__, X.__qualname__])
        self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value))
    def test_encoded_file(self):
        # Test that tracebacks are correctly printed for encoded source files:
        # - correct line number (Issue2384)
        # - respect file encoding (Issue3975)
        import tempfile, sys, subprocess, os
        # The spawned subprocess has its stdout redirected to a PIPE, and its
        # encoding may be different from the current interpreter, on Windows
        # at least.
        process = subprocess.Popen([sys.executable, "-c",
                                    "import sys; print(sys.stdout.encoding)"],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        stdout, stderr = process.communicate()
        output_encoding = str(stdout, 'ascii').splitlines()[0]
        def do_test(firstlines, message, charset, lineno):
            # Raise the message in a subprocess, and catch the output
            try:
                with open(TESTFN, "w", encoding=charset) as output:
                    output.write("""{0}if 1:
                        import traceback;
                        raise RuntimeError('{1}')
                        """.format(firstlines, message))
                process = subprocess.Popen([sys.executable, TESTFN],
                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                stdout, stderr = process.communicate()
                stdout = stdout.decode(output_encoding).splitlines()
            finally:
                unlink(TESTFN)
            # The source lines are encoded with the 'backslashreplace' handler
            encoded_message = message.encode(output_encoding,
                                             'backslashreplace')
            # and we just decoded them with the output_encoding.
            message_ascii = encoded_message.decode(output_encoding)
            err_line = "raise RuntimeError('{0}')".format(message_ascii)
            err_msg = "RuntimeError: {0}".format(message_ascii)
            self.assertIn(("line %s" % lineno), stdout[1],
                "Invalid line number: {0!r} instead of {1}".format(
                    stdout[1], lineno))
            self.assertTrue(stdout[2].endswith(err_line),
                "Invalid traceback line: {0!r} instead of {1!r}".format(
                    stdout[2], err_line))
            self.assertTrue(stdout[3] == err_msg,
                "Invalid error message: {0!r} instead of {1!r}".format(
                    stdout[3], err_msg))
        do_test("", "foo", "ascii", 3)
        # Exercise several source encodings; the expected traceback line
        # number shifts with the number of header lines prepended.
        for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"):
            if charset == "ascii":
                text = "foo"
            elif charset == "GBK":
                text = "\u4E02\u5100"
            else:
                text = "h\xe9 ho"
            do_test("# coding: {0}\n".format(charset),
                    text, charset, 4)
            do_test("#!shebang\n# coding: {0}\n".format(charset),
                    text, charset, 5)
            do_test(" \t\f\n# coding: {0}\n".format(charset),
                    text, charset, 5)
        # Issue #18960: coding spec should have no effect
        do_test("x=0\n# coding: GBK\n", "h\xe9 ho", 'utf-8', 5)
    @support.requires_type_collecting
    def test_print_traceback_at_exit(self):
        # Issue #22599: Ensure that it is possible to use the traceback module
        # to display an exception at Python exit
        code = textwrap.dedent("""
            import sys
            import traceback

            class PrintExceptionAtExit(object):
                def __init__(self):
                    try:
                        x = 1 / 0
                    except Exception:
                        self.exc_info = sys.exc_info()
                        # self.exc_info[1] (traceback) contains frames:
                        # explicitly clear the reference to self in the current
                        # frame to break a reference cycle
                        self = None

                def __del__(self):
                    traceback.print_exception(*self.exc_info)

            # Keep a reference in the module namespace to call the destructor
            # when the module is unloaded
            obj = PrintExceptionAtExit()
        """)
        rc, stdout, stderr = assert_python_ok('-c', code)
        expected = [b'Traceback (most recent call last):',
                    b'  File "<string>", line 8, in __init__',
                    b'ZeroDivisionError: division by zero']
        self.assertEqual(stderr.splitlines(), expected)
    def test_print_exception(self):
        output = StringIO()
        traceback.print_exception(
            Exception, Exception("projector"), None, file=output
        )
        self.assertEqual(output.getvalue(), "Exception: projector\n")
class TracebackFormatTests(unittest.TestCase):
    # NOTE: many expectations below encode line offsets relative to function
    # starts (co_firstlineno + N); the statement layout inside these methods
    # is load-bearing and must not be reflowed.
    def some_exception(self):
        raise KeyError('blah')
    @cpython_only
    def check_traceback_format(self, cleanup_func=None):
        # Compare the C-level traceback printer (_testcapi.traceback_print)
        # with the pure-Python traceback module output.
        from _testcapi import traceback_print
        try:
            self.some_exception()
        except KeyError:
            type_, value, tb = sys.exc_info()
            if cleanup_func is not None:
                # Clear the inner frames, not this one
                cleanup_func(tb.tb_next)
            traceback_fmt = 'Traceback (most recent call last):\n' + \
                            ''.join(traceback.format_tb(tb))
            file_ = StringIO()
            traceback_print(tb, file_)
            python_fmt = file_.getvalue()
            # Call all _tb and _exc functions
            with captured_output("stderr") as tbstderr:
                traceback.print_tb(tb)
            tbfile = StringIO()
            traceback.print_tb(tb, file=tbfile)
            with captured_output("stderr") as excstderr:
                traceback.print_exc()
            excfmt = traceback.format_exc()
            excfile = StringIO()
            traceback.print_exc(file=excfile)
        else:
            raise Error("unable to create test traceback string")
        # Make sure that Python and the traceback module format the same thing
        self.assertEqual(traceback_fmt, python_fmt)
        # Now verify the _tb func output
        self.assertEqual(tbstderr.getvalue(), tbfile.getvalue())
        # Now verify the _exc func output
        self.assertEqual(excstderr.getvalue(), excfile.getvalue())
        self.assertEqual(excfmt, excfile.getvalue())
        # Make sure that the traceback is properly indented.
        tb_lines = python_fmt.splitlines()
        self.assertEqual(len(tb_lines), 5)
        banner = tb_lines[0]
        location, source_line = tb_lines[-2:]
        self.assertTrue(banner.startswith('Traceback'))
        self.assertTrue(location.startswith('  File'))
        self.assertTrue(source_line.startswith('    raise'))
    def test_traceback_format(self):
        self.check_traceback_format()
    def test_traceback_format_with_cleared_frames(self):
        # Check that traceback formatting also works with a clear()ed frame
        def cleanup_tb(tb):
            tb.tb_frame.clear()
        self.check_traceback_format(cleanup_tb)
    def test_stack_format(self):
        # Verify _stack functions. Note we have to use _getframe(1) to
        # compare them without this frame appearing in the output
        with captured_output("stderr") as ststderr:
            traceback.print_stack(sys._getframe(1))
        stfile = StringIO()
        traceback.print_stack(sys._getframe(1), file=stfile)
        self.assertEqual(ststderr.getvalue(), stfile.getvalue())
        stfmt = traceback.format_stack(sys._getframe(1))
        self.assertEqual(ststderr.getvalue(), "".join(stfmt))
    def test_print_stack(self):
        def prn():
            traceback.print_stack()
        with captured_output("stderr") as stderr:
            prn()
        lineno = prn.__code__.co_firstlineno
        # Expected line numbers are offsets from prn's definition; do not
        # move the statements above.
        self.assertEqual(stderr.getvalue().splitlines()[-4:], [
            '  File "%s", line %d, in test_print_stack' % (__file__, lineno+3),
            '    prn()',
            '  File "%s", line %d, in prn' % (__file__, lineno+1),
            '    traceback.print_stack()',
        ])
    # issue 26823 - Shrink recursive tracebacks
    def _check_recursive_traceback_display(self, render_exc):
        # Always show full diffs when this test fails
        # Note that rearranging things may require adjusting
        # the relative line numbers in the expected tracebacks
        self.maxDiff = None
        # Check hitting the recursion limit
        def f():
            f()
        with captured_output("stderr") as stderr_f:
            try:
                f()
            except RecursionError as exc:
                render_exc()
            else:
                self.fail("no recursion occurred")
        lineno_f = f.__code__.co_firstlineno
        result_f = (
            'Traceback (most recent call last):\n'
            f'  File "{__file__}", line {lineno_f+5}, in _check_recursive_traceback_display\n'
            '    f()\n'
            f'  File "{__file__}", line {lineno_f+1}, in f\n'
            '    f()\n'
            f'  File "{__file__}", line {lineno_f+1}, in f\n'
            '    f()\n'
            f'  File "{__file__}", line {lineno_f+1}, in f\n'
            '    f()\n'
            # XXX: The following line changes depending on whether the tests
            # are run through the interactive interpreter or with -m
            # It also varies depending on the platform (stack size)
            # Fortunately, we don't care about exactness here, so we use regex
            r'  \[Previous line repeated (\d+) more times\]' '\n'
            'RecursionError: maximum recursion depth exceeded\n'
        )
        expected = result_f.splitlines()
        actual = stderr_f.getvalue().splitlines()
        # Check the output text matches expectations
        # 2nd last line contains the repetition count
        self.assertEqual(actual[:-2], expected[:-2])
        self.assertRegex(actual[-2], expected[-2])
        self.assertEqual(actual[-1], expected[-1])
        # Check the recursion count is roughly as expected
        rec_limit = sys.getrecursionlimit()
        self.assertIn(int(re.search(r"\d+", actual[-2]).group()), range(rec_limit-60, rec_limit))
        # Check a known (limited) number of recursive invocations
        def g(count=10):
            if count:
                return g(count-1)
            raise ValueError
        with captured_output("stderr") as stderr_g:
            try:
                g()
            except ValueError as exc:
                render_exc()
            else:
                self.fail("no value error was raised")
        lineno_g = g.__code__.co_firstlineno
        result_g = (
            f'  File "{__file__}", line {lineno_g+2}, in g\n'
            '    return g(count-1)\n'
            f'  File "{__file__}", line {lineno_g+2}, in g\n'
            '    return g(count-1)\n'
            f'  File "{__file__}", line {lineno_g+2}, in g\n'
            '    return g(count-1)\n'
            '  [Previous line repeated 6 more times]\n'
            f'  File "{__file__}", line {lineno_g+3}, in g\n'
            '    raise ValueError\n'
            'ValueError\n'
        )
        tb_line = (
            'Traceback (most recent call last):\n'
            f'  File "{__file__}", line {lineno_g+7}, in _check_recursive_traceback_display\n'
            '    g()\n'
        )
        expected = (tb_line + result_g).splitlines()
        actual = stderr_g.getvalue().splitlines()
        self.assertEqual(actual, expected)
        # Check 2 different repetitive sections
        def h(count=10):
            if count:
                return h(count-1)
            g()
        with captured_output("stderr") as stderr_h:
            try:
                h()
            except ValueError as exc:
                render_exc()
            else:
                self.fail("no value error was raised")
        lineno_h = h.__code__.co_firstlineno
        result_h = (
            'Traceback (most recent call last):\n'
            f'  File "{__file__}", line {lineno_h+7}, in _check_recursive_traceback_display\n'
            '    h()\n'
            f'  File "{__file__}", line {lineno_h+2}, in h\n'
            '    return h(count-1)\n'
            f'  File "{__file__}", line {lineno_h+2}, in h\n'
            '    return h(count-1)\n'
            f'  File "{__file__}", line {lineno_h+2}, in h\n'
            '    return h(count-1)\n'
            '  [Previous line repeated 6 more times]\n'
            f'  File "{__file__}", line {lineno_h+3}, in h\n'
            '    g()\n'
        )
        expected = (result_h + result_g).splitlines()
        actual = stderr_h.getvalue().splitlines()
        self.assertEqual(actual, expected)
    def test_recursive_traceback_python(self):
        self._check_recursive_traceback_display(traceback.print_exc)
    @cpython_only
    def test_recursive_traceback_cpython_internal(self):
        from _testcapi import exception_print
        def render_exc():
            exc_type, exc_value, exc_tb = sys.exc_info()
            exception_print(exc_value)
        self._check_recursive_traceback_display(render_exc)
    def test_format_stack(self):
        def fmt():
            return traceback.format_stack()
        result = fmt()
        lineno = fmt.__code__.co_firstlineno
        self.assertEqual(result[-2:], [
            '  File "%s", line %d, in test_format_stack\n'
            '    result = fmt()\n' % (__file__, lineno+2),
            '  File "%s", line %d, in fmt\n'
            '    return traceback.format_stack()\n' % (__file__, lineno+1),
        ])
# Separator lines that the interpreter prints between chained tracebacks.
context_message = (
    "\nDuring handling of the above exception, "
    "another exception occurred:\n\n")

cause_message = (
    "\nThe above exception was the direct cause "
    "of the following exception:\n\n")

# Splits a formatted report at either chaining boundary; the capturing
# group keeps the boundary text itself in the split result.
boundaries = re.compile(
    '(%s|%s)' % (re.escape(cause_message), re.escape(context_message)))
class BaseExceptionReportingTests:
    # Mixin: subclasses provide get_report() (Python- or C-level rendering).
    # NOTE: the '# Marker' / '# In zero_div' comments below appear in the
    # rendered source lines and are asserted on -- they must not be removed.
    def get_exception(self, exception_or_callable):
        # Accept either a ready-made exception or a callable that raises one.
        if isinstance(exception_or_callable, Exception):
            return exception_or_callable
        try:
            exception_or_callable()
        except Exception as e:
            return e
    def zero_div(self):
        1/0 # In zero_div
    def check_zero_div(self, msg):
        lines = msg.splitlines()
        self.assertTrue(lines[-3].startswith('  File'))
        self.assertIn('1/0 # In zero_div', lines[-2])
        self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1])
    def test_simple(self):
        try:
            1/0 # Marker
        except ZeroDivisionError as _:
            e = _
        lines = self.get_report(e).splitlines()
        self.assertEqual(len(lines), 4)
        self.assertTrue(lines[0].startswith('Traceback'))
        self.assertTrue(lines[1].startswith('  File'))
        self.assertIn('1/0 # Marker', lines[2])
        self.assertTrue(lines[3].startswith('ZeroDivisionError'))
    def test_cause(self):
        # `raise ... from e` produces the cause_message boundary.
        def inner_raise():
            try:
                self.zero_div()
            except ZeroDivisionError as e:
                raise KeyError from e
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], cause_message)
        self.check_zero_div(blocks[0])
        self.assertIn('inner_raise() # Marker', blocks[2])
    def test_context(self):
        # An implicit __context__ produces the context_message boundary.
        def inner_raise():
            try:
                self.zero_div()
            except ZeroDivisionError:
                raise KeyError
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], context_message)
        self.check_zero_div(blocks[0])
        self.assertIn('inner_raise() # Marker', blocks[2])
    def test_context_suppression(self):
        # `raise ... from None` must suppress the chained context entirely.
        try:
            try:
                raise Exception
            except:
                raise ZeroDivisionError from None
        except ZeroDivisionError as _:
            e = _
        lines = self.get_report(e).splitlines()
        self.assertEqual(len(lines), 4)
        self.assertTrue(lines[0].startswith('Traceback'))
        self.assertTrue(lines[1].startswith('  File'))
        self.assertIn('ZeroDivisionError from None', lines[2])
        self.assertTrue(lines[3].startswith('ZeroDivisionError'))
    def test_cause_and_context(self):
        # When both a cause and a context are set, only the cause should be
        # displayed and the context should be muted.
        def inner_raise():
            try:
                self.zero_div()
            except ZeroDivisionError as _e:
                e = _e
            try:
                xyzzy
            except NameError:
                raise KeyError from e
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], cause_message)
        self.check_zero_div(blocks[0])
        self.assertIn('inner_raise() # Marker', blocks[2])
    def test_cause_recursive(self):
        def inner_raise():
            try:
                try:
                    self.zero_div()
                except ZeroDivisionError as e:
                    z = e
                    raise KeyError from e
            except KeyError as e:
                raise z from e
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], cause_message)
        # The first block is the KeyError raised from the ZeroDivisionError
        self.assertIn('raise KeyError from e', blocks[0])
        self.assertNotIn('1/0', blocks[0])
        # The second block (apart from the boundary) is the ZeroDivisionError
        # re-raised from the KeyError
        self.assertIn('inner_raise() # Marker', blocks[2])
        self.check_zero_div(blocks[2])
    def test_syntax_error_offset_at_eol(self):
        # See #10186.
        def e():
            raise SyntaxError('', ('', 0, 5, 'hello'))
        msg = self.get_report(e).splitlines()
        self.assertEqual(msg[-2], "        ^")
        def e():
            exec("x = 5 | 4 |")
        msg = self.get_report(e).splitlines()
        self.assertEqual(msg[-2], '              ^')
    def test_message_none(self):
        # A message that looks like "None" should not be treated specially
        err = self.get_report(Exception(None))
        self.assertIn('Exception: None\n', err)
        err = self.get_report(Exception('None'))
        self.assertIn('Exception: None\n', err)
        err = self.get_report(Exception())
        self.assertIn('Exception\n', err)
        err = self.get_report(Exception(''))
        self.assertIn('Exception\n', err)
class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
    #
    # This checks reporting through the 'traceback' module, with both
    # format_exception() and print_exception().
    #
    def get_report(self, e):
        exc = self.get_exception(e)
        formatted = traceback.format_exception(
            type(exc), exc, exc.__traceback__)
        report = ''.join(formatted)
        with captured_output("stderr") as sio:
            traceback.print_exception(type(exc), exc, exc.__traceback__)
        # print_exception() must emit exactly what format_exception() built.
        self.assertEqual(sio.getvalue(), report)
        return report
class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
    #
    # This checks built-in reporting by the interpreter.
    #
    @cpython_only
    def get_report(self, e):
        from _testcapi import exception_print
        exc = self.get_exception(e)
        # Capture what the interpreter-level printer writes to stderr.
        with captured_output("stderr") as stream:
            exception_print(exc)
        return stream.getvalue()
class LimitTests(unittest.TestCase):
    ''' Tests for limit argument.
        It's enough to test extract_tb, extract_stack and format_exception '''
    # NOTE: the 5-deep helper chains below are load-bearing -- the expected
    # lengths (5+1) count exactly these frames.
    def last_raises1(self):
        raise Exception('Last raised')
    def last_raises2(self):
        self.last_raises1()
    def last_raises3(self):
        self.last_raises2()
    def last_raises4(self):
        self.last_raises3()
    def last_raises5(self):
        self.last_raises4()
    def last_returns_frame1(self):
        return sys._getframe()
    def last_returns_frame2(self):
        return self.last_returns_frame1()
    def last_returns_frame3(self):
        return self.last_returns_frame2()
    def last_returns_frame4(self):
        return self.last_returns_frame3()
    def last_returns_frame5(self):
        return self.last_returns_frame4()
    def test_extract_stack(self):
        frame = self.last_returns_frame5()
        def extract(**kwargs):
            return traceback.extract_stack(frame, **kwargs)
        def assertEqualExcept(actual, expected, ignore):
            # Compare everything but one entry (used to skip a frame whose
            # exact contents vary with how the test is invoked).
            self.assertEqual(actual[:ignore], expected[:ignore])
            self.assertEqual(actual[ignore+1:], expected[ignore+1:])
            self.assertEqual(len(actual), len(expected))
        # The assignments/deletions of sys.tracebacklimit below are
        # order-sensitive; each checks a different precedence case against
        # the explicit limit= argument.
        with support.swap_attr(sys, 'tracebacklimit', 1000):
            nolim = extract()
            self.assertGreater(len(nolim), 5)
            self.assertEqual(extract(limit=2), nolim[-2:])
            assertEqualExcept(extract(limit=100), nolim[-100:], -5-1)
            self.assertEqual(extract(limit=-2), nolim[:2])
            assertEqualExcept(extract(limit=-100), nolim[:100], len(nolim)-5-1)
            self.assertEqual(extract(limit=0), [])
            del sys.tracebacklimit
            assertEqualExcept(extract(), nolim, -5-1)
            sys.tracebacklimit = 2
            self.assertEqual(extract(), nolim[-2:])
            self.assertEqual(extract(limit=3), nolim[-3:])
            self.assertEqual(extract(limit=-3), nolim[:3])
            sys.tracebacklimit = 0
            self.assertEqual(extract(), [])
            sys.tracebacklimit = -1
            self.assertEqual(extract(), [])
    def test_extract_tb(self):
        try:
            self.last_raises5()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        def extract(**kwargs):
            return traceback.extract_tb(tb, **kwargs)
        with support.swap_attr(sys, 'tracebacklimit', 1000):
            nolim = extract()
            # One frame per last_raisesN helper plus this test's frame.
            self.assertEqual(len(nolim), 5+1)
            self.assertEqual(extract(limit=2), nolim[:2])
            self.assertEqual(extract(limit=10), nolim)
            self.assertEqual(extract(limit=-2), nolim[-2:])
            self.assertEqual(extract(limit=-10), nolim)
            self.assertEqual(extract(limit=0), [])
            del sys.tracebacklimit
            self.assertEqual(extract(), nolim)
            sys.tracebacklimit = 2
            self.assertEqual(extract(), nolim[:2])
            self.assertEqual(extract(limit=3), nolim[:3])
            self.assertEqual(extract(limit=-3), nolim[-3:])
            sys.tracebacklimit = 0
            self.assertEqual(extract(), [])
            sys.tracebacklimit = -1
            self.assertEqual(extract(), [])
    def test_format_exception(self):
        try:
            self.last_raises5()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        # [1:-1] to exclude "Traceback (...)" header and
        # exception type and value
        def extract(**kwargs):
            return traceback.format_exception(exc_type, exc_value, tb, **kwargs)[1:-1]
        with support.swap_attr(sys, 'tracebacklimit', 1000):
            nolim = extract()
            self.assertEqual(len(nolim), 5+1)
            self.assertEqual(extract(limit=2), nolim[:2])
            self.assertEqual(extract(limit=10), nolim)
            self.assertEqual(extract(limit=-2), nolim[-2:])
            self.assertEqual(extract(limit=-10), nolim)
            self.assertEqual(extract(limit=0), [])
            del sys.tracebacklimit
            self.assertEqual(extract(), nolim)
            sys.tracebacklimit = 2
            self.assertEqual(extract(), nolim[:2])
            self.assertEqual(extract(limit=3), nolim[:3])
            self.assertEqual(extract(limit=-3), nolim[-3:])
            sys.tracebacklimit = 0
            self.assertEqual(extract(), [])
            sys.tracebacklimit = -1
            self.assertEqual(extract(), [])
class MiscTracebackCases(unittest.TestCase):
    #
    # Check non-printing functions in traceback module
    #
    def test_clear(self):
        def outer():
            middle()
        def middle():
            inner()
        def inner():
            i = 1
            1/0
        try:
            outer()
        except:
            type_, value, tb = sys.exc_info()
        # Initial assertion: there's one local in the inner frame.
        inner_frame = tb.tb_next.tb_next.tb_next.tb_frame
        self.assertEqual(len(inner_frame.f_locals), 1)
        # Clear traceback frames
        traceback.clear_frames(tb)
        # Local variable dict should now be empty.
        self.assertEqual(len(inner_frame.f_locals), 0)
    def test_extract_stack(self):
        # NOTE: expected entries use lineno+1/lineno+2 offsets relative to
        # the nested def -- the statement layout here is load-bearing.
        def extract():
            return traceback.extract_stack()
        result = extract()
        lineno = extract.__code__.co_firstlineno
        self.assertEqual(result[-2:], [
            (__file__, lineno+2, 'test_extract_stack', 'result = extract()'),
            (__file__, lineno+1, 'extract', 'return traceback.extract_stack()'),
            ])
class TestFrame(unittest.TestCase):
def test_basics(self):
linecache.clearcache()
linecache.lazycache("f", globals())
f = traceback.FrameSummary("f", 1, "dummy")
self.assertEqual(f,
("f", 1, "dummy", '"""Test cases for traceback module"""'))
self.assertEqual(tuple(f),
("f", 1, "dummy", '"""Test cases for traceback module"""'))
self.assertEqual(f, traceback.FrameSummary("f", 1, "dummy"))
self.assertEqual(f, tuple(f))
# Since tuple.__eq__ doesn't support FrameSummary, the equality
# operator fallbacks to FrameSummary.__eq__.
self.assertEqual(tuple(f), f)
self.assertIsNone(f.locals)
def test_lazy_lines(self):
linecache.clearcache()
f = traceback.FrameSummary("f", 1, "dummy", lookup_line=False)
self.assertEqual(None, f._line)
linecache.lazycache("f", globals())
self.assertEqual(
'"""Test cases for traceback module"""',
f.line)
def test_explicit_line(self):
f = traceback.FrameSummary("f", 1, "dummy", line="line")
self.assertEqual("line", f.line)
class TestStack(unittest.TestCase):
    # NOTE: test_format_locals asserts a co_firstlineno + 4 offset; the
    # statement layout inside that method is load-bearing.
    def test_walk_stack(self):
        def deeper():
            return list(traceback.walk_stack(None))
        s1 = list(traceback.walk_stack(None))
        s2 = deeper()
        # The deeper() call adds exactly one frame on top of s1's stack.
        self.assertEqual(len(s2) - len(s1), 1)
        self.assertEqual(s2[1:], s1)
    def test_walk_tb(self):
        try:
            1/0
        except Exception:
            _, _, tb = sys.exc_info()
        s = list(traceback.walk_tb(tb))
        self.assertEqual(len(s), 1)
    def test_extract_stack(self):
        s = traceback.StackSummary.extract(traceback.walk_stack(None))
        self.assertIsInstance(s, traceback.StackSummary)
    def test_extract_stack_limit(self):
        s = traceback.StackSummary.extract(traceback.walk_stack(None), limit=5)
        self.assertEqual(len(s), 5)
    def test_extract_stack_lookup_lines(self):
        linecache.clearcache()
        linecache.updatecache('/foo.py', globals())
        c = test_code('/foo.py', 'method')
        f = test_frame(c, None, None)
        # lookup_lines=True resolves source lines eagerly via linecache.
        s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=True)
        linecache.clearcache()
        self.assertEqual(s[0].line, "import sys")
    def test_extract_stackup_deferred_lookup_lines(self):
        linecache.clearcache()
        c = test_code('/foo.py', 'method')
        f = test_frame(c, None, None)
        # lookup_lines=False must not touch linecache at extract time...
        s = traceback.StackSummary.extract(iter([(f, 6)]), lookup_lines=False)
        self.assertEqual({}, linecache.cache)
        # ...but the line is still resolvable later.
        linecache.updatecache('/foo.py', globals())
        self.assertEqual(s[0].line, "import sys")
    def test_from_list(self):
        s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
        self.assertEqual(
            ['  File "foo.py", line 1, in fred\n    line\n'],
            s.format())
    def test_from_list_edited_stack(self):
        # Rows edited in-place (as plain tuples) survive round-tripping
        # through from_list().
        s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
        s[0] = ('foo.py', 2, 'fred', 'line')
        s2 = traceback.StackSummary.from_list(s)
        self.assertEqual(
            ['  File "foo.py", line 2, in fred\n    line\n'],
            s2.format())
    def test_format_smoke(self):
        # For detailed tests see the format_list tests, which consume the same
        # code.
        s = traceback.StackSummary.from_list([('foo.py', 1, 'fred', 'line')])
        self.assertEqual(
            ['  File "foo.py", line 1, in fred\n    line\n'],
            s.format())
    def test_locals(self):
        linecache.updatecache('/foo.py', globals())
        c = test_code('/foo.py', 'method')
        f = test_frame(c, globals(), {'something': 1})
        # capture_locals=True stores repr()s of the frame's locals.
        s = traceback.StackSummary.extract(iter([(f, 6)]), capture_locals=True)
        self.assertEqual(s[0].locals, {'something': '1'})
    def test_no_locals(self):
        linecache.updatecache('/foo.py', globals())
        c = test_code('/foo.py', 'method')
        f = test_frame(c, globals(), {'something': 1})
        s = traceback.StackSummary.extract(iter([(f, 6)]))
        self.assertEqual(s[0].locals, None)
    def test_format_locals(self):
        def some_inner(k, v):
            a = 1
            b = 2
            return traceback.StackSummary.extract(
                traceback.walk_stack(None), capture_locals=True, limit=1)
        s = some_inner(3, 4)
        self.assertEqual(
            ['  File "%s", line %d, in some_inner\n'
             '    traceback.walk_stack(None), capture_locals=True, limit=1)\n'
             '    a = 1\n'
             '    b = 2\n'
             '    k = 3\n'
             '    v = 4\n' % (__file__, some_inner.__code__.co_firstlineno + 4)
            ], s.format())
class TestTracebackException(unittest.TestCase):
    """Tests for traceback.TracebackException capture semantics (stack,
    cause/context chaining, limits, locals, formatting)."""
    def test_smoke(self):
        try:
            1/0
        except Exception:
            exc_info = sys.exc_info()
            exc = traceback.TracebackException(*exc_info)
            expected_stack = traceback.StackSummary.extract(
                traceback.walk_tb(exc_info[2]))
        # A bare exception: no cause, no context, stack captured eagerly.
        self.assertEqual(None, exc.__cause__)
        self.assertEqual(None, exc.__context__)
        self.assertEqual(False, exc.__suppress_context__)
        self.assertEqual(expected_stack, exc.stack)
        self.assertEqual(exc_info[0], exc.exc_type)
        self.assertEqual(str(exc_info[1]), str(exc))
    def test_from_exception(self):
        # Check all the parameters are accepted.
        def foo():
            1/0
        try:
            foo()
        except Exception as e:
            exc_info = sys.exc_info()
            self.expected_stack = traceback.StackSummary.extract(
                traceback.walk_tb(exc_info[2]), limit=1, lookup_lines=False,
                capture_locals=True)
            self.exc = traceback.TracebackException.from_exception(
                e, limit=1, lookup_lines=False, capture_locals=True)
        expected_stack = self.expected_stack
        exc = self.exc
        self.assertEqual(None, exc.__cause__)
        self.assertEqual(None, exc.__context__)
        self.assertEqual(False, exc.__suppress_context__)
        self.assertEqual(expected_stack, exc.stack)
        self.assertEqual(exc_info[0], exc.exc_type)
        self.assertEqual(str(exc_info[1]), str(exc))
    def test_cause(self):
        try:
            try:
                1/0
            finally:
                exc_info_context = sys.exc_info()
                exc_context = traceback.TracebackException(*exc_info_context)
                cause = Exception("cause")
                raise Exception("uh oh") from cause
        except Exception:
            exc_info = sys.exc_info()
            exc = traceback.TracebackException(*exc_info)
            expected_stack = traceback.StackSummary.extract(
                traceback.walk_tb(exc_info[2]))
        exc_cause = traceback.TracebackException(Exception, cause, None)
        # 'raise ... from cause' sets __cause__ and suppresses the context.
        self.assertEqual(exc_cause, exc.__cause__)
        self.assertEqual(exc_context, exc.__context__)
        self.assertEqual(True, exc.__suppress_context__)
        self.assertEqual(expected_stack, exc.stack)
        self.assertEqual(exc_info[0], exc.exc_type)
        self.assertEqual(str(exc_info[1]), str(exc))
    def test_context(self):
        try:
            try:
                1/0
            finally:
                exc_info_context = sys.exc_info()
                exc_context = traceback.TracebackException(*exc_info_context)
                raise Exception("uh oh")
        except Exception:
            exc_info = sys.exc_info()
            exc = traceback.TracebackException(*exc_info)
            expected_stack = traceback.StackSummary.extract(
                traceback.walk_tb(exc_info[2]))
        # Implicit chaining keeps __context__ but leaves __cause__ unset.
        self.assertEqual(None, exc.__cause__)
        self.assertEqual(exc_context, exc.__context__)
        self.assertEqual(False, exc.__suppress_context__)
        self.assertEqual(expected_stack, exc.stack)
        self.assertEqual(exc_info[0], exc.exc_type)
        self.assertEqual(str(exc_info[1]), str(exc))
    def test_limit(self):
        def recurse(n):
            if n:
                recurse(n-1)
            else:
                1/0
        try:
            recurse(10)
        except Exception:
            exc_info = sys.exc_info()
            exc = traceback.TracebackException(*exc_info, limit=5)
            expected_stack = traceback.StackSummary.extract(
                traceback.walk_tb(exc_info[2]), limit=5)
        self.assertEqual(expected_stack, exc.stack)
    def test_lookup_lines(self):
        # lookup_lines=False must not populate the linecache until the
        # .line attribute is actually read.
        linecache.clearcache()
        e = Exception("uh oh")
        c = test_code('/foo.py', 'method')
        f = test_frame(c, None, None)
        tb = test_tb(f, 6, None)
        exc = traceback.TracebackException(Exception, e, tb, lookup_lines=False)
        self.assertEqual({}, linecache.cache)
        linecache.updatecache('/foo.py', globals())
        self.assertEqual(exc.stack[0].line, "import sys")
    def test_locals(self):
        # capture_locals=True stores repr() of each frame's locals.
        linecache.updatecache('/foo.py', globals())
        e = Exception("uh oh")
        c = test_code('/foo.py', 'method')
        f = test_frame(c, globals(), {'something': 1, 'other': 'string'})
        tb = test_tb(f, 6, None)
        exc = traceback.TracebackException(
            Exception, e, tb, capture_locals=True)
        self.assertEqual(
            exc.stack[0].locals, {'something': '1', 'other': "'string'"})
    def test_no_locals(self):
        linecache.updatecache('/foo.py', globals())
        e = Exception("uh oh")
        c = test_code('/foo.py', 'method')
        f = test_frame(c, globals(), {'something': 1})
        tb = test_tb(f, 6, None)
        exc = traceback.TracebackException(Exception, e, tb)
        self.assertEqual(exc.stack[0].locals, None)
    def test_traceback_header(self):
        # do not print a traceback header if exc_traceback is None
        # see issue #24695
        exc = traceback.TracebackException(Exception, Exception("haven"), None)
        self.assertEqual(list(exc.format()), ["Exception: haven\n"])
class MiscTest(unittest.TestCase):
    """Sanity checks on the traceback module's public surface."""

    def test_all(self):
        """``traceback.__all__`` must list exactly the public names defined
        in the module: every non-underscored attribute whose ``__module__``
        is ``'traceback'``, except the legacy ``print_list`` helper."""
        blacklist = {'print_list'}
        expected = {
            name for name in dir(traceback)
            if not name.startswith('_')
            and name not in blacklist
            and getattr(getattr(traceback, name), '__module__', None) == 'traceback'
        }
        self.assertCountEqual(traceback.__all__, expected)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_sflow
short_description: Configure FortiSwitch sFlow in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and sflow category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
switch_controller_sflow:
description:
- Configure FortiSwitch sFlow.
default: null
type: dict
suboptions:
collector_ip:
description:
- Collector IP.
type: str
collector_port:
description:
- SFlow collector port (0 - 65535).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiSwitch sFlow.
fortios_switch_controller_sflow:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
switch_controller_sflow:
collector_ip: "<your_own_value>"
collector_port: "4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate with the legacy fortiosapi client.

    Reads the connection parameters from the module params, enables debug
    output, selects HTTP/HTTPS according to the ``https`` flag (defaulting
    to HTTPS) and performs the login call.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    verify_cert = data['ssl_verify']
    fos.debug('on')
    # HTTPS unless the caller explicitly passed a falsy 'https' value.
    fos.https('off' if 'https' in data and not data['https'] else 'on')
    fos.login(host, username, password, verify=verify_cert)
def filter_switch_controller_sflow_data(json):
    """Return a new dict restricted to the sflow options FortiOS accepts.

    Only ``collector_ip`` and ``collector_port`` are forwarded; keys that
    are absent or set to ``None`` are dropped.
    """
    option_list = ['collector_ip', 'collector_port']
    return dict(
        (option, json[option])
        for option in option_list
        if json.get(option) is not None
    )
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to FortiOS hyphen-case.

    Ansible argument names use underscores while the FortiOS REST API
    expects hyphens.  Handles arbitrarily nested dicts and lists; scalar
    values are returned unchanged.

    Bug fix: the previous version rebound the loop variable
    (``elem = underscore_to_hyphen(elem)``) instead of storing the result,
    so dicts nested inside lists were never actually converted.  Building
    a new list fixes that while keeping the same call signature.
    """
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return dict((k.replace('_', '-'), underscore_to_hyphen(v))
                    for k, v in data.items())
    return data
def switch_controller_sflow(data, fos):
    """Push the sFlow configuration to the FortiGate.

    Filters the module parameters down to the supported options, converts
    the key names to the API's hyphenated form and issues the ``set`` call
    against the switch-controller/sflow endpoint in the requested vdom.
    """
    vdom = data['vdom']
    payload = filter_switch_controller_sflow_data(data['switch_controller_sflow'])
    return fos.set('switch-controller',
                   'sflow',
                   data=underscore_to_hyphen(payload),
                   vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS API response counts as success.

    A DELETE answered with HTTP 404 is also treated as success, since the
    object being removed is already absent.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
    """Dispatch to the switch-controller sub-resource handler.

    Returns an ``(is_error, has_changed, result)`` tuple for the caller to
    feed into ``exit_json``/``fail_json``.

    NOTE(review): if ``data['switch_controller_sflow']`` is empty/None,
    ``resp`` is never assigned and the return statement raises
    ``UnboundLocalError``.  The argument spec marks the key as optional,
    so this path looks reachable -- confirm upstream intent before fixing.
    """
    if data['switch_controller_sflow']:
        resp = switch_controller_sflow(data, fos)
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: parse the Ansible arguments, pick a transport
    (HTTPAPI connection or legacy fortiosapi login) and apply the
    configuration, then report the result back to Ansible."""
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "switch_controller_sflow": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "collector_ip": {"required": False, "type": "str"},
                "collector_port": {"required": False, "type": "int"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI transport: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_switch_controller(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: import fortiosapi lazily so the module can still
        # run (over HTTPAPI) when the library is not installed.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_switch_controller(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import bisect
import io
import logging
import six
from os.path import splitext
from flask import current_app
from mongoengine.fields import BaseField
from werkzeug.datastructures import FileStorage
from .files import extension
from .images import make_thumbnail, resize, optimize
log = logging.getLogger(__name__)
class FileReference(object):
    '''Implements the FileField interface'''

    def __init__(self, fs=None, filename=None, upload_to=None, basename=None,
                 instance=None, name=None):
        self.fs = fs
        self.upload_to = upload_to
        self.basename = basename
        self._filename = filename
        self._instance = instance
        self._name = name

    def to_mongo(self):
        '''Serialize as the minimal dict stored in MongoDB.'''
        return {'filename': self.filename}

    def save(self, wfs, filename=None):
        '''Save a Werkzeug FileStorage object'''
        if self.basename and not filename:
            # Derive the name from the configured basename callable while
            # keeping the upload's original extension.
            ext = extension(filename or wfs.filename)
            filename = '.'.join([self.basename(self._instance), ext])
        if callable(self.upload_to):
            prefix = self.upload_to(self._instance)
        else:
            prefix = self.upload_to
        self.filename = self.fs.save(wfs, filename, prefix=prefix)
        return self.filename

    @property
    def filename(self):
        return self._filename

    @filename.setter
    def filename(self, value):
        # Flag the owning document as dirty before mutating.
        self._mark_as_changed()
        self._filename = value

    @property
    def url(self):
        if not self.filename:
            return None
        return self.fs.url(self.filename)

    def __unicode__(self):
        return self.url or ''

    __str__ = __unicode__

    def __nonzero__(self):
        return bool(self.filename)

    __bool__ = __nonzero__  # Python 3 truthiness

    def _mark_as_changed(self):
        owner = self._instance
        if hasattr(owner, '_mark_as_changed'):
            owner._mark_as_changed(self._name)
class ImageReference(FileReference):
    '''Implements the ImageField interface'''
    def __init__(self, original=None, max_size=None, thumbnail_sizes=None, thumbnails=None,
                 bbox=None, optimize=None, **kwargs):
        super(ImageReference, self).__init__(**kwargs)
        # Filename of the untouched upload; only set when a resized or
        # optimized version became the main file (see save()).
        self._original = original
        self.max_size = max_size
        # Mapping of str(size) -> thumbnail filename.
        self.thumbnails = thumbnails or {}
        self.bbox = bbox
        self.optimize = optimize
        self.thumbnail_sizes = thumbnail_sizes
    def to_mongo(self):
        # Persist only the attributes that actually carry data.
        data = super(ImageReference, self).to_mongo()
        if self._original:
            data['original'] = self._original
        if self.thumbnails:
            data['thumbnails'] = self.thumbnails
        if self.bbox:
            data['bbox'] = self.bbox
        return data
    def save(self, file_or_wfs, filename=None, bbox=None, overwrite=None):
        '''Save a Werkzeug FileStorage object'''
        self._mark_as_changed()
        # An explicitly passed filename wins over the basename callable.
        override = filename is not None
        filename = filename or getattr(file_or_wfs, 'filename')
        if self.basename and not override:
            basename = self.basename(self._instance)
        elif filename:
            basename = splitext(filename)[0]
        else:
            raise ValueError('Filename is required')
        ext = extension(filename)
        prefix = self.upload_to(self._instance) if callable(self.upload_to) else self.upload_to
        kwargs = {'prefix': prefix, 'overwrite': overwrite}
        # Explicit per-field setting beats the application-wide default.
        if self.optimize is not None:
            should_optimize = self.optimize
        else:
            should_optimize = current_app.config['FS_IMAGES_OPTIMIZE']
        # Build '<basename>[-<size>].<ext>' names for derived files.
        def name(size=None):
            if size:
                return '.'.join(['-'.join([basename, str(size)]), ext])
            else:
                return '.'.join([basename, ext])
        if self.max_size:
            # Downscale to max_size; keep the untouched upload as 'original'
            # only when a resize actually happened.
            resized = resize(file_or_wfs, self.max_size)
            file_or_wfs.seek(0)
            if resized:
                self.original = self.fs.save(file_or_wfs, name('original'), **kwargs)
                self.filename = self.fs.save(resized, name(), **kwargs)
            else:
                self.filename = self.fs.save(file_or_wfs, name(), **kwargs)
        elif should_optimize:
            self.original = self.fs.save(file_or_wfs, name('original'), **kwargs)
            optimized = optimize(file_or_wfs)
            file_or_wfs.seek(0)
            self.original = self.fs.save(file_or_wfs, name('original'), **kwargs)
            self.filename = self.fs.save(optimized, name(), **kwargs)
        else:
            self.filename = self.fs.save(file_or_wfs, name(), **kwargs)
        if self.thumbnail_sizes:
            self.bbox = bbox
            for size in self.thumbnail_sizes:
                # Rewind before every derived render.
                file_or_wfs.seek(0)
                thumbnail = make_thumbnail(file_or_wfs, size, self.bbox)
                self.thumbnails[str(size)] = self.fs.save(FileStorage(thumbnail),
                                                          name(size),
                                                          **kwargs)
        return self.filename
    @property
    def original(self):
        # Fall back to the main file when no separate original was kept.
        return self._original or self.filename
    @original.setter
    def original(self, value):
        self._mark_as_changed()
        self._original = value
    def thumbnail(self, size):
        '''Get the thumbnail filename for a given size'''
        if size in self.thumbnail_sizes:
            return self.thumbnails.get(str(size))
        else:
            raise ValueError('Unregistered thumbnail size {0}'.format(size))
    def full(self, external=False):
        '''Get the full image URL in respect with ``max_size``'''
        return self.fs.url(self.filename, external=external) if self.filename else None
    def best_url(self, size=None, external=False):
        '''
        Provide the best thumbnail for downscaling.
        If there is no match, provide the bigger if exists or the original
        '''
        if not self.thumbnail_sizes:
            return self.url
        elif not size:
            # No target size requested: serve the largest thumbnail.
            self.thumbnail_sizes.sort()
            best_size = self.thumbnail_sizes[-1]
        else:
            # Smallest registered size >= requested size, else the largest.
            self.thumbnail_sizes.sort()
            index = bisect.bisect_left(self.thumbnail_sizes, size)
            if index >= len(self.thumbnail_sizes):
                best_size = self.thumbnail_sizes[-1]
            else:
                best_size = self.thumbnail_sizes[index]
        filename = self.thumbnail(best_size)
        return self.fs.url(filename, external=external) if filename else None
    def rerender(self):
        '''
        Rerender all derived images from the original.
        If optmization settings or expected sizes changed,
        they will be used for the new rendering.
        '''
        with self.fs.open(self.original, 'rb') as f_img:
            img = io.BytesIO(f_img.read())  # Store the image in memory to avoid overwritting
            self.save(img, filename=self.filename, bbox=self.bbox, overwrite=True)
    # Calling the reference directly is a shortcut for best_url().
    __call__ = best_url
class FileField(BaseField):
    '''
    Store references to files hosted in a given storage.
    '''
    proxy_class = FileReference

    def __init__(self, fs=None, upload_to=None, basename=None, *args, **kwargs):
        self.fs = fs
        self.upload_to = upload_to
        self.basename = basename
        super(FileField, self).__init__(*args, **kwargs)

    def proxy(self, filename=None, instance=None, **kwargs):
        '''Build a proxy object bound to this field's storage settings.'''
        return self.proxy_class(fs=self.fs,
                                filename=filename,
                                upload_to=self.upload_to,
                                basename=self.basename,
                                instance=instance,
                                name=self.name,
                                **kwargs)

    def to_python(self, value):
        # Rehydrate raw mongo data (dict) or plain filenames (text) into a
        # proxy object; anything else passes through untouched.
        if isinstance(value, self.proxy_class):
            return value
        if isinstance(value, dict):
            return self.proxy(**value)
        if isinstance(value, six.text_type):
            return self.proxy(filename=value)
        return value

    def __set__(self, instance, value):
        if not isinstance(value, self.proxy_class):
            value = self.proxy(filename=value, instance=instance)
        return super(FileField, self).__set__(instance, value)

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        current = instance._data.get(self.name)
        if not isinstance(current, self.proxy_class):
            current = self.proxy(filename=current, instance=instance)
            instance._data[self.name] = current
        elif current._instance is None:
            # Re-bind proxies that were deserialized without an instance.
            current._instance = instance
        return current

    def to_mongo(self, value):
        return value.to_mongo() if value else None
class ImageField(FileField):
    '''
    Store reference to images in a given Storage.
    Allow to automatically generate thumbnails or resized image.
    Original image always stay untouched.
    '''
    # Image proxies carry the extra resize/thumbnail/optimize state.
    proxy_class = ImageReference
    def __init__(self, max_size=None, thumbnails=None, optimize=None, *args, **kwargs):
        self.max_size = max_size
        self.thumbnail_sizes = thumbnails
        self.optimize = optimize
        super(ImageField, self).__init__(*args, **kwargs)
    def proxy(self, **kwargs):
        # Forward the image-specific settings to every created proxy.
        return super(ImageField, self).proxy(max_size=self.max_size,
                                             thumbnail_sizes=self.thumbnail_sizes,
                                             optimize=self.optimize,
                                             **kwargs)
| |
#!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-a] [-d] [-g] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-a, --all_applications
show models from all applications.
-d, --disable_fields
don't show the class member fields.
-g, --group_models
draw an enclosing box around models from the same app.
-i, --include_models=User,Person,Car
only include selected models in graph.
"""
__version__ = "0.9"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Bas van Oostveen <v.oostveen@gmail.com>",
]
import getopt, sys
from django.core.management import setup_environ
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.utils.safestring import mark_safe
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% if use_subgraph %}
subgraph {{ cluster_app_name }} {
label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER"
><FONT FACE="Helvetica Bold" COLOR="Black" POINT-SIZE="12"
>{{ app_name }}</FONT></TD></TR>
</TABLE>
>
color=olivedrab4
style="rounded"
{% endif %}
{% for model in models %}
{{ model.app_name }}_{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}{% if model.abstracts %}<BR/><<FONT FACE="Helvetica Italic">{{ model.abstracts|join:"," }}</FONT>>{% endif %}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
{% if use_subgraph %}
}
{% endif %}
"""
rel_template = """
{% for model in models %}
{% for relation in model.relations %}
{% if relation.needs_node %}
{{ relation.target_app }}_{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{% endif %}
{{ model.app_name }}_{{ model.name }} -> {{ relation.target_app }}_{{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
    """Render Graphviz DOT source for the models of the given Django apps.

    Keyword options mirror the CLI flags: ``disable_fields`` hides field
    rows, ``include_models`` restricts the graph to the listed model names,
    ``all_applications`` graphs every installed app, ``group_models`` wraps
    each app in a subgraph cluster.  Returns the DOT document as a string.
    """
    disable_fields = kwargs.get('disable_fields', False)
    include_models = kwargs.get('include_models', [])
    all_applications = kwargs.get('all_applications', False)
    use_subgraph = kwargs.get('group_models', False)
    dot = head_template
    # Collect the app modules to graph (explicit labels are appended after
    # the full list so they are not duplicated).
    apps = []
    if all_applications:
        apps = models.get_apps()
    for app_label in app_labels:
        app = models.get_app(app_label)
        if not app in apps:
            apps.append(app)
    # One template Context per app, each holding its model descriptions.
    graphs = []
    for app in apps:
        graph = Context({
            'name': '"%s"' % app.__name__,
            'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
            'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
            'disable_fields': disable_fields,
            'use_subgraph': use_subgraph,
            'models': []
        })
        for appmodel in get_models(app):
            # Abstract bases are rendered in italics on the model node.
            abstracts = [e.__name__ for e in appmodel.__bases__ if hasattr(e, '_meta') and e._meta.abstract]
            abstract_fields = []
            for e in appmodel.__bases__:
                if hasattr(e, '_meta') and e._meta.abstract:
                    abstract_fields.extend(e._meta.fields)
            model = {
                'app_name': app.__name__.replace(".", "_"),
                'name': appmodel.__name__,
                'abstracts': abstracts,
                'fields': [],
                'relations': []
            }
            # consider given model name ?
            def consider(model_name):
                return not include_models or model_name in include_models
            if not consider(appmodel._meta.object_name):
                continue
            # model attributes
            def add_attributes(field):
                model['fields'].append({
                    'name': field.name,
                    'type': type(field).__name__,
                    'blank': field.blank,
                    'abstract': field in abstract_fields,
                })
            for field in appmodel._meta.fields:
                add_attributes(field)
            if appmodel._meta.many_to_many:
                for field in appmodel._meta.many_to_many:
                    add_attributes(field)
            # relations
            def add_relation(field, extras=""):
                _rel = {
                    'target_app': field.rel.to.__module__.replace('.','_'),
                    'target': field.rel.to.__name__,
                    'type': type(field).__name__,
                    'name': field.name,
                    'arrows': extras,
                    'needs_node': True
                }
                # Skip duplicates and targets excluded by include_models.
                if _rel not in model['relations'] and consider(_rel['target']):
                    model['relations'].append(_rel)
            for field in appmodel._meta.fields:
                if isinstance(field, ForeignKey):
                    add_relation(field)
                elif isinstance(field, OneToOneField):
                    add_relation(field, '[arrowhead=none arrowtail=none]')
            if appmodel._meta.many_to_many:
                for field in appmodel._meta.many_to_many:
                    if isinstance(field, ManyToManyField) and getattr(field, 'creates_table', False):
                        add_relation(field, '[arrowhead=normal arrowtail=normal]')
                    elif isinstance(field, GenericRelation):
                        add_relation(field, mark_safe('[style="dotted"] [arrowhead=normal arrowtail=normal]'))
            graph['models'].append(model)
        graphs.append(graph)
    # Names of every drawn model node, used to suppress placeholder nodes.
    nodes = []
    for graph in graphs:
        nodes.extend([e['name'] for e in graph['models']])
    for graph in graphs:
        # don't draw duplication nodes because of relations
        for model in graph['models']:
            for relation in model['relations']:
                if relation['target'] in nodes:
                    relation['needs_node'] = False
        # render templates
        t = Template(body_template)
        dot += '\n' + t.render(graph)
    for graph in graphs:
        t = Template(rel_template)
        dot += '\n' + t.render(graph)
    dot += '\n' + tail_template
    return dot
def main():
    """Parse the CLI options (see module docstring) and print the DOT
    graph for the requested apps to stdout.  Python 2 syntax is intentional
    (print statements, comma-style except clause)."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hadgi:",
                    ["help", "all_applications", "disable_fields", "group_models", "include_models="])
    except getopt.GetoptError, error:
        # Unknown flag: show usage and exit with the getopt error message.
        print __doc__
        sys.exit(error)
    # Map recognized flags onto generate_dot keyword options.
    kwargs = {}
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print __doc__
            sys.exit()
        if opt in ("-a", "--all_applications"):
            kwargs['all_applications'] = True
        if opt in ("-d", "--disable_fields"):
            kwargs['disable_fields'] = True
        if opt in ("-g", "--group_models"):
            kwargs['group_models'] = True
        if opt in ("-i", "--include_models"):
            kwargs['include_models'] = arg.split(',')
    # Without -a at least one app label is required.
    if not args and not kwargs.get('all_applications', False):
        print __doc__
        sys.exit()
    print generate_dot(args, **kwargs)
# Script entry point.
if __name__ == "__main__":
    main()
| |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def construct_model(images,
                    actions=None,
                    states=None,
                    iter_num=-1.0,
                    k=-1,
                    use_state=True,
                    num_masks=10,
                    stp=False,
                    cdna=True,
                    dna=False,
                    context_frames=2):
  """Build convolutional lstm video predictor using STP, CDNA, or DNA.

  Args:
    images: tensor of ground truth image sequences
    actions: tensor of action sequences
    states: tensor of ground truth state sequences
    iter_num: tensor of the current training iteration (for sched. sampling)
    k: constant used for scheduled sampling. -1 to feed in own prediction.
    use_state: True to include state and action in prediction
    num_masks: the number of different pixel motion predictions (and
      the number of masks for each of those predictions)
    stp: True to use Spatial Transformer Predictor (STP)
    cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
    dna: True to use Dynamic Neural Advection (DNA)
    context_frames: number of ground truth frames to pass in before
      feeding in own predictions
  Returns:
    gen_images: predicted future image frames
    gen_states: predicted future states
  Raises:
    ValueError: if more than one network option specified or more than 1 mask
      specified for DNA model.
  """
  # Exactly one of the three transformation models must be selected.
  if stp + cdna + dna != 1:
    raise ValueError('More than one, or no network option specified.')
  batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
  lstm_func = basic_conv_lstm_cell

  # Generated robot states and images.
  gen_states, gen_images = [], []
  current_state = states[0]

  if k == -1:
    # k == -1 means always feed in the model's own prediction (after warmup).
    feedself = True
  else:
    # Scheduled sampling:
    # Calculate number of ground-truth frames to pass in.
    # The mix decays toward pure self-feeding as iter_num grows.
    num_ground_truth = tf.to_int32(
        tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
    feedself = False

  # LSTM state sizes and states.
  lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
  lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
  lstm_state5, lstm_state6, lstm_state7 = None, None, None

  # One prediction step per input frame (the last frame has no successor).
  for image, action in zip(images[:-1], actions[:-1]):
    # Reuse variables after the first timestep.
    reuse = bool(gen_images)

    done_warm_start = len(gen_images) > context_frames - 1
    with slim.arg_scope(
        [lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
         tf_layers.layer_norm, slim.layers.conv2d_transpose],
        reuse=reuse):

      if feedself and done_warm_start:
        # Feed in generated image.
        prev_image = gen_images[-1]
      elif done_warm_start:
        # Scheduled sampling
        prev_image = scheduled_sample(image, gen_images[-1], batch_size,
                                      num_ground_truth)
      else:
        # Always feed in ground_truth
        prev_image = image

      # Predicted state is always fed back in
      state_action = tf.concat(axis=1, values=[action, current_state])

      # Downsampling encoder: conv + stacked conv-LSTMs with layer norm.
      enc0 = slim.layers.conv2d(
          prev_image,
          32, [5, 5],
          stride=2,
          scope='scale1_conv1',
          normalizer_fn=tf_layers.layer_norm,
          normalizer_params={'scope': 'layer_norm1'})

      hidden1, lstm_state1 = lstm_func(
          enc0, lstm_state1, lstm_size[0], scope='state1')
      hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
      hidden2, lstm_state2 = lstm_func(
          hidden1, lstm_state2, lstm_size[1], scope='state2')
      hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
      enc1 = slim.layers.conv2d(
          hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')

      hidden3, lstm_state3 = lstm_func(
          enc1, lstm_state3, lstm_size[2], scope='state3')
      hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
      hidden4, lstm_state4 = lstm_func(
          hidden3, lstm_state4, lstm_size[3], scope='state4')
      hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
      enc2 = slim.layers.conv2d(
          hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')

      # Pass in state and action.
      # Broadcast the state/action vector to a spatial map ("smear") so it
      # can be concatenated with the bottleneck feature map.
      smear = tf.reshape(
          state_action,
          [int(batch_size), 1, 1, int(state_action.get_shape()[1])])
      smear = tf.tile(
          smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
      if use_state:
        enc2 = tf.concat(axis=3, values=[enc2, smear])
      enc3 = slim.layers.conv2d(
          enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')

      # Upsampling decoder with skip connections back to encoder features.
      hidden5, lstm_state5 = lstm_func(
          enc3, lstm_state5, lstm_size[4], scope='state5')  # last 8x8
      hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
      enc4 = slim.layers.conv2d_transpose(
          hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')

      hidden6, lstm_state6 = lstm_func(
          enc4, lstm_state6, lstm_size[5], scope='state6')  # 16x16
      hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
      # Skip connection.
      hidden6 = tf.concat(axis=3, values=[hidden6, enc1])  # both 16x16

      enc5 = slim.layers.conv2d_transpose(
          hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
      hidden7, lstm_state7 = lstm_func(
          enc5, lstm_state7, lstm_size[6], scope='state7')  # 32x32
      hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')

      # Skip connection.
      hidden7 = tf.concat(axis=3, values=[hidden7, enc0])  # both 32x32

      enc6 = slim.layers.conv2d_transpose(
          hidden7,
          hidden7.get_shape()[3], 3, stride=2, scope='convt3',
          normalizer_fn=tf_layers.layer_norm,
          normalizer_params={'scope': 'layer_norm9'})

      if dna:
        # Using largest hidden state for predicting untied conv kernels.
        enc7 = slim.layers.conv2d_transpose(
            enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4')
      else:
        # Using largest hidden state for predicting a new image layer.
        enc7 = slim.layers.conv2d_transpose(
            enc6, color_channels, 1, stride=1, scope='convt4')
        # This allows the network to also generate one image from scratch,
        # which is useful when regions of the image become unoccluded.
        transformed = [tf.nn.sigmoid(enc7)]

      if stp:
        stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
        stp_input1 = slim.layers.fully_connected(
            stp_input0, 100, scope='fc_stp')
        transformed += stp_transformation(prev_image, stp_input1, num_masks)
      elif cdna:
        cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
        transformed += cdna_transformation(prev_image, cdna_input, num_masks,
                                           int(color_channels))
      elif dna:
        # Only one mask is supported (more should be unnecessary).
        if num_masks != 1:
          raise ValueError('Only one mask is supported for DNA model.')
        transformed = [dna_transformation(prev_image, enc7)]

      # Predict num_masks + 1 compositing masks (extra one for the
      # untransformed previous image) and softmax-normalize per pixel.
      masks = slim.layers.conv2d_transpose(
          enc6, num_masks + 1, 1, stride=1, scope='convt7')
      masks = tf.reshape(
          tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
          [int(batch_size), int(img_height), int(img_width), num_masks + 1])
      mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
      # Composite: mask-weighted sum of the previous image and all
      # transformed candidate images.
      output = mask_list[0] * prev_image
      for layer, mask in zip(transformed, mask_list[1:]):
        output += layer * mask
      gen_images.append(output)

      # Linear readout predicting the next robot state.
      current_state = slim.layers.fully_connected(
          state_action,
          int(current_state.get_shape()[1]),
          scope='state_pred',
          activation_fn=None)
      gen_states.append(current_state)

  return gen_images, gen_states
## Utility functions
def stp_transformation(prev_image, stp_input, num_masks):
  """Apply spatial transformer predictor (STP) to previous image.

  Args:
    prev_image: previous image to be transformed.
    stp_input: hidden layer to be used for computing STN parameters.
    num_masks: number of masks and hence the number of STP transformations.
  Returns:
    List of images transformed by the predicted STP parameters.
  """
  # Deferred import: spatial_transformer is only required when STP is used.
  from spatial_transformer import transformer

  # 2x3 affine identity; predicted parameters are offsets from a no-op warp.
  identity_params = tf.convert_to_tensor(
      np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))

  def _warp(index):
    # Predict six affine parameters for this mask and warp the image.
    params = slim.layers.fully_connected(
        stp_input, 6, scope='stp_params' + str(index),
        activation_fn=None) + identity_params
    return transformer(prev_image, params)

  return [_warp(i) for i in range(num_masks - 1)]
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
  """Apply convolutional dynamic neural advection to previous image.

  Args:
    prev_image: previous image to be transformed.
    cdna_input: hidden layer to be used for computing CDNA kernels.
    num_masks: the number of masks and hence the number of CDNA transformations.
    color_channels: the number of color channels in the images.
  Returns:
    List of images transformed by the predicted CDNA kernels.
  """
  batch_size = int(cdna_input.get_shape()[0])

  # Predict kernels using linear function of last hidden layer.
  cdna_kerns = slim.layers.fully_connected(
      cdna_input,
      DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,
      scope='cdna_params',
      activation_fn=None)

  # Reshape and normalize.
  cdna_kerns = tf.reshape(
      cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
  # Lower-bound with RELU_SHIFT so every kernel entry is strictly positive,
  # which keeps the normalization below well-defined.
  cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
  norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
  cdna_kerns /= norm_factor

  # Replicate each kernel across color channels, then split per example so
  # each image in the batch is convolved with its own predicted kernels.
  cdna_kerns = tf.tile(cdna_kerns, [1, 1, 1, color_channels, 1])
  cdna_kerns = tf.split(axis=0, num_or_size_splits=batch_size, value=cdna_kerns)
  prev_images = tf.split(axis=0, num_or_size_splits=batch_size, value=prev_image)

  # Transform image.
  transformed = []
  for kernel, preimg in zip(cdna_kerns, prev_images):
    kernel = tf.squeeze(kernel)
    # squeeze() drops the mask axis when num_masks == 1; restore it.
    if len(kernel.get_shape()) == 3:
      kernel = tf.expand_dims(kernel, -1)
    transformed.append(
        tf.nn.depthwise_conv2d(preimg, kernel, [1, 1, 1, 1], 'SAME'))
  # Re-stack the batch, then split by mask to return one image per mask.
  transformed = tf.concat(axis=0, values=transformed)
  transformed = tf.split(axis=3, num_or_size_splits=num_masks, value=transformed)
  return transformed
def dna_transformation(prev_image, dna_input):
  """Apply dynamic neural advection to previous image.

  Args:
    prev_image: previous image to be transformed.
    dna_input: hidden layer to be used for computing DNA transformation.
  Returns:
    List of images transformed by the predicted DNA kernels.
  """
  # Construct translated images.
  # Pad by 2 on each spatial side (half of the 5x5 DNA kernel) so every
  # kernel tap has a valid source pixel.
  prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
  image_height = int(prev_image.get_shape()[1])
  image_width = int(prev_image.get_shape()[2])

  # Stack all DNA_KERN_SIZE**2 shifted copies of the image along a new axis.
  inputs = []
  for xkern in range(DNA_KERN_SIZE):
    for ykern in range(DNA_KERN_SIZE):
      inputs.append(
          tf.expand_dims(
              tf.slice(prev_image_pad, [0, xkern, ykern, 0],
                       [-1, image_height, image_width, -1]), [3]))
  inputs = tf.concat(axis=3, values=inputs)

  # Normalize channels to 1.
  # RELU_SHIFT keeps kernel weights strictly positive before normalizing.
  kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT
  kernel = tf.expand_dims(
      kernel / tf.reduce_sum(
          kernel, [3], keep_dims=True), [4])
  # Per-pixel weighted sum over the shifted copies.
  return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
  """Sample batch with specified mix of ground truth and generated data points.

  Args:
    ground_truth_x: tensor of ground-truth data points.
    generated_x: tensor of generated data points.
    batch_size: batch size
    num_ground_truth: number of ground-truth examples to include in batch.
  Returns:
    New batch with num_ground_truth sampled from ground_truth_x and the rest
    from generated_x.
  """
  # Randomly partition batch positions: the first num_ground_truth shuffled
  # indices take ground truth, the remainder take generated samples.
  shuffled = tf.random_shuffle(tf.range(int(batch_size)))
  gt_idx = tf.gather(shuffled, tf.range(num_ground_truth))
  gen_idx = tf.gather(shuffled, tf.range(num_ground_truth, int(batch_size)))
  gt_examples = tf.gather(ground_truth_x, gt_idx)
  gen_examples = tf.gather(generated_x, gen_idx)
  # Stitch back so each example lands at its original batch position.
  return tf.dynamic_stitch([gt_idx, gen_idx],
                           [gt_examples, gen_examples])
| |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from oslo_config import cfg
import six
from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _
from sahara.plugins.spark import config_helper as c_helper
from sahara.plugins import utils as plugin_utils
from sahara.service.edp import base_engine
from sahara.service.edp.binary_retrievers import dispatch
from sahara.service.edp import job_utils
from sahara.service.validations.edp import job_execution as j
from sahara.swift import swift_helper as sw
from sahara.swift import utils as su
from sahara.utils import edp
from sahara.utils import files
from sahara.utils import general
from sahara.utils import remote
from sahara.utils import xmlutils
conductor = c.API
CONF = cfg.CONF
class SparkJobEngine(base_engine.JobEngine):
    """EDP job engine that runs jobs on a Spark standalone cluster.

    Jobs are launched with ``spark-submit`` on the cluster's master node.
    The launcher's process id is recorded as ``"<pid>@<instance_id>"`` in
    the job execution's ``oozie_job_id`` field and used later for status
    checks and cancellation.
    """

    def __init__(self, cluster):
        self.cluster = cluster

    def _get_pid_and_inst_id(self, job_id):
        """Split a "<pid>@<instance_id>" job id.

        Returns ("", "") when the id is missing or malformed.
        """
        try:
            pid, inst_id = job_id.split("@", 1)
            if pid and inst_id:
                return (pid, inst_id)
        except Exception:
            pass
        return "", ""

    def _get_instance_if_running(self, job_execution):
        """Return (pid, instance) for a non-terminated job, else (None, None).

        The instance may be None if it can no longer be found in the cluster.
        """
        pid, inst_id = self._get_pid_and_inst_id(job_execution.oozie_job_id)
        if not pid or not inst_id or (
                job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED):
            return None, None
        # TODO(tmckay): well, if there is a list index out of range
        # error here it probably means that the instance is gone. If we
        # have a job execution that is not terminated, and the instance
        # is gone, we should probably change the status somehow.
        # For now, do nothing.
        try:
            instance = general.get_instances(self.cluster, [inst_id])[0]
        except Exception:
            instance = None
        return pid, instance

    def _get_result_file(self, r, job_execution):
        """Read the launcher's "result" file (its exit status) from the job dir."""
        result = os.path.join(job_execution.extra['spark-path'], "result")
        return r.execute_command("cat %s" % result,
                                 raise_when_error=False)

    def _check_pid(self, r, pid):
        """Return 0 if the pid is still alive on the remote host (ps exit code)."""
        ret, stdout = r.execute_command("ps hp %s" % pid,
                                        raise_when_error=False)
        return ret

    def _get_job_status_from_remote(self, r, pid, job_execution):
        """Map the remote process / result-file state to an EDP status dict."""
        # If the pid is there, it's still running
        if self._check_pid(r, pid) == 0:
            return {"status": edp.JOB_STATUS_RUNNING}

        # The process ended. Look in the result file to get the exit status
        ret, stdout = self._get_result_file(r, job_execution)
        if ret == 0:
            exit_status = stdout.strip()
            if exit_status == "0":
                return {"status": edp.JOB_STATUS_SUCCEEDED}
            # SIGINT will yield either -2 or 130
            elif exit_status in ["-2", "130"]:
                return {"status": edp.JOB_STATUS_KILLED}

        # Well, process is done and result is missing or unexpected
        return {"status": edp.JOB_STATUS_DONEWITHERROR}

    def _job_script(self):
        """Return the text of the launch_command wrapper script resource."""
        path = "service/edp/resources/launch_command.py"
        return files.get_file_text(path)

    def _upload_wrapper_xml(self, where, job_dir, job_configs):
        """Write a hadoop-style spark.xml with swift credentials to job_dir.

        Uses proxy credentials when present, otherwise any swift
        username/password found in the job's configs. Returns the file name.
        """
        xml_name = 'spark.xml'
        proxy_configs = job_configs.get('proxy_configs')
        configs = {}
        if proxy_configs:
            configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get(
                'proxy_username')
            configs[sw.HADOOP_SWIFT_PASSWORD] = proxy_configs.get(
                'proxy_password')
            configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get(
                'proxy_trust_id')
            configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name
        else:
            cfgs = job_configs.get('configs', {})
            targets = [sw.HADOOP_SWIFT_USERNAME, sw.HADOOP_SWIFT_PASSWORD]
            configs = {k: cfgs[k] for k in targets if k in cfgs}

        content = xmlutils.create_hadoop_xml(configs)
        with remote.get_remote(where) as r:
            dst = os.path.join(job_dir, xml_name)
            r.write_file_to(dst, content)
        return xml_name

    def _upload_job_files(self, where, job_dir, job, job_configs):
        """Upload the job's mains/libs (and swift wrapper jar, if enabled).

        Returns (uploaded_paths, builtin_paths) of remote destinations.
        """

        def upload(r, dir, job_file, proxy_configs):
            # Fetch binary content from its store and write it remotely.
            dst = os.path.join(dir, job_file.name)
            raw_data = dispatch.get_raw_binary(job_file, proxy_configs)
            r.write_file_to(dst, raw_data)
            return dst

        def upload_builtin(r, dir, builtin):
            dst = os.path.join(dir, builtin['name'])
            r.write_file_to(dst, builtin['raw'])
            return dst

        builtin_libs = []
        if edp.is_adapt_spark_for_swift_enabled(
                job_configs.get('configs', {})):
            # Ship the EDP swift wrapper jar under a unique name.
            path = 'service/edp/resources/edp-spark-wrapper.jar'
            name = 'builtin-%s.jar' % six.text_type(uuid.uuid4())
            builtin_libs = [{'raw': files.get_file_text(path),
                             'name': name}]

        uploaded_paths = []
        builtin_paths = []
        with remote.get_remote(where) as r:
            mains = list(job.mains) if job.mains else []
            libs = list(job.libs) if job.libs else []
            for job_file in mains+libs:
                uploaded_paths.append(
                    upload(r, job_dir, job_file,
                           job_configs.get('proxy_configs')))
            for builtin in builtin_libs:
                builtin_paths.append(
                    upload_builtin(r, job_dir, builtin))

        return uploaded_paths, builtin_paths

    def cancel_job(self, job_execution):
        """Send SIGINT to the running launcher; return the resulting status."""
        pid, instance = self._get_instance_if_running(job_execution)
        if instance is not None:
            with remote.get_remote(instance) as r:
                ret, stdout = r.execute_command("kill -SIGINT %s" % pid,
                                                raise_when_error=False)
                if ret == 0:
                    # We had some effect, check the status
                    return self._get_job_status_from_remote(r,
                                                            pid, job_execution)

    def get_job_status(self, job_execution):
        """Return the current status dict for a tracked job, if any."""
        pid, instance = self._get_instance_if_running(job_execution)
        if instance is not None:
            with remote.get_remote(instance) as r:
                return self._get_job_status_from_remote(r, pid, job_execution)

    def run_job(self, job_execution):
        """Stage the job on the master node and launch it via spark-submit.

        Returns (job_id, status, extra) where job_id is "<pid>@<instance_id>"
        and extra carries the remote workflow directory.
        Raises EDPError when the launch command fails.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs)
        )

        # We'll always run the driver program on the master
        master = plugin_utils.get_instance(self.cluster, "master")

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(master, '/tmp/spark-edp', job,
                                               job_execution.id, "700")

        paths, builtin_paths = self._upload_job_files(
            master, wf_dir, job, updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        # TODO(tmckay): for now, paths[0] is always assumed to be the app
        # jar and we generate paths in order (mains, then libs).
        # When we have a Spark job type, we can require a "main" and set
        # the app jar explicitly to be "main"
        app_jar = paths.pop(0)
        job_class = updated_job_configs["configs"]["edp.java.main_class"]

        # If we uploaded builtins then we are using a wrapper jar. It will
        # be the first one on the builtin list and the original app_jar needs
        # to be added to the 'additional' jars
        if builtin_paths:
            wrapper_jar = builtin_paths.pop(0)
            wrapper_class = 'org.openstack.sahara.edp.SparkWrapper'
            wrapper_xml = self._upload_wrapper_xml(master,
                                                   wf_dir,
                                                   updated_job_configs)
            wrapper_args = "%s %s" % (wrapper_xml, job_class)

            additional_jars = ",".join([app_jar] + paths + builtin_paths)

        else:
            wrapper_jar = wrapper_class = wrapper_args = ""
            additional_jars = ",".join(paths)

        # All additional jars are passed with the --jars option
        if additional_jars:
            additional_jars = " --jars " + additional_jars

        # Launch the spark job using spark-submit and deploy_mode = client
        host = master.hostname()
        port = c_helper.get_config_value("Spark", "Master port", self.cluster)
        spark_submit = os.path.join(
            c_helper.get_config_value("Spark",
                                      "Spark home",
                                      self.cluster),
            "bin/spark-submit")

        # TODO(tmckay): we need to clean up wf_dirs on long running clusters
        # TODO(tmckay): probably allow for general options to spark-submit
        args = updated_job_configs.get('args', [])
        args = " ".join([su.inject_swift_url_suffix(arg) for arg in args])
        if args:
            args = " " + args

        if wrapper_jar and wrapper_class:
            # Substrings which may be empty have spaces
            # embedded if they are non-empty
            cmd = (
                '%(spark_submit)s%(driver_cp)s'
                ' --class %(wrapper_class)s%(addnl_jars)s'
                ' --master spark://%(host)s:%(port)s'
                ' %(wrapper_jar)s %(wrapper_args)s%(args)s') % (
                {
                    "spark_submit": spark_submit,
                    "driver_cp": self.get_driver_classpath(),
                    "wrapper_class": wrapper_class,
                    "addnl_jars": additional_jars,
                    "host": host,
                    "port": port,
                    "wrapper_jar": wrapper_jar,
                    "wrapper_args": wrapper_args,
                    "args": args
                })
        else:
            cmd = (
                '%(spark_submit)s --class %(job_class)s%(addnl_jars)s'
                ' --master spark://%(host)s:%(port)s %(app_jar)s%(args)s') % (
                {
                    "spark_submit": spark_submit,
                    "job_class": job_class,
                    "addnl_jars": additional_jars,
                    "host": host,
                    "port": port,
                    "app_jar": app_jar,
                    "args": args
                })

        # Re-read the execution in case a kill was requested while staging.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with remote.get_remote(master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # pid@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (stdout.strip() + "@" + master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'spark-path': wf_dir})

        # Hmm, no exception but something failed.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(_("Spark job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})

    def validate_job_execution(self, cluster, job, data):
        """Validate that a main class is configured before running the job."""
        j.check_main_class_present(data, job)

    @staticmethod
    def get_possible_job_config(job_type):
        """Return the (empty) configurable hints for Spark jobs."""
        return {'job_config': {'configs': [], 'args': []}}

    @staticmethod
    def get_supported_job_types():
        """Return the job types handled by this engine."""
        return [edp.JOB_TYPE_SPARK]

    def get_driver_classpath(self):
        """Return a " --driver-class-path <cp>" fragment, or the unmodified
        (falsy) config value when no extra classpath is configured."""
        cp = c_helper.get_config_value("Spark",
                                       "Executor extra classpath",
                                       self.cluster)
        if cp:
            cp = " --driver-class-path " + cp
        return cp
| |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import time
from netforce.model import Model, fields, get_model
from netforce.access import get_active_company
class LandedCost(Model):
    """Landed costs (shipping/duty, estimated and actual) allocated onto
    received inventory through a journal entry plus zero-qty cost stock
    moves. Lifecycle: draft -> posted -> reversed."""
    _name = "landed.cost"
    _name_field = "number"
    _string = "Landed Costs"
    _audit_log = True
    _multi_company=True
    _key = ["number"]
    _fields = {
        "number": fields.Char("Number",required=True,search=True),
        "date": fields.DateTime("Date",required=True,search=True),
        "state": fields.Selection([["draft","Draft"],["posted","Posted"],["reversed","Reversed"]],"Status",search=True),
        "cost_allocs": fields.One2Many("landed.cost.alloc","landed_id","Cost Allocations"),
        "cost_alloc_method": fields.Selection([["amount", "By Amount"], ["qty", "By Qty"]], "Cost Allocation Method"),
        "move_id": fields.Many2One("account.move","Journal Entry"),
        "est_ship": fields.Decimal("Estimate Shipping Cost",function="_get_total",function_multi=True),
        "est_duty": fields.Decimal("Estimate Duty Cost",function="_get_total",function_multi=True),
        "act_ship": fields.Decimal("Actual Shipping Cost",function="_get_total",function_multi=True),
        "act_duty": fields.Decimal("Actual Duty Cost",function="_get_total",function_multi=True),
        "alloc_amount": fields.Decimal("Allocate Amount"),
        "alloc_type": fields.Selection([["amount","Amount"],["qty","Qty"]],"Allocation Type"),
        "alloc_cost_type": fields.Selection([["est_ship","Est Shipping"],["est_duty","Estimate Duty"],["act_ship","Actual Shipping"],["act_duty","Actual Duty"]],"Cost Type"),
        "reverse_move_id": fields.Many2One("account.move","Reverse Journal Entry"),
        "stock_moves": fields.One2Many("stock.move","related_id","Stock Movements"),
        'company_id': fields.Many2One("company","Company"),
    }

    def _get_number(self, context={}):
        """Return the next unused document number from the landed_cost
        sequence (or None when no sequence is configured)."""
        seq_id = get_model("sequence").find_sequence("landed_cost")
        if not seq_id:
            return None
        while 1:
            num = get_model("sequence").get_next_number(seq_id)
            res = self.search([["number", "=", num]])
            if not res:
                return num
            # Number already taken: advance the sequence and try again.
            get_model("sequence").increment_number(seq_id)

    _defaults={
        "state": "draft",
        "date": lambda *a: time.strftime("%Y-%m-%d %H:%M:%S"),
        "cost_alloc_method": "amount",
        "number": _get_number,
        'company_id': lambda *a: get_active_company(),
    }

    def delete(self, ids, **kw):
        """Delete landed costs along with their journal entries (after
        re-drafting them) and related stock moves."""
        for obj in self.browse(ids):
            if obj.move_id:
                obj.move_id.to_draft()
                obj.move_id.delete()
            if obj.reverse_move_id:
                obj.reverse_move_id.to_draft()
                obj.reverse_move_id.delete()
            obj.stock_moves.delete()
        res = super().delete(ids, **kw)
        return res

    def post(self, ids, context={}):
        """Post the landed cost: build and post the journal entry, then
        create zero-qty stock moves carrying the allocated cost amounts.

        Only the first id in ``ids`` is processed.
        """
        settings=get_model("settings").browse(1)
        obj=self.browse(ids[0])
        total_qty=0
        total_amt=0
        for alloc in obj.cost_allocs:
            total_qty+=alloc.qty
            total_amt+=alloc.cost_amount or 0
        # accounts maps (account_id, track_id) -> signed amount:
        # positive becomes a debit line, negative a credit line.
        accounts={}
        for alloc in obj.cost_allocs:
            total_alloc_amt=0  # NOTE(review): assigned but never used
            # Credit each configured cost account with the allocation's costs.
            if alloc.est_ship:
                acc_id=settings.est_ship_account_id.id
                if not acc_id:
                    raise Exception("Missing estimate shipping account")
                k=(acc_id,alloc.track_id.id)
                accounts.setdefault(k,0)
                accounts[k]-=alloc.est_ship
            if alloc.est_duty:
                acc_id=settings.est_duty_account_id.id
                if not acc_id:
                    raise Exception("Missing estimate duty account")
                k=(acc_id,alloc.track_id.id)
                accounts.setdefault(k,0)
                accounts[k]-=alloc.est_duty
            if alloc.act_ship:
                acc_id=settings.act_ship_account_id.id
                if not acc_id:
                    raise Exception("Missing actual shipping account")
                k=(acc_id,alloc.track_id.id)
                accounts.setdefault(k,0)
                accounts[k]-=alloc.act_ship
            if alloc.act_duty:
                acc_id=settings.act_duty_account_id.id
                if not acc_id:
                    raise Exception("Missing actual duty account")
                k=(acc_id,alloc.track_id.id)
                accounts.setdefault(k,0)
                accounts[k]-=alloc.act_duty
            # Split the debit between inventory and variance according to how
            # much of the received qty is still in stock.
            if alloc.qty_stock_lc>=alloc.qty_stock_gr:
                inv_amt=alloc.amount
                var_amt=0
            else:
                ratio=min(alloc.qty_stock_lc/alloc.qty_stock_gr,1) if alloc.qty_stock_gr else 0 # XXX
                inv_amt=alloc.amount*ratio
                var_amt=alloc.amount*(1-ratio)
            if inv_amt:
                inv_account_id=alloc.location_to_id.account_id.id or alloc.product_id.stock_in_account_id.id
                if not inv_account_id:
                    raise Exception("Missing inventory account")
                k=(inv_account_id,alloc.track_id.id)
                accounts.setdefault(k,0)
                accounts[k]+=inv_amt
            if var_amt:
                var_account_id=settings.landed_cost_variance_account_id.id
                if not var_account_id:
                    raise Exception("Missing landed cost variance account")
                k=(var_account_id,alloc.track_id.id)
                accounts.setdefault(k,0)
                accounts[k]+=var_amt
        # Build and post the journal entry from the accumulated amounts.
        desc="Landed costs %s"%obj.number
        vals={
            "narration": desc,
            "date": obj.date,
            "related_id": "landed.cost,%d"%obj.id,
            "lines": [],
        }
        for (acc_id,track_id),amt in accounts.items():
            line_vals={
                "description": desc,
                "account_id": acc_id,
                "track_id": track_id,
                "debit": amt>0 and amt or 0,
                "credit": amt<0 and -amt or 0,
            }
            vals["lines"].append(("create",line_vals))
        account_move_id=get_model("account.move").create(vals)
        get_model("account.move").post([account_move_id])
        obj.write({"move_id":account_move_id, "state": "posted"})
        # Create zero-qty stock moves so the allocated cost flows into the
        # product's inventory valuation without moving any quantity.
        stock_move_ids=[]
        for line in obj.cost_allocs:
            move=line.move_id
            if not move.qty:
                raise Exception("Missing qty in stock movement %s"%move.number)
            ratio=min(line.qty_stock_lc/line.qty_stock_gr,1) if line.qty_stock_gr else 0
            journal_id=settings.landed_cost_journal_id.id
            if not journal_id:
                raise Exception("Missing landed cost journal")
            vals={
                "journal_id": journal_id,
                "date": obj.date,
                "related_id": "landed.cost,%s"%obj.id,
                "ref": obj.number,
                "product_id": move.product_id.id,
                "qty": 0,
                "uom_id": move.uom_id.id,
                "location_from_id": move.location_from_id.id,
                "location_to_id": move.location_to_id.id,
                "cost_price": 0,
                "cost_amount": line.amount*ratio,
                "move_id": account_move_id,
            }
            stock_move_id=get_model("stock.move").create(vals)
            stock_move_ids.append(stock_move_id)
        get_model("stock.move").set_done(stock_move_ids,context={"no_post": True})

    def to_draft(self,ids,context={}):
        """Return a posted landed cost to draft, removing its journal
        entries and cost stock moves."""
        obj=self.browse(ids[0])
        if obj.move_id:
            obj.move_id.to_draft()
            obj.move_id.delete()
        if obj.reverse_move_id:
            obj.reverse_move_id.to_draft()
            obj.reverse_move_id.delete()
        obj.stock_moves.delete()
        obj.write({"state": "draft"})

    def view_journal_entry(self,ids,context={}):
        """UI action: open the posted journal entry form."""
        obj=self.browse(ids[0])
        if not obj.move_id:
            raise Exception("Journal entry not found")
        return {
            "next": {
                "name": "journal_entry",
                "mode": "form",
                "active_id": obj.move_id.id,
            }
        }

    def view_reverse_journal_entry(self,ids,context={}):
        """UI action: open the reversal journal entry form."""
        obj=self.browse(ids[0])
        if not obj.reverse_move_id:
            raise Exception("Reverse journal entry not found")
        return {
            "next": {
                "name": "journal_entry",
                "mode": "form",
                "active_id": obj.reverse_move_id.id,
            }
        }

    def _get_total(self,ids,context={}):
        """Function-field: sum est/act ship/duty over the cost allocations."""
        vals={}
        for obj in self.browse(ids):
            est_ship=0
            est_duty=0
            act_ship=0
            act_duty=0
            for line in obj.cost_allocs:
                est_ship+=line.est_ship or 0
                est_duty+=line.est_duty or 0
                act_ship+=line.act_ship or 0
                act_duty+=line.act_duty or 0
            vals[obj.id]={
                "est_ship": est_ship,
                "est_duty": est_duty,
                "act_ship": act_ship,
                "act_duty": act_duty,
            }
        return vals

    def copy_to_actual(self,ids,context={}):
        """Create a new landed cost from an estimate.

        NOTE(review): the estimate amounts are copied negated — presumably
        so posting the copy backs out the estimated costs once actuals are
        entered; confirm this is the intended accounting treatment.
        """
        obj=self.browse(ids[0])
        vals={
            "cost_allocs": [],
        }
        for line in obj.cost_allocs:
            alloc_vals={
                "move_id": line.move_id.id,
                "est_ship": -line.est_ship,
                "est_duty": -line.est_duty,
            }
            vals["cost_allocs"].append(("create",alloc_vals))
        land_id=self.create(vals)
        new_land=self.browse(land_id)
        return {
            "next": {
                "name": "landed_cost",
                "mode": "form",
                "active_id": land_id,
            },
            "flash": "Actual landed costs %s copied from estimate landed costs %s"%(new_land.number,obj.number),
        }

    def alloc_amount(self,ids,context={}):
        """Distribute obj.alloc_amount across the cost allocation lines,
        proportionally by amount or by qty, writing the share into the
        field selected by alloc_cost_type."""
        obj=self.browse(ids[0])
        if obj.alloc_amount is None:
            raise Exception("Missing allocation amount")
        if obj.alloc_type=="amount":
            total_amt=0
            for line in obj.cost_allocs:
                total_amt+=line.cost_amount or 0
            if not total_amt:
                raise Exception("Total amount is zero")
        elif obj.alloc_type=="qty":
            total_qty=0
            for line in obj.cost_allocs:
                total_qty+=line.qty
            if not total_qty:
                raise Exception("Total qty is zero")
        for line in obj.cost_allocs:
            if obj.alloc_type=="amount":
                alloc_amt=obj.alloc_amount*(line.cost_amount or 0)/total_amt
            elif obj.alloc_type=="qty":
                alloc_amt=obj.alloc_amount*line.qty/total_qty if total_qty else 0
            vals={
                obj.alloc_cost_type: alloc_amt,
            }
            line.write(vals)

    def reverse(self,ids,context={}):
        """Reverse a posted landed cost: reverse its journal entry and all
        related stock moves, then mark the record reversed."""
        obj=self.browse(ids)[0]
        if obj.state!="posted":
            raise Exception("Failed to reverse landed cost: invalid state")
        if not obj.move_id:
            raise Exception("Missing journal entry")
        res=obj.move_id.reverse()
        obj.write({"state": "reversed","reverse_move_id": res["reverse_move_id"]})
        for move in obj.stock_moves:
            move.reverse()

    def merge_lc(self,ids,context={}):
        """Merge two or more landed costs into a single new draft record by
        copying all their cost allocation lines."""
        if len(ids)<2:
            raise Exception("Can not merge less than two landed costs")
        vals = {
            "cost_allocs": [],
        }
        seq=0  # NOTE(review): assigned but never used
        for obj in self.browse(ids):
            for line in obj.cost_allocs:
                cost_vals={
                    "move_id": line.move_id.id,
                    "est_ship": line.est_ship,
                    "est_duty": line.est_duty,
                    "act_ship": line.act_ship,
                    "act_duty": line.act_duty,
                }
                vals["cost_allocs"].append(("create",cost_vals))
        new_id = self.create(vals, context=context)
        new_obj = self.browse(new_id)
        return {
            "next": {
                "name": "landed_cost",
                "mode": "form",
                "active_id": new_id,
            },
            "flash": "Landed costs merged",
        }

LandedCost.register()
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import numpy as np
from pymatgen.core import PeriodicSite
from pymatgen.io.vasp import Vasprun, Poscar, Outcar
from pymatgen.analysis.defects.core import Vacancy, Interstitial, DefectEntry
from pymatgen.analysis.defects.defect_compatibility import DefectCompatibility
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class DefectCompatibilityTest(PymatgenTest):
    """Tests for DefectCompatibility: end-to-end entry processing, the
    individual correction schemes (Freysoldt, Kumagai, band filling,
    band-edge shifting), and the delocalization checks."""

    def setUp(self):
        # Charged vacancy in a 3x3x3 VO2 supercell used by most tests.
        struc = PymatgenTest.get_structure("VO2")
        struc.make_supercell(3)
        struc = struc  # (no-op assignment kept as-is)
        self.vac = Vacancy(struc, struc.sites[0], charge=-3)

        # Synthetic planar-average data for the Freysoldt correction: flat
        # bulk average vs. a cosine-shaped defect average along each axis.
        abc = self.vac.bulk_structure.lattice.abc
        axisdata = [np.arange(0., lattval, 0.2) for lattval in abc]
        bldata = [np.array([1. for u in np.arange(0., lattval, 0.2)]) for lattval in abc]
        dldata = [
            np.array([(-1 - np.cos(2 * np.pi * u / lattval)) for u in np.arange(0., lattval, 0.2)]) for lattval in abc
        ]
        self.frey_params = {'axis_grid': axisdata, 'bulk_planar_averages': bldata,
                            'defect_planar_averages': dldata, 'dielectric': 15,
                            'initial_defect_structure': struc.copy(),
                            'defect_frac_sc_coords': struc.sites[0].frac_coords[:]}

        # Real VASP outputs backing the Kumagai (atomic-site) correction.
        kumagai_bulk_struc = Poscar.from_file(os.path.join(test_dir, 'defect', 'CONTCAR_bulk')).structure
        bulk_out = Outcar(os.path.join(test_dir, 'defect', 'OUTCAR_bulk.gz'))
        defect_out = Outcar(os.path.join(test_dir, 'defect', 'OUTCAR_vac_Ga_-3.gz'))
        self.kumagai_vac = Vacancy(kumagai_bulk_struc, kumagai_bulk_struc.sites[0], charge=-3)
        kumagai_defect_structure = self.kumagai_vac.generate_defect_structure()
        self.kumagai_params = {'bulk_atomic_site_averages': bulk_out.electrostatic_potential,
                               'defect_atomic_site_averages': defect_out.electrostatic_potential,
                               'site_matching_indices': [[ind, ind - 1] for ind in range(len(kumagai_bulk_struc))],
                               'defect_frac_sc_coords': [0., 0., 0.],
                               'initial_defect_structure': kumagai_defect_structure,
                               'dielectric': 18.118 * np.identity(3),
                               'gamma': 0.153156  # not necessary to load gamma, but speeds up unit test
                               }

        # Eigenvalue data for the band-filling correction.
        v = Vasprun(os.path.join(test_dir, 'vasprun.xml'))
        eigenvalues = v.eigenvalues.copy()
        kptweights = v.actual_kpoints_weights
        potalign = -0.1
        vbm = v.eigenvalue_band_properties[2]
        cbm = v.eigenvalue_band_properties[1]
        self.bandfill_params = {'eigenvalues': eigenvalues,
                                'kpoint_weights': kptweights,
                                'potalign': potalign,
                                'vbm': vbm, 'cbm': cbm}

        # Hybrid-functional band edges for the band-edge-shifting correction.
        self.band_edge_params = {'hybrid_cbm': 1., 'hybrid_vbm': -1., 'vbm': -0.5,
                                 'cbm': 0.6, 'num_hole_vbm': 1., 'num_elec_cbm': 1.}

    def test_process_entry(self):
        # basic process with no corrections
        dentry = DefectEntry(self.vac, 0., corrections={}, parameters={'vbm': 0., 'cbm': 0.}, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.process_entry(dentry)
        self.assertIsNotNone(dentry)

        # process with corrections from parameters used in other unit tests
        params = self.frey_params.copy()
        params.update(self.bandfill_params)
        params.update({'hybrid_cbm': params['cbm'] + .2, 'hybrid_vbm': params['vbm'] - .4, })
        dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.process_entry(dentry)
        self.assertAlmostEqual(dentry.corrections['bandedgeshifting_correction'], 1.2)
        self.assertAlmostEqual(dentry.corrections['bandfilling_correction'], 0.0)
        self.assertAlmostEqual(dentry.corrections['charge_correction'], 5.44595036)

        # test over delocalized free carriers which forces skipping charge correction
        # modify the eigenvalue list to have free holes
        hole_eigenvalues = {}
        for spinkey, spinset in params['eigenvalues'].items():
            hole_eigenvalues[spinkey] = []
            for kptset in spinset:
                hole_eigenvalues[spinkey].append([])
                for eig in kptset:
                    # Half-fill states just below the VBM to fake free holes.
                    if (eig[0] < params['vbm']) and (eig[0] > params['vbm'] - .8):
                        hole_eigenvalues[spinkey][-1].append([eig[0], 0.5])
                    else:
                        hole_eigenvalues[spinkey][-1].append(eig)
        params.update({'eigenvalues': hole_eigenvalues})
        dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
        dc = DefectCompatibility(free_chg_cutoff=0.8)
        dentry = dc.process_entry(dentry)
        self.assertAlmostEqual(dentry.corrections['bandedgeshifting_correction'], 1.19999999)
        self.assertAlmostEqual(dentry.corrections['bandfilling_correction'], -1.62202400)
        self.assertAlmostEqual(dentry.corrections['charge_correction'], 0.)

        # turn off band filling and band edge shifting
        dc = DefectCompatibility(free_chg_cutoff=0.8, use_bandfilling=False, use_bandedgeshift=False)
        dentry = dc.process_entry(dentry)
        self.assertAlmostEqual(dentry.corrections['bandedgeshifting_correction'], 0.)
        self.assertAlmostEqual(dentry.corrections['bandfilling_correction'], 0.)
        self.assertAlmostEqual(dentry.corrections['charge_correction'], 0.)

    def test_perform_all_corrections(self):
        # return entry even if insufficent values are provided
        # for freysoldt, kumagai, bandfilling, or band edge shifting
        de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.perform_all_corrections(de)
        self.assertIsNotNone(dentry)
        # all other correction applications are tested in unit tests below

    def test_perform_freysoldt(self):
        # Freysoldt correction on the synthetic planar-average data.
        de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.perform_freysoldt(de)

        val = dentry.parameters['freysoldt_meta']
        self.assertAlmostEqual(val['freysoldt_electrostatic'], 0.975893)
        self.assertAlmostEqual(val['freysoldt_potential_alignment_correction'], 4.4700574)
        self.assertAlmostEqual(val['freysoldt_potalign'], 1.4900191)
        self.assertTrue('pot_corr_uncertainty_md' in val.keys())
        self.assertTrue('pot_plot_data' in val.keys())

    def test_perform_kumagai(self):
        # Kumagai correction on the real OUTCAR-derived site potentials.
        de = DefectEntry(self.kumagai_vac, 0., parameters=self.kumagai_params)
        dc = DefectCompatibility()
        dentry = dc.perform_kumagai(de)

        val = dentry.parameters['kumagai_meta']
        self.assertAlmostEqual(val['kumagai_electrostatic'], 0.88236299)
        self.assertAlmostEqual(val['kumagai_potential_alignment_correction'], 2.09704862)
        self.assertAlmostEqual(val['kumagai_potalign'], 0.69901620)
        self.assertTrue('pot_corr_uncertainty_md' in val.keys())
        self.assertTrue('pot_plot_data' in val.keys())

    def test_run_bandfilling(self):
        # With no free carriers the band-filling correction is zero.
        de = DefectEntry(self.vac, 0., corrections={}, parameters=self.bandfill_params, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.perform_bandfilling(de)

        val = dentry.parameters['bandfilling_meta']
        self.assertAlmostEqual(val['num_hole_vbm'], 0.)
        self.assertAlmostEqual(val['num_elec_cbm'], 0.)
        self.assertAlmostEqual(val['bandfilling_correction'], 0.)

    def test_run_band_edge_shifting(self):
        de = DefectEntry(self.vac, 0., corrections={}, parameters=self.band_edge_params, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.perform_band_edge_shifting(de)
        val = dentry.parameters['bandshift_meta']
        # Shifts are hybrid edges minus semilocal edges from setUp's params.
        self.assertEqual(val['vbmshift'], -0.5)
        self.assertEqual(val['cbmshift'], 0.4)
        self.assertEqual(val['bandedgeshifting_correction'], 1.5)

    def test_delocalization_analysis(self):
        # return entry even if insufficent values are provided
        # for delocalization analysis with freysoldt, kumagai,
        # bandfilling, or band edge shifting
        de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
        dc = DefectCompatibility()
        dentry = dc.delocalization_analysis(de)
        self.assertIsNotNone(dentry)
        # all other correction applications are tested in unit tests below

    def test_check_freysoldt_delocalized(self):
        de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
        de.parameters.update({'is_compatible': True})  # needs to be initialized with this here for unittest
        dc = DefectCompatibility(plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.5)
        dentry = dc.perform_freysoldt(de)

        # check case which fits under compatibility constraints
        dentry = dc.check_freysoldt_delocalized(dentry)
        frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
        self.assertTrue(frey_delocal['is_compatible'])
        ans_var = [0.00038993, 0.02119532, 0.02119532]
        ans_window = [0.048331509, 0.36797169, 0.36797169]
        for ax in range(3):
            ax_metadata = frey_delocal['metadata'][ax]
            self.assertTrue(ax_metadata['frey_variance_compatible'])
            self.assertAlmostEqual(ax_metadata['frey_variance'], ans_var[ax])
            self.assertTrue(ax_metadata['frey_minmax_compatible'])
            self.assertAlmostEqual(ax_metadata['frey_minmax_window'], ans_window[ax])
        self.assertTrue(dentry.parameters['is_compatible'])

        # check planar delocalization on 2nd and 3rd axes
        dc = DefectCompatibility(plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.2)
        dentry.parameters.update({'is_compatible': True})
        dentry = dc.check_freysoldt_delocalized(dentry)
        frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
        self.assertFalse(frey_delocal['is_compatible'])
        ax_metadata = frey_delocal['metadata'][0]
        self.assertTrue(ax_metadata['frey_variance_compatible'])
        self.assertTrue(ax_metadata['frey_minmax_compatible'])
        for ax in [1, 2]:
            ax_metadata = frey_delocal['metadata'][ax]
            self.assertTrue(ax_metadata['frey_variance_compatible'])
            self.assertFalse(ax_metadata['frey_minmax_compatible'])
        self.assertFalse(dentry.parameters['is_compatible'])

        # check variance based delocalization on 2nd and 3rd axes
        dc = DefectCompatibility(plnr_avg_var_tol=0.01, plnr_avg_minmax_tol=0.5)
        dentry.parameters.update({'is_compatible': True})
        dentry = dc.check_freysoldt_delocalized(dentry)
        frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
        self.assertFalse(frey_delocal['is_compatible'])
        ax_metadata = frey_delocal['metadata'][0]
        self.assertTrue(ax_metadata['frey_variance_compatible'])
        self.assertTrue(ax_metadata['frey_minmax_compatible'])
        for ax in [1, 2]:
            ax_metadata = frey_delocal['metadata'][ax]
            self.assertFalse(ax_metadata['frey_variance_compatible'])
            self.assertTrue(ax_metadata['frey_minmax_compatible'])
        self.assertFalse(dentry.parameters['is_compatible'])

    def test_check_kumagai_delocalized(self):
        de = DefectEntry(self.kumagai_vac, 0., parameters=self.kumagai_params)
        de.parameters.update({'is_compatible': True})  # needs to be initialized with this here for unittest
        dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=20.95)
        dentry = dc.perform_kumagai(de)

        # check case which fits under compatibility constraints
        dentry = dc.check_kumagai_delocalized(dentry)
        kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
        self.assertTrue(kumagai_delocal['is_compatible'])
        kumagai_md = kumagai_delocal['metadata']
        true_variance = 13.262304401193997
        true_minmax = 20.9435
        self.assertTrue(kumagai_md['kumagai_variance_compatible'])
        self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
        self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
        self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
        self.assertTrue(dentry.parameters['is_compatible'])

        # break variable compatibility
        dc = DefectCompatibility(atomic_site_var_tol=0.1, atomic_site_minmax_tol=20.95)
        de.parameters.update({'is_compatible': True})
        dentry = dc.perform_kumagai(de)
        dentry = dc.check_kumagai_delocalized(dentry)
        kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
        self.assertFalse(kumagai_delocal['is_compatible'])
        kumagai_md = kumagai_delocal['metadata']
        self.assertFalse(kumagai_md['kumagai_variance_compatible'])
        self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
        self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
        self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
        self.assertFalse(dentry.parameters['is_compatible'])

        # break maxmin compatibility
        dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=0.5)
        de.parameters.update({'is_compatible': True})
        dentry = dc.perform_kumagai(de)
        dentry = dc.check_kumagai_delocalized(dentry)
        kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
        self.assertFalse(kumagai_delocal['is_compatible'])
        kumagai_md = kumagai_delocal['metadata']
        self.assertTrue(kumagai_md['kumagai_variance_compatible'])
        self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
        self.assertFalse(kumagai_md['kumagai_minmax_compatible'])
        self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
        self.assertFalse(dentry.parameters['is_compatible'])

    def test_check_final_relaxed_structure_delocalized(self):
        # test structure delocalization analysis
        # first test no movement in atoms
        initial_defect_structure = self.vac.generate_defect_structure()
        final_defect_structure = initial_defect_structure.copy()
        sampling_radius = 4.55
        defect_frac_sc_coords = self.vac.site.frac_coords[:]

        params = {'initial_defect_structure': initial_defect_structure,
                  'final_defect_structure': final_defect_structure,
                  'sampling_radius': sampling_radius,
                  'defect_frac_sc_coords': defect_frac_sc_coords,
                  'is_compatible': True}
        dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
        dc = DefectCompatibility(tot_relax_tol=0.1, perc_relax_tol=0.1, defect_tot_relax_tol=0.1)
        dentry = dc.check_final_relaxed_structure_delocalized(dentry)

        struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
        self.assertTrue(dentry.parameters['is_compatible'])
        self.assertTrue(struc_delocal['is_compatible'])
        self.assertTrue(struc_delocal['metadata']['structure_tot_relax_compatible'])
        self.assertEqual(struc_delocal['metadata']['tot_relax_outside_rad'], 0.)
        self.assertTrue(struc_delocal['metadata']['structure_perc_relax_compatible'])
        self.assertEqual(struc_delocal['metadata']['perc_relax_outside_rad'], 0.)
        self.assertEqual(len(struc_delocal['metadata']['full_structure_relax_data']), len(initial_defect_structure))
        self.assertIsNone(struc_delocal['metadata']['defect_index'])

        defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
        self.assertTrue(defect_delocal['is_compatible'])
        self.assertIsNone(defect_delocal['metadata']['relax_amount'])

        # next test for when structure has delocalized outside of radius from defect
        pert_struct_fin_struct = initial_defect_structure.copy()
        pert_struct_fin_struct.perturb(0.1)
        dentry.parameters.update({'final_defect_structure': pert_struct_fin_struct})
        dentry = dc.check_final_relaxed_structure_delocalized(dentry)

        struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
        self.assertFalse(dentry.parameters['is_compatible'])
        self.assertFalse(struc_delocal['is_compatible'])
        self.assertFalse(struc_delocal['metadata']['structure_tot_relax_compatible'])
        self.assertAlmostEqual(struc_delocal['metadata']['tot_relax_outside_rad'], 12.5)
        self.assertFalse(struc_delocal['metadata']['structure_perc_relax_compatible'])
        self.assertAlmostEqual(struc_delocal['metadata']['perc_relax_outside_rad'], 77.63975155)

        # now test for when an interstitial defect has migrated too much
        inter_def_site = PeriodicSite('H', [7.58857304, 11.70848069, 12.97817518],
                                      self.vac.bulk_structure.lattice, to_unit_cell=True,
                                      coords_are_cartesian=True)
        inter = Interstitial(self.vac.bulk_structure, inter_def_site, charge=0)

        initial_defect_structure = inter.generate_defect_structure()
        final_defect_structure = initial_defect_structure.copy()
        # Find the defect site in the final structure and nudge it.
        poss_deflist = sorted(
            final_defect_structure.get_sites_in_sphere(inter.site.coords,
                                                       2, include_index=True), key=lambda x: x[1])
        def_index = poss_deflist[0][2]
        final_defect_structure.translate_sites(indices=[def_index],
                                               vector=[0., 0., 0.008])  # fractional coords translation

        defect_frac_sc_coords = inter_def_site.frac_coords[:]

        params = {'initial_defect_structure': initial_defect_structure,
                  'final_defect_structure': final_defect_structure,
                  'sampling_radius': sampling_radius,
                  'defect_frac_sc_coords': defect_frac_sc_coords,
                  'is_compatible': True}
        dentry = DefectEntry(inter, 0., corrections={}, parameters=params, entry_id=None)
        dentry = dc.check_final_relaxed_structure_delocalized(dentry)

        defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
        self.assertFalse(defect_delocal['is_compatible'])
        self.assertAlmostEqual(defect_delocal['metadata']['relax_amount'], 0.10836054)
if __name__ == "__main__":
    # Allow running this test module directly with the stdlib unittest runner.
    unittest.main()
| |
import wx
from wx import glcanvas
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import matplotlib
from matplotlib import animation
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
import numpy as np
import scipy
import scipy.io as sio
from scipy.io import wavfile
from sys import exit, argv
import os
import math
import time
import pygame
from CoverSongInfo import *
from SelfSimilarityGUI import *
DEFAULT_SIZE = wx.Size(1000, 1000)
DEFAULT_POS = wx.Point(10, 10)
SCROLL_RATE = 0.9
#Using PyOpenGL to help with automatic updating/threading. SUPER HACKY!
class DummyGLCanvas(glcanvas.GLCanvas):
    # Hidden GL canvas used only as a crude repaint timer: each paint event
    # redraws the matplotlib plot, and while audio is playing it schedules
    # another Refresh so the playback cursor keeps moving.
    def __init__(self, parent, plot):
        attribs = (glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DOUBLEBUFFER, glcanvas.WX_GL_DEPTH_SIZE, 24)
        glcanvas.GLCanvas.__init__(self, parent, -1, attribList = attribs)
        self.context = glcanvas.GLContext(self)
        self.plot = plot  # the CrossSimilarityPlot to redraw on every paint
        glutInit('')
        glEnable(GL_NORMALIZE)
        glEnable(GL_DEPTH_TEST)
        # Old-style (wxPython classic) event binding.
        wx.EVT_PAINT(self, self.processPaintEvent)

    def processEraseBackgroundEvent(self, event): pass #avoid flashing on MSW.

    def processPaintEvent(self, event):
        # A PaintDC must be created during EVT_PAINT even if unused.
        dc = wx.PaintDC(self)
        self.SetCurrent(self.context)
        self.repaint()

    def repaint(self):
        # Throttle redraws to roughly 5 per second.
        time.sleep(0.2)
        self.plot.draw()
        self.SwapBuffers()
        # Keep the paint loop alive while a song is playing.
        if self.plot.Playing:
            self.Refresh()
class CrossSimilarityPlot(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.parent = parent
self.figure = Figure((10.0, 10.0), dpi=100)
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.CSM = np.array([])
self.songnames = ["", ""]
self.SampleDelays = [np.array([]), np.array([])]
self.bts = [np.array([]), np.array([])]
self.MFCCs = [np.array([[]]), np.array([[]])]
self.beatIdxs = [np.array([]), np.array([])]
self.drawRange = [0, 1, 0, 1]
self.drawRadius = 1
#Song Playing info
self.currSong = 0 #Playing the first or second song? (first is along vertical, second is along horizontal)
self.currPos = 0 #Position in the distance matrix
self.startTime = 0
self.Playing = False
self.updatingScroll = False
self.cover1Info = None
self.cover2Info = None
self.cid = self.canvas.mpl_connect('button_press_event', self.OnClick)
self.canvas.mpl_connect('scroll_event', self.OnScroll)
def updateInfo(self, CSM, Fs, BeatsPerWin, songfilename1, songfilename2, SampleDelays1, SampleDelays2, bts1, bts2, MFCCs1, MFCCs2, beatIdx1, beatIdx2):
self.CSM = CSM
self.drawRange = [0, CSM.shape[0], 0, CSM.shape[1]]
self.Fs = Fs
self.BeatsPerWin = BeatsPerWin
self.songnames = [songfilename1, songfilename2]
self.SampleDelays = [SampleDelays1, SampleDelays2]
self.bts = [bts1, bts2]
self.MFCCs = [MFCCs1, MFCCs2]
self.beatIdxs = [beatIdx1, beatIdx2]
self.currSong = 0
self.currPos = -1
self.startTime = 0
pygame.mixer.init(frequency=self.Fs)
pygame.mixer.music.load(songfilename1)
self.draw(firstTime = True)
def draw(self, firstTime = False):
if self.CSM.size == 0:
return
thisTime = self.startTime
if self.Playing:
thisTime += float(pygame.mixer.music.get_pos()) / 1000.0
thisPos = self.currPos
while self.bts[self.currSong][thisPos] < thisTime:
thisPos = thisPos + 1
if thisPos == len(self.bts[self.currSong]) - 1:
break
if thisPos != self.currPos or firstTime:
self.currPos = thisPos
self.axes.clear()
imgplot = self.axes.imshow(self.CSM[self.drawRange[0]:self.drawRange[1], self.drawRange[2]:self.drawRange[3]])
imgplot.set_interpolation('nearest')
self.axes.hold(True)
#Plot current marker in song
if self.currSong == 0:
#Horizontal line for first song
self.axes.plot([0, self.drawRange[3]], [self.currPos-self.drawRange[0], self.currPos-self.drawRange[0]], 'r')
else:
#Vertical line for second song
self.axes.plot([self.currPos-self.drawRange[2], self.currPos-self.drawRange[2]], [0, self.drawRange[1]], 'r')
self.axes.set_xlim([0, self.drawRange[3]-self.drawRange[2]])
self.axes.set_ylim([self.drawRange[1]-self.drawRange[0], 0])
self.canvas.draw()
def OnClick(self, evt):
if evt.dblclick:
self.Playing = False
pygame.mixer.music.stop()
idx = [0, 0]
idx[0] = int(math.floor(evt.ydata)) + self.drawRange[0]
idx[1] = int(math.floor(evt.xdata)) + self.drawRange[2]
#Precompute PCA on all beat blocks (may take a few seconds the first time
#but all subsequent times it will be faster)
if not self.cover1Info:
self.cover1Info = CoverSongInfo(self.songnames[0], self.MFCCs[0], self.SampleDelays[0], self.beatIdxs[0], self.BeatsPerWin)
if not self.cover2Info:
self.cover2Info = CoverSongInfo(self.songnames[1], self.MFCCs[1], self.SampleDelays[1], self.beatIdxs[1], self.BeatsPerWin)
app = wx.App()
frame = CoverSongsFrame(None, -1, 'Cover Songs GUI', self.cover1Info, self.cover2Info, self.CSM, idx)
frame.Show(True)
app.MainLoop()
app.Destroy()
return
if self.CSM.size == 0:
return
thisSong = 0
if evt.button == 1: #TODO: Magic numbers?
thisSong = 0
elif evt.button == 2:
#Reset scrolling to normal
self.drawRange = [0, self.CSM.shape[0], 0, self.CSM.shape[1]]
self.drawRadius = 1
self.draw()
return
else:
thisSong = 1
if not (thisSong == self.currSong):
self.currSong = thisSong
print "\n\nIniting mixer with sampling frequency Fs = %g"%self.Fs
pygame.mixer.init(frequency=self.Fs)
pygame.mixer.music.load(self.songnames[self.currSong])
idx = [0, 0]
idx[0] = int(math.floor(evt.ydata)) + self.drawRange[0]
idx[1] = int(math.floor(evt.xdata)) + self.drawRange[2]
print "Jumping to %g seconds in %s"%(self.bts[self.currSong][idx[self.currSong]], self.songnames[self.currSong])
self.startTime = self.bts[self.currSong][idx[self.currSong]]
pygame.mixer.music.play(0, self.startTime)
self.Playing = True
self.dummyCanvas.Refresh()
self.currPos = idx[self.currSong]
self.draw()
def OnScroll(self, evt):
idx = [0, 0]
idx[0] = int(math.floor(evt.ydata))
idx[1] = int(math.floor(evt.xdata))
if evt.step > 0:
#Zoom in
self.drawRadius = self.drawRadius*SCROLL_RATE
else:
#Zoom out
self.drawRadius = self.drawRadius/SCROLL_RATE
if self.drawRadius > 1:
self.drawRadius = 1
#Find selected point in original coordinates
selX = idx[1] + self.drawRange[2]
selY = idx[0] + self.drawRange[0]
#Find new window size
dXWin = int(np.round(self.drawRadius*self.CSM.shape[1]/2.0))
dYWin = int(np.round(self.drawRadius*self.CSM.shape[0]/2.0))
d = [selY - dYWin, selY + dYWin, selX - dXWin, selX + dXWin]
d[0] = max(0, d[0])
d[1] = min(self.CSM.shape[0], d[1])
d[2] = max(0, d[2])
d[3] = min(self.CSM.shape[1], d[1])
print d
self.drawRange = d
self.draw()
def OnPlayButton(self, evt):
if len(self.bts[0]) == 0:
return
self.Playing = True
if self.currPos == -1:
self.currPos = 0
self.startTime = self.bts[self.currSong][self.currPos]
pygame.mixer.music.play(0, self.startTime)
self.draw()
def OnPauseButton(self, evt):
self.Playing = False
pygame.mixer.music.stop()
self.draw()
class CrossSimilaritysFrame(wx.Frame):
    # Main application frame: a File menu to load a precomputed .mat matrix,
    # play/pause buttons, the CSM plot panel, and a dummy GL canvas that
    # drives the redraw loop during playback.
    # NOTE(review): the "Similaritys" spelling is kept; renaming the class
    # would break external references.
    (ID_LOADMATRIX) = (1)  # menu item id for "Load Cross-Similarity Matrix"
    def __init__(self, parent, id, title, pos=DEFAULT_POS, size=DEFAULT_SIZE, style=wx.DEFAULT_FRAME_STYLE, name = 'GLWindow'):
        style = style | wx.NO_FULL_REPAINT_ON_RESIZE
        super(CrossSimilaritysFrame, self).__init__(parent, id, title, pos, size, style, name)
        #Initialize the menu
        self.CreateStatusBar()
        #Sound variables
        self.size = size
        self.pos = pos

        filemenu = wx.Menu()
        menuLoadMatrix = filemenu.Append(CrossSimilaritysFrame.ID_LOADMATRIX, "&Load Cross-Similarity Matrix","Load Cross-Similarity Matrix")
        self.Bind(wx.EVT_MENU, self.OnLoadMatrix, menuLoadMatrix)
        # Creating the menubar.
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
        self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
        #The numpy plot that will store the cross-similarity matrix and all
        #of the info needed to construct self-similarity matrices
        self.CSPlot = CrossSimilarityPlot(self)
        #The play/pause buttons
        buttonRow = wx.BoxSizer(wx.HORIZONTAL)
        playButton = wx.Button(self, label = 'PLAY')
        playButton.Bind(wx.EVT_BUTTON, self.CSPlot.OnPlayButton)
        pauseButton = wx.Button(self, label = 'PAUSE')
        pauseButton.Bind(wx.EVT_BUTTON, self.CSPlot.OnPauseButton)
        buttonRow.Add(playButton, 0, wx.EXPAND | wx.GROW)
        buttonRow.Add(pauseButton, 0, wx.EXPAND | wx.GROW)
        #The GL canvas periodically refreshes the plot while audio plays
        self.glcanvas = DummyGLCanvas(self, self.CSPlot)
        self.CSPlot.dummyCanvas = self.glcanvas
        self.glcanvas.Refresh()
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(buttonRow, 0, wx.EXPAND)
        self.sizer.Add(self.CSPlot, 0, wx.GROW)
        self.SetSizer(self.sizer)
        self.Layout()
        self.Show()

    def OnLoadMatrix(self, evt):
        #Prompt for a .mat file, unpack the precomputed CSM plus per-song
        #metadata, and hand everything to the plot panel.
        dlg = wx.FileDialog(self, "Choose a file", ".", "", "*", wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetFilename()
            dirname = dlg.GetDirectory()
            print "Loading %s...."%filename
            filepath = os.path.join(dirname, filename)
            data = sio.loadmat(filepath)
            CSM = data['CSM']
            Fs = data['Fs'].flatten()[0]
            BeatsPerWin = data['BeatsPerWin'].flatten()[0]
            #The sound files need to be in the same directory
            songfilename1 = str(data['songfilename1'][0])
            songfilename2 = str(data['songfilename2'][0])
            SampleDelays1 = data['SampleDelays1'].flatten()
            SampleDelays2 = data['SampleDelays2'].flatten()
            bts1 = data['bts1'].flatten()
            bts2 = data['bts2'].flatten()
            MFCCs1 = data['MFCCs1']
            MFCCs2 = data['MFCCs2']
            beatIdx1 = data['beatIdx1'].flatten()
            beatIdx2 = data['beatIdx2'].flatten()
            self.CSPlot.updateInfo(CSM, Fs, BeatsPerWin, songfilename1, songfilename2, SampleDelays1, SampleDelays2, bts1, bts2, MFCCs1, MFCCs2, beatIdx1, beatIdx2)
        dlg.Destroy()
        return
if __name__ == "__main__":
    # Launch the wx application and block in its event loop.
    app = wx.App()
    frame = CrossSimilaritysFrame(None, -1, 'Cross Similarity GUI')
    frame.Show(True)
    app.MainLoop()
    app.Destroy()
| |
from __future__ import annotations
import contextlib
import getpass
from typing import Any, Literal
import pandas as pd
import sqlalchemy as sa
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.util as util
from ibis.backends.base.sql import BaseSQLBackend
from .database import AlchemyDatabase, AlchemyTable
from .datatypes import schema_from_table, table_from_schema, to_sqla_type
from .geospatial import geospatial_supported
from .query_builder import AlchemyCompiler
from .registry import (
fixed_arity,
get_sqla_table,
infix_op,
reduction,
sqlalchemy_operation_registry,
sqlalchemy_window_functions_registry,
unary,
varargs,
variance_reduction,
)
from .translator import AlchemyContext, AlchemyExprTranslator
__all__ = (
'BaseAlchemyBackend',
'AlchemyExprTranslator',
'AlchemyContext',
'AlchemyCompiler',
'AlchemyTable',
'AlchemyDatabase',
'AlchemyContext',
'sqlalchemy_operation_registry',
'sqlalchemy_window_functions_registry',
'reduction',
'variance_reduction',
'fixed_arity',
'unary',
'infix_op',
'get_sqla_table',
'to_sqla_type',
'schema_from_table',
'table_from_schema',
'varargs',
)
class BaseAlchemyBackend(BaseSQLBackend):
"""Backend class for backends that compile to SQLAlchemy expressions."""
database_class = AlchemyDatabase
table_class = AlchemyTable
compiler = AlchemyCompiler
def _build_alchemy_url(
self, url, host, port, user, password, database, driver
):
if url is not None:
return sa.engine.url.make_url(url)
user = user or getpass.getuser()
return sa.engine.url.URL(
driver,
host=host,
port=port,
username=user,
password=password,
database=database,
)
    @property
    def _current_schema(self) -> str | None:
        # Base backends have no schema qualification; dialect-specific
        # subclasses override this when the database supports schemas.
        return None
    def do_connect(self, con: sa.engine.Engine) -> None:
        """Bind this backend to an existing SQLAlchemy engine."""
        self.con = con
        self._inspector = sa.inspect(self.con)
        self.meta = sa.MetaData(bind=self.con)
        # Cache of ibis schemas keyed by fully qualified table name; filled
        # by create_table and consulted by _sqla_table_to_expr.
        self._schemas: dict[str, sch.Schema] = {}
@property
def version(self):
return '.'.join(map(str, self.con.dialect.server_version_info))
def list_tables(self, like=None, database=None):
tables = self._inspector.get_table_names(
schema=database
) + self._inspector.get_view_names(schema=database)
return self._filter_with_like(tables, like)
def list_databases(self, like=None):
"""List databases in the current server."""
databases = self.inspector.get_schema_names()
return self._filter_with_like(databases, like)
    @property
    def inspector(self):
        # Clear SQLAlchemy's reflection cache so repeated catalog queries
        # (e.g. list_databases) always see fresh information.
        self._inspector.info_cache.clear()
        return self._inspector
    @staticmethod
    def _to_geodataframe(df, schema):
        """Convert `df` to a `GeoDataFrame`.

        Required libraries for geospatial support must be installed and a
        geospatial column is present in the dataframe.
        """
        # Imported lazily: geopandas/geoalchemy2 are optional dependencies.
        import geopandas
        from geoalchemy2 import shape

        def to_shapely(row, name):
            return shape.to_shape(row[name]) if row[name] is not None else None

        geom_col = None
        for name, dtype in schema.items():
            if isinstance(dtype, dt.GeoSpatial):
                # The first geospatial column becomes the active geometry.
                geom_col = geom_col or name
                df[name] = df.apply(lambda x: to_shapely(x, name), axis=1)
        if geom_col:
            df = geopandas.GeoDataFrame(df, geometry=geom_col)
        return df
def fetch_from_cursor(self, cursor, schema):
df = pd.DataFrame.from_records(
cursor.fetchall(),
columns=cursor.keys(),
coerce_float=True,
)
df = schema.apply_to(df)
if len(df) and geospatial_supported:
return self._to_geodataframe(df, schema)
return df
    @contextlib.contextmanager
    def begin(self):
        # Delegate to SQLAlchemy's transactional context manager: the
        # transaction commits on normal exit and rolls back on error.
        with self.con.begin() as bind:
            yield bind
    def create_table(
        self,
        name: str,
        expr: pd.DataFrame | ir.TableExpr | None = None,
        schema: sch.Schema | None = None,
        database: str | None = None,
        force: bool = False,
    ) -> None:
        """Create a table.

        Parameters
        ----------
        name
            Table name to create
        expr
            DataFrame or table expression to use as the data source
        schema
            An ibis schema
        database
            A database
        force
            Check whether a table exists before creating it
        """
        if database == self.current_database:
            # avoid fully qualified name
            database = None

        if database is not None:
            raise NotImplementedError(
                'Creating tables from a different database is not yet '
                'implemented'
            )

        if expr is None and schema is None:
            raise ValueError('You must pass either an expression or a schema')

        # When both are given they must agree; otherwise derive the schema
        # from the expression.
        if expr is not None and schema is not None:
            if not expr.schema().equals(ibis.schema(schema)):
                raise TypeError(
                    'Expression schema is not equal to passed schema. '
                    'Try passing the expression without the schema'
                )
        if schema is None:
            schema = expr.schema()

        # Remember the ibis schema so table() can reuse it without reflection.
        self._schemas[self._fully_qualified_name(name, database)] = schema
        t = self._table_from_schema(
            name, schema, database=database or self.current_database
        )

        with self.begin() as bind:
            t.create(bind=bind, checkfirst=force)
            if expr is not None:
                # Populate via INSERT ... SELECT from the compiled expression.
                bind.execute(
                    t.insert().from_select(list(expr.columns), expr.compile())
                )
def _columns_from_schema(
self, name: str, schema: sch.Schema
) -> list[sa.Column]:
return [
sa.Column(colname, to_sqla_type(dtype), nullable=dtype.nullable)
for colname, dtype in zip(schema.names, schema.types)
]
    def _table_from_schema(
        self, name: str, schema: sch.Schema, database: str | None = None
    ) -> sa.Table:
        """Build a SQLAlchemy Table (bound to our metadata) from an ibis schema."""
        # NOTE(review): `database` is accepted but not forwarded to sa.Table;
        # presumably subclasses that support schemas override this — confirm.
        columns = self._columns_from_schema(name, schema)
        return sa.Table(name, self.meta, *columns)
    def drop_table(
        self,
        table_name: str,
        database: str | None = None,
        force: bool = False,
    ) -> None:
        """Drop a table.

        Parameters
        ----------
        table_name
            Table to drop
        database
            Database to drop table from
        force
            Check for existence before dropping
        """
        if database == self.current_database:
            # avoid fully qualified name
            database = None

        if database is not None:
            raise NotImplementedError(
                'Dropping tables from a different database is not yet '
                'implemented'
            )

        # autoload=False: no need to reflect the columns just to drop it.
        t = self._get_sqla_table(table_name, schema=database, autoload=False)
        t.drop(checkfirst=force)
        assert (
            not t.exists()
        ), f'Something went wrong during DROP of table {t.name!r}'

        # Also remove the table from the in-memory metadata and schema cache.
        self.meta.remove(t)

        qualified_name = self._fully_qualified_name(table_name, database)

        try:
            del self._schemas[qualified_name]
        except KeyError:  # schemas won't be cached if created with raw_sql
            pass
    def load_data(
        self,
        table_name: str,
        data: pd.DataFrame,
        database: str | None = None,
        if_exists: Literal['fail', 'replace', 'append'] = 'fail',
    ) -> None:
        """Load data from a dataframe to the backend.

        Parameters
        ----------
        table_name
            Name of the table in which to load data
        data
            Pandas DataFrame
        database
            Database in which the table exists
        if_exists
            What to do when data in `name` already exists

        Raises
        ------
        NotImplementedError
            Loading data to a table from a different database is not
            yet implemented
        """
        if database == self.current_database:
            # avoid fully qualified name
            database = None

        if database is not None:
            raise NotImplementedError(
                'Loading data to a table from a different database is not '
                'yet implemented'
            )

        # Delegate the write to pandas; `if_exists` semantics match to_sql.
        data.to_sql(
            table_name,
            con=self.con,
            index=False,
            if_exists=if_exists,
            schema=self._current_schema,
        )
    def truncate_table(
        self,
        table_name: str,
        database: str | None = None,
    ) -> None:
        """Delete all rows from `table_name` (emits a SQL DELETE)."""
        t = self._get_sqla_table(table_name, schema=database)
        t.delete().execute()
def schema(self, name: str) -> sch.Schema:
"""Get an ibis schema from the current database for the table `name`.
Parameters
----------
name
Table name
Returns
-------
Schema
The ibis schema of `name`
"""
return self.database().schema(name)
    @property
    def current_database(self) -> str:
        """The name of the current database this client is connected to."""
        # `database_name` is expected to be set by the concrete backend at
        # connect time.
        return self.database_name
@util.deprecated(version='2.0', instead='`list_databases`')
def list_schemas(self, like: str | None = None) -> list[str]:
return self.list_databases()
def _log(self, sql):
try:
query_str = str(sql)
except sa.exc.UnsupportedCompilationError:
pass
else:
util.log(query_str)
    def _get_sqla_table(
        self,
        name: str,
        schema: str | None = None,
        autoload: bool = True,
        **kwargs: Any,
    ) -> sa.Table:
        """Return the SQLAlchemy table object for `name`.
        Parameters
        ----------
        name
            Table name
        schema
            Database/schema qualifier forwarded to ``sa.Table``
        autoload
            If `True`, reflect column metadata from the database
        kwargs
            Accepted for call compatibility (e.g. the `database` keyword
            passed by `table`) but ignored here
        """
        return sa.Table(name, self.meta, schema=schema, autoload=autoload)
def _sqla_table_to_expr(self, table: sa.Table) -> ir.TableExpr:
schema = self._schemas.get(table.name)
node = self.table_class(table, self, schema)
return self.table_expr_class(node)
    def table(
        self,
        name: str,
        database: str | None = None,
        schema: str | None = None,
    ) -> ir.TableExpr:
        """Create a table expression from a table in the database.
        Parameters
        ----------
        name
            Table name
        database
            The database the table resides in
        schema
            The schema inside `database` where the table resides.
            !!! warning "`schema` refers to database organization"
                The `schema` parameter does **not** refer to the column names
                and types of `table`.
        Returns
        -------
        TableExpr
            Table expression
        """
        # A different database: delegate to a client bound to that database.
        if database is not None and database != self.current_database:
            return self.database(database=database).table(
                name=name,
                database=database,
                schema=schema,
            )
        # `database` lands in `_get_sqla_table`'s **kwargs and is ignored
        # there; only `schema` qualifies the reflected table.
        sqla_table = self._get_sqla_table(
            name,
            database=database,
            schema=schema,
        )
        return self._sqla_table_to_expr(sqla_table)
    def insert(
        self,
        table_name: str,
        obj: pd.DataFrame | ir.TableExpr,
        database: str | None = None,
        overwrite: bool = False,
    ) -> None:
        """Insert data into a table.
        Parameters
        ----------
        table_name
            The name of the table into which data will be inserted
        obj
            The source data or expression to insert
        database
            Name of the attached database that the table is located in.
        overwrite
            If `True` then replace existing contents of table
        Raises
        ------
        NotImplementedError
            If inserting data from a different database
        ValueError
            If the type of `obj` isn't supported
        """
        if database == self.current_database:
            # avoid fully qualified name
            database = None
        if database is not None:
            raise NotImplementedError(
                'Inserting data to a table from a different database is not '
                'yet implemented'
            )
        if isinstance(obj, pd.DataFrame):
            # In-memory data: let pandas handle the write.
            obj.to_sql(
                table_name,
                self.con,
                index=False,
                if_exists='replace' if overwrite else 'append',
                schema=self._current_schema,
            )
        elif isinstance(obj, ir.TableExpr):
            # Expression source: compile to INSERT ... SELECT.
            to_table_expr = self.table(table_name)
            to_table_schema = to_table_expr.schema()
            if overwrite:
                # Recreate the destination with the same schema to clear it.
                self.drop_table(table_name, database=database)
                self.create_table(
                    table_name,
                    schema=to_table_schema,
                    database=database,
                )
            to_table = self._get_sqla_table(table_name, schema=database)
            from_table_expr = obj
            with self.begin() as bind:
                if from_table_expr is not None:
                    bind.execute(
                        to_table.insert().from_select(
                            list(from_table_expr.columns),
                            from_table_expr.compile(),
                        )
                    )
        else:
            raise ValueError(
                "No operation is being performed. Either the obj parameter "
                "is not a pandas DataFrame or is not a ibis TableExpr."
                f"The given obj is of type {type(obj).__name__} ."
            )
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metdata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import copy
import numpy as np
from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.platform import tf_logging as logging
# Defaults used by `_TPUContext.master_job` when inferring TPU worker /
# coordinator job names from the cluster spec.
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
# Master addresses that indicate a local (in-process) session.
_LOCAL_MASTERS = ('', 'local')
class _TPUContext(object):
  """A context holds immutable states of TPU computation.

  This immutable object holds TPUEstimator config, train/eval batch size, and
  `TPUEstimator.use_tpu`, which is expected to be passed around. It also
  provides utility functions, based on the current state, to determine other
  information commonly required by TPU computation, such as TPU device names,
  TPU hosts, shard batch size, etc.

  N.B. As `mode` is not immutable state in Estimator, but essential to
  distinguish between TPU training and evaluation, a common usage for
  _TPUContext with `mode` is as follows:

  ```
  with _ctx.with_mode(mode) as ctx:
    if ctx.is_running_on_cpu():
       ...
  ```
  """
  def __init__(self, config, train_batch_size, eval_batch_size,
               predict_batch_size, use_tpu):
    """Holds the TPUEstimator constructor arguments as immutable state."""
    self._config = config
    self._train_batch_size = train_batch_size
    self._eval_batch_size = eval_batch_size
    self._predict_batch_size = predict_batch_size
    self._use_tpu = use_tpu
    # Model parallelism is active only on TPU and only when the user set
    # TPUConfig.computation_shape.
    self._model_parallelism_enabled = (
        use_tpu and config.tpu_config.computation_shape)
    self._mode = None
    self._lazy_tpu_system_metadata_dict = {}  # key by master address
    self._lazy_device_assignment_dict = {}  # key by master address
    self._lazy_validation_dict = {}  # key by ModeKeys
  def _assert_mode(self):
    """Returns the current mode, raising if `with_mode` was never entered."""
    if self._mode is None:
      raise RuntimeError(
          '`mode` needs to be set via contextmanager `with_mode`.')
    return self._mode
  @contextmanager
  def with_mode(self, mode):
    """Yields a shallow copy of this context with `mode` set."""
    # NOTE(xiejw): Shallow copy is enough. It will share the lazy dictionaries,
    # such as _lazy_tpu_system_metadata_dict between new copy and the original
    # one. Note that all lazy states stored in properties _lazy_foo are sort of
    # immutable as they should be same for the process lifetime.
    new_ctx = copy.copy(self)
    new_ctx._mode = mode  # pylint: disable=protected-access
    yield new_ctx
  @property
  def mode(self):
    """The current mode; only valid inside a `with_mode` block."""
    return self._assert_mode()
  def _get_master_address(self):
    """Returns the master address for the current mode."""
    mode = self._assert_mode()
    config = self._config
    master = (
        config.master
        if mode != model_fn_lib.ModeKeys.EVAL else config.evaluation_master)
    return master
  def _get_tpu_system_metadata(self):
    """Gets the (maybe cached) TPU system metadata."""
    master = self._get_master_address()
    tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master)
    if tpu_system_metadata is not None:
      return tpu_system_metadata
    # pylint: disable=protected-access
    tpu_system_metadata = (
        tpu_system_metadata_lib._query_tpu_system_metadata(
            master,
            run_config=self._config,
            query_topology=self.model_parallelism_enabled))
    self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata
    return tpu_system_metadata
  def _get_device_assignment(self):
    """Gets the (maybe cached) TPU device assignment."""
    master = self._get_master_address()
    device_assignment = self._lazy_device_assignment_dict.get(master)
    if device_assignment is not None:
      return device_assignment
    tpu_system_metadata = self._get_tpu_system_metadata()
    device_assignment = tpu_device_assignment.device_assignment(
        tpu_system_metadata.topology,
        computation_shape=self._config.tpu_config.computation_shape,
        num_replicas=self.num_replicas)
    logging.info('computation_shape: %s',
                 str(self._config.tpu_config.computation_shape))
    logging.info('num_replicas: %d', self.num_replicas)
    logging.info('device_assignment.topology.device_coordinates: %s',
                 str(device_assignment.topology.device_coordinates))
    logging.info('device_assignment.core_assignment: %s',
                 str(device_assignment.core_assignment))
    self._lazy_device_assignment_dict[master] = device_assignment
    return device_assignment
  @property
  def model_parallelism_enabled(self):
    """Whether computation_shape-based model parallelism is enabled."""
    return self._model_parallelism_enabled
  @property
  def device_assignment(self):
    """The TPU device assignment, or None without model parallelism."""
    return (self._get_device_assignment()
            if self._model_parallelism_enabled else None)
  @property
  def num_of_cores_per_host(self):
    """Number of TPU cores on each host, from system metadata."""
    metadata = self._get_tpu_system_metadata()
    return metadata.num_of_cores_per_host
  @property
  def num_cores(self):
    """Total number of TPU cores in the system."""
    metadata = self._get_tpu_system_metadata()
    return metadata.num_cores
  @property
  def num_of_replicas_per_host(self):
    """Number of model replicas placed on each host."""
    if self.model_parallelism_enabled:
      return self.num_replicas // self.num_hosts
    else:
      return self.num_of_cores_per_host
  @property
  def num_replicas(self):
    """Number of replicas: total cores divided by cores per replica."""
    num_cores_in_system = self.num_cores
    if self.model_parallelism_enabled:
      computation_shape_array = np.asarray(
          self._config.tpu_config.computation_shape, dtype=np.int32)
      num_cores_per_replica = np.prod(computation_shape_array)
      if num_cores_per_replica > num_cores_in_system:
        raise ValueError(
            'The num of cores required by the model parallelism, specified by '
            'TPUConfig.computation_shape, is larger than the total num of '
            'TPU cores in the system. computation_shape: {}, num cores '
            'in the system: {}'.format(
                self._config.tpu_config.computation_shape,
                num_cores_in_system))
      if num_cores_in_system % num_cores_per_replica != 0:
        raise RuntimeError(
            'The num of cores in the system ({}) is not divisible by the num '
            'of cores ({}) required by the model parallelism, specified by '
            'TPUConfig.computation_shape. This should never happen!'.format(
                num_cores_in_system, num_cores_per_replica))
      return num_cores_in_system // num_cores_per_replica
    else:
      return num_cores_in_system
  @property
  def num_hosts(self):
    """Number of TPU hosts in the system."""
    metadata = self._get_tpu_system_metadata()
    return metadata.num_hosts
  @property
  def config(self):
    """The RunConfig this context was constructed from."""
    return self._config
  def is_input_sharded_per_core(self):
    """Return true if input_fn is invoked per-core (other than per-host)."""
    mode = self._assert_mode()
    return (mode == model_fn_lib.ModeKeys.TRAIN and
            (self._config.tpu_config.per_host_input_for_training is
             tpu_config.InputPipelineConfig.PER_SHARD_V1))
  def is_input_per_host_with_iterators(self):
    """Return true if input_fn should be run in the per-host v2 config."""
    return (self._config.tpu_config.per_host_input_for_training is
            tpu_config.InputPipelineConfig.PER_HOST_V2)
  def is_running_on_cpu(self, is_export_mode=False):
    """Determines whether the input_fn and model_fn should be invoked on CPU.
    This API also validates user provided configuration, such as batch size,
    according the lazy initialized TPU system metadata.
    Args:
      is_export_mode: Indicates whether the current mode is for exporting the
        model, when mode == PREDICT. Only with this bool, we could
        tell whether user is calling the Estimator.predict or
        Estimator.export_savedmodel, which are running on TPU and CPU
        respectively. Parent class Estimator does not distinguish these two.
    Returns:
      bool, whether current input_fn or model_fn should be running on CPU.
    Raises:
      ValueError: any configuration is invalid.
    """
    is_running_on_cpu = self._is_running_on_cpu(is_export_mode)
    if not is_running_on_cpu:
      # Only validate once we know the computation goes to the TPU.
      self._validate_tpu_configuration()
    return is_running_on_cpu
  def _is_running_on_cpu(self, is_export_mode):
    """Determines whether the input_fn and model_fn should be invoked on CPU."""
    mode = self._assert_mode()
    if not self._use_tpu:
      return True
    if mode != model_fn_lib.ModeKeys.PREDICT:
      return False
    # There are actually 2 use cases when running with mode.PREDICT: prediction
    # and saving the model.  We run actual predictions on the TPU, but
    # model export is run on the CPU.
    if is_export_mode:
      return True
    return False
  @property
  def global_batch_size(self):
    """The batch size for the current mode, or None for an unknown mode."""
    mode = self._assert_mode()
    if mode == model_fn_lib.ModeKeys.TRAIN:
      return self._train_batch_size
    elif mode == model_fn_lib.ModeKeys.EVAL:
      return self._eval_batch_size
    elif mode == model_fn_lib.ModeKeys.PREDICT:
      return self._predict_batch_size
    else:
      return None
  @property
  def batch_size_for_input_fn(self):
    """Returns the shard batch size for `input_fn`."""
    global_batch_size = self.global_batch_size
    if self.is_running_on_cpu():
      return global_batch_size
    # On TPU
    if self.is_input_sharded_per_core() or (
        self.is_input_per_host_with_iterators()):
      # We prohibit per core input sharding for the model parallelism case,
      # therefore it is safe to use num_cores here.
      return global_batch_size // self.num_cores
    else:
      return global_batch_size // self.num_hosts
  @property
  def batch_size_for_model_fn(self):
    """Returns the shard batch size for `model_fn`."""
    global_batch_size = self.global_batch_size
    if self.is_running_on_cpu():
      return global_batch_size
    # On TPU. always sharded per shard.
    return global_batch_size // self.num_replicas
  @property
  def master_job(self):
    """Returns the job name to use to place TPU computations on.
    Returns:
      A string containing the job name, or None if no job should be specified.
    Raises:
      ValueError: If the user needs to specify a tpu_job_name, because we are
        unable to infer the job name automatically, or if the user-specified job
        names are inappropriate.
    """
    run_config = self._config
    # If the user specifies the tpu_job_name, use that.
    if run_config.tpu_config.tpu_job_name:
      return run_config.tpu_config.tpu_job_name
    # The tpu job is determined by the run_config. Right now, this method is
    # required as tpu_config is not part of the RunConfig.
    mode = self._assert_mode()
    master = (
        run_config.evaluation_master
        if mode == model_fn_lib.ModeKeys.EVAL else run_config.master)
    if master in _LOCAL_MASTERS:
      return None
    if (not run_config.session_config or
        not run_config.session_config.cluster_def.job):
      return _DEFAULT_JOB_NAME
    cluster_def = run_config.session_config.cluster_def
    job_names = set([job.name for job in cluster_def.job])
    if _DEFAULT_JOB_NAME in job_names:
      # b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
      raise ValueError('Currently, tpu_worker is not an allowed job name.')
    if len(job_names) == 1:
      return cluster_def.job[0].name
    if len(job_names) == 2:
      if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
        job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
        return job_names.pop()
    # TODO(b/67716447): Include more sophisticated heuristics.
    raise ValueError(
        'Could not infer TPU job name. Please specify a tpu_job_name as part '
        'of your TPUConfig.')
  @property
  def tpu_host_placement_function(self):
    """Returns the TPU host place function."""
    master = self.master_job
    def _placement_function(_sentinal=None, core_id=None, host_id=None):  # pylint: disable=invalid-name
      # `_sentinal` forces callers to pass core_id/host_id by keyword.
      assert _sentinal is None
      if core_id is not None and host_id is not None:
        raise RuntimeError(
            'core_id and host_id can have only one non-None value.')
      if master is None:
        return '/replica:0/task:0/device:CPU:0'
      else:
        if core_id is not None:
          # NOTE(review): relies on Python 2 integer division (`/`); under
          # Python 3 this would make host_id a float.
          host_id = core_id / self.num_of_cores_per_host
        return '/job:%s/task:%d/device:CPU:0' % (master, host_id)
    return _placement_function
  @property
  def tpu_device_placement_function(self):
    """Returns a TPU device placement Fn."""
    master = self.master_job
    job_device = '' if master is None else ('/job:%s' % master)
    def _placement_function(i):
      if self.model_parallelism_enabled:
        return self.device_assignment.tpu_device(replica=i, job=master)
      else:
        num_of_cores_per_host = self.num_of_cores_per_host
        # NOTE(review): Python 2 integer division (`/`) assumed here too.
        host_id = i / num_of_cores_per_host
        ordinal_id = i % num_of_cores_per_host
        return '%s/task:%d/device:TPU:%d' % (job_device, host_id, ordinal_id)
    return _placement_function
  @property
  def tpu_ordinal_function(self):
    """Returns the TPU ordinal fn."""
    def _tpu_ordinal_function(index):
      """Return the TPU ordinal associated with a shard.
      Required because the enqueue ops are placed on CPU.
      Args:
        index: the shard index
      Returns:
        The ordinal of the TPU device the shard's infeed should be placed on.
      """
      if self.model_parallelism_enabled:
        return self.device_assignment.tpu_ordinal(replica=index)
      else:
        return index % self.num_of_cores_per_host
    return _tpu_ordinal_function
  def _validate_tpu_configuration(self):
    """Validates the configuration based on the TPU system metadata."""
    mode = self._assert_mode()
    if self._lazy_validation_dict.get(mode):
      # Each mode only needs to be validated once per process.
      return
    # All following information is obtained from TPU system metadata.
    num_cores = self.num_cores
    num_replicas = self.num_replicas
    num_hosts = self.num_hosts
    if not num_cores:
      tpu_system_metadata = self._get_tpu_system_metadata()
      raise RuntimeError(
          'Cannot find any TPU cores in the system. Please double check '
          'Tensorflow master address and TPU worker(s). Available devices '
          'are {}.'.format(tpu_system_metadata.devices))
    if self._config.tpu_config.num_shards:
      user_provided_num_replicas = self._config.tpu_config.num_shards
      if user_provided_num_replicas != num_replicas:
        message = (
            'TPUConfig.num_shards is not set correctly. According to TPU '
            'system metadata for Tensorflow master ({}): num_replicas should '
            'be ({}), got ({}). For non-model-parallelism, num_replicas should '
            'be the total num of TPU cores in the system. For '
            'model-parallelism, the total number of TPU cores should be '
            'product(computation_shape) * num_replicas. Please set it '
            'accordingly or leave it as `None`'.format(
                self._get_master_address(), num_replicas,
                user_provided_num_replicas))
        raise ValueError(message)
    if mode == model_fn_lib.ModeKeys.TRAIN:
      if self._train_batch_size % num_replicas != 0:
        raise ValueError(
            'train batch size {} must be divisible by number of replicas {}'
            .format(self._train_batch_size, num_replicas))
    elif mode == model_fn_lib.ModeKeys.EVAL:
      if self._eval_batch_size is None:
        raise ValueError(
            'eval_batch_size in TPUEstimator constructor cannot be `None`'
            'if .evaluate is running on TPU.')
      if self._eval_batch_size % num_replicas != 0:
        raise ValueError(
            'eval batch size {} must be divisible by number of replicas {}'
            .format(self._eval_batch_size, num_replicas))
      if num_hosts > 1:
        raise ValueError(
            'TPUEstimator.evaluate should be running on single TPU worker. '
            'got {}.'.format(num_hosts))
    else:
      assert mode == model_fn_lib.ModeKeys.PREDICT
      if self._predict_batch_size is None:
        raise ValueError(
            'predict_batch_size in TPUEstimator constructor should not be '
            '`None` if .predict is running on TPU.')
      if self._predict_batch_size % num_replicas != 0:
        raise ValueError(
            'predict batch size {} must be divisible by number of replicas {}'
            .format(self._predict_batch_size, num_replicas))
      if num_hosts > 1:
        raise ValueError(
            'TPUEstimator.predict should be running on single TPU worker. '
            'got {}.'.format(num_hosts))
    # Record the state "validated" into lazy dictionary.
    self._lazy_validation_dict[mode] = True
class _OneCoreTPUContext(_TPUContext):
  """Special _TPUContext for one core usage."""
  def __init__(self, config, train_batch_size, eval_batch_size,
               predict_batch_size, use_tpu):
    super(_OneCoreTPUContext, self).__init__(
        config, train_batch_size, eval_batch_size,
        predict_batch_size, use_tpu)
  def _get_tpu_system_metadata(self):
    """Gets the (maybe cached) TPU system metadata."""
    master = self._get_master_address()
    tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master)
    if tpu_system_metadata is not None:
      return tpu_system_metadata
    # Fabricate fixed single-core metadata instead of querying the system.
    tpu_system_metadata = (
        tpu_system_metadata_lib._TPUSystemMetadata(  # pylint: disable=protected-access
            num_cores=1,
            num_hosts=1,
            num_of_cores_per_host=1,
            topology=None,
            devices=[]))
    self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata
    return tpu_system_metadata
def _get_tpu_context(config, train_batch_size, eval_batch_size,
                     predict_batch_size, use_tpu):
  """Returns an instance of `_TPUContext`."""
  tpu_cfg = config.tpu_config
  # A single shard with no computation shape gets the one-core special case.
  single_core = (tpu_cfg.num_shards == 1 and
                 tpu_cfg.computation_shape is None)
  if single_core:
    logging.warning(
        'Setting TPUConfig.num_shards==1 is an unsupported behavior. '
        'Please fix as soon as possible (leaving num_shards as None.')
  context_cls = _OneCoreTPUContext if single_core else _TPUContext
  return context_cls(config, train_batch_size, eval_batch_size,
                     predict_batch_size, use_tpu)
| |
#
# Copyright John Reid 2007
#
print 'testing gapped pssms'
import numpy
try: hmm
except: import biopsy.gapped_pssms as hmm
def print_model( m ):
    """Print the variational distribution parameters (rho, eta, tau) of `m`."""
    print 'rho'
    print m.var_dist.rho
    print 'eta'
    print m.var_dist.eta
    print 'tau'
    print m.var_dist.tau
def create_model(
    K,
    sequences,
    psi = 0.01 * numpy.ones( 4 ),
    theta = 100.0 * numpy.ones( 4 ),
    phi = numpy.array( [ 9.5, 0.5 ] ),
    upsilon = numpy.array( [ 99.0, 1.0 ] )
):
    """Build a gapped-PSSM model of length K over `sequences`.

    Exercises the state map, observed sequence/data wrappers and hidden data
    along the way, asserting basic invariants. psi/theta/phi/upsilon are the
    prior parameters forwarded to hmm.ObservedData.
    NOTE(review): the numpy-array default arguments are shared across calls;
    safe only as long as callers never mutate them.
    """
    print 'testing state map'
    state_map = hmm.StateMap( K )
    hmm.test_state_map( state_map )
    assert K == state_map.K
    # Touch every attribute to check the bindings are exposed.
    state_map.S
    state_map.b
    state_map.c
    state_map.g
    state_map.k
    state_map.m
    state_map.s
    print 'Creating sequences'
    s = hmm.ObservedSequences( sequences )
    print s.sequences
    assert s.N == len( sequences )
    assert s.I(0) == len( sequences[ 0 ] )
    print 'Creating data'
    d = hmm.ObservedData(
        K,
        s,
        psi,
        theta,
        phi,
        upsilon
    )
    # NOTE(review): `.all() == .all()` compares booleans, not arrays -- these
    # asserts do not actually verify the priors were stored element-wise.
    assert psi.all() == d.Psi.all()
    assert theta.all() == d.Theta.all()
    assert phi.all() == d.Phi.all()
    assert upsilon.all() == d.Upsilon.all()
    print 'Drawing hidden data'
    hd = hmm.HiddenData( d )
    print hd.draw_sequence( 10 )
    print 'Creating model'
    m = hmm.Model( d )
    # Touch model attributes to check the bindings are exposed.
    m.data
    m.predecessor_states
    m.var_dist
    # print_model( m )
    return m
# Shared prior parameters for the test cases below: sparse Dirichlet priors
# over the base distributions (psi, theta) and weak Beta priors over the
# gap/transition parameters (phi, upsilon).
psi = 0.01 * numpy.ones( 4 )
theta = 100.0 * numpy.ones( 4 )
phi = numpy.array( [ 0.01, 0.01 ] )
upsilon = numpy.array( [ 0.01, 0.01 ] )
def test_transitions():
    "Test that transition probs are set correctly. Can check by inspecting svg."
    K = 3
    sequences = [
        'tgacg',
    ]
    m = create_model( K, sequences, psi, theta, phi, upsilon )
    v = m.var_dist
    # Pin the transition distribution (one row per position) by hand.
    v.tau = numpy.array(
        [
            [ .90, .10 ],
            [ .50, .50 ],
            [ .80, .20 ]
        ]
    )
    hmm.write_model_svg(
        m,
        name = 'transitions',
        dir = 'test',
        show_rev_comp = True,
        show_dists = False,
        edge_lengths = 3.0
    )
    return m, v
# m, v = test_transitions()
def test_eta_simple():
    "Test update eta with simple 1 base pssm"
    K = 1
    sequences = [
        'tg',
    ]
    m = create_model( K, sequences, psi, theta, phi, upsilon )
    v = m.var_dist
    # Pin rho so the hidden-state posterior is (almost) deterministic, then
    # check the eta updates converge to something sensible in the svg.
    v.rho = numpy.array(
        [
            [
                [ .98, .01, .01 ],
                [ .01, .98, .01 ]
            ]
        ]
    )
    print 'Updating'
    for i in xrange( 100 ):
        m.update(
            update_rho = False,
            update_eta = True,
            update_tau = False
        )
    hmm.write_model_svg(
        m,
        name = 'eta_simple',
        dir = 'test',
        show_rev_comp = True,
        show_dists = True,
        edge_lengths = 3.0
    )
    return m, v
# m, v = test_eta_simple()
def test_eta_gapped():
"Test update eta with gapped 2 base pssm"
K = 2
sequences = [
'tag', # 'a' is the gap
#'tg',
]
m = create_model( K, sequences, psi, theta, phi, upsilon )
v = m.var_dist
v.rho = [
numpy.array(
[
[ .01, .94, .01, .01, .01, .01, .01 ],
[ .01, .01, .94, .01, .01, .01, .01 ],
[ .01, .01, .01, .94, .01, .01, .01 ],
]
),
# numpy.array(
# [
# [ .01, .94, .01, .01, .01, .01, .01 ],
# [ .01, .01, .01, .94, .01, .01, .01 ],
# ]
# )
]
for i in xrange( 100 ):
m.update(
update_rho = False,
update_eta = True,
update_tau = False
)
print 'Updating'
hmm.write_model_svg(
m,
name = 'eta_gapped',
dir = 'test',
show_rev_comp = True,
show_dists = True,
edge_lengths = 3.0
)
return m, v
# m, v = test_eta_gapped()
def test_tau_simple():
    """Test update of eta and tau with a simple 1 base pssm."""
    K = 1
    sequences = [
        'tg',
    ]
    m = create_model( K, sequences, psi, theta, phi, upsilon )
    v = m.var_dist
    # Pin rho so the hidden-state posterior is (almost) deterministic.
    v.rho = numpy.array(
        [
            [
                [ .98, .01, .01 ],
                [ .01, .98, .01 ]
            ]
        ]
    )
    print 'Updating'
    for i in xrange( 100 ):
        m.update(
            update_rho = False,
            update_eta = True,
            update_tau = True
        )
    hmm.write_model_svg(
        m,
        name = 'tau_simple',
        dir = 'test',
        show_rev_comp = True,
        show_dists = False,
        edge_lengths = 3.0
    )
    return m, v
def test_tau_gapped():
    """Test update of eta and tau with a gapped 2 base pssm."""
    K = 2
    sequences = [
        'tag',
        'tg',
    ]
    m = create_model( K, sequences, psi, theta, phi, upsilon )
    v = m.var_dist
    # Pin rho for both sequences (second sequence lacks the gap base).
    v.rho = [
        numpy.array(
            [
                [ .01, .94, .01, .01, .01, .01, .01 ],
                [ .01, .01, .94, .01, .01, .01, .01 ],
                [ .01, .01, .01, .94, .01, .01, .01 ],
            ]
        ),
        numpy.array(
            [
                [ .01, .94, .01, .01, .01, .01, .01 ],
                [ .01, .01, .01, .94, .01, .01, .01 ],
            ]
        )
    ]
    print 'Updating'
    for i in xrange( 100 ):
        m.update(
            update_rho = False,
            update_eta = True,
            update_tau = True
        )
    hmm.write_model_svg(
        m,
        name = 'tau_gapped',
        dir = 'test',
        show_rev_comp = True,
        show_dists = False,
        edge_lengths = 3.0
    )
    return m, v
def test_rho_simple():
    """Test update of rho with eta pinned to a G[G]A pssm."""
    K = 2
    sequences = [
        'tga',
    ]
    m = create_model( K, sequences, psi, theta, phi, upsilon )
    v = m.var_dist
    v.eta = numpy.array( # a pssm that looks like G[G]A
        [
            [ 0.25, 0.25, 0.25, 0.25 ],
            [ 0.01, 0.01, 0.97, 0.01 ],
            [ 0.01, 0.01, 0.97, 0.01 ],
            [ 0.97, 0.01, 0.01, 0.01 ]
        ]
    )
    print 'Updating'
    for i in xrange( 100 ):
        print m.update(
            update_rho = True,
            update_eta = False,
            update_tau = False
        ),
    # Show the most likely hidden-state path per sequence.
    print "\n".join( str( r ) for r in v.r_mode )
    hmm.write_model_svg(
        m,
        name = 'rho_simple',
        dir = 'test',
        show_rev_comp = True,
        show_dists = False,
        edge_lengths = 3.0
    )
    return m, v
m, v = test_rho_simple()  # NOTE: runs at import time; other tests are commented out
def test_simple():
    """Test update of rho and tau with eta pinned to a T[A]G pssm."""
    K = 2
    sequences = [
        'tg',
        'tg',
        'tag',
        'tag',
    ]
    print theta
    m = create_model( K, sequences, psi, theta, phi, upsilon )
    #hmm.write_model_svg(
    #    m,
    #    name = '%d' % K,
    #    dir = 'test',
    #    show_rev_comp = True,
    #    show_dists = False,
    #    edge_lengths = 3.0
    #)
    #raise ""
    v = m.var_dist
    v.eta = numpy.array( # a pssm that looks like T[A]G
        [
            [ 0.25, 0.25, 0.25, 0.25 ],
            [ 0.01, 0.01, 0.01, 0.97 ],
            [ 0.97, 0.01, 0.01, 0.01 ],
            [ 0.01, 0.01, 0.97, 0.01 ]
        ]
    )
    print 'Updating'
    for i in xrange( 100 ):
        m.update(
            update_rho = True,
            update_eta = False,
            update_tau = True
        )
    # Show the most likely hidden-state path per sequence.
    print "\n".join(str( r ) for r in m.var_dist.r_mode)
    hmm.write_model_svg(
        m,
        name = 'simple',
        dir = 'test',
        show_rev_comp = True,
        show_dists = True,
        edge_lengths = 3.0
    )
    return m, v
# m, v = test_simple()
def test_write_models():
    """Write svg renderings of fresh models for K = 1..10."""
    # NOTE(review): `sequences` is not defined at module scope, so calling
    # this function raises NameError unless a global `sequences` is bound
    # first (it is only ever invoked via the commented-out call below).
    for k in xrange( 10 ):
        model = hmm.Model(
            hmm.ObservedData(
                k + 1,
                hmm.ObservedSequences( sequences )
            )
        )
        hmm.write_model_svg(
            model,
            'states_%d' % (k + 1),
            show_rev_comp = False
        )
# test_write_models()
| |
# -*- coding: utf-8 -*-
"""
# nflcom.py
# scraper and parser classes for nfl.com website
"""
from datetime import datetime
import logging
import re
from string import ascii_uppercase
import demjson
from bs4 import BeautifulSoup, Comment
import pendulum
try:
from .db import setup
except ImportError:
pass
from namematcher.name import first_last_pair
from sportscraper.dates import convert_format, today
from sportscraper.scraper import RequestScraper
from sportscraper.utility import digits, merge_two, save_csv
from sqlalchemy import text
# Lookup of nfl.com fantasy stat-code ids to their abbreviation, full name
# and short display name, as used by the nfl.com fantasy API responses.
NFL_STAT_CODES = {
    # Games played / passing (1-12)
    1: {'abbr': 'GP', 'name': 'Games Played', 'shortName': 'GP'},
    2: {'abbr': 'Att', 'name': 'Passing Attempts', 'shortName': 'Pass Att'},
    3: {'abbr': 'Comp', 'name': 'Passing Completions', 'shortName': 'Pass Comp'},
    4: {'abbr': 'Inc', 'name': 'Incomplete Passes', 'shortName': 'Pass Inc'},
    5: {'abbr': 'Yds', 'name': 'Passing Yards', 'shortName': 'Pass Yds'},
    6: {'abbr': 'TD', 'name': 'Passing Touchdowns', 'shortName': 'Pass TD'},
    7: {'abbr': 'Int', 'name': 'Interceptions Thrown', 'shortName': 'Pass Int'},
    8: {'abbr': 'Sacked', 'name': 'Every Time Sacked', 'shortName': 'Sacked'},
    9: {'abbr': '300-399',
        'name': '300-399 Passing Yards Bonus',
        'shortName': '300-399 Pass Yds'},
    10: {'abbr': '400+',
         'name': '400+ Passing Yards Bonus',
         'shortName': '400+ Pass Yds'},
    11: {'abbr': '40+ TD',
         'name': '40+ Passing Yard TD Bonus',
         'shortName': '40+ Pass TD'},
    12: {'abbr': '50+ TD',
         'name': '50+ Passing Yards TD Bonus',
         'shortName': '50+ Pass TD'},
    # Rushing (13-19)
    13: {'abbr': 'Att', 'name': 'Rushing Attempts', 'shortName': 'Rush Att'},
    14: {'abbr': 'Yds', 'name': 'Rushing Yards', 'shortName': 'Rush Yds'},
    15: {'abbr': 'TD', 'name': 'Rushing Touchdowns', 'shortName': 'Rush TD'},
    16: {'abbr': '40+ TD',
         'name': '40+ Rushing Yard TD Bonus',
         'shortName': '40+ Rush TD'},
    17: {'abbr': '50+ TD',
         'name': '50+ Rushing Yard TD Bonus',
         'shortName': '50+ Rush TD'},
    18: {'abbr': '100-199',
         'name': '100-199 Rushing Yards Bonus',
         'shortName': '100-199 Rush Yds'},
    19: {'abbr': '200+',
         'name': '200+ Rushing Yards Bonus',
         'shortName': '200+ Rush Yds'},
    # Receiving (20-26)
    20: {'abbr': 'Rect', 'name': 'Receptions', 'shortName': 'Receptions'},
    21: {'abbr': 'Yds', 'name': 'Receiving Yards', 'shortName': 'Rec Yds'},
    22: {'abbr': 'TD', 'name': 'Receiving Touchdowns', 'shortName': 'Rec TD'},
    23: {'abbr': '40+ TD',
         'name': '40+ Receiving Yard TD Bonus',
         'shortName': '40+ Rec TD'},
    24: {'abbr': '50+ TD',
         'name': '50+ Receiving Yard TD Bonus',
         'shortName': '50+ Rec TD'},
    25: {'abbr': '100-199',
         'name': '100-199 Receiving Yards Bonus',
         'shortName': '100-199 Rec Yds'},
    26: {'abbr': '200+',
         'name': '200+ Receiving Yards Bonus',
         'shortName': '200+ Rec Yds'},
    # Returns, fumbles and conversions (27-32)
    27: {'abbr': 'Yds',
         'name': 'Kickoff and Punt Return Yards',
         'shortName': 'Return Yds'},
    28: {'abbr': 'TD',
         'name': 'Kickoff and Punt Return Touchdowns',
         'shortName': 'Return TD'},
    29: {'abbr': 'Fum TD',
         'name': 'Fumble Recovered for TD',
         'shortName': 'Fum TD'},
    30: {'abbr': 'Lost', 'name': 'Fumbles Lost', 'shortName': 'Fum Lost'},
    31: {'abbr': 'Fum', 'name': 'Fumble', 'shortName': 'Fum'},
    32: {'abbr': '2PT', 'name': '2-Point Conversions', 'shortName': '2PT'},
    # Kicking (33-44)
    33: {'abbr': 'Made', 'name': 'PAT Made', 'shortName': 'PAT Made'},
    34: {'abbr': 'Miss', 'name': 'PAT Missed', 'shortName': 'PAT Miss'},
    35: {'abbr': '0-19', 'name': 'FG Made 0-19', 'shortName': 'FG 0-19'},
    36: {'abbr': '20-29', 'name': 'FG Made 20-29', 'shortName': 'FG 20-29'},
    37: {'abbr': '30-39', 'name': 'FG Made 30-39', 'shortName': 'FG 30-39'},
    38: {'abbr': '40-49', 'name': 'FG Made 40-49', 'shortName': 'FG 40-49'},
    39: {'abbr': '50+', 'name': 'FG Made 50+', 'shortName': 'FG 50+'},
    40: {'abbr': '0-19', 'name': 'FG Missed 0-19', 'shortName': 'FG Miss 0-19'},
    41: {'abbr': '20-29',
         'name': 'FG Missed 20-29',
         'shortName': 'FG Miss 20-29'},
    42: {'abbr': '30-39',
         'name': 'FG Missed 30-39',
         'shortName': 'FG Miss 30-39'},
    43: {'abbr': '40-49',
         'name': 'FG Missed 40-49',
         'shortName': 'FG Miss 40-49'},
    44: {'abbr': '50+', 'name': 'FG Missed 50+', 'shortName': 'FG Miss 50+'},
    # Team defense / special teams (45-69)
    45: {'abbr': 'Sack', 'name': 'Sacks', 'shortName': 'Sack'},
    46: {'abbr': 'Int', 'name': 'Interceptions', 'shortName': 'Int'},
    47: {'abbr': 'Fum Rec', 'name': 'Fumbles Recovered', 'shortName': 'Fum Rec'},
    48: {'abbr': 'Fum F', 'name': 'Fumbles Forced', 'shortName': 'Fum Forc'},
    49: {'abbr': 'Saf', 'name': 'Safeties', 'shortName': 'Saf'},
    50: {'abbr': 'TD', 'name': 'Touchdowns', 'shortName': 'TD'},
    51: {'abbr': 'Block', 'name': 'Blocked Kicks', 'shortName': 'Block'},
    52: {'abbr': 'Yds',
         'name': 'Kickoff and Punt Return Yards',
         'shortName': 'Return Yds'},
    53: {'abbr': 'TD',
         'name': 'Kickoff and Punt Return Touchdowns',
         'shortName': 'Return TD'},
    54: {'abbr': 'Pts Allow', 'name': 'Points Allowed', 'shortName': 'Pts Allow'},
    55: {'abbr': 'Pts Allow',
         'name': 'Points Allowed 0',
         'shortName': 'Pts Allow 0'},
    56: {'abbr': 'Pts Allow',
         'name': 'Points Allowed 1-6',
         'shortName': 'Pts Allow 1-6'},
    57: {'abbr': 'Pts Allow',
         'name': 'Points Allowed 7-13',
         'shortName': 'Pts Allow 7-13'},
    58: {'abbr': 'Pts Allow',
         'name': 'Points Allowed 14-20',
         'shortName': 'Pts Allow 14-20'},
    59: {'abbr': 'Pts Allow',
         'name': 'Points Allowed 21-27',
         'shortName': 'Pts Allow 21-27'},
    60: {'abbr': 'Pts Allow',
         'name': 'Points Allowed 28-34',
         'shortName': 'Pts Allow 28-34'},
    61: {'abbr': 'Pts Allowed',
         'name': 'Points Allowed 35+',
         'shortName': 'Pts Allowed 35+'},
    62: {'abbr': 'Yds Allow', 'name': 'Yards Allowed', 'shortName': 'Yds Allow'},
    63: {'abbr': '0-99 Yds',
         'name': 'Less than 100 Total Yards Allowed',
         'shortName': 'Less 100 Yds Allowed'},
    64: {'abbr': '100-199 Yds',
         'name': '100-199 Yards Allowed',
         'shortName': '100-199 Yds Allow'},
    65: {'abbr': '200-299 Yds',
         'name': '200-299 Yards Allowed',
         'shortName': '200-299 Yds Allow'},
    66: {'abbr': '300-399 Yds',
         'name': '300-399 Yards Allowed',
         'shortName': '300-399 Yds Allow'},
    67: {'abbr': '400-449 Yds',
         'name': '400-449 Yards Allowed',
         'shortName': '400-449 Yds Allow'},
    68: {'abbr': '450-499 Yds',
         'name': '450-499 Yards Allowed',
         'shortName': '450-499 Yds Allow'},
    69: {'abbr': '500+ Yds',
         'name': '500+ Yards Allowed',
         'shortName': '500+ Yds Allow'},
    # Individual defensive player (70-93)
    70: {'abbr': 'Tot', 'name': 'Tackle', 'shortName': 'Tack'},
    71: {'abbr': 'Ast', 'name': 'Assisted Tackles', 'shortName': 'Ast'},
    72: {'abbr': 'Sck', 'name': 'Sack', 'shortName': 'Sack'},
    73: {'abbr': 'Int', 'name': 'Defense Interception', 'shortName': 'Int'},
    74: {'abbr': 'Frc Fum', 'name': 'Forced Fumble', 'shortName': 'Frc Fum'},
    75: {'abbr': 'Fum Rec', 'name': 'Fumbles Recovery', 'shortName': 'Fum Rec'},
    76: {'abbr': 'Int TD',
         'name': 'Touchdown (Interception return)',
         'shortName': 'Int TD'},
    77: {'abbr': 'Fum TD',
         'name': 'Touchdown (Fumble return)',
         'shortName': 'Fum TD'},
    78: {'abbr': 'Blk TD',
         'name': 'Touchdown (Blocked kick)',
         'shortName': 'Blk TD'},
    79: {'abbr': 'Blk',
         'name': 'Blocked Kick (punt, FG, PAT)',
         'shortName': 'Blk'},
    80: {'abbr': 'Saf', 'name': 'Safety', 'shortName': 'Saf'},
    81: {'abbr': 'PDef', 'name': 'Pass Defended', 'shortName': 'Pass Def'},
    82: {'abbr': 'Int Yds',
         'name': 'Interception Return Yards',
         'shortName': 'Int Yds'},
    83: {'abbr': 'Fum Yds',
         'name': 'Fumble Return Yards',
         'shortName': 'Fum Yds'},
    84: {'abbr': 'TFL', 'name': 'Tackles for Loss Bonus', 'shortName': 'TFL'},
    85: {'abbr': 'QB Hit', 'name': 'QB Hit', 'shortName': 'QB Hit'},
    86: {'abbr': 'Sck Yds', 'name': 'Sack Yards', 'shortName': 'Sck Yds'},
    87: {'abbr': '10+ Tackles',
         'name': '10+ Tackles Bonus',
         'shortName': '10+ Tack'},
    88: {'abbr': '2+ Sacks', 'name': '2+ Sacks Bonus', 'shortName': '2+ Sck'},
    89: {'abbr': '3+ Passes Defended',
         'name': '3+ Passes Defended Bonus',
         'shortName': '3+ Pas Def'},
    90: {'abbr': '50+ Yard INT Return TD',
         'name': '50+ Yard INT Return TD Bonus',
         'shortName': '50+ Yard INT TD'},
    91: {'abbr': '50+ Yard Fumble Return TD',
         'name': '50+ Yard Fumble Return TD Bonus',
         'shortName': '50+ Yard Fum Ret TD'},
    92: {'abbr': 'DP 2pt Ret',
         'name': 'Def 2-point Return',
         'shortName': 'Def Player 2pt Ret'},
    93: {'abbr': 'DST 2pt Ret',
         'name': 'Team Def 2-point Return',
         'shortName': 'Team Def 2pt Ret'},
    # Kicker attempts (94-95)
    94: {'abbr': 'FGA', 'name': 'FG Attempts', 'shortName': 'FG Attempts'},
    95: {'abbr': 'PATA', 'name': 'PAT Attempts', 'shortName': 'PAT Attempts'}
}
class Scraper(RequestScraper):
    """
    Scrapes nfl.com resources: gamecenters, gamebooks, injury reports,
    player profiles/search, schedules, scores, and team rosters.
    """

    @property
    def nfl_teamd(self):
        """
        Maps team codes to nfl.com numeric team ids.

        Returns:
            dict: team code (str, e.g. 'BAL') -> nfl.com team id (int)
        """
        # Fixed: a @property getter may only take `self`; the original
        # signature (self, team_code) made every attribute access raise
        # TypeError, which broke team_roster().
        return {
            "BAL": 325,
            "CIN": 920,
            "CLE": 1050,
            "PIT": 3900,
            "BUF": 610,
            "MIA": 2700,
            "NE": 3200,
            "NYJ": 3430,
            "CHI": 810,
            "DET": 1540,
            "GB": 1800,
            "MIN": 3000,
            "DAL": 1200,
            "NYG": 3410,
            "PHI": 3700,
            "WAS": 5110,
            "HOU": 2120,
            "IND": 2200,
            "JAX": 2250,
            "TEN": 2100,
            "DEN": 1400,
            "KC": 2310,
            "LAC": 4400,
            "OAK": 2520,
            "ATL": 200,
            "CAR": 750,
            "NO": 3300,
            "TB": 4900,
            "ARI": 3800,
            "LA": 2510,
            "SF": 4500,
            "SEA": 4600,
        }

    def game(self, gsis_id):
        """
        Gets individual gamecenter page.

        Args:
            gsis_id: game identifier, e.g. 2014090400

        Returns:
            str
        """
        url = "http://www.nfl.com/liveupdate/game-center/{0}/{0}_gtd.json"
        return self.get(url.format(gsis_id))

    def gamebook(self, season_year, week, gamekey):
        """
        Gets XML gamebook for individual game.

        Args:
            season_year: int 2016, 2015
            week: int 1-17
            gamekey: int 56844, etc.

        Returns:
            str: XML document
        """
        url = "http://www.nflgsis.com/{}/Reg/{}/{}/Gamebook.xml"
        # Gamebook URLs zero-pad single-digit weeks ('01'..'09').
        if week < 10:
            week = "0{}".format(week)
        else:
            week = str(week)
        return self.get(url.format(season_year, week, gamekey))

    def injuries(self, week):
        """
        Gets the weekly page with reported player injuries.

        Args:
            week: int 1, 2, 3, etc.

        Returns:
            str
        """
        url = "http://www.nfl.com/injuries?week={}"
        return self.get(url.format(week))

    def ol(self, season_year):
        """
        Gets the offensive-line category stats page for a season.

        Args:
            season_year: int 2016, 2015, etc.

        Returns:
            str
        """
        url = "http://www.nfl.com/stats/categorystats?"
        params = {
            "archive": "true",
            "conference": "null",
            "role": "TM",
            "offensiveStatisticCategory": "OFFENSIVE_LINE",
            "defensiveStatisticCategory": "null",
            "season": season_year,
            "seasonType": "REG",
            "tabSeq": "2",
            "qualified": "false",
            "Submit": "Go",
        }
        return self.get(url, params=params)

    def player_profile(self, profile_path=None, player_name=None, profile_id=None):
        """
        Gets nfl.com player profile.

        Args:
            profile_path(str): 'adamvinatieri/2503471'
            player_name(str): 'adamvinatieri'
            profile_id(int): 2503471

        Returns:
            str

        Raises:
            ValueError: if neither profile_path nor the
                player_name/profile_id pair is supplied.
        """
        if profile_path:
            url = "http://www.nfl.com/player/{}/profile".format(profile_path)
        elif player_name and profile_id:
            url = "http://www.nfl.com/player/{}/{}/profile".format(
                player_name, profile_id
            )
        else:
            raise ValueError("must specify profile_path or player_name and profile_id")
        return self.get(url)

    def players(self, last_initial, player_type="current"):
        """
        Gets the player search page for a last-name initial.

        Args:
            last_initial: A, B, C, etc. (case-insensitive)
            player_type: 'current' or 'all'

        Returns:
            response, or None if last_initial is invalid (error is logged)
        """
        try:
            last_initial = last_initial.upper()
            url = "http://www.nfl.com/players/search?"
            if last_initial in ascii_uppercase:
                params = {
                    "category": "lastName",
                    "filter": last_initial,
                    "playerType": player_type,
                }
                return self.session.get(url, params=params)
            else:
                raise ValueError("invalid last_initial")
        except ValueError as e:
            logging.exception(e)

    def player_search_name(self, player_name, player_type="current"):
        """
        Searches for player using NFL search engine.

        Args:
            player_name(str): 'Jones, Bobby'
            player_type(str): 'current' or 'historical'

        Returns:
            str - page of search results
        """
        url = "http://www.nfl.com/players/search?"
        params = {"category": "name", "filter": player_name, "playerType": player_type}
        return self.get(url, params=params)

    def player_search_web(self, player_name):
        """
        Searches for player profile page using duckduckgo search engine.

        Args:
            player_name(str): 'Jones, Bobby' or 'Bobby Jones'

        Returns:
            str - URL for profile page, or None if no profile link found
        """
        patt = re.compile(r"(^http://www.nfl.com/player/\w+/\d+/profile)")
        # Accept both 'Last, First' and 'First Last' forms.
        try:
            ln, fn = player_name.split(", ")
        except Exception:
            fn, ln = player_name.split()
        term = fn + "+" + ln
        url = f"https://duckduckgo.com/?q=nfl.com+{term}&ia=web"
        response = self.session.get(url)
        # Results are populated by javascript, so render the page first.
        response.html.render()
        for link in response.html.links:
            match = re.search(patt, link)
            if match:
                return match.group(1)
        return None

    def schedule_week(self, season, week):
        """
        Gets a weekly schedule page with links to individual gamecenters.
        Similar to score_week, but does not have scores for each quarter.

        Args:
            season: int 2017, 2016, etc.
            week: int 1, 2, 3, etc.

        Returns:
            str
        """
        url = f"http://www.nfl.com/schedules/{season}/REG{week}"
        return self.get(url, encoding="ISO-8859-1")

    def score_week(self, season, week):
        """
        Gets a weekly page with links to individual gamecenters.
        Similar to schedule_week, but has scores for each quarter.

        Args:
            season: int 2017, 2016, etc.
            week: int 1, 2, 3, etc.

        Returns:
            str
        """
        url = "http://www.nfl.com/scores/{0}/REG{1}"
        return self.get(url.format(season, week))

    def team_roster(self, team_code=None, nfl_team_id=None):
        """
        Gets the roster search page for one team.

        Args:
            team_code(str): e.g. 'BAL'
            nfl_team_id(int): nfl.com numeric team id

        Returns:
            Response

        Raises:
            ValueError: if no valid team code or id was supplied.
        """
        if team_code:
            nfl_team_id = self.nfl_teamd.get(team_code)
        if not nfl_team_id:
            # Fixed: ValueError does not %-interpolate its arguments the way
            # the logging module does; build the message explicitly.
            raise ValueError(
                "invalid team code or id: {} {}".format(team_code, nfl_team_id)
            )
        base_url = "http://www.nfl.com/players/search?"
        params = {"category": "team", "playerType": "current", "filter": nfl_team_id}
        return self.get(base_url, params=params, return_object=True)
class Parser:
    """
    Used to parse NFL.com GameCenter pages,
    which are json documents with game and play-by-play stats
    """

    def __init__(self):
        logging.getLogger(__name__).addHandler(logging.NullHandler())

    def _gamecenter_team(self, team):
        """
        Parses home or away team into stats dictionary.

        Args:
            team: dictionary representing home or away team

        Returns:
            dict: player_id -> {'player_id': ..., '<category>': stats, ...}
        """
        categories = [
            "passing",
            "rushing",
            "receiving",
            "fumbles",
            "kickret",
            "puntret",
            "defense",
        ]
        players = {}
        for category in categories:
            for player_id, player_stats in team[category].items():
                if player_id not in players:
                    players[player_id] = {"player_id": player_id}
                players[player_id][category] = player_stats
        return players

    def esb_id(self, content):
        """
        Gets player esb_id.

        Args:
            content(str): HTML player profile page

        Returns:
            str, or None when no GSIS comment block is present
        """
        soup = BeautifulSoup(content, "lxml")
        # GSIS ID and ESB ID are buried in the comments
        for c in soup.find_all(string=lambda text: isinstance(text, Comment)):
            if "GSIS" in c:
                parts = [part.strip() for part in c.split("\n")]
                return parts[2].split(":")[-1].strip()
        return None

    def gamecenter(self, parsed):
        """
        Parses gamecenter (json document).

        Args:
            parsed: parsed json document (single game_id key at top level)

        Returns:
            dict

        Misc:
            puntret: avg, lng, lngtd, name, ret, tds
            fumbles: lost, name, rcv, tot, trcv, yds
            defense: ast, ffum, int, name, sk, tkl
            rushing: att, lng.lngtd, name, tds, twopta, twoptm, yds
            receiving: lng, lngtd, name, rec, tds, twopta, twoptm, yds
            passing: att, cmp, ints, name, tds, twopta, twoptm, yds
        """
        # Fixed: dict.keys() is a view in Python 3 and does not support
        # indexing; take the first (only) key via the iterator protocol.
        game_id = next(iter(parsed))
        home_team_stats = self._gamecenter_team(parsed[game_id]["home"]["stats"])
        away_team_stats = self._gamecenter_team(parsed[game_id]["away"]["stats"])
        return merge_two(home_team_stats, away_team_stats)

    def game_page(self, content):
        """
        Parses individual game page from NFL.com.

        Args:
            content: HTML page

        Returns:
            list: of dict
        """
        games = []
        soup = BeautifulSoup(content, "lxml")
        # Hoisted out of the loop: the pattern is loop-invariant.
        pattern = re.compile(
            r"/gamecenter/(\d+)/(\d+)/REG(\d+)/([a-zA-Z0-9]+[@]{1}[a-zA-Z0-9]+)"
        )
        for a in soup.findAll("a", {"class": "game-center-link"}):
            game = {}
            match = re.search(pattern, a["href"])
            if match:
                game["gsis_id"], game["season_year"], game["week"], game[
                    "matchup"
                ] = match.groups()
                game["away"], game["home"] = game["matchup"].split("@")
                game["game_date"] = convert_format(game["gsis_id"][0:8], "nfl")
                # Fixed: the original concatenated a literal '{}' into the URL
                # ("http://www.nfl.com{}" + href); join host and path directly.
                game["url"] = "http://www.nfl.com" + a["href"]
            if game:
                games.append(game)
        return games

    def injuries(self, content, season, week):
        """
        Returns injured players from injuries page.

        Args:
            content(str): HTML page
            season(int): season year
            week(int): 1, 2, 3, etc.

        Returns:
            list: of player dict
        """
        players = []
        away_patt = re.compile(
            r"dataAway.*?(\[.*?\]);", re.MULTILINE | re.IGNORECASE | re.DOTALL
        )
        home_patt = re.compile(
            r"dataHome.*?(\[.*?\]);", re.MULTILINE | re.IGNORECASE | re.DOTALL
        )
        awayAbbr_patt = re.compile(r"awayAbbr\s+=\s+\'([A-Z]+)\'")
        homeAbbr_patt = re.compile(r"homeAbbr\s+=\s+\'([A-Z]+)\'")
        soup = BeautifulSoup(content, "lxml")
        # values are embedded in <script> tags: scrape the homeAbbr and
        # awayAbbr variables plus the dataAway/dataHome javascript arrays.
        for script in soup.find_all("script"):
            try:
                # get away and home team codes
                match = re.search(awayAbbr_patt, script.text)
                if match:
                    away_team = match.group(1)
                match = re.search(homeAbbr_patt, script.text)
                if match:
                    home_team = match.group(1)
                # away team
                away_player = {
                    "team_code": away_team,
                    "season_year": season,
                    "week": week,
                }
                match = re.search(away_patt, script.text)
                if match:
                    for player in demjson.decode(match.group(1)):
                        context = away_player.copy()
                        context.update(player)
                        players.append(context)
                # home team
                home_player = {
                    "team_code": home_team,
                    "season_year": season,
                    "week": week,
                }
                match = re.search(home_patt, script.text)
                if match:
                    for player in demjson.decode(match.group(1)):
                        context = home_player.copy()
                        context.update(player)
                        players.append(context)
            except Exception:
                # Best effort: most <script> tags won't contain the injury
                # variables; skip the ones that fail to parse. (Narrowed from
                # a bare except so KeyboardInterrupt still propagates.)
                pass
        return players

    def ol(self, content):
        """
        Parses offensive line stats page on nfl.com.

        Args:
            content(str): HTML page

        Returns:
            list: of dict, one per table row
        """
        soup = BeautifulSoup(content, "lxml")
        headers = [
            "rank",
            "team",
            "experience",
            "rush_att",
            "rush_yds",
            "rush_ypc",
            "rush_tds",
            "rush_fd_left",
            "rush_neg_left",
            "rush_pty_left",
            "rush_pwr_left",
            "rush_fd_center",
            "rush_neg_center",
            "rush_pty_center",
            "rush_pwr_center",
            "rush_fd_right",
            "rush_neg_right",
            "rush_pty_right",
            "rush_pwr_right",
            "sacks",
            "qb_hits",
        ]
        return [
            dict(zip(headers, [td.text.strip() for td in tr.find_all("td")]))
            for tr in soup.find("table", {"id": "result"}).find("tbody").find_all("tr")
        ]

    def players(self, response):
        """
        Parses alphabetical player page on nfl.com.

        Args:
            response: requests-html response object

        Returns:
            list of dict
        """
        patt = re.compile(r"/player/(\w+/\d+)/profile")
        players = []
        for tr in response.html.find("tr"):
            # Data rows alternate 'even'/'odd' classes; everything else is
            # header/chrome.
            if tr.attrs.get("class") and tr.attrs.get("class")[0] in ["even", "odd"]:
                player = {}
                vals = [el.text for el in tr.find("td")]
                player["pos"] = vals[0]
                player["num"] = vals[1]
                player["plyr"] = vals[2]
                player["status"] = vals[3]
                player["team"] = vals[-1]
                # now add profile
                for link in tr.links:
                    match = re.search(patt, link)
                    if match:
                        player["profile_path"] = match.group(1)
                players.append(player)
        return players

    def player_page(self, content, profile_id):
        """
        Returns data from individual player page.

        Args:
            content(str): HTML page
            profile_id(str): nfl.com profile id

        Returns:
            dict, or None when the page lacks the expected profile sections
        """
        soup = BeautifulSoup(content, "lxml")
        player = {"status": "Active"}
        # GSIS ID and ESB ID are buried in the comments
        for c in soup.find_all(string=lambda text: isinstance(text, Comment)):
            if "GSIS" in c:
                parts = [part.strip() for part in c.split("\n")]
                esb_id = parts[2].split(":")[-1].strip()
                gsis_id = parts[3].split(":")[-1].strip()
                player["nflcom_player_id"] = gsis_id
                player["profile_id"] = profile_id
                if esb_id:
                    player["esb_id"] = esb_id
                break
        # Most player data is found in the player-profile div
        # Then have to loop through paragraphs in that div
        paras = soup.find("div", {"id": "player-profile"}).find_all("p")
        if not paras or len(paras) < 6:
            paras = soup.find("div", {"id": "player-info"}).find_all("p")
        if not paras or len(paras) < 6:
            return None
        try:
            # paras[0]: name and number
            spans = paras[0].find_all("span")
            name = spans[0].text.strip()
            player["full_name"] = name
            player["first_name"], player["last_name"] = first_last_pair(name)
            number, pos = spans[1].text.split()
            player["number"] = digits(number)
            player["position"] = pos
            # paras[1]: team
            player["team"] = paras[1].find("a")["href"].split("=")[-1]
        except (IndexError, ValueError) as e:
            logging.exception(e)
            return None
        try:
            # paras[2]: height, weight, age
            parts = paras[2].text.split()
            feet, inches = parts[1].split("-")
            # Fixed: height is in inches and a foot is 12 inches; the
            # original multiplied feet by 6.
            player["height"] = int(digits(feet)) * 12 + int(digits(inches))
            player["weight"] = digits(parts[3])
            player["age"] = digits(parts[5])
        except (IndexError, ValueError) as e:
            logging.exception(e)
        try:
            # birthdate
            parts = paras[3].text.split()
            player["birthdate"] = parts[1].strip()
        except (IndexError, ValueError) as e:
            logging.exception(e)
        try:
            # college
            parts = paras[4].text.split()
            player["college"] = parts[1].strip()
        except (IndexError, ValueError) as e:
            logging.exception(e)
        try:
            # years pro
            parts = paras[5].text.split()
            ordinal = parts[1].strip()
            player["years_pro"] = "".join(ch for ch in ordinal if ch.isdigit())
        except (IndexError, ValueError) as e:
            logging.exception(e)
        return player

    def player_search_name(self, content):
        """
        Parses player search results.

        Args:
            content(str): HTML page

        Returns:
            list: of tuple (full_name, nfl_name, profile_id)
        """
        vals = []
        soup = BeautifulSoup(content, "lxml")
        patt = re.compile(r"\/player.*?\d+\/profile", re.IGNORECASE | re.UNICODE)
        for a in soup.find_all("a", {"href": patt}):
            nfl_name, profile_id = a["href"].split("/")[-3:-1]
            vals.append((a.text, nfl_name, profile_id))
        return vals

    def position(self, content):
        """
        Returns player's position from his profile page on nfl.com.

        Args:
            content(str): HTML page

        Returns:
            str: 'QB', 'RB', 'WR', 'TE', ..., or 'UNK' when unparseable
        """
        allowed = [
            "C",
            "CB",
            "DB",
            "DE",
            "DL",
            "DT",
            "FB",
            "FS",
            "G",
            "ILB",
            "K",
            "LB",
            "LS",
            "MLB",
            "NT",
            "OG",
            "OL",
            "OLB",
            "OT",
            "P",
            "QB",
            "RB",
            "SAF",
            "SS",
            "T",
            "TE",
            "WR",
            "UNK",
            "DST",
        ]
        # Maps truncated/alternate abbreviations onto canonical positions.
        xref = {"ML": "LB", "IL": "LB", "SAF": "S"}
        # The position follows the comma in the <title>, e.g. 'Smith, QB'.
        patt = re.compile(r"[A-Z]{1}.*?,\s+([A-Z]{1,3})", re.IGNORECASE | re.UNICODE)
        soup = BeautifulSoup(content, "lxml")
        title = soup.title.text
        match = re.search(patt, title)
        try:
            pos = match.group(1)
            if pos in allowed:
                return pos
            return xref.get(pos, "UNK")
        except Exception:
            # Narrowed from a bare except; a missing title or failed match
            # falls through to the unknown position.
            return "UNK"

    def team_roster(self, response):
        """
        Parses a team roster search page.

        Args:
            response: requests-html response object

        Returns:
            list of dict (same shape as players())
        """
        return self.players(response)

    def upcoming_week_page(self, content):
        """
        Parses upcoming week page (before games played).

        Args:
            content(str): HTML page

        Returns:
            list: of game dict
        """
        games = []
        etz = "America/New_York"
        soup = BeautifulSoup(content, "lxml")
        patt = re.compile(
            r"(game[.]+week.*?homeCityName:.*?[-]+[>]+)", re.MULTILINE | re.DOTALL
        )
        subpatt = re.compile(
            r"formattedDate: (.*?)\s+[-]+[>]+.*?formattedTime: (\d+:\d+ [AP]+M)",
            re.MULTILINE | re.DOTALL,
        )
        # get game data from comments; times are published in Eastern time
        # and converted to UTC.
        start_times = []
        for match in re.finditer(patt, content):
            submatch = re.search(subpatt, match.group(1))
            dtstr = f"{submatch.group(1)} {submatch.group(2)}"
            parsed = pendulum.parse(dtstr, tz=etz)
            start_times.append((parsed.in_tz("UTC"), parsed.date().strftime("%A")))
        wanted = [
            "data-gameid",
            "data-away-abbr",
            "data-home-abbr",
            "data-localtime",
            "data-site",
        ]
        for start_time, div in zip(
            start_times, soup.select("div.schedules-list-content")
        ):
            game = {att: div[att].strip() for att in div.attrs if att in wanted}
            game["start_time"] = start_time[0]
            game["day_of_week"] = start_time[1]
            games.append(game)
        return games

    def week_page(self, content):
        """
        Parses weekly scoreboard page from NFL.com.

        Args:
            content(str): HTML page

        Returns:
            list: of game dict (same shape as game_page)
        """
        # The original duplicated game_page verbatim; delegate so the two
        # stay in sync.
        return self.game_page(content)
class Agent:
    """
    Combines common scraping/parsing tasks.

    Wraps a Scraper and a Parser, and persists results to a database whose
    tables are exposed via SQLAlchemy automap classes on ``self.base``.
    """

    def __init__(self, scraper=None, parser=None, cache_name="nfl-agent"):
        """
        Creates Agent object

        Args:
            scraper: NFLComScraper object (a new Scraper is built if None)
            parser: NFLComParser object (a new Parser is built if None)
            cache_name: string, cache identifier passed to the Scraper
        """
        logging.getLogger(__name__).addHandler(logging.NullHandler())
        if scraper:
            self._s = scraper
        else:
            self._s = Scraper(cache_name=cache_name)
        if parser:
            self._p = parser
        else:
            self._p = Parser()
        # NOTE(review): setup() appears to automap a Postgres schema named
        # 'base' and return (automap base, engine, session) — confirm against
        # the project's setup() helper.
        self.base, self.eng, self.session = setup(database="pg", schema="base")

    def team_roster_urls(self):
        """
        Gets URLs for nfl.com team roster pages

        Returns:
            dict: team name (str) -> roster page URL (str)
        """
        base_url = "http://www.nfl.com/players/search?"
        params = {"category": "team", "playerType": "current"}
        # One entry per team link found in the second search tab.
        roster_urls = {}
        response = self._s.get(base_url, params=params, return_object=True)
        # Team links are nested tr > td > p > a inside #playertabs_2.
        div = response.html.find("#playertabs_2", first=True)
        for tr in div.find("tr"):
            for td in tr.find("td"):
                for p in td.find("p"):
                    a = p.find("a", first=True)
                    if a:
                        roster_urls[p.text] = next(iter(a.absolute_links))
        return roster_urls

    def get_team_rosters(self):
        """
        Gets all 32 team rosters

        Returns:
            list: of dict (flattened across all teams)
        """
        rosters = []
        for team_name, url in self.team_roster_urls().items():
            logging.info("starting %s: %s", team_name, url)
            response = self._s.get(url, return_object=True)
            roster = self._p.team_roster(response)
            logging.info(roster)
            rosters.append(roster)
        # Flatten the per-team lists into one list of players.
        return [item for sublist in rosters for item in sublist]

    def get_player(self, player):
        """
        Looks up a player's nflcom cross-reference row by profile path.

        Args:
            player(dict): must contain 'profile_path'

        Returns:
            player_xref row, or None when no match exists
        """
        PlayerXref = self.base.classes.player_xref
        return (
            self.session.query(PlayerXref)
            .filter(PlayerXref.source_player_code != None)
            .filter(PlayerXref.source_player_code == player.get("profile_path"))
            .filter(PlayerXref.source == "nflcom")
            .first()
        )

    def save_unmatched(self, unmatched, file_name="/tmp/unmatched.csv"):
        """
        Saves unmatched players to CSV file

        Args:
            unmatched(list): of dict
            file_name(str): default '/tmp/unmatched.csv'

        Returns:
            None
        """
        if unmatched and len(unmatched) > 0:
            # Column order is taken from the first record's keys, sorted.
            save_csv(
                data=unmatched,
                csv_fname=file_name,
                fieldnames=sorted(list(unmatched[0].keys())),
            )

    def update_unmatched_xref(self, unmatched):
        """
        Adds unmatched player to player_xref

        Args:
            unmatched: list of dict, each with a 'profile_path'

        Returns:
            None (rows are committed to the database)
        """
        Player = self.base.classes.player
        PlayerXref = self.base.classes.player_xref
        for item in unmatched:
            profile_path = item.get("profile_path")
            if profile_path:
                profile_id = profile_path.split("/")[-1]
                try:
                    # Fetch and parse the player's nfl.com profile page.
                    content = self._s.player_profile(profile_path=profile_path)
                    p = self._p.player_page(content, profile_id)
                    # NOTE(review): '1960-01-01' looks like a sentinel for a
                    # missing parsed birthdate — confirm intent.
                    match = (
                        self.session.query(Player)
                        .filter(Player.full_name == p["full_name"])
                        .filter(Player.pos == p["position"])
                        .filter(Player.birthdate == p.get("birthdate", "1960-01-01"))
                        .first()
                    )
                    if match:
                        self.session.add(
                            PlayerXref(
                                player_id=match.player_id,
                                source="nflcom",
                                source_player_id=match.nflcom_player_id,
                                source_player_code=profile_id,
                                source_player_name=match.full_name,
                                source_player_position=match.position,
                            )
                        )
                # NOTE(review): bare except silently skips players whose
                # fetch/parse fails; consider logging the exception.
                except:
                    pass
        self.session.commit()

    def update_unmatched(self, unmatched):
        """
        Adds or updates player rows for players with no xref match.

        Args:
            unmatched: list of dict, each with a 'profile_path'

        Returns:
            None (rows are committed to the database per player)
        """
        Player = self.base.classes.player
        for item in unmatched:
            profile_path = item.get("profile_path")
            if profile_path:
                profile_id = profile_path.split("/")[-1]
                try:
                    content = self._s.player_profile(profile_path=profile_path)
                    p = self._p.player_page(content, profile_id)
                    # NOTE(review): f-string interpolation into SQL text() is
                    # an injection risk if page content is untrusted; prefer
                    # bound parameters.
                    filt = text(f"nflcom_player_id='{p['nflcom_player_id']}'")
                    match = self.session.query(Player).filter(filt).first()
                    # if no match, add to player table and get autoincrement id
                    # if match, get existing player_id
                    if match:
                        logging.info("found match for %s", p["full_name"])
                    else:
                        logging.info("no match for %s", p["full_name"])
                        # Fall back to matching on name + position + birthdate.
                        match = (
                            self.session.query(Player)
                            .filter(Player.first_name == p["first_name"])
                            .filter(Player.last_name == p["last_name"])
                            .filter(Player.pos == p["position"])
                            .filter(Player.birthdate == p.get("birthdate"))
                            .first()
                        )
                    if match:
                        # Enrich the existing row with newly-parsed fields.
                        match.nflcom_player_id = p["nflcom_player_id"]
                        if p.get("height"):
                            match.height = p["height"]
                        if p.get("weight"):
                            match.weight = p["weight"]
                        if p.get("college"):
                            match.college = p["college"]
                    else:
                        playerobj = Player(
                            nflcom_player_id=p["nflcom_player_id"],
                            first_name=p["first_name"],
                            last_name=p["last_name"],
                            pos=p["position"],
                            height=p.get("height", None),
                            weight=p.get("weight", None),
                            birthdate=p.get("birthdate", None),
                            college=p.get("college", None),
                        )
                        self.session.add(playerobj)
                    self.session.commit()
                # NOTE(review): bare except silently skips failed players;
                # consider logging the exception.
                except:
                    pass

    def update_team_rosters(self, rosters):
        """
        Adds team rosters to roster table

        Args:
            rosters(list): of dict

        Returns:
            list: of dict, players that could not be matched to a player row
        """
        unmatched = []
        matched = []
        Roster = self.base.classes.roster
        # NOTE(review): today() is a project helper — presumably the current
        # date used as the roster snapshot date; confirm.
        as_of = today()
        for item in rosters:
            # sample item
            # {'pos': 'WR',
            #  'num': '16',
            #  'plyr': 'Adeboyejo, Quincy',
            #  'status': 'ACT',
            #  'team': 'BAL',
            #  'profile_path': 'quincyadeboyejo/2557843'}
            # step one: see if player in table
            # if not in table, then add
            player = self.get_player(item)
            if not player:
                # add player
                unmatched.append(item)
                logging.info("no match for %s", item["plyr"])
                continue
            # step two: add player to roster table
            matched.append(
                {"as_of": as_of, "player_id": player.player_id, "team_id": item["team"]}
            )
            logging.info("added %s", item["plyr"])
        # add roster objects to database
        self.session.bulk_insert_mappings(Roster, matched)
        self.session.commit()
        return unmatched

    def upcoming_games(self, season, week):
        """
        Gets all games from weekly schedule page from NFL.com

        Args:
            season(int): 2018, etc.
            week(int): 1, etc.

        Returns:
            list of dict
        """
        content = self._s.schedule_week(season, week)
        return self._p.upcoming_week_page(content)

    def yearly_schedule(self, season):
        """
        Persists the full regular-season schedule (weeks 1-17) for a season.

        Args:
            season(int): 2018, etc.

        Returns:
            None (game rows are committed to the database per week)
        """
        Game = self.base.classes.game
        # Regular season: weeks 1 through 17.
        for week in range(1, 18):
            logging.info("starting week %s", week)
            content = self._s.schedule_week(season, week)
            utcnow = datetime.utcnow()
            gobjs = []
            for g in self._p.upcoming_week_page(content):
                gobj = Game(
                    gsis_id=g["data-gameid"],
                    start_time=g["start_time"],
                    week=week,
                    day_of_week=g["day_of_week"],
                    season_year=season,
                    season_type="Regular",
                    finished=False,
                    home_team=g["data-home-abbr"],
                    away_team=g["data-away-abbr"],
                    time_inserted=utcnow,
                    time_updated=utcnow,
                )
                gobjs.append(gobj)
            self.session.bulk_save_objects(gobjs)
            self.session.commit()
if __name__ == "__main__":
    # Module is import-only; no command-line behavior.
    pass
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renders depth maps from EuRoC MAV point clouds.
https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets
Some of the rooms of the EuRoC MAV dataset have point clouds. This script
renders depth maps from the point clouds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import matplotlib.image
import numpy as np
flags.DEFINE_string('room_path', '', 'Path to the EuRoC data for one of the '
                    'rooms ')
flags.DEFINE_string('output_path', '', 'Path where to store the outputs.')

FLAGS = flags.FLAGS

# A 4D transform that connects the Cam0 to the body of the MAV. This is taken
# from the sensor.yaml file. To project the point cloud on Cam1, please replace
# with the respective extrinsic matrix. This is constant across all the rooms in
# the dataset.
CAM0_TO_BODY = np.array(
    [[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
     [0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
     [-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
     [0.0, 0.0, 0.0, 1.0]])

# Intrinsics of Cam0. This is taken from cam0/sensor.yaml and is the same for
# all rooms.
FX = 458.654  # Focal lengths, in pixels.
FY = 457.296
X0 = 367.215  # Principal point, in pixels.
Y0 = 248.375
K1 = -0.28340811  # Radial distortion coefficients.
K2 = 0.07395907
H = 480  # Image height and width, in pixels.
W = 752
def get_camera_view_pointcloud(transform, xyz, greyscale_color):
  """Transform point cloud to camera view, prune points outside of the view.

  Args:
    transform: 4x4 transform matrix representing position and orientation of
      the body of the MAV.
    xyz: A 4xN matrix, point cloud in homogeneous coordinates. The k-th column
      is (x, y, z, 1), where x, y, z are the coordinates of the k-th point.
    greyscale_color: N-vector, vertex grayscale value. The k-th entry is the
      greyscale color of the k-th point.

  Returns:
    3xM (M < N) matrix representing the point cloud in the camera view.
    M vector, vertex grayscale value.
    Only points in front of the camera and within (a margin around) its
    viewing angle are kept.
  """
  # Compose the inverses of the cam->body and body->world transforms to map
  # world-frame points into the Cam0 frame.
  world_to_cam = np.linalg.inv(CAM0_TO_BODY).dot(np.linalg.inv(transform))
  cam_xyz = xyz.dot(world_to_cam.transpose())
  x, y, z, _ = _split(cam_xyz)
  u, v = _project_and_distort(x, y, z)
  # Keep a 5% margin around the frame, so that points just outside of it can
  # still occlude neighboring points inside the frame near the edges.
  in_front = z > 0.0
  u_in_frame = (u > -0.05 * W) & (u < W * 1.05)
  v_in_frame = (v > -0.05 * H) & (v < H * 1.05)
  keep = (in_front & u_in_frame & v_in_frame).nonzero()[0]
  return cam_xyz[keep, :3], greyscale_color[keep]
def get_occluded_points(xyz, neighborhood_radius, z_threshold):
  """Remove points that are occluded by others from a camera-view point cloud.

  Args:
    xyz: Point cloud in the camera view, one (x, y, z) point per row.
    neighborhood_radius: The radius around each point in which it occludes
      others.
    z_threshold: Minimum relative z distance between two points for them to be
      considered occluding each other. Two points that are very close in z
      likely belong to the same surface and thus do not occlude each other.

  Returns:
    A list of indices in xyz corresponding to points that are occluded.
  """

  def _bin_of(u_over_z, v_over_z):
    # Quantize perspective coordinates into square bins with a side of
    # `neighborhood_radius`.
    return (int(round(u_over_z / neighborhood_radius)),
            int(round(v_over_z / neighborhood_radius)))

  xs = xyz[:, 0]
  ys = xyz[:, 1]
  zs = xyz[:, 2]
  us = xs / zs
  vs = ys / zs
  num_points = xyz.shape[0]
  # For every bin, track the z of the closest point that claims it. Each
  # point claims its own bin plus the 8 surrounding bins.
  nearest_z = collections.defaultdict(lambda: np.inf)
  for ind in range(num_points):
    ubin, vbin = _bin_of(us[ind], vs[ind])
    for du in (-1, 0, 1):
      for dv in (-1, 0, 1):
        key = (ubin + du, vbin + dv)
        nearest_z[key] = min(nearest_z[key], zs[ind])
  # A point is occluded when some point in its bin is far enough in front of
  # it (in z) that the two cannot belong to the same surface.
  return [
      ind for ind in range(num_points)
      if nearest_z[_bin_of(us[ind], vs[ind])] < (1 - z_threshold) * zs[ind]
  ]
def render_rgb(xyz, c):
  """Given a colored cloud in camera coordinates, render an image.

  This function is useful for visualization / debugging.

  Args:
    xyz: A 3xN matrix representing the point cloud in the camera view.
    c: A N-long vector containing (greyscale) colors of the points.

  Returns:
    A rendered (half-resolution, 3-channel) image.
  """
  x, y, z = _split(xyz)
  u, v = _project_and_distort(x, y, z)
  # Render at half resolution: scale pixel coordinates by 0.5.
  cols = np.floor(0.5 * u).astype(int)
  rows = np.floor(0.5 * v).astype(int)
  canvas = np.full((int(H / 2), int(W / 2)), 0.0)
  canvas[rows, cols] = c
  # Replicate the greyscale channel three times to get an RGB image.
  return np.stack([canvas] * 3, axis=2)
def render_z(xyz):
  """Given a colored cloud in camera coordinates, render a depth map.

  This function is useful for visualization / debugging.

  Args:
    xyz: A 3xN matrix representing the point cloud in the camera view.

  Returns:
    A rendered (half-resolution) depth map, normalized by the maximum depth;
    pixels with no point are NaN.
  """
  x, y, z = _split(xyz)
  u, v = _project_and_distort(x, y, z)
  # Render at half resolution: scale pixel coordinates by 0.5.
  cols = np.floor(0.5 * u).astype(int)
  rows = np.floor(0.5 * v).astype(int)
  # Initialize to -inf so empty pixels can be told apart from real depths.
  depth = np.full((int(H / 2), int(W / 2)), -np.inf)
  depth[rows, cols] = z
  maxz = np.max(depth)
  depth = np.where(depth == -np.inf, np.nan, depth)
  depth /= maxz
  return depth
class GroundTruthInterpolator(object):
  """Interpolates MAV position and orientation groundtruth to a timestamp."""

  def __init__(self, filename):
    """Creates an instance.

    Args:
      filename: A string, filepath of the state_groundtruth_estimate0.csv file.
    """
    with open(filename) as f:
      lines = f.readlines()
    lines = lines[1:]  # skip the first line (CSV header)
    gt = []
    for l in lines:
      # Keep only the first 8 CSV columns of each row:
      # timestamp, x, y, z, qw, qx, qy, qz (see get_transform below).
      tokens = l.split(',')
      gt.append([float(t) for t in tokens[:8]])
    self._gt = np.array(gt)
    # Valid time range covered by the groundtruth samples.
    self._mint = np.min(self._gt[:, 0])
    self._maxt = np.max(self._gt[:, 0])

  def get_transform(self, timestamp):
    """Interpolates the MAV's transform matrix at a timestamp.

    Args:
      timestamp: float, a time within the groundtruth file's time range.

    Returns:
      A 4x4 numpy array combining the interpolated rotation (from the
      quaternion) and translation, or None if the timestamp is outside the
      groundtruth time range.
    """
    if timestamp < self._mint or timestamp > self._maxt:
      return None
    # self._gt[:, 0], the 0th column, is the timestamp. Columns 1-3 are x, y, z,
    # and columns 4-7 are quaternion components describing the rotation.
    timestamps = self._gt[:, 0]
    x = np.interp(timestamp, timestamps, self._gt[:, 1])
    y = np.interp(timestamp, timestamps, self._gt[:, 2])
    z = np.interp(timestamp, timestamps, self._gt[:, 3])
    qw = np.interp(timestamp, timestamps, self._gt[:, 4])
    qx = np.interp(timestamp, timestamps, self._gt[:, 5])
    qy = np.interp(timestamp, timestamps, self._gt[:, 6])
    qz = np.interp(timestamp, timestamps, self._gt[:, 7])
    # Creates a matrix from the standard quaternion-to-rotation-matrix
    # formula, with the translation in the last column.
    # NOTE: quaternion components are interpolated independently (not slerp)
    # and the result is not renormalized; adequate for densely-sampled
    # groundtruth.
    transform = np.array([[
        1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw,
        2 * qx * qz + 2 * qy * qw, x
    ],  # pylint: disable=bad-continuation
                          [
                              2 * qx * qy + 2 * qz * qw,
                              1 - 2 * qx * qx - 2 * qz * qz,
                              2 * qy * qz - 2 * qx * qw, y
                          ],
                          [
                              2 * qx * qz - 2 * qy * qw,
                              2 * qy * qz + 2 * qx * qw,
                              1 - 2 * qx * qx - 2 * qy * qy, z
                          ], [0.0, 0.0, 0.0, 1.0]])
    return transform
def read_ply(filename):
  """Reads a PLY file representing EuRoc's point cloud.

  Args:
    filename: A string, path to the .ply file.

  Returns:
    A pair (xyz, c): an Nx3 array of point coordinates and an N-vector of
    greyscale intensities.
  """
  with open(filename) as ply_file:
    # The first 11 lines are the PLY header; the rest are data records of the
    # form "x y z intensity".
    records = ply_file.readlines()[11:]
  points = []
  intensities = []  # The color channel (just one, it's greyscale)
  for record in records:
    fields = record.split(' ')
    points.append([float(value) for value in fields[:3]])
    intensities.append(float(fields[3]))
  return np.array(points), np.array(intensities)
def filter_out_ot_frame_points(xyz, c):
  """Remove all points in a camera-view pointcloud that are out of frame.

  Args:
    xyz: A 3xN matrix representing the point cloud in the camera view.
    c: A N-long vector containing (greyscale) colors of the points.

  Returns:
    A 3xM matrix and a M-long vector representing the filtered colored point
    cloud.
  """
  x, y, z = _split(xyz)
  u, v = _project_and_distort(x, y, z)
  # Integer pixel coordinates; a point is in frame iff both fall inside the
  # image bounds.
  cols = np.floor(u).astype(int)
  rows = np.floor(v).astype(int)
  in_frame = (cols >= 0) & (cols < W) & (rows >= 0) & (rows < H)
  keep = in_frame.nonzero()[0]
  return xyz[keep, :], c[keep]
def sample_uniform(xyz, bin_size):
  """Subsamples a point cloud to be more uniform in perspective coordinates.

  Args:
    xyz: Point cloud in the camera view, one (x, y, z) point per row.
    bin_size: Size of a square in which we allow only a single point.

  Returns:
    A sorted list of indices, corresponding to a subset of the original `xyz`,
    to keep.
  """
  x = xyz[:, 0]
  y = xyz[:, 1]
  z = xyz[:, 2]
  # Perspective coordinates measured in units of `bin_size`.
  u = x / z / bin_size
  v = y / z / bin_size
  u_rounded = np.round(u)
  v_rounded = np.round(v)
  # Squared distance of each point from the center of its bin.
  center_dist_sq = (u_rounded - u)**2 + (v_rounded - v)**2
  # Per bin, keep the point closest to the bin center (first wins ties).
  best_in_bin = {}
  for ind in range(len(center_dist_sq)):
    key = (u_rounded[ind], v_rounded[ind])
    if key not in best_in_bin or center_dist_sq[ind] < best_in_bin[key][1]:
      best_in_bin[key] = (ind, center_dist_sq[ind])
  return sorted([entry[0] for entry in best_in_bin.values()])
def main(argv):
  """Renders a depth map and greyscale image for every fifth cam0 frame."""
  del argv  # unused
  gti = GroundTruthInterpolator(
      os.path.join(FLAGS.room_path, 'state_groundtruth_estimate0/data.csv'))
  print('Groundtruth loaded.')
  cloud_xyz, cloud_c = read_ply(
      os.path.join(FLAGS.room_path, 'pointcloud0/data.ply'))
  print('PLY loaded.')
  # Homogeneous coordinates: append a column of ones.
  homogeneous_xyz = np.concatenate(
      [cloud_xyz, np.ones((cloud_xyz.shape[0], 1))], axis=1)
  frames = sorted(os.listdir(os.path.join(FLAGS.room_path, 'cam0/data')))
  frames = frames[0::5]  # render every fifth image
  for imfile in frames:
    timestamp = float(imfile.split('.')[0])
    transform = gti.get_transform(timestamp)
    if transform is None:
      print('Timestamp %d has no groundtruth.' % int(timestamp))
      continue
    print('Rendering timestamp %d...' % int(timestamp))
    xyz_view, c_view = get_camera_view_pointcloud(
        transform, homogeneous_xyz, cloud_c)
    print('View pointcloud generated, %d points.' % xyz_view.shape[0])
    # Drop occluded points, then everything that is out of frame.
    occluded = set(get_occluded_points(xyz_view, 0.02, 0.08))
    visible_indices = [
        i for i in range(xyz_view.shape[0]) if i not in occluded
    ]
    print('%d visible points found.' % len(visible_indices))
    visible_xyz = xyz_view[visible_indices, :]
    visible_c = c_view[visible_indices]
    visible_xyz, visible_c = filter_out_ot_frame_points(visible_xyz, visible_c)
    # Thin the cloud so it is roughly uniform in perspective coordinates.
    keep = sample_uniform(visible_xyz, 1e-2)
    visible_xyz = visible_xyz[keep]
    visible_c = visible_c[keep]
    rgb_image = render_rgb(visible_xyz, visible_c)
    z_image = render_z(visible_xyz)
    matplotlib.image.imsave(
        os.path.join(FLAGS.output_path, '%dgrayscale.png' % int(timestamp)),
        rgb_image)
    matplotlib.image.imsave(
        os.path.join(FLAGS.output_path, '%ddepth.png' % int(timestamp)),
        z_image)
    np.save(
        os.path.join(FLAGS.output_path, '%d.npy' % int(timestamp)), visible_xyz)
def _split(matrix):
return [
np.squeeze(v, axis=1) for v in np.split(matrix, matrix.shape[1], axis=1)
]
def _project_and_distort(x, y, z):
  """Applies perspective projection and distortion on a point cloud.

  Args:
    x: A vector containing the x coordinates of the points.
    y: A vector containing the y coordinates of the points, same length as x.
    z: A vector containing the z coordinates of the points, same length as x.

  Returns:
    A tuple of two vectors of the same length as x, containing the image-plane
    coordinates (u, v) of the point cloud.
  """
  # 1. Perspective division onto the normalized image plane.
  x_norm = (x / z)
  y_norm = (y / z)
  # 2. Apply radial camera distortion (two-term polynomial model):
  r_sq = x_norm**2 + y_norm**2
  scale = (1 + K1 * r_sq + K2 * r_sq * r_sq)
  x_norm = x_norm * scale
  y_norm = y_norm * scale
  # 3. Apply intrinsic matrix (focal lengths + principal point) to get
  # image coordinates:
  return FX * x_norm + X0, FY * y_norm + Y0
# Script entry point: absl parses FLAGS and then invokes main().
if __name__ == '__main__':
  app.run(main)
| |
#!/usr/bin/env python
import sys
import re
import shelve
import json
# x86-64 register tracking table. Every register alias gets its own record of
# the last operation seen touching it: the numbered registers %r0-%r15 in all
# four width forms ('' = 64-bit, 'd' = 32-bit, 'w' = 16-bit, 'b' = 8-bit),
# the legacy named registers at each width, and the 16 SSE registers.
def _fresh_reg_state():
    # A new, independent dict per register so updates never alias.
    return {'op': None, 'src': None, 'dest': None}

_REG_NAMES = (
    ['%r{}'.format(n) for n in range(16)]
    + ['%rax', '%rcx', '%rdx', '%rbx', '%rsp', '%rbp', '%rsi', '%rdi', '%rip']
    + ['%r{}d'.format(n) for n in range(16)]
    + ['%eax', '%ecx', '%edx', '%ebx', '%esp', '%ebp', '%esi', '%edi', '%eip']
    + ['%r{}w'.format(n) for n in range(16)]
    + ['%ax', '%cx', '%dx', '%bx', '%sp', '%bp', '%si', '%di']
    + ['%r{}b'.format(n) for n in range(16)]
    + ['%al', '%cl', '%dl', '%bl', '%spl', '%bpl', '%sil', '%dil']
    + ['%xmm{}'.format(n) for n in range(16)]
)
regs_map = {name: _fresh_reg_state() for name in _REG_NAMES}
# Module-global translator state, mutated while functions are parsed.
fun_stack = []  # NOTE(review): not referenced in this chunk -- looks WIP
var_stack = []  # NOTE(review): not referenced in this chunk -- looks WIP
var_count = 0  # presumably a counter for generated local-variable names
par_stack = []  # NOTE(review): not referenced in this chunk -- looks WIP
par_count = 0  # presumably a counter for generated parameter names
fun_list = ['main']  # functions to translate; seeded with the entry point
# instruct match list
def handler_cmpx(pattern, regs_map_new):
    """Translate a matched cmp instruction into a comparison expression.

    `pattern` is the re.findall result: a list with one (size-suffix,
    operand1, operand2) tuple. `regs_map_new` is the per-function register
    table (currently unused here).
    """
    operands = pattern[0]
    print(operands)
    # AT&T syntax is "cmp src, dest"; build "dest == src".
    expr = "{1} == {0}".format(operands[1], operands[2])
    # Drop immediate markers and rewrite %rdi references as the first
    # parameter -- presumably per the SysV calling convention; confirm.
    expr = re.sub(r'\$', '', expr)
    expr = re.sub(r'\(%rdi\)', '*param_1', expr)
    expr = re.sub(r'%rdi', 'param_1', expr)
    # NOTE(review): `expr` is computed but never returned or stored -- the
    # translation appears unfinished.
    # print(expr)
def handler_movx(pattern, regs_map_new):
    # TODO: translation not implemented yet; just dump the regex match.
    print(pattern)
def handler_addx(pattern, regs_map_new):
    # TODO: translation not implemented yet; just dump the regex match.
    print(pattern)
def handler_subx(pattern, regs_map_new):
    # TODO: translation not implemented yet; just dump the regex match.
    print(pattern)
# One operand: a memory reference like "-0x8(%rbp)", a bare register, or a
# hex immediate like "$0x1f".
_OPERAND_RE = r'([0-9a-fA-Fx-]*\(%[a-z]+\)|%[a-z]+|\$0x[0-9a-fA-F]+)'

def _two_operand_re(mnemonic):
    # mnemonic + optional size suffix + "src, dest" operand pair.
    return mnemonic + r'([blqw]*)[ ]+' + _OPERAND_RE + r'[ ]*,[ ]*' + _OPERAND_RE

# Maps an instruction-matching regex to the handler that translates it.
inst_re_list = {
    _two_operand_re('cmp'): handler_cmpx,
    _two_operand_re('mov'): handler_movx,
    _two_operand_re('add'): handler_addx,
    _two_operand_re('sub'): handler_subx,
}
def asm_to_json(db):
    """Translate every function listed in fun_list that exists in `db`.

    `db` is a shelve database whose 'dats' entry maps function names to
    their disassembly records.
    """
    global fun_list
    functions = db['dats']
    for name in fun_list:
        if name not in functions:
            continue
        translate_fun(functions[name])
def translate_fun(fun):
    """Find basic-block leaders in `fun` and dispatch each instruction.

    `fun` is a dict with an 'addr' entry (entry address) and an 'inst'
    list whose items carry the instruction text at index 1.
    """
    global regs_map
    # Basic-block leaders: the function entry plus every jump target.
    block_starts = [fun['addr']]
    for inst in fun['inst']:
        jump = re.findall(r'(je|jne|jmp|jb|jg|jl)[ ]+([a-fA-F0-9]{6,16})', inst[1])
        if jump:
            block_starts.append(jump[0][1])
    block_starts.sort()
    # Work on a copy so the pristine module-level table stays untouched.
    local_regs = regs_map.copy()
    # Translate instructions: first handler whose regex matches wins.
    for inst in fun['inst']:
        print(inst)
        for regex, handler in inst_re_list.items():
            match = re.findall(regex, inst[1])
            if match:
                handler(match, local_regs)
                break
# CLI entry point: expects the shelve database produced by the
# disassembly pass as the single positional argument.
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("%s [xxx.db]" % sys.argv[0])
        sys.exit(-1)
    db = shelve.open(sys.argv[1])
    asm_to_json(db)
    db.close()
| |
# utils/config.py
import os
import re
import warnings
# 3rd-party modules
from lxml import etree
from ncclient.operations import RPCError
# package modules
from jnpr.junos.exception import *
from jnpr.junos import jxml as JXML
from jnpr.junos.utils.util import Util
"""
Configuration Utilities
"""
class Config(Util):
    """
    Overview of Configuration Utilities:
    * :meth:`commit`: commit changes
    * :meth:`commit_check`: perform the commit check operation
    * :meth:`diff`: return the diff string between running and candidate config
    * :meth:`load`: load changes into the candidate config
    * :meth:`lock`: take an exclusive lock on the candidate config
    * :meth:`pdiff`: prints the diff string (debug/helper)
    * :meth:`rescue`: controls "rescue configuration"
    * :meth:`rollback`: perform the load rollback command
    * :meth:`unlock`: release the exclusive lock
    """
    # ------------------------------------------------------------------------
    # commit
    # ------------------------------------------------------------------------
    def commit(self, **kvargs):
        """
        Commit a configuration.
        :param str comment: If provided logs this comment with the commit.
        :param int confirm: If provided activates confirm safeguard with
                            provided value as timeout (minutes).
        :param int timeout: If provided the command will wait for completion
                            using the provided value as timeout (seconds).
                            By default the device timeout is used.
        :param bool sync: On dual control plane systems, requests that
                            the candidate configuration on one control plane be
                            copied to the other control plane, checked for
                            correct syntax, and committed on both Routing Engines.
        :param bool force_sync: On dual control plane systems, forces the candidate
                            configuration on one control plane to be copied to the
                            other control plane.
        :param bool full: When true requires all the daemons to check and evaluate
                            the new configuration.
        :param bool detail: When true return commit detail as XML
        :returns:
            * ``True`` when successful
            * Commit detail XML (when detail is True)
        :raises CommitError: When errors detected in candidate configuration.
                             You can use the Exception errs variable
                             to identify the specific problems
        .. warning::
            If the function does not receive a reply prior to the timeout
            a RpcTimeoutError will be raised.  It is possible the commit
            was successful.  Manual verification may be required.
        """
        rpc_args = {}
        # if a comment is provided, then include that in the RPC
        comment = kvargs.get('comment')
        if comment:
            rpc_args['log'] = comment
        # if confirm is provided, then setup the RPC args
        # so that Junos will either use the default confirm
        # timeout (confirm=True) or a specific timeout
        # (confirm=<minutes>)
        confirm = kvargs.get('confirm')
        if confirm:
            rpc_args['confirmed'] = True
            confirm_val = str(confirm)
            # confirm=True means "use the device default timeout"; any other
            # truthy value is forwarded as the timeout in minutes.
            if 'True' != confirm_val:
                rpc_args['confirm-timeout'] = confirm_val
        # if a timeout is provided, then include that in the RPC
        # NOTE: 'dev_timeout' is presumably consumed by the RPC transport
        # layer rather than sent to the device -- confirm in jnpr.junos.rpcmeta.
        timeout = kvargs.get('timeout')
        if timeout:
            rpc_args['dev_timeout'] = timeout
        # Check for force_sync and sync
        if kvargs.get('force_sync'):
            rpc_args['synchronize'] = True
            rpc_args['force-synchronize'] = True
        elif kvargs.get('sync'):
            rpc_args['synchronize'] = True
        # Check for full
        if kvargs.get('full'):
            rpc_args['full'] = True
        rpc_varg = []
        detail = kvargs.get('detail')
        if detail:
            rpc_varg = [{'detail': 'detail'}]
        # dbl-splat the rpc_args since we want to pass key/value to metaexec
        # if there is a commit/check error, this will raise an exception
        try:
            rsp = self.rpc.commit_configuration(*rpc_varg, **rpc_args)
        except RpcTimeoutError:
            raise
        except RpcError as err:        # jnpr.junos exception
            if err.rsp is not None and err.rsp.find('ok') is not None:
                # this means there are warnings, but no errors
                return True
            else:
                raise CommitError(cmd=err.cmd, rsp=err.rsp, errs=err.errs)
        except Exception as err:
            # so the ncclient gives us something I don't want.  I'm going to
            # convert it and re-raise the commit error
            if hasattr(err, 'xml') and isinstance(err.xml, etree._Element):
                raise CommitError(rsp=err.xml)
            else:
                raise
        if detail:
            return rsp
        else:
            return True
    # -------------------------------------------------------------------------
    # commit check
    # -------------------------------------------------------------------------
    def commit_check(self):
        """
        Perform a commit check.  If the commit check passes, this function
        will return ``True``.  If the commit-check results in warnings, they
        are reported and available in the Exception errs.
        :returns: ``True`` if commit-check is successful (no errors)
        :raises CommitError: When errors detected in candidate configuration.
                             You can use the Exception errs variable
                             to identify the specific problems
        :raises RpcError: When underlying ncclient has an error
        """
        try:
            self.rpc.commit_configuration(check=True)
        except RpcTimeoutError:
            raise
        except RpcError as err:        # jnpr.junos exception
            if err.rsp is not None and err.rsp.find('ok') is not None:
                # this means there is a warning, but no errors
                return True
            else:
                raise CommitError(cmd=err.cmd, rsp=err.rsp, errs=err.errs)
        except Exception as err:
            # :err: is from ncclient, so extract the XML data
            # and convert into dictionary
            # NOTE(review): unlike commit(), ncclient errors here return a
            # dict instead of raising -- callers must check the return value.
            return JXML.rpc_error(err.xml)
        return True
    # -------------------------------------------------------------------------
    # show | compare rollback <number|0*>
    # -------------------------------------------------------------------------
    def diff(self, rb_id=0):
        """
        Retrieve a diff (patch-format) report of the candidate config against
        either the current active config, or a different rollback.
        :param int rb_id: rollback id [0..49]
        :returns:
            * ``None`` if there is no difference
            * ascii-text (str) if there is a difference
        """
        if rb_id < 0 or rb_id > 49:
            raise ValueError("Invalid rollback #" + str(rb_id))
        rsp = self.rpc.get_configuration(dict(
            compare='rollback', rollback=str(rb_id), format='text'
        ))
        diff_txt = rsp.find('configuration-output').text
        # Junos returns a lone newline when there is nothing to report.
        return None if diff_txt == "\n" else diff_txt
    def pdiff(self, rb_id=0):
        """
        Helper method that calls ``print`` on the diff (patch-format) between the
        current candidate and the provided rollback.
        :param int rb_id: the rollback id value [0-49]
        :returns: ``None``
        """
        print self.diff(rb_id)
    # -------------------------------------------------------------------------
    # helper on loading configs
    # -------------------------------------------------------------------------
    def load(self, *vargs, **kvargs):
        """
        Loads changes into the candidate configuration.  Changes can be
        in the form of strings (text,set,xml), XML objects, and files.
        Files can be either static snippets of configuration or Jinja2
        templates.  When using Jinja2 Templates, this method will render
        variables into the templates and then load the resulting change;
        i.e. "template building".
        :param object vargs[0]:
            The content to load.  If the contents is a string, the framework
            will attempt to automatically determine the format.  If it is
            unable to determine the format then you must specify the
            **format** parameter.  If the content is an XML object, then
            this method assumes you've structured it correctly;
            and if not an Exception will be raised.
        :param str path:
            Path to file of configuration on the local server.
            The path extension will be used to determine the format of
            the contents:
            * "conf","text","txt" is curly-text-style
            * "set" - ascii-text, set-style
            * "xml" - ascii-text, XML
            .. note:: The format can specifically set using **format**.
        :param str format:
            Determines the format of the contents.  Refer to options
            from the **path** description.
        :param bool overwrite:
            Determines if the contents completely replace the existing
            configuration.  Default is ``False``.
            .. note:: This option cannot be used if **format** is "set".
        :param bool merge:
            If set to ``True`` will set the load-config action to merge.
            the default load-config action is 'replace'
        :param str template_path:
            Similar to the **path** parameter, but this indicates that
            the file contents are ``Jinja2`` format and will require
            template-rendering.
            .. note:: This parameter is used in conjunction with **template_vars**.
                      The template filename extension will be used to determine
                      the format-style of the contents, or you can override
                      using **format**.
        :param jinja2.Template template:
            A Jinja2 Template object.  Same description as *template_path*,
            except this option you provide the actual Template, rather than
            a path to the template file.
        :param dict template_vars:
            Used in conjunction with the other template options.  This parameter
            contains a dictionary of variables to render into the template.
        :returns:
            RPC-reply as XML object.
        :raises: ConfigLoadError: When errors detected while loading candidate configuration.
                                  You can use the Exception errs variable
                                  to identify the specific problems
        """
        rpc_xattrs = {}
        rpc_xattrs['format'] = 'xml'        # default to XML format
        rpc_xattrs['action'] = 'replace'    # replace is default action
        rpc_contents = None
        # support the ability to completely replace the Junos configuration
        # note: this cannot be used if format='set', per Junos API.
        overwrite = kvargs.get('overwrite', False)
        if True == overwrite:
            rpc_xattrs['action'] = 'override'
        elif kvargs.get('merge') is True:
            # no 'action' attribute means Junos defaults to merge
            del rpc_xattrs['action']
        # ---------------------------------------------------------------------
        # private helpers ...
        # ---------------------------------------------------------------------
        def _lformat_byext(path):
            """ determine the format style from the file extension """
            ext = os.path.splitext(path)[1]
            if ext == '.xml':
                return 'xml'
            if ext in ['.conf', '.text', '.txt']:
                return 'text'
            if ext in ['.set']:
                return 'set'
            raise ValueError("Unknown file contents from extension: %s" % ext)
        def _lset_format(kvargs, rpc_xattrs):
            """ setup the kvargs/rpc_xattrs """
            # when format is given, setup the xml attrs appropriately
            if kvargs['format'] == 'set':
                if True == overwrite:
                    raise ValueError(
                        "conflicting args, cannot use 'set' with 'overwrite'")
                rpc_xattrs['action'] = 'set'
                # 'set' content travels as text with action='set'
                kvargs['format'] = 'text'
            rpc_xattrs['format'] = kvargs['format']
        def _lset_fromfile(path):
            """ setup the kvargs/rpc_xattrs based on path """
            if 'format' not in kvargs:
                # we use the extension to determine the format
                kvargs['format'] = _lformat_byext(path)
            _lset_format(kvargs, rpc_xattrs)
        def _lset_from_rexp(rpc):
            """ setup the kvargs/rpc_xattrs using string regular expression """
            if re.search(r'^\s*<.*>$', rpc, re.MULTILINE):
                kvargs['format'] = 'xml'
            elif re.search(r'^\s*(set|delete|replace|rename)\s', rpc):
                kvargs['format'] = 'set'
            elif re.search(r'^[a-z:]*\s*\w+\s+{', rpc, re.I) and re.search(r'.*}\s*$', rpc):
                kvargs['format'] = 'text'
        def try_load(rpc_contents, rpc_xattrs):
            try:
                got = self.rpc.load_config(rpc_contents, **rpc_xattrs)
            except RpcTimeoutError as err:
                raise err
            except RpcError as err:
                raise ConfigLoadError(cmd=err.cmd, rsp=err.rsp, errs=err.errs)
            # Something unexpected happened - raise it up
            except Exception as err:
                raise
            return got
        # ---------------------------------------------------------------------
        # end-of: private helpers
        # ---------------------------------------------------------------------
        if 'format' in kvargs:
            _lset_format(kvargs, rpc_xattrs)
        # ---------------------------------------------------------------------
        # if contents are provided as vargs[0], then process that as XML or str
        # ---------------------------------------------------------------------
        if len(vargs):
            # caller is providing the content directly.
            rpc_contents = vargs[0]
            if isinstance(rpc_contents, str):
                if 'format' not in kvargs:
                    # attempt to sniff the format from the content itself
                    _lset_from_rexp(rpc_contents)
                if 'format' in kvargs:
                    _lset_format(kvargs, rpc_xattrs)
                else:
                    raise RuntimeError(
                        "Not able to resolve the config format "
                        "You must define the format of the contents explicitly "
                        "to the function. Ex: format='set'")
                if kvargs['format'] == 'xml':
                    # convert the XML string into XML structure
                    rpc_contents = etree.XML(rpc_contents)
            return try_load(rpc_contents, rpc_xattrs)
            # ~! UNREACHABLE !~#
        # ---------------------------------------------------------------------
        # if path is provided, use the static-config file
        # ---------------------------------------------------------------------
        if 'path' in kvargs:
            # then this is a static-config file.  load that as our rpc_contents
            rpc_contents = open(kvargs['path'], 'rU').read()
            _lset_fromfile(kvargs['path'])
            if rpc_xattrs['format'] == 'xml':
                # convert the XML string into XML structure
                rpc_contents = etree.XML(rpc_contents)
            return try_load(rpc_contents, rpc_xattrs)
            # ~! UNREACHABLE !~#
        # ---------------------------------------------------------------------
        # if template_path is provided, then jinja2 load the template, and
        # render the results.  if template_vars are provided, use those
        # in the render process.
        # ---------------------------------------------------------------------
        if 'template_path' in kvargs:
            path = kvargs['template_path']
            template = self.dev.Template(path)
            rpc_contents = template.render(kvargs.get('template_vars', {}))
            _lset_fromfile(path)
            if rpc_xattrs['format'] == 'xml':
                # convert the XML string into XML structure
                rpc_contents = etree.XML(rpc_contents)
            return try_load(rpc_contents, rpc_xattrs)
            # ~! UNREACHABLE !~#
        # ---------------------------------------------------------------------
        # if template is provided, then this is a pre-loaded jinja2 Template
        # object.  Use the template.filename to determine the format style
        # ---------------------------------------------------------------------
        if 'template' in kvargs:
            template = kvargs['template']
            path = template.filename
            rpc_contents = template.render(kvargs.get('template_vars', {}))
            _lset_fromfile(path)
            if rpc_xattrs['format'] == 'xml':
                # convert the XML string into XML structure
                rpc_contents = etree.XML(rpc_contents)
            return try_load(rpc_contents, rpc_xattrs)
            # ~! UNREACHABLE !~#
        # none of the supported content sources were provided
        raise RuntimeError("Unhandled load request")
    # -------------------------------------------------------------------------
    # config exclusive
    # -------------------------------------------------------------------------
    def lock(self):
        """
        Attempts an exclusive lock on the candidate configuration.  This
        is a non-blocking call.
        :returns:
            ``True`` always when successful
        :raises LockError: When the lock cannot be obtained
        """
        try:
            self.rpc.lock_configuration()
        except Exception as err:
            if isinstance(err, RpcError):
                raise LockError(rsp=err.rsp)
            else:
                # :err: is from ncclient
                raise LockError(rsp=JXML.remove_namespaces(err.xml))
        return True
    # -------------------------------------------------------------------------
    # releases the exclusive lock
    # -------------------------------------------------------------------------
    def unlock(self):
        """
        Unlocks the candidate configuration.
        :returns:
            ``True`` always when successful
        :raises UnlockError: If you attempt to unlock a configuration
                             when you do not own the lock
        """
        try:
            self.rpc.unlock_configuration()
        except Exception as err:
            if isinstance(err, RpcError):
                raise UnlockError(rsp=err.rsp)
            else:
                # :err: is from ncclient
                raise UnlockError(rsp=JXML.remove_namespaces(err.xml))
        return True
    # -------------------------------------------------------------------------
    # rollback <number|0*>
    # -------------------------------------------------------------------------
    def rollback(self, rb_id=0):
        """
        Rollback the candidate config to either the last active or
        a specific rollback number.
        :param str rb_id: The rollback id value [0-49], defaults to ``0``.
        :returns:
            ``True`` always when successful
        :raises ValueError: When invalid rollback id is given
        """
        if rb_id < 0 or rb_id > 49:
            raise ValueError("Invalid rollback #" + str(rb_id))
        # loads the rollback as the candidate; it still must be committed
        self.rpc.load_configuration(dict(
            compare='rollback', rollback=str(rb_id)
        ))
        return True
    # -------------------------------------------------------------------------
    # rescue configuration
    # -------------------------------------------------------------------------
    def rescue(self, action, format='text'):
        """
        Perform action on the "rescue configuration".
        :param str action: identifies the action as follows:
            * "get" - retrieves/returns the rescue configuration via **format**
            * "save" - saves current configuration as rescue
            * "delete" - removes the rescue configuration
            * "reload" - loads the rescue config as candidate (no-commit)
        :param str format: identifies the return format when **action** is "get":
            * "text" (default) - ascii-text format
            * "xml" - as XML object
        :return:
            * When **action** is 'get', then the contents of the rescue configuration
              is returned in the specified *format*.  If there is no rescue configuration
              saved, then the return value is ``None``.
            * ``True`` when **action** is "save".
            * ``True`` when **action** is "delete".
            .. note:: ``True`` regardless if a rescue configuration exists.
            * When **action** is 'reload', return is ``True`` if a rescue configuration
              exists, and ``False`` otherwise.
            .. note:: The rescue configuration is only loaded as the candidate,
                      and not committed.  You must commit to make the rescue
                      configuration active.
        :raises ValueError:
            If **action** is not one of the above
        """
        def _rescue_save():
            """
            Saves the current configuration as the rescue configuration
            """
            self.rpc.request_save_rescue_configuration()
            return True
        def _rescue_delete():
            """
            Deletes the existing rescue configuration.
            """
            # note that this will result in an "OK" regardless if
            # a rescue config exists or not.
            self.rpc.request_delete_rescue_configuration()
            return True
        def _rescue_get():
            """
            Retrieves the rescue configuration, returning it in
            either :format: 'text' or 'xml'.
            Returns either the 'text'/'xml' if the rescue config
            exists, or :None: otherwise
            """
            try:
                got = self.rpc.get_rescue_information(format=format)
                return got.findtext('configuration-information/configuration-output') \
                    if 'text' == format else got
            except:
                # best-effort: any failure is treated as "no rescue config"
                return None
        def _rescue_reload():
            """
            Loads the rescue configuration as the active candidate.
            This action does *not* commit the configuration; use the
            :commit(): method for that purpose.
            Returns the XML response if the rescue configuration
            exists, or :False: otherwise
            """
            try:
                return self.rpc.load_configuration({'rescue': 'rescue'})
            except:
                # best-effort: any failure is treated as "no rescue config"
                return False
        def _unsupported_action():
            raise ValueError("unsupported action: {0}".format(action))
        # dispatch table keyed by the requested action
        result = {
            'get': _rescue_get,
            'save': _rescue_save,
            'delete': _rescue_delete,
            'reload': _rescue_reload
        }.get(action, _unsupported_action)()
        return result
    def __init__(self, dev, mode=None):
        """
        :param str mode: Can be used *only* when creating Config object using context manager
            * "private" - Work in private database
            * "dynamic" - Work in dynamic database
            * "batch" - Work in batch database
            * "exclusive" - Work with Locking the candidate configuration
        Example::
            # mode can be private/dynamic/exclusive/batch
            with Config(dev, mode='exclusive') as cu:
                cu.load('set system services netconf traceoptions file xyz', format='set')
                print cu.diff()
                cu.commit()
        """
        self.mode = mode
        Util.__init__(self, dev=dev)
    def __enter__(self):
        # defining separate functions for each mode so that can be
        # changed/edited as per the need of corresponding rpc call.
        def _open_configuration_private():
            try:
                self.rpc.open_configuration(private=True)
            except RpcError as err:
                if err.rpc_error['severity'] == 'warning':
                    # Junos warns that private-database changes are discarded
                    # on exit; suppress that expected warning, surface others.
                    if err.message != 'uncommitted changes will be discarded on exit':
                        warnings.warn(err.message, RuntimeWarning)
                    return True
                else:
                    raise err
        def _open_configuration_dynamic():
            self.rpc.open_configuration(dynamic=True)
            return True
        def _open_configuration_batch():
            try:
                self.rpc.open_configuration(batch=True)
            except RpcError as err:
                if err.rpc_error['severity'] == 'warning':
                    # same expected-warning suppression as the private mode
                    if err.message != 'uncommitted changes will be discarded on exit':
                        warnings.warn(err.message, RuntimeWarning)
                    return True
                else:
                    raise err
        def _open_configuration_exclusive():
            return self.lock()
        def _unsupported_option():
            if self.mode is not None:
                raise ValueError("unsupported action: {0}".format(self.mode))
        # dispatch on the mode selected at construction time
        {
            'private': _open_configuration_private,
            'dynamic': _open_configuration_dynamic,
            'batch': _open_configuration_batch,
            'exclusive': _open_configuration_exclusive
        }.get(self.mode, _unsupported_option)()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # release whatever the chosen mode acquired in __enter__
        if self.mode == 'exclusive':
            self.unlock()
        elif self.mode is not None:
            self.rpc.close_configuration()
| |
# Copyright (c) 2020 NTT DATA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from tacker import context
from tacker import objects
from tacker.tests.unit.db.base import SqlTestCase
from tacker.tests.unit.objects import fakes
from tacker.tests import uuidsentinel
class TestInstantiatedVnfInfo(SqlTestCase):
    def setUp(self):
        """Build the full chain of instantiated-VNF info fixtures.

        Creation order is significant: later objects embed the earlier
        ones (e.g. the CP protocol info embeds the IP-over-ethernet info,
        which in turn embeds the IP address info).
        """
        super(TestInstantiatedVnfInfo, self).setUp()
        self.context = context.get_admin_context()
        self.vnf_package = self._create_and_upload_vnf_package()
        self.vnf_instance = self._create_vnf_instance()
        self.resource_handle_info = self._create_resource_handle()
        self.ext_link_port_info = self._create_ext_link_port_info()
        self.ext_virtual_link_info = self._create_ext_virtual_link_info()
        self.vnf_link_ports_info = self._create_vnf_link_ports()
        self.ip_addresses_info = self._create_ip_addresses_info()
        self.ip_over_ethernet = self._create_ip_over_ethernet_info()
        self.cp_protocol_info = self._create_cp_protocol_info()
        self.vnf_external_cp_info = self._create_vnf_external_cp_info()
        self.vnfc_cp_info = self._create_vnfc_cp_info()
        self.vnfc_resource_info = self._create_vnfc_resource_info()
        self.virtual_link_resource_info = \
            self._create_virtual_link_resource_info()
        self.virtual_storage_resource_info = \
            self._create_virtual_storage_resource_info()
        self.ext_managed_virtual_link_info = \
            self._create_ext_managed_virtual_link_info()
    def _create_and_upload_vnf_package(self):
        """Create a VNF package plus its VNFD record and mark it onboarded.

        Also stores the created VNFD object on ``self.vnf_package_vnfd``
        for use by the other fixture builders.
        """
        vnf_package = objects.VnfPackage(context=self.context,
                                         **fakes.vnf_package_data)
        vnf_package.create()
        vnf_pack_vnfd = fakes.get_vnf_package_vnfd_data(
            vnf_package.id, uuidsentinel.vnfd_id)
        vnf_pack_vnfd_obj = objects.VnfPackageVnfd(
            context=self.context, **vnf_pack_vnfd)
        vnf_pack_vnfd_obj.create()
        self.vnf_package_vnfd = vnf_pack_vnfd_obj
        # NOTE(review): this assigns the string "ONBOARDED" to an attribute
        # named 'vnf_package'; it looks like 'onboarding_state' was intended.
        # Confirm against the VnfPackage object definition before changing.
        vnf_package.vnf_package = "ONBOARDED"
        vnf_package.save()
        return vnf_package
def _create_vnf_instance(self):
vnf_instance_data = fakes.get_vnf_instance_data(
self.vnf_package_vnfd.vnfd_id)
vnf_instance = objects.VnfInstance(context=self.context,
**vnf_instance_data)
vnf_instance.create()
return vnf_instance
def _create_vnf_external_cp_info(self):
vnf_external_cp_data = copy.deepcopy(fakes.vnf_external_cp_info)
vnf_external_cp_data.update(
{'cp_protocol_info': [self.cp_protocol_info]})
vnf_external_cp_info = objects.VnfExtCpInfo(
context=self.context, **vnf_external_cp_data)
return vnf_external_cp_info
def _create_resource_handle(self):
resource_handle_data = copy.deepcopy(fakes.resource_handle_info)
resource_handle_info = objects.ResourceHandle(
context=self.context, **resource_handle_data)
return resource_handle_info
def _create_ext_link_port_info(self):
ext_link_port_info = copy.deepcopy(fakes.ext_link_port_info)
ext_link_port_info.update(
{'resource_handle': self.resource_handle_info})
ext_link_port_info = objects.ExtLinkPortInfo(
context=self.context, **ext_link_port_info)
return ext_link_port_info
def _create_ext_virtual_link_info(self):
ext_virtual_link_info = copy.deepcopy(fakes.ext_virtual_link_info)
ext_virtual_link_info.update(
{'resource_handle_info': self.resource_handle_info,
'ext_link_ports': self.ext_link_port_info})
ext_virtual_link_info = objects.VnfExtCpInfo(
context=self.context, **ext_virtual_link_info)
return ext_virtual_link_info
def _create_vnf_link_ports(self):
vnf_link_ports_info = copy.deepcopy(fakes.vnf_link_ports)
vnf_link_ports_info.update(
{'resource_handle': self.resource_handle_info})
vnf_link_ports_info = objects.VnfLinkPortInfo(
context=self.context, **vnf_link_ports_info)
return vnf_link_ports_info
def _create_ext_managed_virtual_link_info(self):
ext_managed_virtual_link_info = copy.deepcopy(
fakes.ext_managed_virtual_link_info)
ext_managed_virtual_link_info.update(
{'network_resource': self.resource_handle_info,
'vnf_link_ports': [self.vnf_link_ports_info]})
ext_managed_virtual_link_info = objects.ExtManagedVirtualLinkInfo(
context=self.context, **ext_managed_virtual_link_info)
return ext_managed_virtual_link_info
def _create_ip_addresses_info(self):
ip_address_info = copy.deepcopy(fakes.ip_address_info)
ip_address_info = objects.IpAddress(
context=self.context, **ip_address_info)
return ip_address_info
def _create_ip_over_ethernet_info(self):
ip_over_ethernet_onfo = copy.deepcopy(
fakes.ip_over_ethernet_address_info)
ip_over_ethernet_onfo.update(
{'ip_addresses': [self.ip_addresses_info]})
ip_over_ethernet_onfo = objects.IpOverEthernetAddressInfo(
context=self.context, **ip_over_ethernet_onfo)
return ip_over_ethernet_onfo
def _create_cp_protocol_info(self):
cp_protocol_info = copy.deepcopy(fakes.cp_protocol_info)
cp_protocol_info.update(
{'ip_over_ethernet': self.ip_over_ethernet})
cp_protocol_info = objects.CpProtocolInfo(
context=self.context, **cp_protocol_info)
return cp_protocol_info
def _create_vnfc_cp_info(self):
vnfc_cp_info = copy.deepcopy(fakes.vnfc_cp_info)
vnfc_cp_info.update(
{'cp_protocol_info': [self.cp_protocol_info]})
vnfc_cp_info = objects.VnfcCpInfo(
context=self.context, **vnfc_cp_info)
return vnfc_cp_info
def _create_vnfc_resource_info(self):
vnfc_resource_info = copy.deepcopy(fakes.vnfc_resource_info)
vnfc_resource_info.update(
{'compute_resource': self.resource_handle_info,
'vnf_link_ports': [self.vnf_link_ports_info],
'vnfc_cp_info': [self.vnfc_cp_info]})
vnfc_resource_info = objects.VnfcResourceInfo(
context=self.context, **vnfc_resource_info)
return vnfc_resource_info
def _create_virtual_link_resource_info(self):
vnf_virtual_link_resource_info = copy.deepcopy(
fakes.vnf_virtual_link_resource_info)
vnf_virtual_link_resource_info.update(
{'network_resource': self.resource_handle_info,
'vnf_link_ports': [self.vnf_link_ports_info]})
vnf_virtual_link_resource_info = objects.VnfVirtualLinkResourceInfo(
context=self.context, **vnf_virtual_link_resource_info)
return vnf_virtual_link_resource_info
def _create_virtual_storage_resource_info(self):
virtual_storage_resource_info = copy.deepcopy(
fakes.virtual_storage_resource_info)
virtual_storage_resource_info.update(
{'storage_resource': self.resource_handle_info})
virtual_storage_resource_info = objects.VirtualStorageResourceInfo(
context=self.context, **virtual_storage_resource_info)
return virtual_storage_resource_info
def test_save(self):
instantiated_vnf_info = copy.deepcopy(
fakes.get_instantiated_vnf_info())
instantiated_vnf_info.update(
{'ext_cp_info': [self.vnf_external_cp_info],
'vnf_instance_id': self.vnf_instance.id,
'ext_link_port_info': self.ext_link_port_info,
'ext_managed_virtual_link_info': [
self.ext_managed_virtual_link_info],
'vnfc_resource_info': [self.vnfc_resource_info],
'vnf_virtual_link_resource_info': [
self.virtual_link_resource_info],
'virtual_storage_resource_info': [
self.virtual_storage_resource_info]})
instantiated_vnf_info = objects.InstantiatedVnfInfo(
context=self.context, **instantiated_vnf_info)
instantiated_vnf_info.save()
self.assertIsNotNone(instantiated_vnf_info.created_at)
def test_resource_handle_obj_from_primitive_and_object_to_dict(self):
resource_handle = copy.deepcopy(fakes.resource_handle_info)
result = objects.ResourceHandle.obj_from_primitive(
resource_handle, self.context)
self.assertIsInstance(result, objects.ResourceHandle)
self.assertEqual('TEST', result.vim_level_resource_type)
resource_handle_dict = result.to_dict()
self.assertIsInstance(resource_handle_dict, dict)
self.assertEqual(
'TEST', resource_handle_dict['vim_level_resource_type'])
def test_virt_strg_res_info_obj_from_primitive_and_obj_to_dict(self):
virtual_storage_resource_info = copy.deepcopy(
fakes.virtual_storage_resource_info)
result = objects.VirtualStorageResourceInfo.obj_from_primitive(
virtual_storage_resource_info, self.context)
self.assertIsInstance(result,
objects.VirtualStorageResourceInfo)
virt_strg_res_info_dict = result.to_dict()
self.assertIsInstance(virt_strg_res_info_dict, dict)
def test_vnfc_cp_info_obj_from_primitive_and_obj_to_dict(self):
vnfc_cp_info = copy.deepcopy(fakes.vnfc_cp_info)
result = objects.VnfcCpInfo.obj_from_primitive(
vnfc_cp_info, self.context)
self.assertIsInstance(result, objects.VnfcCpInfo)
vnfc_cp_info = result.to_dict()
self.assertIsInstance(vnfc_cp_info, dict)
def test_vnfc_resource_info_obj_from_primitive_and_obj_to_dict(self):
vnfc_resource_info = copy.deepcopy(fakes.vnfc_resource_info)
result = objects.VnfcResourceInfo.obj_from_primitive(
vnfc_resource_info, self.context)
self.assertIsInstance(result, objects.VnfcResourceInfo)
self.assertEqual({'key': 'value'}, result.metadata)
vnfc_resource_info = result.to_dict()
self.assertIsInstance(vnfc_resource_info, dict)
def test_ext_mng_virt_link_obj_from_primitive_and_obj_to_dict(self):
ext_managed_virtual_link_info = copy.deepcopy(
fakes.ext_managed_virtual_link_info)
result = objects.ExtManagedVirtualLinkInfo.obj_from_primitive(
ext_managed_virtual_link_info, self.context)
self.assertIsInstance(result, objects.ExtManagedVirtualLinkInfo)
ext_mng_virt_link = result.to_dict()
self.assertIsInstance(ext_mng_virt_link, dict)
def test_ext_link_port_info_obj_from_primitive_and_obj_to_dict(self):
ext_link_port_info_data = copy.deepcopy(fakes.ext_link_port_info)
result = objects.ExtLinkPortInfo.obj_from_primitive(
ext_link_port_info_data, self.context)
self.assertIsInstance(result, objects.ExtLinkPortInfo)
ext_link_port_info = result.to_dict()
self.assertIsInstance(ext_link_port_info, dict)
def test_ext_virt_link_info_obj_from_primitive_and_obj_to_dict(self):
ext_virtual_link_info = copy.deepcopy(fakes.ext_virtual_link_info)
result = objects.ExtVirtualLinkInfo.obj_from_primitive(
ext_virtual_link_info, self.context)
self.assertIsInstance(result, objects.ExtVirtualLinkInfo)
ext_virt_link_info = result.to_dict()
self.assertIsInstance(ext_virt_link_info, dict)
def test_vnf_ext_cp_info_obj_from_primitive_and_obj_to_dict(self):
vnf_ext_cp_info = copy.deepcopy(fakes.vnf_ext_cp_info)
result = objects.VnfExtCpInfo.obj_from_primitive(
vnf_ext_cp_info, self.context)
self.assertIsInstance(result, objects.VnfExtCpInfo)
ext_virt_link_info = result.to_dict()
self.assertIsInstance(ext_virt_link_info, dict)
def test_instantiated_info_obj_from_primitive_and_obj_to_dict(self):
instantiated_vnf_info = copy.deepcopy(fakes.instantiated_vnf_info)
result = objects.InstantiatedVnfInfo.obj_from_primitive(
instantiated_vnf_info, self.context)
self.assertIsInstance(result, objects.InstantiatedVnfInfo)
instantiated_vnf_info_dict = result.to_dict()
self.assertIsInstance(instantiated_vnf_info_dict, dict)
| |
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on collections.
All functions here should be agnostic of how CollectionModel objects are
stored in the database. In particular, the various query methods should
delegate to the Collection model class. This will enable the collection
storage model to be changed without affecting this module and others above it.
"""
import collections
import copy
import datetime
import logging
import os
from core.domain import collection_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import summary_services
from core.domain import user_services
from core.platform import models
import feconf
import utils
# Platform-specific storage model and service bindings are resolved through
# the Registry so this module stays agnostic of the concrete backend.
(collection_models, user_models) = models.Registry.import_models([
    models.NAMES.collection, models.NAMES.user])
memcache_services = models.Registry.import_memcache_services()
search_services = models.Registry.import_search_services()

# Commit command name for creating a new collection.
# This takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'

# Name for the collection search index.
SEARCH_INDEX_COLLECTIONS = 'collections'

# The maximum number of iterations allowed for populating the results of a
# search query.
MAX_ITERATIONS = 10

# TODO(bhenning): Improve the ranking calculation. Some possible suggestions
# for a better ranking include using an average of the search ranks of each
# exploration referenced in the collection and/or demoting collections
# for any validation errors from explorations referenced in the collection.
_STATUS_PUBLICIZED_BONUS = 30
# This is done to prevent the rank hitting 0 too easily. Note that
# negative ranks are disallowed in the Search API.
_DEFAULT_RANK = 20
# Number of milliseconds in one day.
_MS_IN_ONE_DAY = 24 * 60 * 60 * 1000
def _migrate_collection_to_latest_schema(versioned_collection):
    """Migrates the given versioned collection dict, in place, up to the
    current collection schema version.

    This mirrors the exploration migration process in exp_services: whenever
    the collection schema changes, a conversion function must be added here
    and applied step by step.

    Args:
        versioned_collection: A dict with two keys:
          - schema_version: the schema version for the collection.
          - nodes: the list of collection nodes comprising the collection.
    """
    schema_version = versioned_collection['schema_version']
    if schema_version < 1 or (
            schema_version > feconf.CURRENT_COLLECTION_SCHEMA_VERSION):
        raise Exception(
            'Sorry, we can only process v1-v%d collection schemas at '
            'present.' % feconf.CURRENT_COLLECTION_SCHEMA_VERSION)

    # Conversion functions will be placed here once updates to the
    # collection schemas happen.
    # TODO(sll): Ensure that there is a test similar to
    # exp_domain_test.SchemaMigrationMethodsUnitTests to ensure that the
    # appropriate migration functions are declared.
# Repository GET methods.
def _get_collection_memcache_key(collection_id, version=None):
"""Returns a memcache key for an collection."""
if version:
return 'collection-version:%s:%s' % (collection_id, version)
else:
return 'collection:%s' % collection_id
def get_collection_from_model(collection_model, run_conversion=True):
    """Returns a Collection domain object given a collection model loaded
    from the datastore.

    If run_conversion is True and the stored schema version differs from
    feconf.CURRENT_COLLECTION_SCHEMA_VERSION, the collection structure is
    migrated to the latest schema before the domain object is built.

    IMPORTANT NOTE TO DEVELOPERS: In general, run_conversion should never be
    False. This option is only used for testing that the schema version
    migration works correctly, and it should never be changed otherwise.
    """
    # Work on a copy so the original collection model is not altered.
    versioned_collection = {
        'schema_version': collection_model.schema_version,
        'nodes': copy.deepcopy(collection_model.nodes)
    }

    needs_migration = (
        run_conversion and
        collection_model.schema_version !=
        feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
    if needs_migration:
        _migrate_collection_to_latest_schema(versioned_collection)

    nodes = [
        collection_domain.CollectionNode.from_dict(node_dict)
        for node_dict in versioned_collection['nodes']
    ]
    return collection_domain.Collection(
        collection_model.id, collection_model.title,
        collection_model.category, collection_model.objective,
        versioned_collection['schema_version'], nodes,
        collection_model.version, collection_model.created_on,
        collection_model.last_updated)
def get_collection_summary_from_model(collection_summary_model):
    """Returns a CollectionSummary domain object for the given model."""
    model = collection_summary_model
    return collection_domain.CollectionSummary(
        model.id, model.title, model.category, model.objective,
        model.status, model.community_owned, model.owner_ids,
        model.editor_ids, model.viewer_ids, model.contributor_ids,
        model.contributors_summary, model.version,
        model.collection_model_created_on,
        model.collection_model_last_updated)
def get_collection_by_id(collection_id, strict=True, version=None):
    """Returns a domain object representing a collection.

    Checks memcache first; on a miss, loads from the datastore and
    populates the cache.
    """
    memcache_key = _get_collection_memcache_key(collection_id, version=version)
    cached = memcache_services.get_multi([memcache_key]).get(memcache_key)
    if cached is not None:
        return cached

    collection_model = collection_models.CollectionModel.get(
        collection_id, strict=strict, version=version)
    if not collection_model:
        return None
    collection = get_collection_from_model(collection_model)
    memcache_services.set_multi({memcache_key: collection})
    return collection
def get_collection_summary_by_id(collection_id):
    """Returns a domain object representing a collection summary."""
    # TODO(msl): Maybe use memcache similarly to get_collection_by_id.
    summary_model = collection_models.CollectionSummaryModel.get(
        collection_id)
    if not summary_model:
        return None
    return get_collection_summary_from_model(summary_model)
def get_multiple_collections_by_id(collection_ids, strict=True):
    """Returns a dict of domain objects representing collections with the
    given ids as keys. If a collection_id is not present it is not included in
    the return dict.
    """
    collection_ids = set(collection_ids)
    result = {}

    # First satisfy as many ids as possible from memcache.
    memcache_keys = [_get_collection_memcache_key(i) for i in collection_ids]
    cache_result = memcache_services.get_multi(memcache_keys)
    for collection_obj in cache_result.itervalues():
        result[collection_obj.id] = collection_obj

    # Everything that missed the cache is fetched from the datastore.
    uncached = [cid for cid in collection_ids if cid not in result]

    db_collection_models = collection_models.CollectionModel.get_multi(
        uncached)
    db_results_dict = {}
    not_found = []
    for cid, model in zip(uncached, db_collection_models):
        if model:
            db_results_dict[cid] = get_collection_from_model(model)
        else:
            logging.info('Tried to fetch collection with id %s, but no such '
                         'collection exists in the datastore' % cid)
            not_found.append(cid)

    if strict and not_found:
        raise ValueError(
            'Couldn\'t find collections with the following ids:\n%s'
            % '\n'.join(not_found))

    # Backfill the cache with the freshly loaded collections.
    cache_update = {
        cid: collection for cid, collection in db_results_dict.iteritems()
        if collection is not None
    }
    if cache_update:
        memcache_services.set_multi(cache_update)

    result.update(db_results_dict)
    return result
def get_new_collection_id():
    """Returns a new, unique collection id string."""
    return collection_models.CollectionModel.get_new_id('')
def is_collection_summary_editable(collection_summary, user_id=None):
    """Checks if a given user may edit an collection by checking
    the given domain object.
    """
    if user_id is None:
        return False
    return bool(
        collection_summary.community_owned
        or user_id in collection_summary.owner_ids
        or user_id in collection_summary.editor_ids)
def get_learner_collection_dict_by_id(
        collection_id, user_id, strict=True, allow_invalid_explorations=False,
        version=None):
    """Creates and returns a dictionary representation of a collection given by
    the provided collection ID. This dictionary contains extra information
    along with the dict returned by collection_domain.Collection.to_dict()
    which includes useful data for the collection learner view. The information
    includes progress in the collection, information about explorations
    referenced within the collection, and a slightly nicer data structure for
    frontend work.

    Args:
        collection_id: str. The id of the collection to represent.
        user_id: str or None. If provided, the playthrough information is
            personalized to this user's progress.
        strict: bool. Passed through to get_collection_by_id.
        allow_invalid_explorations: bool. If True, nodes referencing deleted
            or (for public collections) private explorations are tolerated
            instead of raising.
        version: int or None. The collection version to load; None for latest.

    Returns:
        dict. The collection dict, augmented with 'skills',
        'playthrough_dict', 'version' and per-node 'exploration' metadata.

    Raises:
        utils.ValidationError: if allow_invalid_explorations is False and the
            collection references non-existent explorations, or a public
            collection references a private exploration.
    """
    collection = get_collection_by_id(
        collection_id, strict=strict, version=version)

    # Build a lookup table of displayable summaries for all referenced
    # explorations.
    exp_ids = collection.exploration_ids
    exp_summary_dicts = (
        summary_services.get_displayable_exp_summary_dicts_matching_ids(
            exp_ids))
    exp_summaries_dict_map = {
        exp_summary_dict['id']: exp_summary_dict
        for exp_summary_dict in exp_summary_dicts
    }

    # TODO(bhenning): Users should not be recommended explorations they have
    # completed outside the context of a collection (see #1461).
    next_exploration_ids = None
    completed_exploration_ids = None
    if user_id:
        completed_exploration_ids = _get_valid_completed_exploration_ids(
            user_id, collection_id, collection)
        next_exploration_ids = collection.get_next_exploration_ids(
            completed_exploration_ids)
    else:
        # If the user is not logged in or they have not completed any of
        # the explorations yet within the context of this collection,
        # recommend the initial explorations.
        next_exploration_ids = collection.init_exploration_ids
        completed_exploration_ids = []

    collection_dict = collection.to_dict()
    collection_dict['skills'] = collection.skills
    collection_dict['playthrough_dict'] = {
        'next_exploration_ids': next_exploration_ids,
        'completed_exploration_ids': completed_exploration_ids
    }
    collection_dict['version'] = collection.version
    collection_is_public = rights_manager.is_collection_public(collection_id)

    # Insert an 'exploration' dict into each collection node, where the
    # dict includes meta information about the exploration (ID and title).
    for collection_node in collection_dict['nodes']:
        exploration_id = collection_node['exploration_id']
        summary_dict = exp_summaries_dict_map.get(exploration_id)
        if not allow_invalid_explorations:
            if not summary_dict:
                raise utils.ValidationError(
                    'Expected collection to only reference valid '
                    'explorations, but found an exploration with ID: %s (was '
                    'the exploration deleted?)' % exploration_id)
            if collection_is_public and rights_manager.is_exploration_private(
                    exploration_id):
                raise utils.ValidationError(
                    'Cannot reference a private exploration within a public '
                    'collection, exploration ID: %s' % exploration_id)
        collection_node['exploration'] = {
            'exists': bool(summary_dict)
        }
        if summary_dict:
            collection_node['exploration'].update(summary_dict)

    return collection_dict
# Query methods.
def get_collection_titles_and_categories(collection_ids):
    """Returns collection titles and categories for the given ids.

    The result is a dict with collection ids as keys. The corresponding values
    are dicts with the keys 'title' and 'category'.

    Any invalid collection_ids will not be included in the return dict. No
    error will be raised.
    """
    result = {}
    for model in collection_models.CollectionModel.get_multi(collection_ids):
        if model is None:
            logging.error('Could not find collection corresponding to id')
            continue
        collection = get_collection_from_model(model)
        result[collection.id] = {
            'title': collection.title,
            'category': collection.category,
        }
    return result
def get_completed_exploration_ids(user_id, collection_id):
    """Returns a list of explorations the user has completed within the context
    of the provided collection. Returns an empty list if the user has not yet
    completed any explorations within the collection. Note that this function
    will also return an empty list if either the collection and/or user do not
    exist.

    A progress model isn't added until the first exploration of a collection is
    completed, so, if a model is missing, there isn't enough information to
    infer whether that means the collection doesn't exist, the user doesn't
    exist, or if they just haven't made any progress in that collection yet.
    Thus, we just assume the user and collection exist for the sake of this
    call, so it returns an empty list, indicating that no progress has yet been
    made.
    """
    progress_model = user_models.CollectionProgressModel.get(
        user_id, collection_id)
    return progress_model.completed_explorations if progress_model else []
def _get_valid_completed_exploration_ids(user_id, collection_id, collection):
    """Returns a filtered version of the return value of
    get_completed_exploration_ids, where explorations not also found within the
    collection are removed from the returned list.
    """
    valid_ids = []
    for exp_id in get_completed_exploration_ids(user_id, collection_id):
        if collection.get_node(exp_id):
            valid_ids.append(exp_id)
    return valid_ids
def get_next_exploration_ids_to_complete_by_user(user_id, collection_id):
    """Returns a list of exploration IDs in the specified collection that the
    given user has not yet attempted and has the prerequisite skills to play.

    Returns the collection's initial explorations if the user has yet to
    complete any explorations within the collection. Returns an empty list if
    the user has completed all of the explorations within the collection.

    See collection_domain.Collection.get_next_exploration_ids for more
    information.
    """
    completed_ids = get_completed_exploration_ids(user_id, collection_id)
    collection = get_collection_by_id(collection_id)
    if not completed_ids:
        # The user has yet to complete any explorations inside the collection.
        return collection.init_exploration_ids
    return collection.get_next_exploration_ids(completed_ids)
def record_played_exploration_in_collection_context(
        user_id, collection_id, exploration_id):
    """Records the exploration as completed by the user within the given
    collection, creating the progress model on first completion.
    """
    progress_model = user_models.CollectionProgressModel.get_or_create(
        user_id, collection_id)
    if exploration_id in progress_model.completed_explorations:
        return
    progress_model.completed_explorations.append(exploration_id)
    progress_model.put()
def _get_collection_summary_dicts_from_models(collection_summary_models):
    """Given an iterable of CollectionSummaryModel instances, create a dict
    containing corresponding collection summary domain objects, keyed by id.
    """
    result = {}
    for summary_model in collection_summary_models:
        summary = get_collection_summary_from_model(summary_model)
        result[summary.id] = summary
    return result
def get_collection_summaries_matching_ids(collection_ids):
    """Given a list of collection ids, return a list with the corresponding
    summary domain objects (or None if the corresponding summary does not
    exist).
    """
    summaries = []
    for model in collection_models.CollectionSummaryModel.get_multi(
            collection_ids):
        if model:
            summaries.append(get_collection_summary_from_model(model))
        else:
            summaries.append(None)
    return summaries
# TODO(bhenning): Update this function to support also matching the query to
# explorations contained within this collection. Introduce tests to verify this
# behavior.
def get_collection_summaries_matching_query(query_string, cursor=None):
    """Returns a list with all collection summary domain objects matching the
    given search query string, as well as a search cursor for future fetches.

    This method returns exactly feconf.GALLERY_PAGE_SIZE results if there are
    at least that many, otherwise it returns all remaining results. (If this
    behaviour does not occur, an error will be logged.) The method also returns
    a search cursor.
    """
    summary_models = []
    search_cursor = cursor

    # Repeatedly query the search index until a full page is accumulated or
    # the index is exhausted, up to MAX_ITERATIONS passes. Multiple passes
    # are needed because the index may contain ids of collections that have
    # since been deleted ("stale" ids), for which no summary model exists.
    for _ in range(MAX_ITERATIONS):
        remaining_to_fetch = feconf.GALLERY_PAGE_SIZE - len(summary_models)

        collection_ids, search_cursor = search_collections(
            query_string, remaining_to_fetch, cursor=search_cursor)

        # Collect the summary models that exist; record stale ids.
        invalid_collection_ids = []
        for ind, model in enumerate(
                collection_models.CollectionSummaryModel.get_multi(
                    collection_ids)):
            if model is not None:
                summary_models.append(model)
            else:
                invalid_collection_ids.append(collection_ids[ind])

        # Stop when a full page is collected, or when the search is
        # exhausted (indicated by a None cursor).
        if len(summary_models) == feconf.GALLERY_PAGE_SIZE or (
                search_cursor is None):
            break
        else:
            logging.error(
                'Search index contains stale collection ids: %s' %
                ', '.join(invalid_collection_ids))

    # Fewer results than a page while more remain means MAX_ITERATIONS was
    # exhausted without filling the page.
    if (len(summary_models) < feconf.GALLERY_PAGE_SIZE
            and search_cursor is not None):
        logging.error(
            'Could not fulfill search request for query string %s; at least '
            '%s retries were needed.' % (query_string, MAX_ITERATIONS))

    return ([
        get_collection_summary_from_model(summary_model)
        for summary_model in summary_models
    ], search_cursor)
# Repository SAVE and DELETE methods.
def apply_change_list(collection_id, change_list):
    """Applies a changelist to a pristine collection and returns the result.

    Each entry in change_list is a dict that represents an CollectionChange
    object.

    Args:
        collection_id: str. The id of the collection the changes apply to.
        change_list: list of dicts, each deserializable into a
            collection_domain.CollectionChange.

    Returns:
        the resulting collection domain object.
    """
    collection = get_collection_by_id(collection_id)
    try:
        changes = [collection_domain.CollectionChange(change_dict)
                   for change_dict in change_list]

        # Dispatch each change to the corresponding domain-object mutator.
        for change in changes:
            if change.cmd == collection_domain.CMD_ADD_COLLECTION_NODE:
                collection.add_node(change.exploration_id)
            elif change.cmd == collection_domain.CMD_DELETE_COLLECTION_NODE:
                collection.delete_node(change.exploration_id)
            elif (
                    change.cmd ==
                    collection_domain.CMD_EDIT_COLLECTION_NODE_PROPERTY):
                collection_node = collection.get_node(change.exploration_id)
                if (change.property_name ==
                        collection_domain.COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS): # pylint: disable=line-too-long
                    collection_node.update_prerequisite_skills(
                        change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS): # pylint: disable=line-too-long
                    collection_node.update_acquired_skills(change.new_value)
            elif change.cmd == collection_domain.CMD_EDIT_COLLECTION_PROPERTY:
                if (change.property_name ==
                        collection_domain.COLLECTION_PROPERTY_TITLE):
                    collection.update_title(change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_PROPERTY_CATEGORY):
                    collection.update_category(change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_PROPERTY_OBJECTIVE):
                    collection.update_objective(change.new_value)
            elif (
                    change.cmd ==
                    collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
                # Loading the collection model from the datastore into an
                # Collection domain object automatically converts it to use the
                # latest schema version. As a result, simply resaving the
                # collection is sufficient to apply the schema migration.
                continue
        return collection

    except Exception as e:
        # Log the full failure context before re-raising so problems can be
        # diagnosed from the server logs.
        logging.error(
            '%s %s %s %s' % (
                e.__class__.__name__, e, collection_id, change_list)
        )
        raise
def validate_exps_in_collection_are_public(collection):
    """Raises a ValidationError if any exploration referenced by the given
    collection is private.
    """
    for exp_id in collection.exploration_ids:
        if not rights_manager.is_exploration_private(exp_id):
            continue
        raise utils.ValidationError(
            'Cannot reference a private exploration within a public '
            'collection, exploration ID: %s' % exp_id)
def _save_collection(committer_id, collection, commit_message, change_list):
    """Validates an collection and commits it to persistent storage.

    If successful, increments the version number of the incoming collection
    domain object by 1.

    Args:
        committer_id: str. The id of the user making the commit.
        collection: the collection domain object to persist.
        commit_message: str. Description of the changes being committed.
        change_list: non-empty list of dicts, each representing a
            CollectionChange.

    Raises:
        Exception: if change_list is empty, or the domain object's version is
            out of sync with the stored model's version.
        utils.ValidationError: if the collection fails validation or
            references a non-existent (or, for public collections, private)
            exploration.
    """
    if not change_list:
        raise Exception(
            'Unexpected error: received an invalid change list when trying to '
            'save collection %s: %s' % (collection.id, change_list))
    collection_rights = rights_manager.get_collection_rights(collection.id)
    # Non-private collections are validated strictly; private ones may still
    # be works in progress.
    if collection_rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE:
        collection.validate(strict=True)
    else:
        collection.validate(strict=False)

    # Validate that all explorations referenced by the collection exist.
    exp_ids = collection.exploration_ids
    exp_summaries = (
        exp_services.get_exploration_summaries_matching_ids(exp_ids))
    exp_summaries_dict = {
        exp_id: exp_summaries[ind] for (ind, exp_id) in enumerate(exp_ids)
    }
    for collection_node in collection.nodes:
        if not exp_summaries_dict[collection_node.exploration_id]:
            raise utils.ValidationError(
                'Expected collection to only reference valid explorations, '
                'but found an exploration with ID: %s (was it deleted?)' %
                collection_node.exploration_id)

    # Ensure no explorations are being added that are 'below' the public status
    # of this collection. If the collection is private, it can have both
    # private and public explorations. If it's public, it can only have public
    # explorations.
    # TODO(bhenning): Ensure the latter is enforced above when trying to
    # publish a collection.
    if rights_manager.is_collection_public(collection.id):
        validate_exps_in_collection_are_public(collection)

    collection_model = collection_models.CollectionModel.get(
        collection.id, strict=False)
    if collection_model is None:
        collection_model = collection_models.CollectionModel(id=collection.id)
    else:
        # Guard against concurrent or stale edits: the in-memory version must
        # exactly match the stored version before committing.
        if collection.version > collection_model.version:
            raise Exception(
                'Unexpected error: trying to update version %s of collection '
                'from version %s. Please reload the page and try again.'
                % (collection_model.version, collection.version))
        elif collection.version < collection_model.version:
            raise Exception(
                'Trying to update version %s of collection from version %s, '
                'which is too old. Please reload the page and try again.'
                % (collection_model.version, collection.version))

    collection_model.category = collection.category
    collection_model.title = collection.title
    collection_model.objective = collection.objective
    collection_model.schema_version = collection.schema_version
    collection_model.nodes = [
        collection_node.to_dict() for collection_node in collection.nodes
    ]
    collection_model.commit(committer_id, commit_message, change_list)
    # Invalidate the cached copy and refresh the search index so that readers
    # observe the new version.
    memcache_services.delete(_get_collection_memcache_key(collection.id))
    index_collections_given_ids([collection.id])

    collection.version += 1
def _create_collection(committer_id, collection, commit_message, commit_cmds):
    """Saves a brand-new collection, creating its rights object first.

    _save_collection() consults the rights object to decide between strict
    and non-strict validation, so the rights must exist before the first
    commit.
    """
    # Validate before creating the rights object, so that a validation
    # failure does not leave an orphaned rights model behind.
    collection.validate(strict=False)
    rights_manager.create_new_collection_rights(collection.id, committer_id)
    model = collection_models.CollectionModel(
        id=collection.id,
        category=collection.category,
        title=collection.title,
        objective=collection.objective,
        schema_version=collection.schema_version,
        nodes=[node.to_dict() for node in collection.nodes],
    )
    model.commit(committer_id, commit_message, commit_cmds)
    collection.version += 1
    create_collection_summary(collection.id, committer_id)
def save_new_collection(committer_id, collection):
    """Commits a newly-constructed collection to the datastore."""
    commit_cmds = [{
        'cmd': CMD_CREATE_NEW,
        'title': collection.title,
        'category': collection.category,
    }]
    _create_collection(
        committer_id, collection,
        'New collection created with title \'%s\'.' % collection.title,
        commit_cmds)
def delete_collection(committer_id, collection_id, force_deletion=False):
    """Deletes the collection with the given collection_id.

    IMPORTANT: Callers of this function should ensure that committer_id has
    permissions to delete this collection, prior to calling this function.

    If force_deletion is True the collection and its history are fully deleted
    and are unrecoverable. Otherwise, the collection and all its history are
    marked as deleted, but the corresponding models are still retained in the
    datastore. This last option is the preferred one.

    Args:
        committer_id: str. The id of the user performing the deletion.
        collection_id: str. The id of the collection to delete.
        force_deletion: bool. Whether to hard-delete (True) or soft-delete
            (False, the default).
    """
    collection_rights_model = collection_models.CollectionRightsModel.get(
        collection_id)
    collection_rights_model.delete(
        committer_id, '', force_deletion=force_deletion)

    collection_model = collection_models.CollectionModel.get(collection_id)
    collection_model.delete(
        committer_id, feconf.COMMIT_MESSAGE_COLLECTION_DELETED,
        force_deletion=force_deletion)

    # This must come after the collection is retrieved. Otherwise the memcache
    # key will be reinstated.
    collection_memcache_key = _get_collection_memcache_key(collection_id)
    memcache_services.delete(collection_memcache_key)

    # Delete the collection from search.
    delete_documents_from_search_index([collection_id])

    # Delete the summary of the collection (regardless of whether
    # force_deletion is True or not).
    delete_collection_summary(collection_id)
def get_collection_snapshots_metadata(collection_id):
    """Return metadata dicts for every snapshot of the given collection.

    Args:
        collection_id: str. The id of the collection in question.

    Returns:
        list of dicts, one per snapshot, in ascending, consecutive version
        order. Each dict has the keys: committer_id, commit_message,
        commit_cmds, commit_type, created_on_ms, version_number. The list
        has collection.version_number items.
    """
    latest_version = get_collection_by_id(collection_id).version
    return collection_models.CollectionModel.get_snapshots_metadata(
        collection_id, range(1, latest_version + 1))
def publish_collection_and_update_user_profiles(committer_id, col_id):
    """Publish a collection via rights_manager.publish_collection() and stamp
    first_contribution_msec for every contributor who lacks one.

    It is the responsibility of the caller to check that the collection is
    valid prior to publication.
    """
    rights_manager.publish_collection(committer_id, col_id)
    now_msec = utils.get_current_time_in_millisecs()
    summary = get_collection_summary_by_id(col_id)
    for contributor_id in summary.contributor_ids:
        user_services.update_first_contribution_msec_if_not_set(
            contributor_id, now_msec)
def update_collection(
        committer_id, collection_id, change_list, commit_message):
    """Update a collection. Commits changes.

    Args:
    - committer_id: str. The id of the user who is performing the update
        action.
    - collection_id: str. The collection id.
    - change_list: list of dicts, each representing a CollectionChange object.
        These changes are applied in sequence to produce the resulting
        collection.
    - commit_message: str or None. A description of changes made to the
        collection. For published collections, this must be present; for
        unpublished collections, it may be equal to None.

    Raises:
        ValueError: if the collection is public but no commit message was
            supplied.
    """
    # Public collections must carry a human-readable commit message.
    is_public = rights_manager.is_collection_public(collection_id)
    if is_public and not commit_message:
        raise ValueError(
            'Collection is public so expected a commit message but '
            'received none.')
    # Apply the changes to an in-memory copy, persist it, then refresh the
    # derived summary model.
    collection = apply_change_list(collection_id, change_list)
    _save_collection(committer_id, collection, commit_message, change_list)
    update_collection_summary(collection.id, committer_id)
    # Record the committer's first contribution time once the collection is
    # no longer private.
    if not rights_manager.is_collection_private(collection.id):
        user_services.update_first_contribution_msec_if_not_set(
            committer_id, utils.get_current_time_in_millisecs())
def create_collection_summary(collection_id, contributor_id_to_add):
    """Recompute the summary of a collection and persist it."""
    summary = compute_summary_of_collection(
        get_collection_by_id(collection_id), contributor_id_to_add)
    save_collection_summary(summary)
def update_collection_summary(collection_id, contributor_id_to_add):
    """Refresh the stored summary of a collection (recreates it in full)."""
    create_collection_summary(collection_id, contributor_id_to_add)
def compute_summary_of_collection(collection, contributor_id_to_add):
    """Create a CollectionSummary domain object for a given Collection domain
    object and return it.

    Args:
        collection: the Collection domain object to summarize.
        contributor_id_to_add: str or None. The user to credit for this
            update, or None for a revert/non-positive commit.
    """
    collection_rights = collection_models.CollectionRightsModel.get_by_id(
        collection.id)
    collection_summary_model = (
        collection_models.CollectionSummaryModel.get_by_id(collection.id))
    # Update the contributor id list if necessary (contributors
    # defined as humans who have made a positive (i.e. not just
    # a revert) change to an collection's content).
    # Start from the existing summary's contributor data when one exists.
    if collection_summary_model:
        contributor_ids = collection_summary_model.contributor_ids
        contributors_summary = collection_summary_model.contributors_summary
    else:
        contributor_ids = []
        contributors_summary = {}
    # System users are never listed as contributors.
    if (contributor_id_to_add is not None and
            contributor_id_to_add not in feconf.SYSTEM_USER_IDS and
            contributor_id_to_add not in contributor_ids):
        contributor_ids.append(contributor_id_to_add)
    if contributor_id_to_add not in feconf.SYSTEM_USER_IDS:
        if contributor_id_to_add is None:
            # Revert commit or other non-positive commit: recompute counts
            # from the full commit history instead of incrementing.
            contributors_summary = compute_collection_contributors_summary(
                collection.id)
        else:
            if contributor_id_to_add in contributors_summary:
                contributors_summary[contributor_id_to_add] += 1
            else:
                contributors_summary[contributor_id_to_add] = 1
    collection_model_last_updated = collection.last_updated
    collection_model_created_on = collection.created_on
    collection_summary = collection_domain.CollectionSummary(
        collection.id, collection.title, collection.category,
        collection.objective, collection_rights.status,
        collection_rights.community_owned, collection_rights.owner_ids,
        collection_rights.editor_ids, collection_rights.viewer_ids,
        contributor_ids, contributors_summary,
        collection.version, collection_model_created_on,
        collection_model_last_updated
    )
    return collection_summary
def compute_collection_contributors_summary(collection_id):
    """Returns a dict whose keys are user_ids and whose values are
    the number of (non-revert) commits made to the given collection
    by that user_id. This does not count commits which have since been
    reverted.
    """
    snapshots_metadata = get_collection_snapshots_metadata(collection_id)
    current_version = len(snapshots_metadata)
    contributors_summary = collections.defaultdict(int)
    # Walk the history backwards from the latest version. Following a revert
    # jumps directly to the version that was reverted to, so the commits in
    # between (the reverted ones) are never counted.
    while True:
        snapshot_metadata = snapshots_metadata[current_version - 1]
        committer_id = snapshot_metadata['committer_id']
        is_revert = (snapshot_metadata['commit_type'] == 'revert')
        if not is_revert and committer_id not in feconf.SYSTEM_USER_IDS:
            contributors_summary[committer_id] += 1
        if current_version == 1:
            break
        if is_revert:
            # Jump to the version this revert commit restored.
            current_version = snapshot_metadata['commit_cmds'][0][
                'version_number']
        else:
            current_version -= 1
    return contributors_summary
def save_collection_summary(collection_summary):
    """Persist a CollectionSummary domain object as a CollectionSummaryModel
    entity in the datastore.
    """
    model_fields = {
        'id': collection_summary.id,
        'title': collection_summary.title,
        'category': collection_summary.category,
        'objective': collection_summary.objective,
        'status': collection_summary.status,
        'community_owned': collection_summary.community_owned,
        'owner_ids': collection_summary.owner_ids,
        'editor_ids': collection_summary.editor_ids,
        'viewer_ids': collection_summary.viewer_ids,
        'contributor_ids': collection_summary.contributor_ids,
        'contributors_summary': collection_summary.contributors_summary,
        'version': collection_summary.version,
        'collection_model_last_updated': (
            collection_summary.collection_model_last_updated),
        'collection_model_created_on': (
            collection_summary.collection_model_created_on),
    }
    collection_models.CollectionSummaryModel(**model_fields).put()
def delete_collection_summary(collection_id):
    """Remove the stored summary model of the given collection."""
    summary_model = collection_models.CollectionSummaryModel.get(collection_id)
    summary_model.delete()
def save_new_collection_from_yaml(committer_id, yaml_content, collection_id):
    """Deserialize a collection from YAML, commit it, and return it."""
    collection = collection_domain.Collection.from_yaml(
        collection_id, yaml_content)
    first_commit_cmds = [{
        'cmd': CMD_CREATE_NEW,
        'title': collection.title,
        'category': collection.category,
    }]
    _create_collection(
        committer_id, collection,
        "New collection created from YAML file with title '%s'."
        % collection.title,
        first_commit_cmds)
    return collection
def delete_demo(collection_id):
    """Fully delete a single demo collection, if it exists."""
    if not collection_domain.Collection.is_demo_collection_id(collection_id):
        raise Exception('Invalid demo collection id %s' % collection_id)
    # Nothing to do if the demo was never loaded; just record why.
    if not get_collection_by_id(collection_id, strict=False):
        logging.info('Collection with id %s was not deleted, because it '
                     'does not exist.' % collection_id)
        return
    delete_collection(
        feconf.SYSTEM_COMMITTER_ID, collection_id, force_deletion=True)
def load_demo(collection_id):
    """Loads a demo collection.

    The resulting collection will have version 2 (one for its initial
    creation and one for its subsequent modification.)
    """
    # Start from a clean slate so loading is reproducible.
    delete_demo(collection_id)
    if not collection_domain.Collection.is_demo_collection_id(collection_id):
        raise Exception('Invalid demo collection id %s' % collection_id)
    demo_filepath = os.path.join(
        feconf.SAMPLE_COLLECTIONS_DIR,
        feconf.DEMO_COLLECTIONS[collection_id])
    # Only YAML sources are supported.
    if demo_filepath.endswith('yaml'):
        yaml_content = utils.get_file_contents(demo_filepath)
    else:
        raise Exception('Unrecognized file path: %s' % demo_filepath)
    collection = save_new_collection_from_yaml(
        feconf.SYSTEM_COMMITTER_ID, yaml_content, collection_id)
    # Demo collections are published immediately and made searchable.
    publish_collection_and_update_user_profiles(
        feconf.SYSTEM_COMMITTER_ID, collection_id)
    index_collections_given_ids([collection_id])
    # Now, load all of the demo explorations that are part of the collection.
    for collection_node in collection.nodes:
        exp_id = collection_node.exploration_id
        # Only load the demo exploration if it is not yet loaded.
        if exp_services.get_exploration_by_id(exp_id, strict=False) is None:
            exp_services.load_demo(exp_id)
    logging.info('Collection with id %s was loaded.' % collection_id)
# TODO(bhenning): Cleanup search logic and abstract it between explorations and
# collections to avoid code duplication.
def get_next_page_of_all_commits(
        page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None):
    """Fetch one page of commits to all collections, newest first.

    The return value is a triple (results, cursor, more) as described in
    fetch_page() at:
    https://developers.google.com/appengine/docs/python/ndb/queryclass
    """
    results, new_urlsafe_start_cursor, more = (
        collection_models.CollectionCommitLogEntryModel.get_all_commits(
            page_size, urlsafe_start_cursor))
    # Convert each storage-layer entry into its domain-object counterpart.
    entries = []
    for entry in results:
        entries.append(collection_domain.CollectionCommitLogEntry(
            entry.created_on, entry.last_updated, entry.user_id,
            entry.username, entry.collection_id, entry.commit_type,
            entry.commit_message, entry.commit_cmds, entry.version,
            entry.post_commit_status, entry.post_commit_community_owned,
            entry.post_commit_is_private))
    return (entries, new_urlsafe_start_cursor, more)
def get_next_page_of_all_non_private_commits(
        page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None,
        max_age=None):
    """Returns a page of non-private commits in reverse time order. If max_age
    is given, it should be a datetime.timedelta instance.

    The return value is a triple (results, cursor, more) as described in
    fetch_page() at:
    https://developers.google.com/appengine/docs/python/ndb/queryclass

    Raises:
        ValueError: if max_age is neither a datetime.timedelta nor None.
    """
    if max_age is not None and not isinstance(max_age, datetime.timedelta):
        # NOTE: previous message was garbled ('instance. or None.').
        raise ValueError(
            'max_age must be a datetime.timedelta instance or None.')
    results, new_urlsafe_start_cursor, more = (
        collection_models.CollectionCommitLogEntryModel.get_all_non_private_commits( # pylint: disable=line-too-long
            page_size, urlsafe_start_cursor, max_age=max_age))
    return ([collection_domain.CollectionCommitLogEntry(
        entry.created_on, entry.last_updated, entry.user_id, entry.username,
        entry.collection_id, entry.commit_type, entry.commit_message,
        entry.commit_cmds, entry.version, entry.post_commit_status,
        entry.post_commit_community_owned, entry.post_commit_is_private
    ) for entry in results], new_urlsafe_start_cursor, more)
def _collection_rights_to_search_dict(rights):
    """Build the rights-derived fields of a collection's search document."""
    # Publicized collections are findable via "is:featured" queries.
    if rights.status == rights_manager.ACTIVITY_STATUS_PUBLICIZED:
        return {'is': 'featured'}
    return {}
def _should_index(collection):
    """Only non-private collections are indexed for search."""
    status = rights_manager.get_collection_rights(collection.id).status
    return status != rights_manager.ACTIVITY_STATUS_PRIVATE
def _get_search_rank(collection_id):
    """Compute the integer rank of a collection's search document.

    Publicized collections get a bonus, and so do collections whose most
    recent human-authored commit is recent. The result is never negative.
    """
    rights = rights_manager.get_collection_rights(collection_id)
    rank = _DEFAULT_RANK
    if rights.status == rights_manager.ACTIVITY_STATUS_PUBLICIZED:
        rank += _STATUS_PUBLICIZED_BONUS
    # Walk the version history from newest to oldest to find the last commit
    # not made by the migration bot.
    last_human_update_ms = 0
    for snapshot_metadata in reversed(
            get_collection_snapshots_metadata(collection_id)):
        if snapshot_metadata['committer_id'] != feconf.MIGRATION_BOT_USER_ID:
            last_human_update_ms = snapshot_metadata['created_on_ms']
            break
    time_delta_days = int(
        (utils.get_current_time_in_millisecs() - last_human_update_ms)
        / _MS_IN_ONE_DAY)
    # Recency bonus: today, yesterday, or within the last week.
    if time_delta_days == 0:
        rank += 80
    elif time_delta_days == 1:
        rank += 50
    elif 2 <= time_delta_days <= 7:
        rank += 35
    # Ranks must be non-negative.
    return max(rank, 0)
def _collection_to_search_dict(collection):
    """Assemble the complete search document for a collection."""
    doc = {
        'id': collection.id,
        'title': collection.title,
        'category': collection.category,
        'objective': collection.objective,
        'rank': _get_search_rank(collection.id),
    }
    # Fold in the rights-derived fields (e.g. 'is': 'featured').
    rights = rights_manager.get_collection_rights(collection.id)
    doc.update(_collection_rights_to_search_dict(rights))
    return doc
def clear_search_index():
    """Wipe the collections search index.

    WARNING: This runs in-request, and may therefore fail if there are too
    many entries in the index.
    """
    search_services.clear_index(SEARCH_INDEX_COLLECTIONS)
def index_collections_given_ids(collection_ids):
    """(Re)index the given collections, skipping ones that should not be
    indexed.
    """
    # We pass 'strict=False' so as not to index deleted collections.
    collections_to_consider = get_multiple_collections_by_id(
        collection_ids, strict=False).values()
    docs = []
    for collection in collections_to_consider:
        if _should_index(collection):
            docs.append(_collection_to_search_dict(collection))
    search_services.add_documents_to_index(docs, SEARCH_INDEX_COLLECTIONS)
def patch_collection_search_document(collection_id, update):
    """Merge the values in 'update' into a collection's current search
    document and re-index it.
    """
    existing_doc = search_services.get_document_from_index(
        collection_id, SEARCH_INDEX_COLLECTIONS)
    existing_doc.update(update)
    search_services.add_documents_to_index(
        [existing_doc], SEARCH_INDEX_COLLECTIONS)
def update_collection_status_in_search(collection_id):
    """Sync a collection's search-index presence with its rights status.

    Private collections are removed from the index; all others have their
    rights-derived fields (e.g. 'is:featured') patched in place.
    """
    rights = rights_manager.get_collection_rights(collection_id)
    if rights.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
        delete_documents_from_search_index([collection_id])
    else:
        # Consistency fix: use collection_id directly (the previous code
        # passed rights.id, which carries the same value but obscured that
        # both branches act on the same document).
        patch_collection_search_document(
            collection_id, _collection_rights_to_search_dict(rights))
def delete_documents_from_search_index(collection_ids):
    """Drop the given collections' documents from the search index."""
    search_services.delete_documents_from_index(
        collection_ids, SEARCH_INDEX_COLLECTIONS)
def search_collections(query, limit, sort=None, cursor=None):
    """Searches through the available collections.

    Args:
    - query: the query string to search for.
    - limit: the maximum number of results to return.
    - sort: a string indicating how to sort results. This should be a string
        of space separated values. Each value should start with a '+' or a
        '-' character indicating whether to sort in ascending or descending
        order respectively. This character should be followed by a field name
        to sort on. When this is None, results are based on 'rank'. See
        _get_search_rank to see how rank is determined.
    - cursor: A cursor, used to get the next page of results.
        If there are more documents that match the query than 'limit', this
        function will return a cursor to get the next page.

    Returns: a tuple:
    - a list of collection ids that match the query.
    - a cursor if there are more matching collections to fetch, None
        otherwise. If a cursor is returned, it will be a web-safe string that
        can be used in URLs.
    """
    return search_services.search(
        query, SEARCH_INDEX_COLLECTIONS, cursor, limit, sort, ids_only=True)
| |
from BeautifulSoup import BeautifulSoup, NavigableString, Tag
import mechanize
import os
import re
def compare_semesters(a_name, b_name):
    '''
    Takes two semester strings as arguments and behaves like the python cmp()
    function. Semester strings start with Spring, Summer, Fall, or Winter and
    end with a year (for example, "Fall 2010"). The cmp(a, b) function returns
    a negative value if a comes before b, zero if a == b, and a positive value
    if a comes after b.
    '''
    seasons = { 'Spring': 0, 'Summer': 1, 'Fall': 2, 'Winter': 3 }
    a_season, a_year = a_name.split()
    b_season, b_year = b_name.split()
    # (x > y) - (x < y) is the portable spelling of cmp(): int.__cmp__ only
    # exists on Python 2, so the original implementation breaks on Python 3.
    a_year, b_year = int(a_year), int(b_year)
    year_cmp = (a_year > b_year) - (a_year < b_year)
    a_rank, b_rank = seasons[a_season], seasons[b_season]
    season_cmp = (a_rank > b_rank) - (a_rank < b_rank)
    # The year dominates; the season only breaks ties within a year.
    return year_cmp if year_cmp else season_cmp
################################################################################
# class structure
################################################################################
class Course:
    '''Banner is represented as a list of Course objects, which own Semester
    objects.'''

    def __init__(self):
        # All descriptive fields default to empty strings; semesters is a
        # fresh list per instance.
        self.name = ''
        self.title = ''
        self.attributes = ''
        self.description = ''
        self.semesters = []

    def get_semester(self, name):
        '''Return the semester with the given name, creating it on demand.'''
        for existing in self.semesters:
            if existing.name == name:
                return existing
        # Not found: create, register, and return a fresh semester.
        new_semester = Semester()
        new_semester.name = name
        self.semesters.append(new_semester)
        return new_semester
class Semester:
    '''Semester objects own Section objects and are owned by Course objects.'''

    def __init__(self):
        # Scalar fields default to empty strings; sections is a fresh list
        # per instance.
        self.name = ''
        self.exam_time = ''
        self.exam_date = ''
        self.sections = []
class Section:
    '''Section objects own Meeting objects and are owned by Semester objects.'''

    def __init__(self):
        # CRN is Banner's numeric course reference number.
        self.crn = 0
        self.levels = ''
        self.xlist_data = ''
        self.registration_dates = ''
        self.meetings = []
class Meeting:
    '''Meeting objects are owned by Section objects.'''

    def __init__(self):
        # All fields are plain display strings scraped from Banner; the
        # attribute creation order matters to the XML/JSON serializers,
        # so keep it stable.
        for field in ('type', 'days', 'time', 'where', 'date_range',
                      'instructors'):
            setattr(self, field, '')
################################################################################
# xml output
################################################################################
def _courses_to_xml_helper(doc, parent, obj, name):
    '''Recursively append obj to parent as a <name> element of doc.'''
    element = doc.createElement(name)
    parent.appendChild(element)
    # Numbers serialize via their string form.
    if isinstance(obj, (int, float)):
        obj = str(obj)
    if isinstance(obj, basestring):
        element.appendChild(doc.createTextNode(obj))
    elif isinstance(obj, list):
        # Each list item becomes a child tag named after its class.
        for item in obj:
            _courses_to_xml_helper(
                doc, element, item, item.__class__.__name__.lower())
    else:
        # Arbitrary objects serialize attribute-by-attribute.
        for attr in obj.__dict__:
            _courses_to_xml_helper(doc, element, obj.__dict__[attr], attr)
def courses_to_xml(courses):
    '''Takes a list of Course objects and returns an XML string.'''
    import xml.dom.minidom as minidom
    document = minidom.Document()
    _courses_to_xml_helper(document, document, courses, 'courses')
    return document.toxml()
################################################################################
# json output
################################################################################
def _courses_to_json_helper(obj):
    '''Recursively convert obj into JSON-serializable primitives.'''
    if isinstance(obj, (int, float, basestring)):
        # Primitives pass straight through.
        return obj
    if isinstance(obj, list):
        return [_courses_to_json_helper(item) for item in obj]
    # Arbitrary objects become dicts keyed by attribute name.
    return dict(
        (attr, _courses_to_json_helper(obj.__dict__[attr]))
        for attr in obj.__dict__)
def courses_to_json(courses):
    '''Takes a list of Course objects and returns a JSON string.'''
    import json
    serializable = _courses_to_json_helper(courses)
    return json.dumps(serializable)
################################################################################
# downloading
################################################################################
# All downloaded pages are cached under this directory, one subdirectory
# per semester.
CACHE_DIR = '.cache'
BASE_URL = 'https://selfservice.brown.edu'
# Banner's dynamic schedule: landing page, detail endpoint, the regex that
# matches links to section detail pages, and where the pages are cached.
SCHEDULE_MAIN_URL = BASE_URL + '/ss/bwckschd.p_disp_dyn_sched'
SCHEDULE_DETAIL_URL = '/ss/bwckschd.p_disp_detail_sched'
SCHEDULE_LINK_REGEX = r'^/ss/bwckschd\.p_disp_detail_sched'
SCHEDULE_DATA_PATH = CACHE_DIR + '/%s/schedule/'
# The same four pieces for the course catalog.
CATALOG_MAIN_URL = BASE_URL + '/ss/bwckctlg.p_disp_dyn_ctlg'
CATALOG_DETAIL_URL = '/ss/bwckctlg.p_display_courses'
CATALOG_LINK_REGEX = r'^/ss/bwckctlg\.p_disp_course_detail'
CATALOG_DATA_PATH = CACHE_DIR + '/%s/catalog/'
# Exam-time pages are reached via "Display_Exam" links on section pages.
EXAM_LINK_REGEX = r'.*Display_Exam'
EXAM_DATA_PATH = CACHE_DIR + '/%s/exam times/'
# Banner shows this string on exam pages that carry no real exam info.
BAD_EXAM_INFO = 'Only the Primary Meeting of a course has scheduled exam information'
def _save(path, data):
'''Saves data in the given path after creating directories as needed.'''
try:
os.makedirs(path[:path.rfind('/')])
except OSError:
pass
open(path, 'w').write(data)
def _download_semester_helper(semester, start_url, path_template):
    '''Download every department page for one semester into the cache.

    semester: semester-name prefix to select in the form (e.g. "Fall 2010").
    start_url: Banner landing page containing the semester <select> form.
    path_template: cache path pattern with one %s slot for the semester.
    '''
    # open the main schedule page
    b = mechanize.Browser()
    b.set_handle_robots(False)
    b.open(start_url)
    # select the <option> that starts with the text in semester variable
    b.select_form(nr=0)
    found = False
    for item in b.find_control(type='select').items:
        if item.get_labels()[0].text.startswith(semester):
            item.selected = True
            found = True
            break
    if not found:
        print 'error: could not find semester "%s" on page %s' % (semester, start_url)
        import sys
        sys.exit()
    b.submit()
    # get the list of department codes
    b.select_form(nr=0)
    department_codes = map(str, b.find_control(type='select', nr=0).items)
    # download each department schedule
    for i, department_code in enumerate(department_codes):
        b.select_form(nr=0)
        b.find_control(type='select', nr=0).get(department_code).selected = True
        b.submit()
        html = b.response().read()
        _save((path_template % semester) + department_code + '.html', html)
        # go back to the form page so the next department can be selected
        b.back()
        print 'downloaded department %s, %.2f%% done' % (department_code,
            100.0 * (i + 1) / len(department_codes))
def _download_exam_times(semester):
    '''Scrape exam date/time pages for sections found in the cached schedule
    pages and save them under EXAM_DATA_PATH.
    '''
    directory = SCHEDULE_DATA_PATH % semester
    filenames = os.listdir(directory)
    for i, filename in enumerate(filenames):
        if not filename.endswith('.html'):
            continue
        data = open(directory + filename).read()
        soup = BeautifulSoup(data)
        for link in soup.findAll(href=re.compile(SCHEDULE_LINK_REGEX)):
            # Section link text looks like "Title - CRN - NAME - Index".
            title, crn, name, index = link.text.rsplit('-', 3)
            href = BASE_URL + link['href']
            b = mechanize.Browser()
            b.set_handle_robots(False)
            b.open(href)
            name = name.strip()
            for link in b.links(url_regex=re.compile(EXAM_LINK_REGEX)):
                b.follow_link(link)
                html = b.response().read()
                b.back()
                # Only cache pages that actually contain exam information.
                if 'Exam Date' in html and 'Exam Time' in html:
                    _save((EXAM_DATA_PATH % semester) + name + '.html', html)
                    print 'saved exam time for %s' % name
                    break
        print 'parsed %s, %.2f%% done' % (filename, 100.0 * (i + 1) / len(filenames))
def download_semester(semester_name):
    '''
    Download the entire semester given by the semester name (example: "Fall 2010")
    and store it in the local cache directory.
    '''
    print 'downloading', semester_name
    # Three passes: section schedules, the course catalog, then exam times
    # (which read the schedule pages cached by the first pass).
    print 'downloading schedule'
    _download_semester_helper(semester_name, SCHEDULE_MAIN_URL, SCHEDULE_DATA_PATH)
    print 'downloading catalog'
    _download_semester_helper(semester_name, CATALOG_MAIN_URL, CATALOG_DATA_PATH)
    print 'downloading exam times'
    _download_exam_times(semester_name)
################################################################################
# parsing
################################################################################
# get the text in between the nodes
# get the text in between the nodes
def _to_str(element):
    '''Concatenate every text node under element into one trimmed string.'''
    text_nodes = element.findAll(text=True)
    return ''.join(text_nodes).replace(' ', ' ').strip()
# get the text in between the nodes, but also convert <br> to '\n'
# get the text in between the nodes, but also convert <br> to '\n'
def _to_str_br(element):
    # An empty tag contributes no text.
    if not len(element.contents):
        return ''
    # Walk the parse tree in document order from the first child up to (but
    # not including) the node that follows element's last descendant.
    stopNode = element._lastRecursiveChild().next
    strings = []
    current = element.contents[0]
    while current is not stopNode:
        if isinstance(current, NavigableString):
            strings.append(current)
        elif isinstance(current, Tag) and current.name.lower() == 'br':
            # Line breaks become real newlines in the output.
            strings.append('\n')
        current = current.next
    return ''.join(strings).replace(' ', ' ').strip()
# normalize whitespace
def _fix(text):
return re.sub(' +', ' ', text.strip())
def _parse_semester_schedule(semester_name):
    '''Parse the cached schedule pages for one semester into Course objects
    (with Sections and Meetings attached); returns the list of courses.
    '''
    directory = SCHEDULE_DATA_PATH % semester_name
    filenames = os.listdir(directory)
    # Courses are deduplicated by name across all department files.
    name_to_course = {}
    for i, filename in enumerate(filenames):
        if not filename.endswith('.html'):
            continue
        data = open(directory + filename).read()
        soup = BeautifulSoup(data)
        for link in soup.findAll(href=re.compile(SCHEDULE_LINK_REGEX)):
            # <table>
            # <tr><th><a>this link</a></th></tr>
            # <tr><td>the goods</td></tr>
            # </table>
            # a -> th -> tr -> tr -> td
            element = link.parent.parent.nextSibling.nextSibling
            # extract section information from the link
            # (link text looks like "Title - CRN - NAME - Index")
            section = Section()
            title, crn, name, index = link.text.rsplit('-', 3)
            section.crn = int(_fix(crn))
            # extract section information from the details
            lines = _to_str(element).split('\n')
            items = {}
            for line in lines:
                if ':' in line:
                    key, value = line.split(':', 1)
                    items[key] = value.strip()
            section.levels = _fix(items.get('Levels', ''))
            section.registration_dates = _fix(items.get('Registration Dates', ''))
            # special-case crosslist data
            xlist_data = _to_str_br(element)
            if 'XLIST' in name and 'Associated Term:' in xlist_data:
                section.xlist_data = xlist_data[:xlist_data.find('Associated Term:')].replace(' ', ' ').strip()
            # extract meetings (xlists don't have tables)
            table = element.find('table')
            section.meetings = []
            if table:
                rows = table.findAll('tr')
                # Header row supplies the dict keys (lowercased, underscored).
                labels = [_fix(_to_str(cell).lower()).replace(' ', '_') for cell in rows[0].findAll('th')]
                for row in rows[1:]:
                    cells = [_fix(_to_str(cell)) for cell in row.findAll('td')]
                    meeting_dict = dict(zip(labels, cells))
                    meeting = Meeting()
                    meeting.type = meeting_dict['type']
                    meeting.days = meeting_dict['days']
                    meeting.time = meeting_dict['time']
                    meeting.where = meeting_dict['where']
                    meeting.date_range = meeting_dict['date_range']
                    meeting.instructors = meeting_dict['instructors']
                    section.meetings.append(meeting)
            # add section to courses, creating a course if necessary
            name = _fix(name)
            title = _fix(title)
            course = name_to_course.setdefault(name, Course())
            if course.title and course.title != title:
                print 'warning(%s): title "%s" and "%s" differ' % (name, course.title, title)
            course.name = name
            course.title = title
            course.get_semester(semester_name).sections.append(section)
        print 'parsed schedule %s, %.2f%% done' % (filename, 100.0 * (i + 1) / len(filenames))
    return name_to_course.values()
def _parse_semester_catalog(semester_name):
    '''Parse the cached catalog pages for one semester into Course objects
    carrying title, description, and attributes (no sections).
    '''
    directory = CATALOG_DATA_PATH % semester_name
    filenames = os.listdir(directory)
    courses = []
    for i, filename in enumerate(filenames):
        if not filename.endswith('.html'):
            continue
        data = open(directory + filename).read()
        soup = BeautifulSoup(data)
        for link in soup.findAll(href=re.compile(CATALOG_LINK_REGEX)):
            # <table>
            # <tr><td><a>this link</a></td></tr>
            # <tr><td>the goods</td></tr>
            # </table>
            # a -> td -> tr -> tr -> td
            element = link.parent.parent.nextSibling.nextSibling
            # extract course information from the link
            course = Course()
            name, title = link.text.split('-', 1)
            course.name = _fix(name)
            course.title = _fix(title)
            # extract course information from the details
            lines = _to_str(element).split('\n')
            description = ''
            reading_description = True
            for line in lines:
                line = _fix(line)
                # The free-text description runs until the hours line.
                if line.endswith('Credit hours') or line.endswith('Lecture hours'):
                    reading_description = False
                elif line.startswith('Course Attributes:'):
                    course.attributes = _fix(line[line.find(':')+1:])
                elif reading_description:
                    description += line + '\n'
            course.description = _fix(description)
            courses.append(course)
        print 'parsed catalog %s, %.2f%% done' % (filename, 100.0 * (i + 1) / len(filenames))
    return courses
def _parse_exam_times(semester_name):
    '''Parse the cached exam pages into Course objects whose semester holds
    the exam date and time.
    '''
    directory = EXAM_DATA_PATH % semester_name
    filenames = os.listdir(directory)
    courses = []
    for i, filename in enumerate(filenames):
        if not filename.endswith('.html'):
            continue
        data = open(directory + filename).read()
        exam_time = None
        exam_date = None
        soup = BeautifulSoup(data)
        # The value cells sit two siblings after the label cells.
        for element in soup.findAll(text='Exam Date'):
            exam_date = element.parent.nextSibling.nextSibling.text
        for element in soup.findAll(text='Exam Time'):
            exam_time = element.parent.nextSibling.nextSibling.text
        if exam_date and exam_time:
            # The cached file is named after the course.
            course = Course()
            course.name = filename.replace('.html', '')
            courses.append(course)
            semester = course.get_semester(semester_name)
            semester.exam_date = exam_date
            semester.exam_time = exam_time
        print 'parsed exam time %s, %.2f%% done' % (filename, 100.0 * (i + 1) / len(filenames))
    return courses
def parse_semester(semester_name):
    '''
    Parse the entire semester given by the semester name (example: "Fall 2010")
    and return a list of Course objects for that semester. Must download the
    semester with download_semester() before parsing.
    '''
    print 'parsing semester', semester_name
    schedule_courses = _parse_semester_schedule(semester_name)
    catalog_courses = _parse_semester_catalog(semester_name)
    exam_time_courses = _parse_exam_times(semester_name)
    # make indices for quick access
    schedule_index = dict((course.name, course) for course in schedule_courses)
    catalog_index = dict((course.name, course) for course in catalog_courses)
    exam_time_index = dict((course.name, course) for course in exam_time_courses)
    # consistency check
    for name in schedule_index:
        if name not in catalog_index:
            print 'warning(%s): course in schedule but not in catalog' % name
    for name in exam_time_index:
        if name not in catalog_index:
            print 'warning(%s): course in exam times but not in catalog' % name
    # merge the courses (the catalog is the authoritative course list)
    courses = []
    for name in catalog_index:
        course = catalog_index[name]
        courses.append(course)
        # merge with schedule
        if name in schedule_index:
            schedule_course = schedule_index[name]
            course.semesters = schedule_course.semesters
            if course.title != schedule_course.title:
                print 'warning(%s): title mismatch between catalog "%s" and schedule "%s", keeping catalog title' % \
                    (name, course.title, schedule_course.title)
        # merge with exam times
        if name in exam_time_index:
            exam_time_semester = exam_time_index[name].get_semester(semester_name)
            semester = course.get_semester(semester_name)
            semester.exam_time = exam_time_semester.exam_time
            semester.exam_date = exam_time_semester.exam_date
    return courses
################################################################################
# merging
################################################################################
def merge_courses(old_courses, new_courses):
'''
Merge courses with the same name in old_courses and new_courses, returns
the list of merged courses.
'''
courses_index = {}
old_courses_index = dict((course.name, course) for course in old_courses)
new_courses_index = dict((course.name, course) for course in new_courses)
courses_index = old_courses_index
for name in new_courses_index:
new_course = new_courses_index[name]
if name not in courses_index:
courses_index[name] = new_course
else:
old_course = courses_index[name]
old_course.semesters.extend(new_course.semesters)
if old_course.title != new_course.title:
print 'warning(%s): title "%s" differs from title "%s", using more recent one' % \
(name, old_course.title, new_course.title)
if old_course.attributes != new_course.attributes:
print 'warning(%s): attributes "%s" differ from attributes "%s", using more recent one' % \
(name, old_course.attributes, new_course.attributes)
if old_course.description != new_course.description:
print 'warning(%s): description "%s" differs from description "%s", using more recent one' % \
(name, old_course.description, new_course.description)
# for conflicts, use more recent info (assuming old_course is older than new_course)
old_course.title = new_course.title
old_course.attributes = new_course.attributes
old_course.title = new_course.title
return courses_index.values()
################################################################################
# unit tests
################################################################################
import unittest
class _Tester(unittest.TestCase):
    '''Sanity checks for compare_semesters() ordering.'''

    def test_semester_cmp(self):
        # Each (earlier, later) pair must compare negative one way and
        # positive the other.
        ordered_pairs = [
            ('Spring 2009', 'Spring 2010'),
            ('Spring 2010', 'Summer 2010'),
            ('Summer 2010', 'Fall 2010'),
            ('Fall 2010', 'Winter 2010'),
            ('Spring 2010', 'Winter 2010'),
        ]
        for earlier, later in ordered_pairs:
            self.assertTrue(compare_semesters(earlier, later) < 0 and
                            compare_semesters(later, earlier) > 0)
        # A semester equals itself.
        self.assertTrue(compare_semesters('Spring 2010', 'Spring 2010') == 0)
if __name__ == '__main__':
    import sys
    # Run the unit tests when invoked as "python <script> test"; the 'test'
    # token is removed so unittest does not mistake it for a test name.
    if 'test' in sys.argv:
        sys.argv.remove('test')
        unittest.main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import fixtures
import microversion_parse
import os
from unittest import mock
from oslo_utils.fixture import uuidsentinel
from oslotest import base
from nova.compute import provider_config
from nova import exception as nova_exc
class SchemaValidationMixin(base.BaseTestCase):
    """This class provides the basic methods for running schema validation test
    cases. It can be used along with ddt.file_data to test a specific schema
    version using tests defined in yaml files. See SchemaValidationTestCasesV1
    for an example of how this was done for schema version 1.
    Because decorators can only access class properties of the class they are
    defined in (even when overriding values in the subclass), the decorators
    need to be placed in the subclass. This is why there are test_ functions in
    the subclass that call the run_test_ methods in this class. This should
    keep things simple as more schema versions are added.
    """
    def setUp(self):
        super(SchemaValidationMixin, self).setUp()
        # Mock the YAML loader so each test injects its config dict
        # directly instead of touching the filesystem.
        self.mock_load_yaml = self.useFixture(
            fixtures.MockPatchObject(
                provider_config, '_load_yaml_file')).mock
        # Mock the module logger so warning calls can be asserted on.
        self.mock_LOG = self.useFixture(
            fixtures.MockPatchObject(
                provider_config, 'LOG')).mock
    def set_config(self, config=None):
        # Route the given config (default: empty dict) through the mocked
        # loader and return it so callers can compare against it later.
        data = config or {}
        self.mock_load_yaml.return_value = data
        return data
    def run_test_validation_errors(self, config, expected_messages):
        # Parsing an invalid config must raise ProviderConfigException and
        # the exception message must contain every expected fragment.
        self.set_config(config=config)
        actual_msg = self.assertRaises(
            nova_exc.ProviderConfigException,
            provider_config._parse_provider_yaml, 'test_path').message
        for msg in expected_messages:
            self.assertIn(msg, actual_msg)
    def run_test_validation_success(self, config):
        # A valid config must round-trip through the parser unchanged.
        reference = self.set_config(config=config)
        actual = provider_config._parse_provider_yaml('test_path')
        self.assertEqual(reference, actual)
    def run_schema_version_matching(
            self, min_schema_version, max_schema_version):
        # note _load_yaml_file is mocked so the value is not important
        # however it may appear in logs messages so changing it could
        # result in tests failing unless the expected_messages field
        # is updated in the test data.
        path = 'test_path'
        # test exactly min and max versions are supported
        self.set_config(config={
            'meta': {'schema_version': str(min_schema_version)}})
        provider_config._parse_provider_yaml(path)
        self.set_config(config={
            'meta': {'schema_version': str(max_schema_version)}})
        provider_config._parse_provider_yaml(path)
        self.mock_LOG.warning.assert_not_called()
        # test max major+1 raises
        higher_major = microversion_parse.Version(
            major=max_schema_version.major + 1, minor=max_schema_version.minor)
        self.set_config(config={'meta': {'schema_version': str(higher_major)}})
        self.assertRaises(nova_exc.ProviderConfigException,
                          provider_config._parse_provider_yaml, path)
        # test max major with max minor+1 is logged
        higher_minor = microversion_parse.Version(
            major=max_schema_version.major, minor=max_schema_version.minor + 1)
        expected_log_call = (
            "Provider config file [%(path)s] is at schema version "
            "%(schema_version)s. Nova supports the major version, but "
            "not the minor. Some fields may be ignored." % {
                "path": path, "schema_version": higher_minor})
        self.set_config(config={'meta': {'schema_version': str(higher_minor)}})
        provider_config._parse_provider_yaml(path)
        self.mock_LOG.warning.assert_called_once_with(expected_log_call)
@ddt.ddt
class SchemaValidationTestCasesV1(SchemaValidationMixin):
    """Data-driven validation tests for provider config schema version 1.

    The ddt decorators must live here (not in the mixin) because they can
    only see class attributes of the class they are defined in.
    """
    # Inclusive bounds of the schema versions this class exercises.
    MIN_SCHEMA_VERSION = microversion_parse.Version(1, 0)
    MAX_SCHEMA_VERSION = microversion_parse.Version(1, 0)
    @ddt.unpack
    @ddt.file_data('provider_config_data/v1/validation_error_test_data.yaml')
    def test_validation_errors(self, config, expected_messages):
        # Delegates to the mixin; cases come from the yaml data file.
        self.run_test_validation_errors(config, expected_messages)
    @ddt.unpack
    @ddt.file_data('provider_config_data/v1/validation_success_test_data.yaml')
    def test_validation_success(self, config):
        self.run_test_validation_success(config)
    def test_schema_version_matching(self):
        self.run_schema_version_matching(self.MIN_SCHEMA_VERSION,
                                         self.MAX_SCHEMA_VERSION)
@ddt.ddt
class ValidateProviderConfigTestCases(base.BaseTestCase):
    """Tests for provider_config._validate_provider_config."""
    @ddt.unpack
    @ddt.file_data('provider_config_data/validate_provider_good_config.yaml')
    def test__validate_provider_good_config(self, sample):
        # Valid samples must pass without raising.
        provider_config._validate_provider_config(sample, "fake_path")
    @ddt.unpack
    @ddt.file_data('provider_config_data/validate_provider_bad_config.yaml')
    def test__validate_provider_bad_config(self, sample, expected_messages):
        # Invalid samples must raise; the actual message must be one of
        # the expected messages from the data file.
        actual_msg = self.assertRaises(
            nova_exc.ProviderConfigException,
            provider_config._validate_provider_config,
            sample, 'fake_path').message
        self.assertIn(actual_msg, expected_messages)
    @mock.patch.object(provider_config, 'LOG')
    def test__validate_provider_config_one_noop_provider(self, mock_log):
        # A provider with no additional inventories or traits is ignored
        # (with a warning) while other providers are still returned.
        expected = {
            "providers": [
                {
                    "identification": {"name": "NAME1"},
                    "inventories": {
                        "additional": [
                            {"CUSTOM_RESOURCE_CLASS": {}}
                        ]
                    }
                },
                {
                    "identification": {"name": "NAME_453764"},
                    "inventories": {
                        "additional": []
                    },
                    "traits": {
                        "additional": []
                    }
                }
            ]
        }
        data = copy.deepcopy(expected)
        valid = provider_config._validate_provider_config(data, "fake_path")
        mock_log.warning.assert_called_once_with(
            "Provider NAME_453764 defined in "
            "fake_path has no additional "
            "inventories or traits and will be ignored."
        )
        # assert that _validate_provider_config does not mutate inputs
        self.assertEqual(expected, data)
        # assert that the first entry in the returned tuple is the full set
        # of providers not a copy and is equal to the expected providers.
        self.assertIs(data['providers'][0], valid[0])
        self.assertEqual(expected['providers'][0], valid[0])
class GetProviderConfigsTestCases(base.BaseTestCase):
    """Tests for provider_config.get_provider_configs."""
    @mock.patch.object(provider_config, 'glob')
    def test_get_provider_configs_one_file(self, mock_glob):
        # Parses the real example yaml shipped with the tests; providers
        # come back keyed by name with their source file recorded.
        expected = {
            "$COMPUTE_NODE": {
                "__source_file": "example_provider.yaml",
                "identification": {
                    "name": "$COMPUTE_NODE"
                },
                "inventories": {
                    "additional": [
                        {
                            "CUSTOM_EXAMPLE_RESOURCE_CLASS": {
                                "total": 100,
                                "reserved": 0,
                                "min_unit": 1,
                                "max_unit": 10,
                                "step_size": 1,
                                "allocation_ratio": 1.0
                            }
                        }
                    ]
                },
                "traits": {
                    "additional": [
                        "CUSTOM_TRAIT_ONE",
                        "CUSTOM_TRAIT2"
                    ]
                }
            }
        }
        example_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'provider_config_data/v1/example_provider.yaml')
        mock_glob.glob.return_value = [example_file]
        actual = provider_config.get_provider_configs('path')
        self.assertEqual(expected, actual)
        mock_glob.glob.assert_called_with('path/*.yaml')
    @mock.patch.object(provider_config, 'glob')
    @mock.patch.object(provider_config, '_parse_provider_yaml')
    def test_get_provider_configs_one_file_uuid_conflict(
            self, mock_parser, mock_glob):
        # one config file with conflicting identification
        providers = [
            {"__source_file": "file1.yaml",
             "identification": {
                 "uuid": uuidsentinel.uuid1
             },
             "inventories": {
                 "additional": [
                     {
                         "CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
                             "total": 100,
                             "reserved": 0,
                             "min_unit": 1,
                             "max_unit": 10,
                             "step_size": 1,
                             "allocation_ratio": 1
                         }
                     }
                 ]
             },
             "traits": {
                 "additional": [
                     "CUSTOM_TRAIT1"
                 ]
             }
             },
            {"__source_file": "file1.yaml",
             "identification": {
                 "uuid": uuidsentinel.uuid1
             },
             "inventories": {
                 "additional": [
                     {
                         "CUSTOM_EXAMPLE_RESOURCE_CLASS2": {
                             "total": 100,
                             "reserved": 0,
                             "min_unit": 1,
                             "max_unit": 10,
                             "step_size": 1,
                             "allocation_ratio": 1
                         }
                     }
                 ]
             },
             "traits": {
                 "additional": [
                     "CUSTOM_TRAIT2"
                 ]
             }
             }
        ]
        mock_parser.side_effect = [{"providers": providers}]
        mock_glob.glob.return_value = ['file1.yaml']
        # test that correct error is raised and message matches
        error = self.assertRaises(nova_exc.ProviderConfigException,
                                  provider_config.get_provider_configs,
                                  'dummy_path').kwargs['error']
        self.assertEqual("Provider %s has multiple definitions in source "
                         "file(s): ['file1.yaml']." % uuidsentinel.uuid1,
                         error)
    @mock.patch.object(provider_config, 'glob')
    @mock.patch.object(provider_config, '_parse_provider_yaml')
    def test_get_provider_configs_two_files(self, mock_parser, mock_glob):
        # Two files, two distinct providers: both must appear in the result
        # and the parser must be called once per globbed file.
        expected = {
            "EXAMPLE_RESOURCE_PROVIDER1": {
                "__source_file": "file1.yaml",
                "identification": {
                    "name": "EXAMPLE_RESOURCE_PROVIDER1"
                },
                "inventories": {
                    "additional": [
                        {
                            "CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
                                "total": 100,
                                "reserved": 0,
                                "min_unit": 1,
                                "max_unit": 10,
                                "step_size": 1,
                                "allocation_ratio": 1
                            }
                        }
                    ]
                },
                "traits": {
                    "additional": [
                        "CUSTOM_TRAIT1"
                    ]
                }
            },
            "EXAMPLE_RESOURCE_PROVIDER2": {
                "__source_file": "file2.yaml",
                "identification": {
                    "name": "EXAMPLE_RESOURCE_PROVIDER2"
                },
                "inventories": {
                    "additional": [
                        {
                            "CUSTOM_EXAMPLE_RESOURCE_CLASS2": {
                                "total": 100,
                                "reserved": 0,
                                "min_unit": 1,
                                "max_unit": 10,
                                "step_size": 1,
                                "allocation_ratio": 1
                            }
                        }
                    ]
                },
                "traits": {
                    "additional": [
                        "CUSTOM_TRAIT2"
                    ]
                }
            }
        }
        mock_parser.side_effect = [
            {"providers": [provider]} for provider in expected.values()]
        mock_glob_return = ['file1.yaml', 'file2.yaml']
        mock_glob.glob.return_value = mock_glob_return
        dummy_path = 'dummy_path'
        actual = provider_config.get_provider_configs(dummy_path)
        mock_glob.glob.assert_called_once_with(os.path.join(dummy_path,
                                                            '*.yaml'))
        mock_parser.assert_has_calls([mock.call(param)
                                      for param in mock_glob_return])
        self.assertEqual(expected, actual)
    @mock.patch.object(provider_config, 'glob')
    @mock.patch.object(provider_config, '_parse_provider_yaml')
    def test_get_provider_configs_two_files_name_conflict(self, mock_parser,
                                                          mock_glob):
        # two config files with conflicting identification
        configs = {
            "EXAMPLE_RESOURCE_PROVIDER1": {
                "__source_file": "file1.yaml",
                "identification": {
                    "name": "EXAMPLE_RESOURCE_PROVIDER1"
                },
                "inventories": {
                    "additional": [
                        {
                            "CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
                                "total": 100,
                                "reserved": 0,
                                "min_unit": 1,
                                "max_unit": 10,
                                "step_size": 1,
                                "allocation_ratio": 1
                            }
                        }
                    ]
                },
                "traits": {
                    "additional": [
                        "CUSTOM_TRAIT1"
                    ]
                }
            },
            "EXAMPLE_RESOURCE_PROVIDER2": {
                "__source_file": "file2.yaml",
                "identification": {
                    "name": "EXAMPLE_RESOURCE_PROVIDER1"
                },
                "inventories": {
                    "additional": [
                        {
                            "CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
                                "total": 100,
                                "reserved": 0,
                                "min_unit": 1,
                                "max_unit": 10,
                                "step_size": 1,
                                "allocation_ratio": 1
                            }
                        }
                    ]
                },
                "traits": {
                    "additional": [
                        "CUSTOM_TRAIT1"
                    ]
                }
            }
        }
        mock_parser.side_effect = [{"providers": [configs[provider]]}
                                   for provider in configs]
        mock_glob.glob.return_value = ['file1.yaml', 'file2.yaml']
        # test that correct error is raised and message matches
        error = self.assertRaises(nova_exc.ProviderConfigException,
                                  provider_config.get_provider_configs,
                                  'dummy_path').kwargs['error']
        self.assertEqual("Provider EXAMPLE_RESOURCE_PROVIDER1 has multiple "
                         "definitions in source file(s): "
                         "['file1.yaml', 'file2.yaml'].", error)
    @mock.patch.object(provider_config, 'LOG')
    def test_get_provider_configs_no_configs(self, mock_log):
        # A path with no yaml files yields an empty mapping and an
        # informational log, not an error.
        path = "invalid_path!@#"
        actual = provider_config.get_provider_configs(path)
        self.assertEqual({}, actual)
        mock_log.info.assert_called_once_with(
            "No provider configs found in %s. If files are present, "
            "ensure the Nova process has access.", path)
| |
import ast
import inspect
import imp
import numpy as np
import os
import re
from pyspark.mllib.feature import Word2Vec
# =============================================================================
# Helper Functions
#
# These helper functions will be used in Py2VecModel's __get_file_docstrings()
#
# These helper functions extract the docstring of each:
# * function definition
# * class definition
# * class's function definition
# * import library
# * from ... import ... library
#
# =============================================================================
def getModule(parent_module_name, this_module_name):
    """Import and return a module by name.

    parent_module_name -- dotted name of the package/module to import.
    this_module_name -- name of a submodule/attribute of the parent to
        return, or None to return the parent module itself.
    """
    # this implementation only works on python 2.7
    parent_module = __import__(parent_module_name, globals(), locals(), this_module_name)
    if this_module_name is None:
        return parent_module
    # BUG FIX: getattr must be applied to the imported module object, not
    # to the name string (the original passed parent_module_name here,
    # which always raised AttributeError for a non-None child name).
    return getattr(parent_module, this_module_name)
"""
import importlib
def getModule(parent_module_name, this_module_name):
# this implementation only works on python 3
parent_module_name = importlib.import_module(parent_module_name)
if this_module_name is None:
return parent_module
else:
this_module = getattr(parent_module_name, this_module_name)
return this_module
"""
def __get_repo_docstring(repo_name):
    """Return the docstring of the repo's root module, or "" on failure."""
    try:
        return inspect.getdoc(getModule(repo_name, None))
    except Exception:
        return ""
def __get_file_docstring(file_path):
    """Return the concatenated docstrings of a file's parent package and
    the file's own module, or "" on any failure.

    Only the immediate parent package and the module itself are inspected,
    not any higher-level ancestor packages (e.g. for a.b.c.d only c and d
    are consulted, never a or b).
    """
    try:
        package_name = os.path.dirname(file_path).replace("/", ".")
        module_name = os.path.splitext(os.path.basename(file_path))[0]
        # If either getdoc() returns None the += below raises TypeError,
        # which the blanket handler converts into "".
        docstring = inspect.getdoc(getModule(str(package_name), None))
        docstring += inspect.getdoc(getModule(str(package_name), str(module_name)))
        return docstring
    except Exception:
        return ""
def __get_import_docstring(ast_module):
    """Concatenate the docstrings of every plain `import` in the module.

    Libraries living inside the same project are not resolved here; any
    import or getdoc failure yields "".
    """
    try:
        docstring = ""
        for node in ast_module.body:
            if not isinstance(node, ast.Import):
                continue
            # Only the first alias of each import statement is considered.
            module_name = node.names[0].name
            docstring += inspect.getdoc(getModule(module_name, None))
        return docstring
    except Exception:
        return ""
def __get_import_from_docstring(ast_module):
    """Concatenate docstrings of every `from ... import ...` in the module.

    Libraries living inside the same project are not resolved here; any
    failure yields "".
    """
    try:
        docstring = ""
        for node in ast_module.body:
            if not isinstance(node, ast.ImportFrom):
                continue
            # Only the first imported name is considered, and it is looked
            # up as a top-level module name.
            doc = inspect.getdoc(getModule(node.names[0].name, None))
            if doc is not None:
                docstring += doc
        return docstring
    except Exception:
        return ""
def __get_function_docstring(ast_module):
    """Concatenate the docstrings of all top-level functions in the module."""
    try:
        docs = []
        for node in ast_module.body:
            if isinstance(node, ast.FunctionDef):
                doc = ast.get_docstring(node)
                if doc is not None:
                    docs.append(doc)
        return "".join(docs)
    except Exception:
        return ""
def __get_class_docstring(ast_module):
    """Concatenate the docstrings of all top-level classes in the module,
    including the docstrings of each class's own methods."""
    try:
        docstring = ""
        for node in ast_module.body:
            if not isinstance(node, ast.ClassDef):
                continue
            doc = ast.get_docstring(node)
            if doc is not None:
                docstring += doc
            # pull in the docstrings of the class's methods as well
            docstring += __get_class_function_docstring(node.body)
        return docstring
    except Exception:
        return ""
def __get_class_function_docstring(function_definitions):
    """Concatenate docstrings of the given class-body function definitions."""
    # TODO: integrate this with __get_function_docstring
    try:
        docs = [ast.get_docstring(node)
                for node in function_definitions
                if isinstance(node, ast.FunctionDef)]
        return "".join(doc for doc in docs if doc is not None)
    except Exception:
        return ""
def get_docstring(((repo_name, file_path), file_lines)):
    # Aggregate every docstring discoverable for one file: repo-level,
    # file-level, imported libraries, top-level functions and classes.
    # NOTE: Python 2-only tuple-parameter signature.
    # returns [((repo_name, file_path), file_docstrings)]
    docstring = ""
    docstring = __get_repo_docstring(repo_name)
    docstring += __get_file_docstring(file_path)
    try:
        # get ast's module from file's lines
        ast_module = ast.parse(file_lines)
    except Exception:
        # unparsable source: keep whatever docstrings were gathered so far
        pass
    else:
        docstring += __get_import_docstring(ast_module)
        docstring += __get_import_from_docstring(ast_module)
        docstring += __get_function_docstring(ast_module)
        docstring += __get_class_docstring(ast_module)
    return ((repo_name, file_path), docstring)
# =============================================================================
# Py2VecModel
# takes in the dataframe of the JSON file and the setting of Word2Vec
# =============================================================================
class Py2VecModel(object):
    """Base class configuring a Spark MLlib Word2Vec model over a git
    dataframe. Subclasses must implement get_model()/get_model_dict().
    """
    def __init__(self, gitdf, word2vec_setting=[20, 41, 0.025, 50]):
        # gitdf is a dataframe of your data's JSON file.
        # word2vec_setting is [min_count, seed, learning_rate, vector_size];
        # the shared default list is never mutated, so it is safe here.
        self.gitdf = gitdf
        self.word2vec = self.__get_word2vec(word2vec_setting)
    def __get_word2vec(self, word2vec_setting):
        """Build and configure a Word2Vec instance from the settings list."""
        min_count, seed, learning_rate, vector_size = word2vec_setting
        word2vec = Word2Vec()
        # Word2Vec's default min count is 100; our default min count is 20.
        word2vec.setMinCount(min_count)
        word2vec.setSeed(seed)
        # Word2Vec's default learning rate is 0.025; ours is also 0.025.
        word2vec.setLearningRate(learning_rate)
        # Word2Vec's default vector size is 100; our default vector size is 50.
        word2vec.setVectorSize(vector_size)
        return word2vec
    def get_model(self):
        # BUG FIX: the original `raise NotImplemented` raised a TypeError,
        # because NotImplemented is a sentinel value, not an exception.
        # NotImplementedError is the correct abstract-method signal.
        raise NotImplementedError
    def get_model_dict(self):
        raise NotImplementedError
# =============================================================================
# Py2VecDocstringModel
# You want to pass in get_model_dict() from this object to git_vectorize()
# as a support file.
#
# How to use it:
# 1. py2vecDocstringModel = Py2VecDocstringModel(gitdf)
# or
# py2vecDocstringModel = Py2VecDocstringModel(gitdf, [20, 41, 0.025, 50])
# 2. model_dict = py2vecDocstringModel.get_model_dict()
# 3. vectorizer = git_vectorize(gitdf, None, "py2vec", sc, model=model_dict)
# 4. content_vector = vectorizer.get_content_vector()
#
# =============================================================================
class Py2VecDocstringModel(Py2VecModel):
    # Trains Word2Vec on the words of every file's aggregated docstrings.
    # NOTE: relies on Python 2-only syntax (tuple unpacking in lambda
    # parameters) and dict.iteritems(); this class cannot run on Python 3.
    def get_model(self):
        # Fit the configured Word2Vec model on per-file word lists.
        wordstrings = self.__get_each_word_in_docstrings()
        return self.word2vec.fit(wordstrings)
    def get_model_dict(self):
        # Return {word: numpy vector} for every vocabulary entry.
        model = self.get_model()
        return {k:np.array(list(v)) for k,v in dict(model.getVectors()).iteritems()}
    def __get_code_lines(self):
        """
        extract all lines in files
        output: code_lines == [((reponame, filename), (line_num, line))]
        """
        # Each dataframe row is a git-blame style record; keep only the
        # location and line content, keyed by (repo, file).
        code_lines = self.gitdf.map(
            lambda (
                author,
                author_mail,
                author_time,
                author_timezone,
                comment,
                commit_id,
                committer,
                committer_mail,
                committer_time,
                committer_timezone,
                filename,
                line,
                line_num,
                reponame,
            ):
            ((reponame, filename), (line_num, line))
        ).cache()
        return code_lines
    def __get_file_lines(self):
        """
        append each file's code lines
        output: file_lines == [((reponame, filename), filelines)]
        """
        # code_lines == [((reponame, filename), (line_num, line))]
        code_lines = self.__get_code_lines()
        # file_lines == [((reponame, filename), [(line_num_1, line_1), ..., (line_num_n, line_n)])]
        file_lines = code_lines.mapValues(lambda val:[val]).reduceByKey(lambda a, b: a + b)
        # sort each file's code lines by line number and extract only the lines from a file
        def sortByLineNumberAndExtractLines(listOfCodeLines):
            sortedListOfCodeLines = sorted(listOfCodeLines, key=lambda (line_num, line): line_num)
            fileLines = ""
            for lineNum, line in sortedListOfCodeLines:
                fileLines += line + "\n"
            return fileLines
        # file_lines == [((reponame, filename), filelines)]
        file_lines = file_lines.mapValues(lambda listOfCodeLines: sortByLineNumberAndExtractLines(listOfCodeLines))
        return file_lines
    def __get_file_docstrings(self):
        """
        get docstring of each file
        output: file_docstrings == [((reponame, filepath), docstring)]
        """
        # file_lines == [((reponame, filename), filelines)]
        file_lines = self.__get_file_lines()
        file_docstrings = file_lines.map(
            lambda ((repo_name, file_path), file_lines): get_docstring(((repo_name, file_path), file_lines))
        )
        return file_docstrings
    def __get_docstrings(self):
        """
        get specifically just the docstring of all files
        output: docstrings == [docstring]
        """
        file_docstrings = self.__get_file_docstrings()
        docstrings = file_docstrings.map(lambda ((repo_name, file_path), docstring): docstring)
        return docstrings
    def __get_each_word_in_docstrings(self):
        """
        get each word from the docstring of all files
        output: wordstrings == [[word1, word2, ..., wordn]]
        """
        docstrings = self.__get_docstrings()
        # Replace every non-word character with a space so punctuation
        # does not glue adjacent words together before splitting.
        wordstrings = docstrings.map(lambda docstring: re.sub("[^\w]", " ", docstring).split())
        return wordstrings
| |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_sflow
version_added: "2.4"
short_description: Manages sFlow configuration on HUAWEI CloudEngine switches.
description:
- Configure Sampled Flow (sFlow) to monitor traffic on an interface in real time,
detect abnormal traffic, and locate the source of attack traffic,
ensuring stable running of the network.
author: QijunPan (@QijunPan)
options:
agent_ip:
description:
- Specifies the IPv4/IPv6 address of a sFlow agent.
source_ip:
description:
- Specifies the source IPv4/IPv6 address of sFlow packets.
collector_id:
description:
- Specifies the ID of a sFlow collector. This ID is used when you specify
the collector in subsequent sFlow configuration.
choices: ['1', '2']
collector_ip:
description:
- Specifies the IPv4/IPv6 address of the sFlow collector.
collector_ip_vpn:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
collector_datagram_size:
description:
- Specifies the maximum length of sFlow packets sent from an sFlow agent to an sFlow collector.
The value is an integer, in bytes. It ranges from 1024 to 8100. The default value is 1400.
collector_udp_port:
description:
- Specifies the UDP destination port number of sFlow packets.
The value is an integer that ranges from 1 to 65535. The default value is 6343.
collector_meth:
description:
- Configures the device to send sFlow packets through service interfaces,
enhancing the sFlow packet forwarding capability.
The enhanced parameter is optional. No matter whether you configure the enhanced mode,
the switch determines to send sFlow packets through service cards or management port
based on the routing information on the collector.
When the value is meth, the device forwards sFlow packets at the control plane.
When the value is enhanced, the device forwards sFlow packets at the forwarding plane to
enhance the sFlow packet forwarding capacity.
choices: ['meth', 'enhanced']
collector_description:
description:
- Specifies the description of a sFlow collector.
The value is a string of 1 to 255 case-sensitive characters without spaces.
sflow_interface:
description:
- Full name of interface for Flow Sampling or Counter.
It must be a physical interface, Eth-Trunk, or Layer 2 subinterface.
sample_collector:
description:
- Indicates the ID list of the collector.
sample_rate:
description:
- Specifies the flow sampling rate in the format 1/rate.
The value is an integer and ranges from 1 to 4294967295. The default value is 8192.
sample_length:
description:
- Specifies the maximum length of sampled packets.
The value is an integer and ranges from 18 to 512, in bytes. The default value is 128.
sample_direction:
description:
- Enables flow sampling in the inbound or outbound direction.
choices: ['inbound', 'outbound', 'both']
counter_collector:
description:
- Indicates the ID list of the counter collector.
counter_interval:
description:
- Indicates the counter sampling interval.
The value is an integer that ranges from 10 to 4294967295, in seconds. The default value is 20.
export_route:
description:
- Configures the sFlow packets sent by the switch not to carry routing information.
choices: ['enable', 'disable']
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
---
- name: sflow module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring sFlow Agent
ce_sflow:
agent_ip: 6.6.6.6
provider: '{{ cli }}'
- name: Configuring sFlow Collector
ce_sflow:
collector_id: 1
collector_ip: 7.7.7.7
collector_ip_vpn: vpn1
collector_description: Collector1
provider: '{{ cli }}'
- name: Configure flow sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
sample_collector: 1
sample_direction: inbound
provider: '{{ cli }}'
- name: Configure counter sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
counter_collector: 1
counter_interval: 1000
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"agent_ip": "6.6.6.6", "state": "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"agent": {}}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"agent": {"family": "ipv4", "ipv4Addr": "1.2.3.4", "ipv6Addr": null}}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["sflow agent ip 6.6.6.6"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr, to_string
CE_NC_GET_SFLOW = """
<filter type="subtree">
<sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<sources>
<source>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</source>
</sources>
<agents>
<agent>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</agent>
</agents>
<collectors>
<collector>
<collectorID></collectorID>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<vrfName></vrfName>
<datagramSize></datagramSize>
<port></port>
<description></description>
<meth></meth>
</collector>
</collectors>
<samplings>
<sampling>
<ifName>%s</ifName>
<collectorID></collectorID>
<direction></direction>
<length></length>
<rate></rate>
</sampling>
</samplings>
<counters>
<counter>
<ifName>%s</ifName>
<collectorID></collectorID>
<interval></interval>
</counter>
</counters>
<exports>
<export>
<ExportRoute></ExportRoute>
</export>
</exports>
</sflow>
</filter>
"""
def is_config_exist(cmp_cfg, test_cfg):
    """Return True when test_cfg appears verbatim inside cmp_cfg."""
    if not cmp_cfg or not test_cfg:
        # nothing to search, or nothing to search for
        return False
    return test_cfg in cmp_cfg
def is_valid_ip_vpn(vpname):
    """Validate a VPN instance name: non-empty, not the reserved
    '_public_' name, and 1-31 characters long."""
    if not vpname or vpname == "_public_":
        return False
    return 1 <= len(vpname) <= 31
def get_ip_version(address):
    """Cheaply classify an address string as 'ipv4', 'ipv6' or None.

    This is a heuristic based on separator counts, not a full parser.
    """
    if not address:
        return None
    colons = address.count(':')
    if 2 <= colons <= 7:
        return "ipv6"
    if address.count('.') == 3:
        return "ipv4"
    return None
def get_interface_type(interface):
    """Map a full interface name to its lowercase type token
    (e.g. '10GE2/0/2' -> '10ge'); returns None when unrecognized."""
    if interface is None:
        return None
    # Ordered prefix table; first match wins, mirroring the original
    # if/elif chain (so '10GE...' is only reached after 'GE' fails).
    prefix_to_type = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    upper_name = interface.upper()
    for prefix, iftype in prefix_to_type:
        if upper_name.startswith(prefix):
            return iftype
    return None
class Sflow(object):
"""Manages sFlow"""
    def __init__(self, argument_spec):
        """Build the AnsibleModule, cache all input params and initialize
        the result/state bookkeeping attributes."""
        self.spec = argument_spec
        self.module = None
        self.__init_module__()
        # module input info
        self.agent_ip = self.module.params['agent_ip']
        self.agent_version = None      # placeholder; not a module param
        self.source_ip = self.module.params['source_ip']
        self.source_version = None     # placeholder; not a module param
        self.export_route = self.module.params['export_route']
        self.collector_id = self.module.params['collector_id']
        self.collector_ip = self.module.params['collector_ip']
        self.collector_version = None  # placeholder; not a module param
        self.collector_ip_vpn = self.module.params['collector_ip_vpn']
        self.collector_datagram_size = self.module.params['collector_datagram_size']
        self.collector_udp_port = self.module.params['collector_udp_port']
        self.collector_meth = self.module.params['collector_meth']
        self.collector_description = self.module.params['collector_description']
        self.sflow_interface = self.module.params['sflow_interface']
        # collector ID lists default to empty lists when unset
        self.sample_collector = self.module.params['sample_collector'] or list()
        self.sample_rate = self.module.params['sample_rate']
        self.sample_length = self.module.params['sample_length']
        self.sample_direction = self.module.params['sample_direction']
        self.counter_collector = self.module.params['counter_collector'] or list()
        self.counter_interval = self.module.params['counter_interval']
        self.state = self.module.params['state']
        # state
        self.config = ""  # current config
        self.sflow_dict = dict()
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
def __init_module__(self):
"""init module"""
required_together = [("collector_id", "collector_ip")]
self.module = AnsibleModule(
argument_spec=self.spec, required_together=required_together, supports_check_mode=True)
def check_response(self, con_obj, xml_name):
"""Check if response message is already succeed"""
xml_str = con_obj.xml
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def netconf_set_config(self, xml_str, xml_name):
"""netconf set config"""
rcv_xml = set_nc_config(self.module, xml_str)
if "<ok/>" not in rcv_xml:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def netconf_get_config(self, xml_str):
"""netconf set config"""
if xml_str is not None:
return get_nc_config(self.module, xml_str)
    def get_sflow_dict(self):
        """Read the device's current sFlow configuration over netconf.

        Returns a dict with keys ``source``/``collector`` (lists of
        attribute dicts) and ``agent``/``sampling``/``counter``/``export``
        (attribute dicts); all are empty when the device has no sFlow data.
        """
        sflow_dict = dict(source=list(), agent=dict(), collector=list(),
                          sampling=dict(), counter=dict(), export=dict())
        conf_str = CE_NC_GET_SFLOW % (
            self.sflow_interface, self.sflow_interface)
        if not self.collector_meth:
            # Drop the <meth> filter node when the parameter was not given.
            conf_str = conf_str.replace("<meth></meth>", "")
        rcv_xml = self.netconf_get_config(conf_str)
        if "<data/>" in rcv_xml:
            # Empty reply: no sFlow configuration present on the device.
            return sflow_dict
        # Strip line breaks and namespace declarations so the plain
        # (un-namespaced) XPath expressions below match.
        xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)
        # get source info
        srcs = root.findall("sflow/sources/source")
        if srcs:
            for src in srcs:
                attrs = dict()
                for attr in src:
                    if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
                        attrs[attr.tag] = attr.text
                sflow_dict["source"].append(attrs)
        # get agent info
        agent = root.find("sflow/agents/agent")
        if agent:
            for attr in agent:
                if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
                    sflow_dict["agent"][attr.tag] = attr.text
        # get collector info
        collectors = root.findall("sflow/collectors/collector")
        if collectors:
            for collector in collectors:
                attrs = dict()
                for attr in collector:
                    if attr.tag in ["collectorID", "family", "ipv4Addr", "ipv6Addr",
                                    "vrfName", "datagramSize", "port", "description", "meth"]:
                        attrs[attr.tag] = attr.text
                sflow_dict["collector"].append(attrs)
        # get sampling info
        sample = root.find("sflow/samplings/sampling")
        if sample:
            for attr in sample:
                if attr.tag in ["ifName", "collectorID", "direction", "length", "rate"]:
                    sflow_dict["sampling"][attr.tag] = attr.text
        # get counter info
        counter = root.find("sflow/counters/counter")
        if counter:
            for attr in counter:
                if attr.tag in ["ifName", "collectorID", "interval"]:
                    sflow_dict["counter"][attr.tag] = attr.text
        # get export info
        export = root.find("sflow/exports/export")
        if export:
            for attr in export:
                if attr.tag == "ExportRoute":
                    sflow_dict["export"][attr.tag] = attr.text
        return sflow_dict
    def config_agent(self):
        """Build XML to set or remove the sFlow agent address.

        Returns an <agents> XML fragment, or '' when no change is needed.
        The matching CLI command is appended to ``updates_cmd``.
        """
        xml_str = ''
        if not self.agent_ip:
            return xml_str
        self.agent_version = get_ip_version(self.agent_ip)
        if not self.agent_version:
            self.module.fail_json(msg="Error: agent_ip is invalid.")
        if self.state == "present":
            # Merge only when the requested address differs from both the
            # configured IPv4 and IPv6 agent addresses.
            if self.agent_ip != self.sflow_dict["agent"].get("ipv4Addr") \
                    and self.agent_ip != self.sflow_dict["agent"].get("ipv6Addr"):
                xml_str += '<agents><agent operation="merge">'
                xml_str += '<family>%s</family>' % self.agent_version
                if self.agent_version == "ipv4":
                    xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.agent_ip
                    self.updates_cmd.append("sflow agent ip %s" % self.agent_ip)
                else:
                    xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.agent_ip
                    self.updates_cmd.append("sflow agent ipv6 %s" % self.agent_ip)
                xml_str += '</agent></agents>'
        else:
            # Delete only when the given address matches what is configured.
            flag = False
            if self.agent_ip == self.sflow_dict["agent"].get("ipv4Addr"):
                self.updates_cmd.append("undo sflow agent ip %s" % self.agent_ip)
                flag = True
            elif self.agent_ip == self.sflow_dict["agent"].get("ipv6Addr"):
                self.updates_cmd.append("undo sflow agent ipv6 %s" % self.agent_ip)
                flag = True
            if flag is True:
                xml_str += '<agents><agent operation="delete"></agent></agents>'
        return xml_str
    def config_source(self):
        """Build XML to set or remove the source IP address for sFlow packets.

        Returns a <sources> XML fragment, or '' when no change is needed.
        The matching CLI command is appended to ``updates_cmd``.
        """
        xml_str = ''
        if not self.source_ip:
            return xml_str
        self.source_version = get_ip_version(self.source_ip)
        if not self.source_version:
            self.module.fail_json(msg="Error: source_ip is invalid.")
        # Find the existing source entry for the same address family.
        src_dict = dict()
        for src in self.sflow_dict["source"]:
            if src.get("family") == self.source_version:
                src_dict = src
                break
        if self.state == "present":
            if self.source_ip != src_dict.get("ipv4Addr") \
                    and self.source_ip != src_dict.get("ipv6Addr"):
                xml_str += '<sources><source operation="merge">'
                xml_str += '<family>%s</family>' % self.source_version
                if self.source_version == "ipv4":
                    xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.source_ip
                    self.updates_cmd.append("sflow source ip %s" % self.source_ip)
                else:
                    xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.source_ip
                    self.updates_cmd.append(
                        "sflow source ipv6 %s" % self.source_ip)
                # NOTE(review): the space in '</source >' looks accidental but
                # is preserved as sent -- confirm the device tolerates it.
                xml_str += '</source ></sources>'
        else:
            if self.source_ip == src_dict.get("ipv4Addr"):
                xml_str += '<sources><source operation="delete"><family>ipv4</family></source ></sources>'
                self.updates_cmd.append("undo sflow source ip %s" % self.source_ip)
            elif self.source_ip == src_dict.get("ipv6Addr"):
                xml_str += '<sources><source operation="delete"><family>ipv6</family></source ></sources>'
                self.updates_cmd.append("undo sflow source ipv6 %s" % self.source_ip)
        return xml_str
    def config_collector(self):
        """Create, modify, or delete an sFlow collector.

        Returns a <collectors> XML fragment, or '' when the requested state
        already matches the device. The matching CLI command is appended to
        ``updates_cmd``.
        """
        xml_str = ''
        if not self.collector_id:
            return xml_str
        if self.state == "present" and not self.collector_ip:
            return xml_str
        if self.collector_ip:
            self.collector_version = get_ip_version(self.collector_ip)
            if not self.collector_version:
                self.module.fail_json(msg="Error: collector_ip is invalid.")
        # get collector dict
        exist_dict = dict()
        for collector in self.sflow_dict["collector"]:
            if collector.get("collectorID") == self.collector_id:
                exist_dict = collector
                break
        change = False
        if self.state == "present":
            # Any mismatch between requested and configured attributes means
            # a merge is needed. Unset options are compared against the
            # device defaults ("_public_" VPN, port 6343, size 1400, meth).
            if not exist_dict:
                change = True
            elif self.collector_version != exist_dict.get("family"):
                change = True
            elif self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
                change = True
            elif self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
                change = True
            elif self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
                change = True
            elif not self.collector_ip_vpn and exist_dict.get("vrfName") != "_public_":
                change = True
            elif self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
                change = True
            elif not self.collector_udp_port and exist_dict.get("port") != "6343":
                change = True
            elif self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
                change = True
            elif not self.collector_datagram_size and exist_dict.get("datagramSize") != "1400":
                change = True
            elif self.collector_meth and self.collector_meth != exist_dict.get("meth"):
                change = True
            elif not self.collector_meth and exist_dict.get("meth") and exist_dict.get("meth") != "meth":
                change = True
            elif self.collector_description and self.collector_description != exist_dict.get("description"):
                change = True
            elif not self.collector_description and exist_dict.get("description"):
                change = True
            else:
                pass
        else:  # absent
            # Delete only when every supplied attribute matches the
            # configured collector; otherwise leave the device untouched.
            # collector not exist
            if not exist_dict:
                return xml_str
            if self.collector_version and self.collector_version != exist_dict.get("family"):
                return xml_str
            if self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
                return xml_str
            if self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
                return xml_str
            if self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
                return xml_str
            if self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
                return xml_str
            if self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
                return xml_str
            if self.collector_meth and self.collector_meth != exist_dict.get("meth"):
                return xml_str
            if self.collector_description and self.collector_description != exist_dict.get("description"):
                return xml_str
            change = True
        if not change:
            return xml_str
        # update or delete
        if self.state == "absent":
            xml_str += '<collectors><collector operation="delete"><collectorID>%s</collectorID>' % self.collector_id
            self.updates_cmd.append("undo sflow collector %s" % self.collector_id)
        else:
            xml_str += '<collectors><collector operation="merge"><collectorID>%s</collectorID>' % self.collector_id
            cmd = "sflow collector %s" % self.collector_id
            xml_str += '<family>%s</family>' % self.collector_version
            if self.collector_version == "ipv4":
                cmd += " ip %s" % self.collector_ip
                xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.collector_ip
            else:
                cmd += " ipv6 %s" % self.collector_ip
                xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.collector_ip
            if self.collector_ip_vpn:
                cmd += " vpn-instance %s" % self.collector_ip_vpn
                xml_str += '<vrfName>%s</vrfName>' % self.collector_ip_vpn
            if self.collector_datagram_size:
                cmd += " length %s" % self.collector_datagram_size
                xml_str += '<datagramSize>%s</datagramSize>' % self.collector_datagram_size
            if self.collector_udp_port:
                cmd += " udp-port %s" % self.collector_udp_port
                xml_str += '<port>%s</port>' % self.collector_udp_port
            if self.collector_description:
                cmd += " description %s" % self.collector_description
                xml_str += '<description>%s</description>' % self.collector_description
            else:
                # An empty node clears any existing description on merge.
                xml_str += '<description></description>'
            if self.collector_meth:
                if self.collector_meth == "enhanced":
                    cmd += " enhanced"
                xml_str += '<meth>%s</meth>' % self.collector_meth
            self.updates_cmd.append(cmd)
        xml_str += "</collector></collectors>"
        return xml_str
    def config_sampling(self):
        """Configure sFlow flow sampling on ``sflow_interface``.

        Returns a <samplings> XML fragment, or "" when nothing changes.
        CLI equivalents ("interface ..." then the sampling commands) are
        appended to ``updates_cmd``; the interface line is popped again if
        no sampling change was actually generated.
        """
        xml_str = ''
        if not self.sflow_interface:
            return xml_str
        if not self.sflow_dict["sampling"] and self.state == "absent":
            return xml_str
        self.updates_cmd.append("interface %s" % self.sflow_interface)
        if self.state == "present":
            xml_str += '<samplings><sampling operation="merge"><ifName>%s</ifName>' % self.sflow_interface
        else:
            xml_str += '<samplings><sampling operation="delete"><ifName>%s</ifName>' % self.sflow_interface
        # sample_collector
        if self.sample_collector:
            # "invalid" is the device's marker for no collector bound.
            if self.sflow_dict["sampling"].get("collectorID") \
                    and self.sflow_dict["sampling"].get("collectorID") != "invalid":
                existing = self.sflow_dict["sampling"].get("collectorID").split(',')
            else:
                existing = list()
            if self.state == "present":
                # Bind any requested collectors not already bound.
                diff = list(set(self.sample_collector) - set(existing))
                if diff:
                    self.updates_cmd.append(
                        "sflow sampling collector %s" % ' '.join(diff))
                    new_set = list(self.sample_collector + existing)
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
            else:
                # Unbind only collectors that are both requested and bound.
                same = list(set(self.sample_collector) & set(existing))
                if same:
                    self.updates_cmd.append(
                        "undo sflow sampling collector %s" % ' '.join(same))
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
        # sample_rate
        if self.sample_rate:
            exist = bool(self.sample_rate == self.sflow_dict["sampling"].get("rate"))
            if self.state == "present" and not exist:
                self.updates_cmd.append(
                    "sflow sampling rate %s" % self.sample_rate)
                xml_str += '<rate>%s</rate>' % self.sample_rate
            elif self.state == "absent" and exist:
                self.updates_cmd.append(
                    "undo sflow sampling rate %s" % self.sample_rate)
                xml_str += '<rate>%s</rate>' % self.sample_rate
        # sample_length
        if self.sample_length:
            exist = bool(self.sample_length == self.sflow_dict["sampling"].get("length"))
            if self.state == "present" and not exist:
                self.updates_cmd.append(
                    "sflow sampling length %s" % self.sample_length)
                xml_str += '<length>%s</length>' % self.sample_length
            elif self.state == "absent" and exist:
                self.updates_cmd.append(
                    "undo sflow sampling length %s" % self.sample_length)
                xml_str += '<length>%s</length>' % self.sample_length
        # sample_direction
        if self.sample_direction:
            # Normalize "both" to the two concrete directions for set math.
            direction = list()
            if self.sample_direction == "both":
                direction = ["inbound", "outbound"]
            else:
                direction.append(self.sample_direction)
            existing = list()
            if self.sflow_dict["sampling"].get("direction"):
                if self.sflow_dict["sampling"].get("direction") == "both":
                    existing = ["inbound", "outbound"]
                else:
                    existing.append(
                        self.sflow_dict["sampling"].get("direction"))
            if self.state == "present":
                diff = list(set(direction) - set(existing))
                if diff:
                    new_set = list(set(direction + existing))
                    self.updates_cmd.append(
                        "sflow sampling %s" % ' '.join(diff))
                    if len(new_set) > 1:
                        new_dir = "both"
                    else:
                        new_dir = new_set[0]
                    xml_str += '<direction>%s</direction>' % new_dir
            else:
                same = list(set(existing) & set(direction))
                if same:
                    self.updates_cmd.append("undo sflow sampling %s" % ' '.join(same))
                    if len(same) > 1:
                        del_dir = "both"
                    else:
                        del_dir = same[0]
                    xml_str += '<direction>%s</direction>' % del_dir
        if xml_str.endswith("</ifName>"):
            # Only the interface header was emitted: no real change, so drop
            # the "interface ..." CLI line added above.
            self.updates_cmd.pop()
            return ""
        xml_str += '</sampling></samplings>'
        return xml_str
    def config_counter(self):
        """Configure sFlow counter sampling on ``sflow_interface``.

        Returns a <counters> XML fragment, or "" when nothing changes.
        Mirrors :meth:`config_sampling`, including popping the
        "interface ..." CLI line when no counter change was generated.
        """
        xml_str = ''
        if not self.sflow_interface:
            return xml_str
        if not self.sflow_dict["counter"] and self.state == "absent":
            return xml_str
        self.updates_cmd.append("interface %s" % self.sflow_interface)
        if self.state == "present":
            xml_str += '<counters><counter operation="merge"><ifName>%s</ifName>' % self.sflow_interface
        else:
            xml_str += '<counters><counter operation="delete"><ifName>%s</ifName>' % self.sflow_interface
        # counter_collector
        if self.counter_collector:
            # "invalid" is the device's marker for no collector bound.
            if self.sflow_dict["counter"].get("collectorID") \
                    and self.sflow_dict["counter"].get("collectorID") != "invalid":
                existing = self.sflow_dict["counter"].get("collectorID").split(',')
            else:
                existing = list()
            if self.state == "present":
                diff = list(set(self.counter_collector) - set(existing))
                if diff:
                    self.updates_cmd.append("sflow counter collector %s" % ' '.join(diff))
                    new_set = list(self.counter_collector + existing)
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
            else:
                same = list(set(self.counter_collector) & set(existing))
                if same:
                    self.updates_cmd.append(
                        "undo sflow counter collector %s" % ' '.join(same))
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
        # counter_interval
        if self.counter_interval:
            exist = bool(self.counter_interval == self.sflow_dict["counter"].get("interval"))
            if self.state == "present" and not exist:
                self.updates_cmd.append(
                    "sflow counter interval %s" % self.counter_interval)
                xml_str += '<interval>%s</interval>' % self.counter_interval
            elif self.state == "absent" and exist:
                self.updates_cmd.append(
                    "undo sflow counter interval %s" % self.counter_interval)
                xml_str += '<interval>%s</interval>' % self.counter_interval
        if xml_str.endswith("</ifName>"):
            # Only the interface header was emitted: no real change.
            self.updates_cmd.pop()
            return ""
        xml_str += '</counter></counters>'
        return xml_str
def config_export(self):
"""configure sflow export"""
xml_str = ''
if not self.export_route:
return xml_str
if self.export_route == "enable":
if self.sflow_dict["export"] and self.sflow_dict["export"].get("ExportRoute") == "disable":
xml_str = '<exports><export operation="delete"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("undo sflow export extended-route-data disable")
else: # disable
if not self.sflow_dict["export"] or self.sflow_dict["export"].get("ExportRoute") != "disable":
xml_str = '<exports><export operation="create"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("sflow export extended-route-data disable")
return xml_str
    def netconf_load_config(self, xml_str):
        """Wrap *xml_str* in a <config>/<sflow> envelope and apply it.

        No-op when *xml_str* is empty; otherwise sets ``self.changed``.
        """
        if not xml_str:
            return
        xml_cfg = """
            <config>
            <sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
            %s
            </sflow>
            </config>""" % xml_str
        self.netconf_set_config(xml_cfg, "SET_SFLOW")
        self.changed = True
def check_params(self):
"""Check all input params"""
# check agent_ip
if self.agent_ip:
self.agent_ip = self.agent_ip.upper()
if not check_ip_addr(self.agent_ip):
self.module.fail_json(msg="Error: agent_ip is invalid.")
# check source_ip
if self.source_ip:
self.source_ip = self.source_ip.upper()
if not check_ip_addr(self.source_ip):
self.module.fail_json(msg="Error: source_ip is invalid.")
# check collector
if self.collector_id:
# check collector_ip and collector_ip_vpn
if self.collector_ip:
self.collector_ip = self.collector_ip.upper()
if not check_ip_addr(self.collector_ip):
self.module.fail_json(
msg="Error: collector_ip is invalid.")
if self.collector_ip_vpn and not is_valid_ip_vpn(self.collector_ip_vpn):
self.module.fail_json(
msg="Error: collector_ip_vpn is invalid.")
# check collector_datagram_size ranges from 1024 to 8100
if self.collector_datagram_size:
if not self.collector_datagram_size.isdigit():
self.module.fail_json(
msg="Error: collector_datagram_size is not digit.")
if int(self.collector_datagram_size) < 1024 or int(self.collector_datagram_size) > 8100:
self.module.fail_json(
msg="Error: collector_datagram_size is not ranges from 1024 to 8100.")
# check collector_udp_port ranges from 1 to 65535
if self.collector_udp_port:
if not self.collector_udp_port.isdigit():
self.module.fail_json(
msg="Error: collector_udp_port is not digit.")
if int(self.collector_udp_port) < 1 or int(self.collector_udp_port) > 65535:
self.module.fail_json(
msg="Error: collector_udp_port is not ranges from 1 to 65535.")
# check collector_description 1 to 255 case-sensitive characters
if self.collector_description:
if self.collector_description.count(" "):
self.module.fail_json(
msg="Error: collector_description should without spaces.")
if len(self.collector_description) < 1 or len(self.collector_description) > 255:
self.module.fail_json(
msg="Error: collector_description is not ranges from 1 to 255.")
# check sflow_interface
if self.sflow_interface:
intf_type = get_interface_type(self.sflow_interface)
if not intf_type:
self.module.fail_json(msg="Error: intf_type is invalid.")
if intf_type not in ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'eth-trunk']:
self.module.fail_json(
msg="Error: interface %s is not support sFlow." % self.sflow_interface)
# check sample_collector
if 0 < len(self.sample_collector) < 3:
self.sample_collector = [str(i) for i in self.sample_collector]
for id in self.sample_collector:
if id not in ("1", "2"):
self.module.fail_json(
msg="Error: sample_collector is invalid.")
# check sample_rate ranges from 1 to 4294967295
if self.sample_rate:
if not self.sample_rate.isdigit():
self.module.fail_json(
msg="Error: sample_rate is not digit.")
if int(self.sample_rate) < 1 or int(self.sample_rate) > 4294967295:
self.module.fail_json(
msg="Error: sample_rate is not ranges from 1 to 4294967295.")
# check sample_length ranges from 18 to 512
if self.sample_length:
if not self.sample_length.isdigit():
self.module.fail_json(
msg="Error: sample_rate is not digit.")
if int(self.sample_length) < 18 or int(self.sample_length) > 512:
self.module.fail_json(
msg="Error: sample_length is not ranges from 18 to 512.")
# check counter_collector
if 0 < len(self.counter_collector) < 3:
self.counter_collector = [str(i) for i in self.counter_collector]
for id in self.counter_collector:
if id not in ("1", "2"):
self.module.fail_json(
msg="Error: counter_collector is invalid.")
# counter_interval ranges from 10 to 4294967295
if self.counter_interval:
if not self.counter_interval.isdigit():
self.module.fail_json(
msg="Error: counter_interval is not digit.")
if int(self.counter_interval) < 10 or int(self.counter_interval) > 4294967295:
self.module.fail_json(
msg="Error: sample_length is not ranges from 10 to 4294967295.")
def get_proposed(self):
"""get proposed info"""
# base config
if self.agent_ip:
self.proposed["agent_ip"] = self.agent_ip
if self.source_ip:
self.proposed["source_ip"] = self.source_ip
if self.export_route:
self.proposed["export_route"] = self.export_route
if self.collector_id:
self.proposed["collector_id"] = self.collector_id
if self.collector_ip:
self.proposed["collector_ip"] = self.collector_ip
self.proposed["collector_ip_vpn"] = self.collector_ip_vpn
if self.collector_datagram_size:
self.proposed[
"collector_datagram_size"] = self.collector_datagram_size
if self.collector_udp_port:
self.proposed["collector_udp_port"] = self.collector_udp_port
if self.collector_meth:
self.proposed["collector_meth"] = self.collector_meth
if self.collector_description:
self.proposed[
"collector_description"] = self.collector_description
# sample and counter config
if self.sflow_interface:
self.proposed["sflow_interface"] = self.sflow_interface
if self.sample_collector:
self.proposed["sample_collector"] = self.sample_collector
if self.sample_rate:
self.proposed["sample_rate"] = self.sample_rate
if self.sample_length:
self.proposed["sample_length"] = self.sample_length
if self.sample_direction:
self.proposed["sample_direction"] = self.sample_direction
if self.counter_collector:
self.proposed["counter_collector"] = self.counter_collector
if self.counter_interval:
self.proposed["counter_interval"] = self.counter_interval
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.sflow_dict:
return
if self.agent_ip:
self.existing["agent"] = self.sflow_dict["agent"]
if self.source_ip:
self.existing["source"] = self.sflow_dict["source"]
if self.collector_id:
self.existing["collector"] = self.sflow_dict["collector"]
if self.export_route:
self.existing["export"] = self.sflow_dict["export"]
if self.sflow_interface:
self.existing["sampling"] = self.sflow_dict["sampling"]
self.existing["counter"] = self.sflow_dict["counter"]
def get_end_state(self):
"""get end state info"""
# else:
sflow_dict = self.get_sflow_dict()
if not sflow_dict:
return
if self.agent_ip:
self.end_state["agent"] = sflow_dict["agent"]
if self.source_ip:
self.end_state["source"] = sflow_dict["source"]
if self.collector_id:
self.end_state["collector"] = sflow_dict["collector"]
if self.export_route:
self.end_state["export"] = sflow_dict["export"]
if self.sflow_interface:
self.end_state["sampling"] = sflow_dict["sampling"]
self.end_state["counter"] = sflow_dict["counter"]
    def work(self):
        """Main flow: validate, diff against the device, apply, report."""
        self.check_params()
        self.sflow_dict = self.get_sflow_dict()
        self.get_existing()
        self.get_proposed()
        # deal present or absent
        xml_str = ''
        if self.export_route:
            xml_str += self.config_export()
        if self.agent_ip:
            xml_str += self.config_agent()
        if self.source_ip:
            xml_str += self.config_source()
        if self.state == "present":
            # Create the collector before the interface sampling/counter
            # config that may reference it.
            if self.collector_id and self.collector_ip:
                xml_str += self.config_collector()
            if self.sflow_interface:
                xml_str += self.config_sampling()
                xml_str += self.config_counter()
        else:
            # On delete, remove interface references first, then the
            # collector itself.
            if self.sflow_interface:
                xml_str += self.config_sampling()
                xml_str += self.config_counter()
            if self.collector_id:
                xml_str += self.config_collector()
        if xml_str:
            self.netconf_load_config(xml_str)
            self.changed = True
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Entry point: build the argument spec and run the Sflow module."""
    base_args = dict(
        agent_ip=dict(required=False, type='str'),
        source_ip=dict(required=False, type='str'),
        export_route=dict(required=False, type='str',
                          choices=['enable', 'disable']),
    )
    collector_args = dict(
        collector_id=dict(required=False, type='str', choices=['1', '2']),
        collector_ip=dict(required=False, type='str'),
        collector_ip_vpn=dict(required=False, type='str'),
        collector_datagram_size=dict(required=False, type='str'),
        collector_udp_port=dict(required=False, type='str'),
        collector_meth=dict(required=False, type='str',
                            choices=['meth', 'enhanced']),
        collector_description=dict(required=False, type='str'),
    )
    interface_args = dict(
        sflow_interface=dict(required=False, type='str'),
        sample_collector=dict(required=False, type='list'),
        sample_rate=dict(required=False, type='str'),
        sample_length=dict(required=False, type='str'),
        sample_direction=dict(required=False, type='str',
                              choices=['inbound', 'outbound', 'both']),
        counter_collector=dict(required=False, type='list'),
        counter_interval=dict(required=False, type='str'),
        state=dict(required=False, default='present',
                   choices=['present', 'absent']),
    )
    argument_spec = {}
    argument_spec.update(base_args)
    argument_spec.update(collector_args)
    argument_spec.update(interface_args)
    argument_spec.update(ce_argument_spec)
    module = Sflow(argument_spec)
    module.work()


if __name__ == '__main__':
    main()
| |
# Copyright (c) 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Huawei Nas Driver for Huawei storage arrays."""
from xml.etree import ElementTree as ET
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from manila import exception
from manila.i18n import _
from manila.share import driver
# Maps the <Storage><Product> value from the Huawei config file to the fully
# qualified class path of the matching storage connection plugin.
HUAWEI_UNIFIED_DRIVER_REGISTRY = {
    'V3': 'manila.share.drivers.huawei.v3.connection.V3StorageConnection', }

# Driver-specific oslo.config options, registered globally below.
huawei_opts = [
    cfg.StrOpt('manila_huawei_conf_file',
               default='/etc/manila/manila_huawei_conf.xml',
               help='The configuration file for the Manila Huawei driver.')]

CONF = cfg.CONF
CONF.register_opts(huawei_opts)
LOG = log.getLogger(__name__)
class HuaweiNasDriver(driver.ShareDriver):
"""Huawei Share Driver.
Executes commands relating to Shares.
Driver version history::
1.0 - Initial version.
1.1 - Add shrink share.
Add extend share.
Add manage share.
Add share level(ro).
Add smartx capabilities.
Support multi pools in one backend.
1.2 - Add share server support.
Add ensure share.
Add QoS support.
Add create share from snapshot.
1.3 - Add manage snapshot.
Support reporting disk type of pool.
Add replication support.
"""
def __init__(self, *args, **kwargs):
"""Do initialization."""
LOG.debug("Enter into init function of Huawei Driver.")
super(HuaweiNasDriver, self).__init__((True, False), *args, **kwargs)
if not self.configuration:
raise exception.InvalidInput(reason=_(
"Huawei driver configuration missing."))
self.configuration.append_config_values(huawei_opts)
kwargs.pop('configuration')
self.plugin = importutils.import_object(self.get_backend_driver(),
self.configuration,
**kwargs)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self.plugin.check_conf_file()
self.plugin.check_service()
def get_backend_driver(self):
filename = self.configuration.manila_huawei_conf_file
try:
tree = ET.parse(filename)
root = tree.getroot()
except Exception as err:
message = (_('Read Huawei config file(%(filename)s)'
' for Manila error: %(err)s')
% {'filename': filename,
'err': err})
LOG.error(message)
raise exception.InvalidInput(reason=message)
product = root.findtext('Storage/Product')
backend_driver = HUAWEI_UNIFIED_DRIVER_REGISTRY.get(product)
if backend_driver is None:
raise exception.InvalidInput(
reason=_('Product %s is not supported. Product '
'must be set to V3.') % product)
return backend_driver
def do_setup(self, context):
"""Any initialization the huawei nas driver does while starting."""
LOG.debug("Do setup the plugin.")
self.plugin.connect()
def create_share(self, context, share, share_server=None):
"""Create a share."""
LOG.debug("Create a share.")
location = self.plugin.create_share(share, share_server)
return location
def extend_share(self, share, new_size, share_server=None):
LOG.debug("Extend a share.")
self.plugin.extend_share(share, new_size, share_server)
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Create a share from snapshot."""
LOG.debug("Create a share from snapshot %s.", snapshot['snapshot_id'])
location = self.plugin.create_share_from_snapshot(share, snapshot)
return location
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
LOG.debug("Shrink a share.")
self.plugin.shrink_share(share, new_size, share_server)
def delete_share(self, context, share, share_server=None):
"""Delete a share."""
LOG.debug("Delete a share.")
self.plugin.delete_share(share, share_server)
def create_snapshot(self, context, snapshot, share_server=None):
"""Create a snapshot."""
LOG.debug("Create a snapshot.")
snapshot_name = self.plugin.create_snapshot(snapshot, share_server)
return {'provider_location': snapshot_name}
def delete_snapshot(self, context, snapshot, share_server=None):
"""Delete a snapshot."""
LOG.debug("Delete a snapshot.")
self.plugin.delete_snapshot(snapshot, share_server)
def ensure_share(self, context, share, share_server=None):
"""Ensure that share is exported."""
LOG.debug("Ensure share.")
location = self.plugin.ensure_share(share, share_server)
return location
def allow_access(self, context, share, access, share_server=None):
"""Allow access to the share."""
LOG.debug("Allow access.")
self.plugin.allow_access(share, access, share_server)
def deny_access(self, context, share, access, share_server=None):
"""Deny access to the share."""
LOG.debug("Deny access.")
self.plugin.deny_access(share, access, share_server)
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules list."""
LOG.debug("Update access.")
self.plugin.update_access(share, access_rules,
add_rules, delete_rules, share_server)
def get_pool(self, share):
"""Return pool name where the share resides on."""
LOG.debug("Get pool.")
return self.plugin.get_pool(share)
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
LOG.debug("Get network allocations number.")
return self.plugin.get_network_allocations_number()
def manage_existing(self, share, driver_options):
"""Manage existing share."""
LOG.debug("Manage existing share to manila.")
share_size, location = self.plugin.manage_existing(share,
driver_options)
return {'size': share_size, 'export_locations': location}
def manage_existing_snapshot(self, snapshot, driver_options):
"""Manage existing snapshot."""
LOG.debug("Manage existing snapshot to manila.")
snapshot_name = self.plugin.manage_existing_snapshot(snapshot,
driver_options)
return {'provider_location': snapshot_name}
def _update_share_stats(self):
"""Retrieve status info from share group."""
backend_name = self.configuration.safe_get('share_backend_name')
data = dict(
share_backend_name=backend_name or 'HUAWEI_NAS_Driver',
vendor_name='Huawei',
driver_version='1.3',
storage_protocol='NFS_CIFS',
qos=True,
total_capacity_gb=0.0,
free_capacity_gb=0.0,
snapshot_support=self.plugin.snapshot_support,
create_share_from_snapshot_support=self.plugin.snapshot_support,
revert_to_snapshot_support=self.plugin.snapshot_support,
)
# huawei array doesn't support snapshot replication, so driver can't
# create replicated snapshot, this's not fit the requirement of
# replication feature.
# to avoid this problem, we specify huawei driver can't support
# snapshot and replication both, as a workaround.
if not data['snapshot_support'] and self.plugin.replication_support:
data['replication_type'] = 'dr'
self.plugin.update_share_stats(data)
super(HuaweiNasDriver, self)._update_share_stats(data)
def _setup_server(self, network_info, metadata=None):
"""Set up share server with given network parameters."""
# NOTE(felipe_rodrigues): keep legacy network_info support as a dict.
network_info = network_info[0]
return self.plugin.setup_server(network_info, metadata)
def _teardown_server(self, server_details, security_services=None):
"""Teardown share server."""
return self.plugin.teardown_server(server_details, security_services)
def create_replica(self, context, replica_list, new_replica,
access_rules, replica_snapshots, share_server=None):
"""Replicate the active replica to a new replica on this backend."""
return self.plugin.create_replica(context,
replica_list,
new_replica,
access_rules,
replica_snapshots,
share_server)
def update_replica_state(self, context, replica_list, replica,
access_rules, replica_snapshots,
share_server=None):
"""Update the replica_state of a replica."""
return self.plugin.update_replica_state(context,
replica_list,
replica,
access_rules,
replica_snapshots,
share_server)
def promote_replica(self, context, replica_list, replica, access_rules,
share_server=None):
"""Promote a replica to 'active' replica state.."""
return self.plugin.promote_replica(context,
replica_list,
replica,
access_rules,
share_server)
def delete_replica(self, context, replica_list, replica_snapshots,
                   replica, share_server=None):
    """Delete *replica* from this backend (no return value)."""
    self.plugin.delete_replica(
        context, replica_list, replica_snapshots, replica, share_server)
def revert_to_snapshot(self, context, snapshot, share_access_rules,
                       snapshot_access_rules, share_server=None):
    """Revert the share to the state captured in *snapshot*."""
    self.plugin.revert_to_snapshot(
        context, snapshot, share_access_rules,
        snapshot_access_rules, share_server)
| |
"""
sqldiff.py - Prints the (approximated) difference between models and database
TODO:
- better support for relations
- better support for constraints (mainly postgresql?)
- support for table spaces with postgresql
- when a table is not managed (meta.managed==False) then only do a one-way
sqldiff ? show differences from db->table but not the other way around since
it's not managed.
KNOWN ISSUES:
- MySQL has by far the most problems with introspection. Please be
careful when using MySQL with sqldiff.
- Booleans are reported back as Integers, so there's no way to know if
there was a real change.
- Varchar sizes are reported back without unicode support so their size
may change in comparison to the real length of the varchar.
- Some of the 'fixes' to counter these problems might create false
positives or false negatives.
"""
from django.core.management.base import BaseCommand
from django.core.management import sql as _sql
from django.core.management import CommandError
from django.core.management.color import no_style
from django.db import transaction, connection
from django.db.models.fields import IntegerField
from optparse import make_option
ORDERING_FIELD = IntegerField('_order', null=True)
def flatten(l, ltypes=(list, tuple)):
    """Recursively flatten nested lists/tuples in *l*.

    Nested containers of any type in *ltypes* are expanded in place and
    empty ones disappear entirely.  The result is converted back to the
    type of the original argument.
    """
    container_type = type(l)
    flat = []
    for item in l:
        if isinstance(item, ltypes):
            # Recurse; an empty nested container contributes nothing.
            flat.extend(flatten(item, ltypes))
        else:
            flat.append(item)
    return container_type(flat)
def all_local_fields(meta):
    """Return *meta*'s local fields plus those of all parent models,
    gathered depth-first through meta.parents."""
    collected = list(meta.local_fields)
    for parent_model in meta.parents:
        collected.extend(all_local_fields(parent_model._meta))
    return collected
class SQLDiff(object):
    """Compare Django model definitions against the live database schema.

    Differences are collected per model in ``self.differences`` as
    ``(app_label, model_name, [(diff_type, args), ...])`` triples and
    rendered either as descriptive text or as (approximate) SQL by
    ``print_diff()``.  Backend-specific subclasses override hooks such as
    ``get_field_db_type`` to work around introspection quirks.
    """
    # Per-backend overrides for DB-API cursor.description type codes.
    DATA_TYPES_REVERSE_OVERRIDE = {}
    # Every difference category this tool can report.
    DIFF_TYPES = [
        'error',
        'comment',
        'table-missing-in-db',
        'field-missing-in-db',
        'field-missing-in-model',
        'index-missing-in-db',
        'index-missing-in-model',
        'unique-missing-in-db',
        'unique-missing-in-model',
        'field-type-differ',
        'field-parameter-differ',
        'notnull-differ',
    ]
    # Human-readable templates; %(N)s placeholders index into the diff args.
    DIFF_TEXTS = {
        'error': 'error: %(0)s',
        'comment': 'comment: %(0)s',
        'table-missing-in-db': "table '%(0)s' missing in database",
        'field-missing-in-db': "field '%(1)s' defined in model but missing in database",
        'field-missing-in-model': "field '%(1)s' defined in database but missing in model",
        'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database",
        'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model",
        'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database",
        'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model",
        'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'",
        'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'",
        'notnull-differ': "field '%(1)s' null differ: db='%(3)s', model='%(2)s'",
    }
    # SQL renderers, one per diff type; bound into self.DIFF_SQL in __init__.
    SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
    SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1])))
    SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])))
    # FIXME: need to lookup index name instead of just appending _idx to table + fieldname
    SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))))
    SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1])))
    # FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname
    SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2])))))
    SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
    SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
    SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('MODIFY'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))
    SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0]))
    SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
    SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])

    def __init__(self, app_models, options):
        """Prepare introspection state for *app_models* using command *options*."""
        self.app_models = app_models
        self.options = options
        self.dense = options.get('dense_output', False)
        try:
            self.introspection = connection.introspection
        except AttributeError:
            # pre-r8296 Django exposes introspection as a module, not an attribute
            from django.db import get_introspection_module
            self.introspection = get_introspection_module()
        self.cursor = connection.cursor()
        self.django_tables = self.get_django_tables(options.get('only_existing', True))
        self.db_tables = self.introspection.get_table_list(self.cursor)
        self.differences = []
        # De-duplication cache for 'unknown type' comments, keyed by
        # ((app_label, model_name), (column_name, type_code)).
        self.unknown_db_fields = {}
        # Bind the class-level SQL renderer lambdas per diff type.
        self.DIFF_SQL = {
            'error': self.SQL_ERROR,
            'comment': self.SQL_COMMENT,
            'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB,
            'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB,
            'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
            'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB,
            'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL,
            'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB,
            'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL,
            'field-type-differ': self.SQL_FIELD_TYPE_DIFFER,
            'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER,
            'notnull-differ': self.SQL_NOTNULL_DIFFER,
        }

    def add_app_model_marker(self, app_label, model_name):
        """Open a new (app, model) bucket that subsequent diffs append into."""
        self.differences.append((app_label, model_name, []))

    def add_difference(self, diff_type, *args):
        """Record one difference of *diff_type* in the current model bucket."""
        assert diff_type in self.DIFF_TYPES, 'Unknown difference type'
        self.differences[-1][-1].append((diff_type, args))

    def get_django_tables(self, only_existing):
        """Return the table names Django expects, across several Django APIs."""
        try:
            django_tables = self.introspection.django_table_names(only_existing=only_existing)
        except AttributeError:
            # backwards compatibility for before introspection refactoring (r8296)
            try:
                django_tables = _sql.django_table_names(only_existing=only_existing)
            except AttributeError:
                # backwards compatibility for before svn r7568
                django_tables = _sql.django_table_list(only_existing=only_existing)
        return django_tables

    def sql_to_dict(self, query, param):
        """ sql_to_dict(query, param) -> list of dicts
        code from snippet at http://www.djangosnippets.org/snippets/1383/
        """
        cursor = connection.cursor()
        cursor.execute(query, param)
        fieldnames = [name[0] for name in cursor.description]
        result = []
        for row in cursor.fetchall():
            rowset = []
            for field in zip(fieldnames, row):
                rowset.append(field)
            result.append(dict(rowset))
        return result

    def get_field_model_type(self, field):
        """Return the column type Django would generate for *field*."""
        return field.db_type(connection=connection)

    def get_field_db_type(self, description, field=None, table_name=None):
        """Reconstruct a Django-style column type from one DB-API description row.

        Returns None (after recording a 'comment' diff once per column) when
        the backend type code cannot be mapped to a model field class.
        """
        from django.db import models
        # DB-API cursor.description
        #(name, type_code, display_size, internal_size, precision, scale, null_ok) = description
        type_code = description[1]
        if type_code in self.DATA_TYPES_REVERSE_OVERRIDE:
            reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code]
        else:
            try:
                try:
                    reverse_type = self.introspection.data_types_reverse[type_code]
                except AttributeError:
                    # backwards compatibility for before introspection refactoring (r8296)
                    reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code)
            except KeyError:
                reverse_type = self.get_field_db_type_lookup(type_code)
        if not reverse_type:
            # type_code not found in data_types_reverse map
            key = (self.differences[-1][:2], description[:2])
            if key not in self.unknown_db_fields:
                self.unknown_db_fields[key] = 1
                self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code))
            return None
        kwargs = {}
        if isinstance(reverse_type, tuple):
            kwargs.update(reverse_type[1])
            reverse_type = reverse_type[0]
        if reverse_type == "CharField" and description[3]:
            kwargs['max_length'] = description[3]
        if reverse_type == "DecimalField":
            kwargs['max_digits'] = description[4]
            kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5]
        if description[6]:
            kwargs['blank'] = True
            if not reverse_type in ('TextField', 'CharField'):
                kwargs['null'] = True
        if '.' in reverse_type:
            # Dotted path means a non-core field class (e.g. gis fields).
            from django.utils import importlib
            # TODO: when was importlib added to django.utils ? and do we
            # need to add backwards compatibility code ?
            module_path, package_name = reverse_type.rsplit('.', 1)
            module = importlib.import_module(module_path)
            field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection)
        else:
            field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection)
        return field_db_type

    def get_field_db_type_lookup(self, type_code):
        """Backend hook for resolving unknown type codes; default: give up."""
        return None

    def strip_parameters(self, field_type):
        """Reduce 'varchar(80)' -> 'varchar' (leaves 'double precision' intact)."""
        if field_type and field_type != 'double precision':
            return field_type.split(" ")[0].split("(")[0]
        return field_type

    def find_unique_missing_in_db(self, meta, table_indexes, table_name):
        """Report model unique=True fields with no unique index in the db."""
        for field in all_local_fields(meta):
            if field.unique:
                attname = field.db_column or field.attname
                if attname in table_indexes and table_indexes[attname]['unique']:
                    continue
                self.add_difference('unique-missing-in-db', table_name, attname)

    def find_unique_missing_in_model(self, meta, table_indexes, table_name):
        """Report db unique indexes whose field is not unique in the model."""
        # TODO: Postgresql does not list unique_togethers in table_indexes
        # MySQL does
        fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
        for att_name, att_opts in table_indexes.iteritems():
            if att_opts['unique'] and att_name in fields and not fields[att_name]:
                if att_name in flatten(meta.unique_together):
                    continue
                self.add_difference('unique-missing-in-model', table_name, att_name)

    def find_index_missing_in_db(self, meta, table_indexes, table_name):
        """Report model db_index=True fields with no index in the db."""
        for field in all_local_fields(meta):
            if field.db_index:
                attname = field.db_column or field.attname
                if not attname in table_indexes:
                    self.add_difference('index-missing-in-db', table_name, attname)

    def find_index_missing_in_model(self, meta, table_indexes, table_name):
        """Report db indexes not explainable by the model definition."""
        fields = dict([(field.name, field) for field in all_local_fields(meta)])
        for att_name, att_opts in table_indexes.iteritems():
            if att_name in fields:
                field = fields[att_name]
                if field.db_index:
                    continue
                if att_opts['primary_key'] and field.primary_key:
                    continue
                if att_opts['unique'] and field.unique:
                    continue
                if att_opts['unique'] and att_name in flatten(meta.unique_together):
                    continue
                self.add_difference('index-missing-in-model', table_name, att_name)

    def find_field_missing_in_model(self, fieldmap, table_description, table_name):
        """Report db columns that have no corresponding model field."""
        for row in table_description:
            if row[0] not in fieldmap:
                self.add_difference('field-missing-in-model', table_name, row[0])

    def find_field_missing_in_db(self, fieldmap, table_description, table_name):
        """Report model fields that have no corresponding db column."""
        db_fields = [row[0] for row in table_description]
        for field_name, field in fieldmap.iteritems():
            if field_name not in db_fields:
                self.add_difference('field-missing-in-db', table_name, field_name, field.db_type(connection=connection))

    def find_field_type_differ(self, meta, table_description, table_name, func=None):
        """Report columns whose base type (parameters stripped) differs.

        *func*, when given, lets a backend subclass normalize the two type
        strings before comparison.
        """
        db_fields = dict([(row[0], row) for row in table_description])
        for field in all_local_fields(meta):
            if field.name not in db_fields:
                continue
            description = db_fields[field.name]
            model_type = self.strip_parameters(self.get_field_model_type(field))
            db_type = self.strip_parameters(self.get_field_db_type(description, field))
            # use callback function if defined
            if func:
                model_type, db_type = func(field, description, model_type, db_type)
            if not model_type == db_type:
                self.add_difference('field-type-differ', table_name, field.name, model_type, db_type)

    def find_field_parameter_differ(self, meta, table_description, table_name, func=None):
        """Report columns whose type parameters differ although the base type matches."""
        db_fields = dict([(row[0], row) for row in table_description])
        for field in all_local_fields(meta):
            if field.name not in db_fields:
                continue
            description = db_fields[field.name]
            model_type = self.get_field_model_type(field)
            db_type = self.get_field_db_type(description, field, table_name)
            # Base types differ -> already handled by find_field_type_differ.
            if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
                continue
            # use callback function if defined
            if func:
                model_type, db_type = func(field, description, model_type, db_type)
            if not model_type == db_type:
                self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)

    @transaction.commit_manually
    def find_differences(self):
        """Run every check for every model and fill self.differences."""
        cur_app_label = None
        for app_model in self.app_models:
            meta = app_model._meta
            table_name = meta.db_table
            app_label = meta.app_label
            # NOTE(review): cur_app_label is never reassigned here, so a
            # marker is opened for every model — confirm that is intended.
            if cur_app_label != app_label:
                # Marker indicating start of difference scan for this table_name
                self.add_app_model_marker(app_label, app_model.__name__)
            #if not table_name in self.django_tables:
            if not table_name in self.db_tables:
                # Table is missing from database
                self.add_difference('table-missing-in-db', table_name)
                continue
            table_indexes = self.introspection.get_indexes(self.cursor, table_name)
            fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)])
            # add ordering field if model uses order_with_respect_to
            if meta.order_with_respect_to:
                fieldmap['_order'] = ORDERING_FIELD
            try:
                table_description = self.introspection.get_table_description(self.cursor, table_name)
            except Exception, e:
                self.add_difference('error', 'unable to introspect table: %s' % str(e).strip())
                transaction.rollback()  # reset transaction
                continue
            else:
                transaction.commit()
            # Fields which are defined in database but not in model
            # 1) find: 'unique-missing-in-model'
            self.find_unique_missing_in_model(meta, table_indexes, table_name)
            # 2) find: 'index-missing-in-model'
            self.find_index_missing_in_model(meta, table_indexes, table_name)
            # 3) find: 'field-missing-in-model'
            self.find_field_missing_in_model(fieldmap, table_description, table_name)
            # Fields which are defined in models but not in database
            # 4) find: 'field-missing-in-db'
            self.find_field_missing_in_db(fieldmap, table_description, table_name)
            # 5) find: 'unique-missing-in-db'
            self.find_unique_missing_in_db(meta, table_indexes, table_name)
            # 6) find: 'index-missing-in-db'
            self.find_index_missing_in_db(meta, table_indexes, table_name)
            # Fields which have a different type or parameters
            # 7) find: 'type-differs'
            self.find_field_type_differ(meta, table_description, table_name)
            # 8) find: 'type-parameter-differs'
            self.find_field_parameter_differ(meta, table_description, table_name)

    def print_diff(self, style=no_style()):
        """ print differences to stdout """
        if self.options.get('sql', True):
            self.print_diff_sql(style)
        else:
            self.print_diff_text(style)

    def print_diff_text(self, style):
        """Render the collected differences as human-readable text."""
        cur_app_label = None
        for app_label, model_name, diffs in self.differences:
            if not diffs:
                continue
            if not self.dense and cur_app_label != app_label:
                print style.NOTICE("+ Application:"), style.SQL_TABLE(app_label)
                cur_app_label = app_label
            if not self.dense:
                print style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name)
            for diff in diffs:
                diff_type, diff_args = diff
                text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args))
                # Re-colorize: alternate ERROR styling on quote-delimited segments.
                text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'")))
                if not self.dense:
                    print style.NOTICE("|--+"), text
                else:
                    print style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text

    def print_diff_sql(self, style):
        """Render the collected differences as an approximate SQL migration."""
        cur_app_label = None
        qn = connection.ops.quote_name
        has_differences = max([len(diffs) for app_label, model_name, diffs in self.differences])
        if not has_differences:
            if not self.dense:
                print style.SQL_KEYWORD("-- No differences")
        else:
            print style.SQL_KEYWORD("BEGIN;")
            for app_label, model_name, diffs in self.differences:
                if not diffs:
                    continue
                if not self.dense and cur_app_label != app_label:
                    print style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label))
                    cur_app_label = app_label
                if not self.dense:
                    print style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name))
                for diff in diffs:
                    diff_type, diff_args = diff
                    text = self.DIFF_SQL[diff_type](style, qn, diff_args)
                    if self.dense:
                        text = text.replace("\n\t", " ")
                    print text
            print style.SQL_KEYWORD("COMMIT;")
class GenericSQLDiff(SQLDiff):
    """Fallback diff implementation with no backend-specific fixups."""
    pass
class MySQLDiff(SQLDiff):
    """SQLDiff variant compensating for MySQLdb introspection quirks."""
    # All the MySQL hacks together create something of a problem
    # Fixing one bug in MySQL creates another issue. So just keep in mind
    # that this is way unreliable for MySQL atm.
    def get_field_db_type(self, description, field=None, table_name=None):
        """Reconstruct the column type, applying MySQL-specific corrections."""
        from MySQLdb.constants import FIELD_TYPE
        # weird bug? in mysql db-api where it returns three times the correct value for field length
        # if i remember correctly it had something todo with unicode strings
        # TODO: Fix this is a more meaningful and better understood manner
        description = list(description)
        if description[1] not in [FIELD_TYPE.TINY, FIELD_TYPE.SHORT]: # exclude tinyints from conversion.
            description[3] = description[3] / 3
            description[4] = description[4] / 3
        db_type = super(MySQLDiff, self).get_field_db_type(description)
        if not db_type:
            return
        if field:
            if field.primary_key and (db_type == 'integer' or db_type == 'bigint'):
                db_type += ' AUTO_INCREMENT'
            # MySQL isn't really sure about char's and varchar's like sqlite
            field_type = self.get_field_model_type(field)
            # Fix char/varchar inconsistencies
            if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
                db_type = db_type.lstrip("var")
            # They like to call 'bool's 'tinyint(1)' and introspection makes that a integer
            # just convert it back to it's proper type, a bool is a bool and nothing else.
            if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1:
                db_type = 'bool'
            if db_type == 'integer' and description[1] == FIELD_TYPE.SHORT:
                db_type = 'smallint UNSIGNED' # FIXME: what about if it's not UNSIGNED ?
        return db_type
class SqliteSQLDiff(SQLDiff):
    """SQLDiff variant compensating for SQLite introspection quirks."""
    # Unique does not seem to be implied on Sqlite for Primary_key's
    # if this is more generic among databases this might be usefull
    # to add to the superclass's find_unique_missing_in_db method
    def find_unique_missing_in_db(self, meta, table_indexes, table_name):
        """Like the base check, but primary-key indexes also count as unique."""
        for field in all_local_fields(meta):
            if field.unique:
                attname = field.db_column or field.attname
                if attname in table_indexes and table_indexes[attname]['unique']:
                    continue
                if attname in table_indexes and table_indexes[attname]['primary_key']:
                    continue
                self.add_difference('unique-missing-in-db', table_name, attname)

    # Finding Indexes by using the get_indexes dictionary doesn't seem to work
    # for sqlite.
    def find_index_missing_in_db(self, meta, table_indexes, table_name):
        """Disabled: SQLite index introspection is unreliable."""
        pass

    def find_index_missing_in_model(self, meta, table_indexes, table_name):
        """Disabled: SQLite index introspection is unreliable."""
        pass

    def get_field_db_type(self, description, field=None, table_name=None):
        """Reconstruct the column type, fixing SQLite's char/varchar mixups."""
        db_type = super(SqliteSQLDiff, self).get_field_db_type(description)
        if not db_type:
            return
        if field:
            field_type = self.get_field_model_type(field)
            # Fix char/varchar inconsistencies
            if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
                db_type = db_type.lstrip("var")
        return db_type
class PostgresqlSQLDiff(SQLDiff):
    """SQLDiff variant that also inspects Postgres check constraints and
    NOT NULL state via the pg_catalog tables."""
    DATA_TYPES_REVERSE_OVERRIDE = {
        1042: 'CharField',
        # postgis types (TODO: support is very incomplete)
        17506: 'django.contrib.gis.db.models.fields.PointField',
        55902: 'django.contrib.gis.db.models.fields.MultiPolygonField',
    }
    # pg_type names -> field classes, for codes resolved by name lookup.
    DATA_TYPES_REVERSE_NAME = {
        'hstore': 'django_hstore.hstore.DictionaryField',
    }
    # Hopefully in the future we can add constraint checking and other more
    # advanced checks based on this database.
    SQL_LOAD_CONSTRAINTS = """
    SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid)
    FROM pg_constraint
    INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey)
    INNER JOIN pg_class ON conrelid=pg_class.oid
    INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace
    ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname;
    """
    SQL_LOAD_NULL = """
    SELECT nspname, relname, attname, attnotnull
    FROM pg_attribute
    INNER JOIN pg_class ON attrelid=pg_class.oid
    INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace;
    """
    # Postgres uses ALTER ... TYPE instead of MySQL-style MODIFY.
    SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
    SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
    SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER COLUMN'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))

    def __init__(self, app_models, options):
        """Initialize base state, then preload constraint and null metadata."""
        SQLDiff.__init__(self, app_models, options)
        self.check_constraints = {}
        self.null = {}
        self.load_constraints()
        self.load_null()

    def load_null(self):
        """Cache NOT NULL state per (schema, table, column)."""
        for dct in self.sql_to_dict(self.SQL_LOAD_NULL, []):
            key = (dct['nspname'], dct['relname'], dct['attname'])
            self.null[key] = not dct['attnotnull']

    def load_constraints(self):
        """Cache CHECK constraints per (schema, table, column)."""
        for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []):
            key = (dct['nspname'], dct['relname'], dct['attname'])
            if 'CHECK' in dct['pg_get_constraintdef']:
                self.check_constraints[key] = dct

    def get_field_db_type(self, description, field=None, table_name=None):
        """Reconstruct the column type; append CHECK constraints and flag
        NOT NULL mismatches when *table_name* is known."""
        db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description)
        if not db_type:
            return
        if field:
            if field.primary_key and db_type == 'integer':
                db_type = 'serial'
            if table_name:
                tablespace = field.db_tablespace
                if tablespace == "":
                    tablespace = "public"
                check_constraint = self.check_constraints.get((tablespace, table_name, field.attname), {}).get('pg_get_constraintdef', None)
                if check_constraint:
                    # Normalize the constraint text to roughly match Django output.
                    check_constraint = check_constraint.replace("((", "(")
                    check_constraint = check_constraint.replace("))", ")")
                    check_constraint = '("'.join([')' in e and '" '.join(e.split(" ", 1)) or e for e in check_constraint.split("(")])
                    # TODO: might be more then one constraint in definition ?
                    db_type += ' ' + check_constraint
                null = self.null.get((tablespace, table_name, field.attname), 'fixme')
                if field.null != null:
                    action = field.null and 'DROP' or 'SET'
                    self.add_difference('notnull-differ', table_name, field.name, action)
        return db_type

    @transaction.autocommit
    def get_field_db_type_lookup(self, type_code):
        """Resolve unknown type codes by name through pg_type."""
        try:
            name = self.sql_to_dict("SELECT typname FROM pg_type WHERE typelem=%s;", [type_code])[0]['typname']
            return self.DATA_TYPES_REVERSE_NAME.get(name.strip('_'))
        except (IndexError, KeyError):
            pass

    """
    def find_field_type_differ(self, meta, table_description, table_name):
        def callback(field, description, model_type, db_type):
            if field.primary_key and db_type=='integer':
                db_type = 'serial'
            return model_type, db_type
        super(PostgresqlSQLDiff, self).find_field_type_differ(meta, table_description, table_name, callback)
    """
# Maps database engine names to their backend-specific diff class; engines
# not listed fall back to GenericSQLDiff (see Command.handle).
DATABASE_SQLDIFF_CLASSES = {
    'postgis': PostgresqlSQLDiff,
    'postgresql_psycopg2': PostgresqlSQLDiff,
    'postgresql': PostgresqlSQLDiff,
    'mysql': MySQLDiff,
    'sqlite3': SqliteSQLDiff,
    'oracle': GenericSQLDiff
}
class Command(BaseCommand):
    """Management command entry point: select the backend diff class, run
    the comparison and print the result."""
    option_list = BaseCommand.option_list + (
        make_option('--all-applications', '-a', action='store_true', dest='all_applications',
                    help="Automaticly include all application from INSTALLED_APPS."),
        make_option('--not-only-existing', '-e', action='store_false', dest='only_existing',
                    help="Check all tables that exist in the database, not only tables that should exist based on models."),
        make_option('--dense-output', '-d', action='store_true', dest='dense_output',
                    help="Shows the output in dense format, normally output is spreaded over multiple lines."),
        make_option('--output_text', '-t', action='store_false', dest='sql', default=True,
                    help="Outputs the differences as descriptive text instead of SQL"),
    )

    help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).

It indicates how columns in the database are different from the sql that would
be generated by Django. This command is not a database migration tool. (Though
it can certainly help) It's purpose is to show the current differences as a way
to check/debug ur models compared to the real database tables and columns."""

    output_transaction = False
    args = '<appname appname ...>'

    def handle(self, *app_labels, **options):
        """Resolve the target apps/engine, run SQLDiff and print the report."""
        from django import VERSION
        if VERSION[:2] < (1, 0):
            raise CommandError("SQLDiff only support Django 1.0 or higher!")
        from django.db import models
        from django.conf import settings
        engine = None
        if hasattr(settings, 'DATABASES'):
            engine = settings.DATABASES['default']['ENGINE']
        else:
            engine = settings.DATABASE_ENGINE
        if engine == 'dummy':
            # This must be the "dummy" database backend, which means the user
            # hasn't set DATABASE_ENGINE.
            raise CommandError("""Django doesn't know which syntax to use for your SQL statements,
because you haven't specified the DATABASE_ENGINE setting.
Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.""")
        if options.get('all_applications', False):
            app_models = models.get_models()
        else:
            if not app_labels:
                raise CommandError('Enter at least one appname.')
            try:
                app_list = [models.get_app(app_label) for app_label in app_labels]
            except (models.ImproperlyConfigured, ImportError), e:
                raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
            app_models = []
            for app in app_list:
                app_models.extend(models.get_models(app))
        ## remove all models that are not managed by Django
        #app_models = [model for model in app_models if getattr(model._meta, 'managed', True)]
        if not app_models:
            raise CommandError('Unable to execute sqldiff no models founds.')
        if not engine:
            # Fall back to guessing the engine from the connection module path.
            engine = connection.__module__.split('.')[-2]
        if '.' in engine:
            engine = engine.split('.')[-1]
        cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
        sqldiff_instance = cls(app_models, options)
        sqldiff_instance.find_differences()
        sqldiff_instance.print_diff(self.style)
        return
| |
#
# rgb_matrix.py
#
# Copyright 2014 by John K. Hinsdale <hin@alma.com>
# Use under BSD license with above copyright notice(s) retained.
import struct
import serial
import time
# Serial constants: device node and baud rate for the matrix controller.
SERIAL_FN = "/dev/ttyUSB0"
SERIAL_SPEED = 115200
# Shared port handle; presumably lazily opened by get_serial() — defined
# outside this chunk, confirm.
SERIAL_PORT_OBJECT = None
# Op codes: first byte of every command sent over the serial link; values
# must match the firmware's command dispatch table.
OP_CODES = {
    "RESET": 0,
    "BLANK_SCREEN": 1,
    "DEMO": 2,
    "FILL_SCREEN": 3,
    "SET_TEXT_PARAMS": 4,
    "SET_TEXT_COLOR": 5,
    "WRITE_TEXT": 6,
    "DRAW_PIXEL": 7,
    "DRAW_LINE": 8,
    "DRAW_RECT": 9,
    "DRAW_TRIANGLE": 10,
    "DRAW_CIRCLE": 11,
    "DRAW_BITMAP": 12,
    "XOR_RECT": 13,
    "WRITE_BITCOLS": 14,
    "COPY_RECT": 15,
    "WIPE": 16,
    "SLEEP": 30,
    "PING": 31,
    "SET_HORIZ_SCROLL": 50,
    "SET_TIMER_DELAYS": 60,
    "DUMP_MATRIX": 250,
    "DUMP_MATRIX_RECT": 251,
}
# Wipe types: transition styles accepted by cmd_WIPE's wipe_type argument.
WIPE_DOWN = 1
WIPE_UP = 2
WIPE_LEFT = 3
WIPE_RIGHT = 4
WIPE_RADIAL_OUT = 5
WIPE_RADIAL_IN = 6
WIPE_DISSOLVE = 7
############ Commands
# RESET
def cmd_RESET():
    """Reset the matrix controller."""
    send_code_only("RESET")
    return None
# BLANK_SCREEN
def cmd_BLANK_SCREEN():
    """Clear the whole display."""
    send_code_only("BLANK_SCREEN")
    return None
# DEMO
def cmd_DEMO():
    """Start the firmware's built-in demo mode."""
    send_code_only("DEMO")
    return None
# FILL_SCREEN (color)
def cmd_FILL_SCREEN(rgb):
    """Fill the whole display with *rgb* (presumably an [r, g, b] byte
    triple, as used by the other drawing commands)."""
    ser = get_serial()
    write_byte(ser, OP_CODES["FILL_SCREEN"])
    write_bytes(ser, rgb)
    close_serial(ser)
    return None
# SET_TEXT_CURSOR(x, y)
def cmd_SET_TEXT_CURSOR(x, y):
    """Move the text cursor to (*x*, *y*).

    Sends SET_TEXT_PARAMS with text size 1 and wrapping enabled — the same
    defaults cmd_WRITE_TEXT uses when it positions the cursor.
    """
    ser = get_serial()
    write_byte(ser, OP_CODES["SET_TEXT_PARAMS"])
    # Add 1 for size and true for wrapped
    write_bytes(ser, [x, y, 1, 1])
    close_serial(ser)
    # Explicit return for consistency with every other cmd_* helper
    # (previously relied on the implicit None).
    return None
# SET_TEXT_COLOR(fg_color, bg_color)
def cmd_SET_TEXT_COLOR(fg_rgb, bg_rgb):
    """Set text foreground and background colors (byte sequences,
    presumably [r, g, b] triples)."""
    # Set the color
    ser = get_serial()
    write_byte(ser, OP_CODES["SET_TEXT_COLOR"])
    write_bytes(ser, fg_rgb)
    write_bytes(ser, bg_rgb)
    close_serial(ser)
# WRITE_BITCOLS(bc_array)
def cmd_WRITE_BITCOLS(bc_array):
    """Send a length-prefixed array of bit-column bytes; no-op when the
    array is empty or None."""
    if not bc_array:
        return
    ser = get_serial()
    write_byte(ser, OP_CODES["WRITE_BITCOLS"])
    # Length prefix, then one byte per column.
    write_byte(ser, len(bc_array))
    for b in bc_array:
        write_byte(ser, b)
    close_serial(ser)
# COPY_RECT(x, y, w, h, new_x, new_y)
def cmd_COPY_RECT(x, y, w, h, new_x, new_y):
    """Copy the w*h rectangle at (x, y) to (new_x, new_y)."""
    ser = get_serial()
    write_byte(ser, OP_CODES["COPY_RECT"])
    write_bytes(ser, [x, y, w, h, new_x, new_y])
    close_serial(ser)
# WIPE(wipe_type, color, wipe_time_ms)
def cmd_WIPE(wipe_type, rgb, wipe_time):
    """Wipe the screen to *rgb* using one of the WIPE_* styles over
    *wipe_time* milliseconds."""
    ser = get_serial()
    write_byte(ser, OP_CODES["WIPE"])
    write_byte(ser, wipe_type)
    write_bytes(ser, rgb)
    write_int(ser, wipe_time)
    close_serial(ser)
# WRITE_TEXT (color, line)
def cmd_WRITE_TEXT(fg_rgb, bg_rgb, x, y, text):
    """Write *text* in the given fg/bg colors, optionally moving the text
    cursor to (x, y) first; pass None for both x and y to keep the current
    cursor position."""
    # Set the color
    ser = get_serial()
    write_byte(ser, OP_CODES["SET_TEXT_COLOR"])
    write_bytes(ser, fg_rgb)
    write_bytes(ser, bg_rgb)
    # Maybe set the location
    if x is not None and y is not None:
        write_byte(ser, OP_CODES["SET_TEXT_PARAMS"])
        # Add 1 for size and true for wrapped
        write_bytes(ser, [x, y, 1, 1])
    # Write the bytes of text (length prefix, then one byte per character)
    write_byte(ser, OP_CODES["WRITE_TEXT"])
    write_byte(ser, len(text))
    for c in text:
        write_byte(ser, ord(c))
    close_serial(ser)
    return None
# DRAW_PIXEL (x, y, color)
def cmd_DRAW_PIXEL(x, y, rgb):
    """Set the pixel at (x, y) to *rgb*."""
    ser = get_serial()
    write_byte(ser, OP_CODES["DRAW_PIXEL"])
    write_bytes(ser, [x, y])
    write_bytes(ser, rgb)
    close_serial(ser)
    return None
# DRAW_LINE (x0, y0, x1, y1, color)
def cmd_DRAW_LINE(x0, y0, x1, y1, rgb):
    """Draw a line from (x0, y0) to (x1, y1) in *rgb*."""
    ser = get_serial()
    write_byte(ser, OP_CODES["DRAW_LINE"])
    write_bytes(ser, [x0, y0, x1, y1])
    write_bytes(ser, rgb)
    close_serial(ser)
    return None
# DRAW_RECT (x, y, w, h, color, is_filled, round_radius)
def cmd_DRAW_RECT(x, y, w, h, rgb, is_filled, round_radius):
    """Draw a w*h rectangle at (x, y), optionally filled and with rounded
    corners."""
    ser = get_serial()
    write_byte(ser, OP_CODES["DRAW_RECT"])
    write_bytes(ser, [x, y, w, h])
    write_bytes(ser, rgb)
    # NOTE(review): an earlier comment said the round-radius is always sent
    # as zero because rounding "does not work", but round_radius is passed
    # through as given -- confirm whether rounded rects render correctly.
    write_bytes(ser, [is_filled, round_radius])
    close_serial(ser)
    return None
# XOR_RECT (x, y, w, h, color)
def cmd_XOR_RECT(x, y, w, h, rgb):
    """XOR the w*h rectangle at (x, y) with *rgb*."""
    ser = get_serial()
    write_byte(ser, OP_CODES["XOR_RECT"])
    write_bytes(ser, [x, y, w, h])
    write_bytes(ser, rgb)
    close_serial(ser)
    return None
# DRAW_TRIANGLE (x0, y0, x1, y1, x2, y2, color, is_filled)
def cmd_DRAW_TRIANGLE(x0, y0, x1, y1, x2, y2, rgb, is_filled):
    """Draw the triangle with the three given vertices in *rgb*."""
    ser = get_serial()
    write_byte(ser, OP_CODES["DRAW_TRIANGLE"])
    write_bytes(ser, [x0, y0, x1, y1, x2, y2])
    write_bytes(ser, rgb)
    # Triangle filling does not work (has glitches)
    write_byte(ser, is_filled)
    close_serial(ser)
    return None
# DRAW_CIRCLE (x, y, r, color, is_filled)
def cmd_DRAW_CIRCLE(x, y, radius, rgb, is_filled):
    """Draw a circle of *radius* centered at (x, y) in color *rgb*."""
    port = get_serial()
    write_byte(port, OP_CODES["DRAW_CIRCLE"])
    write_bytes(port, [x, y, radius])
    write_bytes(port, rgb)
    write_byte(port, is_filled)
    close_serial(port)
    return None
# SLEEP (ms)
def cmd_SLEEP(ms):
    """Ask the device to pause for *ms* milliseconds (16-bit value)."""
    port = get_serial()
    write_byte(port, OP_CODES["SLEEP"])
    write_int(port, ms)
    close_serial(port)
    return None
# PING - send a ping request and wait until it returns
def cmd_PING():
    """Send a PING and block until the device answers or the read times out.

    read_byte() returns 0 on timeout; the device is expected to answer
    with a "!" character (read_byte yields a 1-char str under Python 2).
    """
    ser = get_serial()
    write_byte(ser, OP_CODES["PING"])
    # Now wait for the response to come back
    byte = read_byte(ser)
    if byte == 0:
        print "*** TIMEOUT waiting for response to ping:"
    elif byte != "!":
        print "*** UNEXPECTED RESPONSE from ping: [" + str(byte) + "]"
    close_serial(ser)
    return None
# SET_HORIZ_SCROLL (offset)
def cmd_SET_HORIZ_SCROLL(offset):
    """Set the display's horizontal scroll offset."""
    port = get_serial()
    # Op code and offset go out back-to-back.
    write_bytes(port, [OP_CODES["SET_HORIZ_SCROLL"], offset])
    close_serial(port)
    return None
# SET_TIMER_DELAYS (int_arr)
def cmd_SET_TIMER_DELAYS(int_arr):
    """Set the four animation timer delays; None restores the defaults."""
    if int_arr is None:
        # Reset to the 32x32 panel defaults.
        int_arr = [260, 580, 1220, 2500]
    elif len(int_arr) != 4:
        raise Exception("SET_TIMER_DELAYS takes 4 integer arguments")
    port = get_serial()
    write_byte(port, OP_CODES["SET_TIMER_DELAYS"])
    # Each delay is sent as a two-byte big-endian integer.
    for delay in int_arr:
        write_int(port, delay)
    close_serial(port)
    return None
#################
# Just send the op code byte for commands with no args
def send_code_only(code):
    """Send the single op-code byte for *code* (commands with no arguments)."""
    port = get_serial()
    write_byte(port, OP_CODES[code])
    close_serial(port)
# Get the matrix of pixels as RGB triples
def get_matrix():
    """Dump the 32x32 display into a matrix of per-pixel color triples.

    Returns a 32x32 list-of-lists of (r, g, b) tuples with 4-bit channels,
    or None if the response could not be parsed or did not contain exactly
    1024 pixels.
    """
    send_code_only("DUMP_MATRIX")
    ser = get_serial()
    # The dump is framed between ">" and "<", terminated by "<\n".
    s = read_until(ser, "<\n")
    idx = s.find(">")
    # NOTE(review): a ">" at index 0 is treated as missing (idx > 0);
    # presumably the dump always carries a prefix before ">" — confirm.
    if idx > 0:
        s = s[idx + 1:]
    else:
        return None
    idx = s.find("<")
    if idx > 0:
        s = s[:idx]
    else:
        return None
    s = s.strip()
    # Array of pixel color runs in RRRRrGGGGggBBBBb (5/6/5) format,
    # one hex run per line.
    run_strings = s.split("\n")
    # Parse RGB triples and counts
    runs = []
    for range_str in run_strings:
        if ":" in range_str:
            # "pixel:count" run-length encoding, both fields hex.
            [pixel_str, count_str] = range_str.split(":")
            count = int(count_str, 16)
            pixel = int(pixel_str, 16)
        else:
            # A bare pixel value is a run of one.
            pixel = int(range_str, 16)
            count = 1
        # Split 16-bit pixel integer into 5/6/5 R/G/B, keeping only the
        # top 4 bits of each channel (shifts 12/7/1 select them).
        rgb_triple = ((pixel >> 12) & 0xF, (pixel >> 7) & 0XF, (pixel >> 1) & 0xF)
        runs.append((rgb_triple, count))
    # Assemble the linearized pixels
    pixels = []
    for run in runs:
        (trip, count) = run
        for i in range(count):
            pixels.append(trip)
    # Sanity check: should have gotten 1024 (32x32) pixels
    if len(pixels) != 1024:
        return None
    # Fill the matrix row-major from the linear pixel list.
    matrix = [[() for x in xrange(32)] for x in xrange(32)]
    i = 0
    for row in range(32):
        for col in range(32):
            matrix[row][col] = pixels[i]
            i += 1
    return matrix
# Get an integer, or -1 if invalid
def get_int(s):
    """Parse *s* as an integer, returning -1 if it cannot be converted.

    Catches only the conversion errors int() actually raises instead of
    the original bare except, which also swallowed KeyboardInterrupt etc.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        return -1
# Get the serial port - call close() on it when done!
def get_serial():
    """Return the lazily-created, module-cached serial port object.

    Opened once with SERIAL_FN/SERIAL_SPEED and reused for every command;
    close_serial() is deliberately a no-op because of this cache.
    """
    global SERIAL_PORT_OBJECT
    if SERIAL_PORT_OBJECT is None:
        SERIAL_PORT_OBJECT = serial.Serial(SERIAL_FN, SERIAL_SPEED)
    return SERIAL_PORT_OBJECT
# Close the serial when done
def close_serial(ser):
    """Intentional no-op: the port object is cached globally by
    get_serial() and must stay open for subsequent commands."""
    return None
# Write a byte to serial, retrying and timing out.
# The retry may not be needed since pyserial may do
# the blocking for us(?)
def write_byte(ser, b):
    """Write one byte to *ser*, retrying short writes; gives up after ~5 s.

    The retry may be redundant since pyserial can block for us, but it is
    kept for safety.
    """
    payload = struct.pack('B', b)
    attempts = 0
    while ser.write(payload) == 0:
        attempts += 1
        if attempts > 500:  # 5 second timeout at 10ms interval
            return
        time.sleep(0.010)
# Write byte values to the serial port
def write_bytes(ser, byte_arr):
    """Write each value in *byte_arr* to *ser* as a single byte."""
    for value in byte_arr:
        write_byte(ser, value)
# Write a two-byte int to serial, msb first
def write_int(ser, i):
    """Write a 16-bit integer to *ser* as two bytes, most significant first.

    Uses divmod instead of int(i/256): true division goes through a float,
    which silently loses precision for large ints under Python 3.
    """
    hi, lo = divmod(i, 256)
    write_byte(ser, hi)
    write_byte(ser, lo)
# Read byte from serial, retrying until timeout
# The retry may not be needed since pyserial may do the
# blocking for us(?)
def read_byte(ser):
    """Read one byte from *ser*, polling until data arrives.

    Returns the byte's value on success, or 0 after ~5 s without data.
    The retry may be redundant since pyserial can block for us.
    """
    attempts = 0
    while True:
        data = ser.read(1)
        if data:
            return data[0]
        attempts += 1
        if attempts > 500:  # 5 second timeout at 10ms interval
            return 0
        time.sleep(0.010)
# Read bytes from serial until terminating string seen, or timed out
def read_until(ser, termstr):
    """Accumulate characters from *ser* until the buffer ends with
    *termstr*, returning whatever was collected on timeout.

    read_byte() signals a timeout by returning the integer 0, which the
    original compared against "\0" and therefore never detected (the
    subsequent += would then raise under Python 3); accept either form.
    """
    result = ""
    while not result.endswith(termstr):
        next_char = read_byte(ser)
        if next_char == 0 or next_char == "\0":  # timeout
            return result
        result += next_char
    return result
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The program reads an existing model file and generates models for different amounts of occlusions
"""
from __future__ import print_function
from detector_model_pb2 import DetectorModel
import detector_model_pb2 as dm
import detections_pb2 as det
import os, os.path#, glob
from optparse import OptionParser
from plot_detector_model import read_cascade, read_model
def add_feature_to_channels(channel_index, box, weight):
    """Accumulate *weight* over *box* (corners inclusive) into one channel
    of the module-global `channels` array.

    NOTE(review): relies on globals `channels` (sliceable with a 3-tuple,
    presumably a numpy array) and `print_the_features` that are not
    defined in this file's visible scope — confirm they are set up before
    this is called.
    """
    # Equivalent element-wise form kept for reference:
    #for y in range(box.min_corner.y, box.max_corner.y+1):
    #    for x in range(box.min_corner.x, box.max_corner.x+1):
    #        channels[channel_index, y, x] += weight
    slice_y = slice(box.min_corner.y, box.max_corner.y+1)
    slice_x = slice(box.min_corner.x, box.max_corner.x+1)
    channels[channel_index, slice_y, slice_x] += weight
    if print_the_features:
        print("box (min x,y) (max x,y) ==",
              (box.min_corner.x, box.min_corner.y),
              (box.max_corner.x, box.max_corner.y),
              "\tweight ==", weight)
    return
def get_stump_box(stump):
    """Return the bounding box of a decision stump's feature."""
    return stump.feature.box
def get_node_boxes(node):
    """Return the feature box of *node*'s decision stump, or None when
    the node has no (truthy) decision stump."""
    if not node.decision_stump:
        return None
    return get_stump_box(node.decision_stump)
def getMaxXY_tree(tree):
    """Return [max_x, max_y] over the feature boxes of all nodes in *tree*.

    Returns [-1, -1] for a tree with no nodes.
    """
    boxes = [get_node_boxes(node) for node in tree.nodes]
    maxx = -1
    maxy = -1
    for box in boxes:
        if box.max_corner.x > maxx:
            maxx = box.max_corner.x
        if box.max_corner.y > maxy:
            maxy = box.max_corner.y
    return [maxx, maxy]
def update_cascade(old_cascade, new_cascade, yThresh):
    """Copy into *new_cascade* only the stages of *old_cascade* whose
    features lie strictly above *yThresh* (max feature y < yThresh).

    Raises for any stage that is not a Level2DecisionTree.
    (Removed the unused `tree = 0` local and the unused enumerate index.)
    """
    new_cascade.Clear()
    for stage in old_cascade.stages:
        if stage.feature_type == stage.Level2DecisionTree:
            maxx, maxy = getMaxXY_tree(stage.level2_decision_tree)
        else:
            print("stage.feature_type ==", stage.feature_type)
            raise Exception("Received an unhandled stage.feature_type")
        if maxy < yThresh:
            new_stage = new_cascade.stages.add()
            new_stage.CopyFrom(stage)
    return
def update_cascade_left(old_cascade, new_cascade, xThresh):
    """Copy into *new_cascade* only the stages of *old_cascade* whose
    features lie strictly left of *xThresh* (max feature x < xThresh).

    Raises for any stage that is not a Level2DecisionTree.
    (Removed the unused `tree = 0` local and the unused enumerate index.)
    """
    new_cascade.Clear()
    for stage in old_cascade.stages:
        if stage.feature_type == stage.Level2DecisionTree:
            maxx, maxy = getMaxXY_tree(stage.level2_decision_tree)
        else:
            print("stage.feature_type ==", stage.feature_type)
            raise Exception("Received an unhandled stage.feature_type")
        if maxx < xThresh:
            new_stage = new_cascade.stages.add()
            new_stage.CopyFrom(stage)
    return
def generate_occlusionClassifier(input_model):
    """Generate bottom-occlusion ("up") models by cropping cascade stages.

    For each threshold from half_width down to zero, writes a copy of
    *input_model* keeping only the stages whose features survive the crop,
    to a file named "<input_model>_artificial_crop_<pixels>".
    NOTE(review): width/half_width (32/16) drive the *vertical* crop via
    yThresh — the naming looks transposed; confirm intent.
    """
    width = 32
    half_width = 16
    model=read_model(input_model)
    for i in range(1+half_width):
        yThresh = half_width-i
        new_model = DetectorModel()
        new_model.CopyFrom(model)
        # Window size is read but not used further in this function.
        if model.model_window_size:
            model_width = model.model_window_size.x
            model_height = model.model_window_size.y
        print("model.detector_type", model.detector_type)
        if model.detector_type == model.SoftCascadeOverIntegralChannels:
            old_cascade = model.soft_cascade_model
            new_cascade = new_model.soft_cascade_model
            print("Model has %i stages" % len(old_cascade.stages))
            # Keep only stages entirely above the crop line.
            update_cascade(old_cascade, new_cascade, width-yThresh)
        # yThresh*4 presumably converts model units to image pixels — confirm.
        output_path = input_model + "_artificial_crop_" + str(yThresh*4)
        out_file = open(output_path, "wb")
        out_file.write(new_model.SerializeToString())
        out_file.close()
        print("Create output model file", output_path)
def generate_occlusionClassifier_left(input_model):
    """Generate left-occlusion models by cropping cascade stages.

    Mirrors generate_occlusionClassifier but crops horizontally via
    update_cascade_left. NOTE(review): height/half_height (16/8) drive the
    *horizontal* crop via xThresh — the naming looks transposed; confirm.
    """
    height = 16
    half_height = 8
    model=read_model(input_model)
    for i in range(1+half_height):
        xThresh = half_height-i
        new_model = DetectorModel()
        new_model.CopyFrom(model)
        # Window size is read but not used further in this function.
        if model.model_window_size:
            model_width = model.model_window_size.x
            model_height = model.model_window_size.y
        print("model.detector_type", model.detector_type)
        if model.detector_type == model.SoftCascadeOverIntegralChannels:
            old_cascade = model.soft_cascade_model
            new_cascade = new_model.soft_cascade_model
            print("Model has %i stages" % len(old_cascade.stages))
            # Keep only stages entirely left of the crop line.
            update_cascade_left(old_cascade, new_cascade, height-xThresh)
        # xThresh*4 presumably converts model units to image pixels — confirm.
        output_path = input_model + "_artificial_crop_" + str(xThresh*4)
        out_file = open(output_path, "wb")
        out_file.write(new_model.SerializeToString())
        out_file.close()
        print("Create output model file", output_path)
def parse_arguments():
    """Parse and validate the command line; returns the options object.

    Exits via parser.error() when required options are missing or the
    input model file does not exist.
    """
    parser = OptionParser()
    parser.description = (
        "The program reads an existing model file and generates "
        "models for different amounts of occlusions")

    parser.add_option("-i", "--input_model", dest="input_model",
                      type="string",
                      help="path to the trained model.")
    parser.add_option("-c", "--classifier_type", dest="classifier_type",
                      type="string",
                      help="this option is required and denotes the type of the classifier: \"up\" or \"left\"")

    options, args = parser.parse_args()

    if not options.classifier_type:
        parser.error("'classifier_type' has to be specified")
    if not options.input_model:
        parser.error("'input' option is required to run this program")
    # parser.error() exits, so reaching here means input_model is set.
    if not os.path.exists(options.input_model):
        parser.error("Could not find the input file %s" % options.input_model)

    return options
def main():
    """Entry point: dispatch to the generator for the requested type.

    Fix: the original had a stray `return` right after the dispatch,
    which made the closing message unreachable dead code.
    """
    options = parse_arguments()
    if options.classifier_type == "up":
        generate_occlusionClassifier(options.input_model)
    elif options.classifier_type =="left":
        generate_occlusionClassifier_left(options.input_model)
    else:
        raise Exception("classifier type must be eighter 'up or 'left'")
    print("End of game, have a nice day!")
    return
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| |
# -*- coding: utf-8 -*-
import os
import logging
from urllib.parse import urlencode
from ast import literal_eval as make_tuple
from calendar import month_name
from celery import shared_task
import requests
from django.conf import settings
from django.core.cache import cache
from mmw.settings import layer_classmaps
from apps.modeling.geoprocessing import multi, parse
from apps.modeling.tr55.utils import aoi_resolution
from apps.modeling.tasks import run_gwlfe
from apps.modeling.mapshed.tasks import (NOCACHE,
collect_data,
convert_data,
nlcd_streams,
)
from apps.geoprocessing_api.calcs import (animal_population,
point_source_pollution,
catchment_water_quality,
stream_data,
streams_for_huc12s,
huc12s_with_aois,
drexel_fast_zonal,
)
logger = logging.getLogger(__name__)

# Identifier for the Delaware River Basin data source.
DRB = 'drb'

# Rapid Watershed Delineation service location, overridable via env vars.
RWD_HOST = os.environ.get('RWD_HOST', 'localhost')
RWD_PORT = os.environ.get('RWD_PORT', '5000')

# Unit conversion factors.
ACRES_PER_SQM = 0.000247105
CM_PER_MM = 0.1
M_PER_CM = 0.01
@shared_task
def start_rwd_job(location, snapping, simplify, data_source):
    """
    Calls the Rapid Watershed Delineation endpoint
    that is running in the Docker container, and returns
    the response unless there is an out-of-watershed error
    which raises an exception.
    """
    lat, lng = location
    end_point = 'rwd' if data_source == DRB else 'rwd-nhd'
    rwd_url = 'http://%s:%s/%s/%f/%f' % (RWD_HOST, RWD_PORT, end_point,
                                         lat, lng)

    params = {}
    if not snapping:
        # The Webserver defaults to enable snapping, uses 1 (true) 0 (false)
        params['snapping'] = 0
    if simplify is not False:
        # RWD also defaults to simplify the shape according to a tolerance.
        # Passing it `?simplify=0` returns the unsimplified result.
        params['simplify'] = simplify

    query_string = urlencode(params)
    if query_string:
        rwd_url = '%s?%s' % (rwd_url, query_string)

    logger.debug('rwd request: %s' % rwd_url)

    response_json = requests.get(rwd_url).json()
    if 'error' in response_json:
        raise Exception(response_json['error'])

    return response_json
@shared_task
def analyze_streams(results, area_of_interest, datasource='nhdhr', wkaoi=None):
    """
    Given geoprocessing results with stream data and an area of interest,
    returns the streams and stream order within it.

    If a wkaoi is specified and caching is enabled, the results will be
    cached and reused.
    """
    cache_key = None
    if wkaoi and settings.GEOP['cache']:
        cache_key = f'db_{wkaoi}__{datasource}__stream_data'
        hit = cache.get(cache_key)
        if hit:
            return {'survey': hit}

    survey = stream_data(results, area_of_interest, datasource)

    if cache_key:
        # Cache indefinitely (no timeout).
        cache.set(cache_key, survey, None)

    return {'survey': survey}
@shared_task
def analyze_animals(area_of_interest):
    """
    Given an area of interest, returns the animal population within it.
    """
    survey = animal_population(area_of_interest)
    return {'survey': survey}
@shared_task
def analyze_pointsource(area_of_interest):
    """
    Given an area of interest, returns point sources of pollution within it.
    """
    survey = point_source_pollution(area_of_interest)
    return {'survey': survey}
@shared_task
def analyze_catchment_water_quality(area_of_interest):
    """
    Given an area of interest in the DRB, returns catchment water quality data
    within it.
    """
    survey = catchment_water_quality(area_of_interest)
    return {'survey': survey}
@shared_task(throws=Exception)
def analyze_nlcd(result, area_of_interest=None, nlcd_year='2011_2011'):
    """
    Transform a raw NLCD geoprocessing result into an Analyze survey of
    area, coverage and active-river-area per NLCD class.

    Raises when the upstream geoprocessing reported an error. Fix: guard
    the coverage division so an empty result yields 0 coverage instead of
    a ZeroDivisionError.
    """
    if 'error' in result:
        raise Exception(f'[analyze_nlcd_{nlcd_year}] {result["error"]}')

    pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    result = parse(result)
    histogram = {}
    total_ara = 0
    total_count = 0
    categories = []

    def area(dictionary, key, default=0):
        # Convert a pixel count to square meters.
        return dictionary.get(key, default) * pixel_width * pixel_width

    # Convert results to histogram, calculate total
    for key, count in result.items():
        nlcd, ara = key
        total_count += count
        total_ara += count if ara == 1 else 0
        histogram[nlcd] = count + histogram.get(nlcd, 0)

    # Only report ARA breakdowns if any ARA pixels were seen.
    has_ara = total_ara > 0

    for nlcd, (code, name) in layer_classmaps.NLCD.items():
        categories.append({
            'area': area(histogram, nlcd),
            'active_river_area': area(result, (nlcd, 1)) if has_ara else None,
            'code': code,
            'coverage': (float(histogram.get(nlcd, 0)) / total_count
                         if total_count else 0),
            'nlcd': nlcd,
            'type': name,
        })

    return {
        'survey': {
            'name': f'land_{nlcd_year}',
            'displayName':
                f'Land Use/Cover {nlcd_year[5:]} (NLCD{nlcd_year[2:4]})',
            'categories': categories,
        }
    }
@shared_task(throws=Exception)
def analyze_soil(result, area_of_interest=None):
    """
    Transform a raw soil geoprocessing result into an Analyze survey of
    area and coverage per soil class.

    Raises when the upstream geoprocessing reported an error. Fix: guard
    the coverage division so an empty result yields 0 coverage instead of
    a ZeroDivisionError.
    """
    if 'error' in result:
        raise Exception(f'[analyze_soil] {result["error"]}')

    pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    histogram = {}
    total_count = 0
    categories = []

    # Convert results to histogram, calculate total
    for key, count in result.items():
        total_count += count
        s = make_tuple(key[4:])  # Change {"List(1)":5} to {1:5}
        s = s if s != settings.NODATA else 3  # Map NODATA to 3
        histogram[s] = count + histogram.get(s, 0)

    for soil, (code, name) in layer_classmaps.SOIL.items():
        categories.append({
            'area': histogram.get(soil, 0) * pixel_width * pixel_width,
            'code': code,
            'coverage': (float(histogram.get(soil, 0)) / total_count
                         if total_count else 0),
            'type': name,
        })

    return {
        'survey': {
            'name': 'soil',
            'displayName': 'Soil',
            'categories': categories,
        }
    }
@shared_task(throws=Exception)
def analyze_climate(result, wkaoi):
    """
    Combine the multigeoprocessing climate-raster results into monthly
    'ppt' and 'tmean' averages, then shape them into the standard Analyze
    survey format.

    The 'categories' list holds twelve objects, one per month, each with
    a 'month' name, a 'monthidx' for client-side sorting, a 'ppt' value
    (converted from mm to cm) and a 'tmean' value.
    """
    if 'error' in result:
        raise Exception(f'[analyze_climate] {result["error"]}')

    layers = result[wkaoi]
    ppt = {}
    tmean = {}
    # Layer keys embed the variable name and month, e.g. "ppt__3";
    # strip the variable prefix to leave the month string.
    for layer_key, values in layers.items():
        if 'ppt' in layer_key:
            ppt[layer_key[5:]] = values['List(0)']
        if 'tmean' in layer_key:
            tmean[layer_key[7:]] = values['List(0)']

    categories = []
    for month_index in range(1, 13):
        categories.append({
            'monthidx': month_index,
            'month': month_name[month_index],
            'ppt': ppt[str(month_index)] * CM_PER_MM,
            'tmean': tmean[str(month_index)],
        })

    return {
        'survey': {
            'name': 'climate',
            'displayName': 'Climate',
            'categories': categories
        }
    }
@shared_task
def analyze_terrain(result):
    """
    Given a geoprocessing result of the shape [elevation_stats, slope_stats],
    where each stats dict has 'avg', 'min' and 'max' keys (elevation in cm,
    slope in %), transforms it into survey categories of the shape:

        [{"elevation": ..., "slope": ..., "type": "average"},
         {"elevation": ..., "slope": ..., "type": "minimum"},
         {"elevation": ..., "slope": ..., "type": "maximum"}]

    with elevation converted to meters and slope kept in %.

    Fix: the original `if x` in cm_to_m collapsed a valid elevation of
    0.0 cm (sea level) to None; only a missing value should map to None.
    """
    if 'error' in result:
        raise Exception(f'[analyze_terrain] {result["error"]}')

    [elevation, slope] = result

    def cm_to_m(x):
        # Convert only when a value is present; 0 is a valid elevation.
        return x * M_PER_CM if x is not None else None

    categories = [
        dict(type='average',
             elevation=cm_to_m(elevation['avg']),
             slope=slope['avg']),
        dict(type='minimum',
             elevation=cm_to_m(elevation['min']),
             slope=slope['min']),
        dict(type='maximum',
             elevation=cm_to_m(elevation['max']),
             slope=slope['max'])
    ]

    return {
        'survey': {
            'name': 'terrain',
            'displayName': 'Terrain',
            'categories': categories
        }
    }
@shared_task
def analyze_protected_lands(result, area_of_interest=None):
    """
    Transform a raw protected-lands geoprocessing result into an Analyze
    survey of area and coverage per protected-lands class.

    Raises when the upstream geoprocessing reported an error. Fix: guard
    the coverage division so an empty result yields 0 coverage instead of
    a ZeroDivisionError.
    """
    if 'error' in result:
        raise Exception(f'[analyze_protected_lands] {result["error"]}')

    pixel_width = aoi_resolution(area_of_interest) if area_of_interest else 1

    result = parse(result)
    histogram = {}
    total_count = 0
    categories = []

    for key, count in result.items():
        total_count += count
        histogram[key] = count + histogram.get(key, 0)

    for class_id, (code, name) in layer_classmaps.PROTECTED_LANDS.items():
        categories.append({
            'area': histogram.get(class_id, 0) * pixel_width * pixel_width,
            'class_id': class_id,
            'code': code,
            'coverage': (float(histogram.get(class_id, 0)) / total_count
                         if total_count else 0),
            'type': name,
        })

    return {
        'survey': {
            'name': 'protected_lands',
            'displayName': 'Protected Lands',
            'categories': categories,
        }
    }
@shared_task
def analyze_drb_2100_land(area_of_interest, key):
    """
    Run the Drexel fast-zonal forecast for *key* over the area of interest
    and shape the per-NLCD-class counts into an Analyze survey.

    Fix: guard the coverage division so an empty result yields 0 coverage
    instead of a ZeroDivisionError.
    """
    result = drexel_fast_zonal(area_of_interest, key)

    histogram = {}
    total_count = 0
    categories = []

    for nlcd, count in result.items():
        total_count += count
        histogram[nlcd] = count + histogram.get(nlcd, 0)

    for nlcd, (code, name) in layer_classmaps.NLCD.items():
        categories.append({
            'area': histogram.get(nlcd, 0),
            'code': code,
            'coverage': (float(histogram.get(nlcd, 0)) / total_count
                         if total_count else 0),
            'nlcd': nlcd,
            'type': name,
        })

    return {
        'survey': {
            'name': f'drb_2100_land_{key}',
            'displayName': f'DRB 2100 land forecast ({key})',
            'categories': categories,
        }
    }
def collect_nlcd(histogram, geojson=None):
    """
    Convert raw NLCD geoprocessing result to area dictionary
    """
    pixel_width = aoi_resolution(geojson) if geojson else 1
    pixel_area = pixel_width * pixel_width

    categories = []
    for nlcd, (code, name) in layer_classmaps.NLCD.items():
        categories.append({
            'area': histogram.get(nlcd, 0) * pixel_area,
            'code': code,
            'nlcd': nlcd,
            'type': name,
        })

    return {'categories': categories}
@shared_task
def collect_worksheet_aois(result, shapes):
    """
    Given a geoprocessing result of NLCD and NLCD+Streams for every
    area of interest within every HUC-12, processes the raw results
    and returns a dictionary of area of interest IDs corresponding to
    their processed results.
    """
    if 'error' in result:
        raise Exception(f'[collect_worksheet_aois] {result["error"]}')

    # Fallback for shapes missing from the geoprocessing result.
    NULL_RESULT = {'nlcd_streams': {}, 'nlcd': {}}

    collection = {}
    for shape in shapes:
        output = result.get(shape['id'], NULL_RESULT)
        nlcd = collect_nlcd(parse(output['nlcd']),
                            shape['shape'])
        streams = stream_data(nlcd_streams(output['nlcd_streams']),
                              shape['shape'])
        collection[shape['id']] = {'nlcd': nlcd, 'streams': streams}

    return collection
@shared_task
def collect_worksheet_wkaois(result, shapes):
    """
    Given a geoprocessing result of MapShed and a list of HUC-12s, processes
    the raw results through GWLFE and returns a dictionary of WKAOIs to the
    modeled results, and also the processed NLCD and NLCD+Streams.
    """
    if 'error' in result:
        raise Exception(f'[collect_worksheet_wkaois] {result["error"]}')

    collection = {}
    for shape in shapes:
        wkaoi = shape['id']
        geojson = shape['shape']

        # Convert the raw MapShed result into GWLF-E inputs; the NLCD
        # histogram rides along in the first element as 'n_count'.
        converted = convert_data(result, wkaoi)
        histogram = converted[0]['n_count']
        collected = collect_data(converted, geojson)
        # Run the GWLF-E model with default modifications/settings.
        modeled = run_gwlfe(collected, None, None)

        collection[wkaoi] = {
            'mapshed': collected,
            'gwlfe': modeled,
            'nlcd': collect_nlcd(histogram, geojson),
            'streams': stream_data(nlcd_streams(result[wkaoi]['nlcd_streams']),
                                   geojson)
        }

    return collection
@shared_task(time_limit=300)
def collect_worksheet(area_of_interest):
    """
    Given an area of interest, matches it to HUC-12s and generates a dictionary
    containing land and stream analysis for the matched AoIs, land and stream
    analysis for the matched HUC-12s, and GWLF-E results for the HUC-12s.

    This dictionary can be POSTed to /export/worksheet to generate an Excel
    worksheet containing these values, which can be used for further modeling.
    """
    def to_aoi_id(m):
        # NOCACHE prefix presumably marks these ad-hoc AoI results as
        # non-cacheable in the geoprocessing layer — confirm.
        return f'{NOCACHE}-{m["wkaoi"]}'

    matches = huc12s_with_aois(area_of_interest)
    huc12_ids = [m['huc12'] for m in matches]
    streams = streams_for_huc12s(huc12_ids)

    # Analyze each clipped area of interest within its HUC-12.
    aoi_shapes = [{
        'id': to_aoi_id(m),
        'shape': m['aoi_geom'],
    } for m in matches]
    aoi_results = collect_worksheet_aois(
        multi('worksheet_aoi', aoi_shapes, streams),
        aoi_shapes)

    # Analyze and model each full HUC-12.
    wkaoi_shapes = [{
        'id': m['wkaoi'],
        'shape': m['huc12_geom']
    } for m in matches]
    wkaoi_results = collect_worksheet_wkaois(
        multi('mapshed', wkaoi_shapes, streams),
        wkaoi_shapes)

    # Combine per-HUC-12 results under a filename-friendly key.
    collection = {}
    for m in matches:
        filename = f'{m["huc12"]}__{m["name"].replace(" ", "_")}'
        collection[filename] = {
            'name': m['name'],
            'aoi': aoi_results.get(to_aoi_id(m), {}),
            'huc12': wkaoi_results.get(m['wkaoi'], {}),
            'geojson': m['aoi_geom'],
        }

    return collection
| |
#
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path
import re
import time
import copy
from datetime import datetime, timedelta
import sys, requests, json
import bdd_test_util
# REST port exposed by each peer container.
CORE_REST_PORT = 5000
# JSON-RPC protocol version used in chaincode operation payloads.
JSONRPC_VERSION = "2.0"
class ContainerData:
    """Holds the inspect-derived facts about one docker-compose container."""

    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
        # Full docker container name, e.g. "<project>_<service>_1".
        self.containerName = containerName
        # Container IP from NetworkSettings.
        self.ipAddress = ipAddress
        # List of "KEY=value" strings from the container's Config.Env.
        self.envFromInspect = envFromInspect
        # The com.docker.compose.service label value.
        self.composeService = composeService

    def getEnv(self, key):
        """Return the value of the env entry starting with *key*.

        *key* should include the trailing '=' (e.g. "CORE_PEER_ID=").
        Raises if no entry matches. Fix: `is None` instead of `== None`.
        """
        envValue = None
        for val in self.envFromInspect:
            if val.startswith(key):
                envValue = val[len(key):]
                break
        if envValue is None:
            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
        return envValue
def parseComposeOutput(context):
    """Parses the compose output results and set appropriate values into context. Merges existing with newly composed."""
    # Use the prefix to get the container name
    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
    containerNames = []
    for l in context.compose_error.splitlines():
        tokens = l.split()
        print(tokens)
        if 1 < len(tokens):
            # Second token is the container name; normalize to the
            # docker-compose "<project>_<service>_1" form if needed.
            thisContainer = tokens[1]
            if containerNamePrefix not in thisContainer:
                thisContainer = containerNamePrefix + thisContainer + "_1"
            if thisContainer not in containerNames:
                containerNames.append(thisContainer)

    print("Containers started: ")
    print(containerNames)
    # Now get the Network Address for each name, and set the ContainerData onto the context.
    containerDataList = []
    for containerName in containerNames:
        # Container IP address.
        output, error, returncode = \
            bdd_test_util.cli_call(context, ["docker", "inspect", "--format",  "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
        print("container {0} has address = {1}".format(containerName, output.splitlines()[0]))
        ipAddress = output.splitlines()[0]

        # Get the environment array; [1:-1] strips the surrounding brackets
        # of docker's "[VAR=val VAR2=val2]" formatting.
        output, error, returncode = \
            bdd_test_util.cli_call(context, ["docker", "inspect", "--format",  "{{ .Config.Env }}", containerName], expect_success=True)
        env = output.splitlines()[0][1:-1].split()

        # Get the Labels to access the com.docker.compose.service value;
        # [4:-1] presumably strips docker's "map[...]" wrapper — confirm.
        output, error, returncode = \
            bdd_test_util.cli_call(context, ["docker", "inspect", "--format",  "{{ .Config.Labels }}", containerName], expect_success=True)
        labels = output.splitlines()[0][4:-1].split()
        dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
        print("dockerComposeService = {0}".format(dockerComposeService))
        print("container {0} has env = {1}".format(containerName, env))
        containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))

    # Now merge the new containerData info with existing
    newContainerDataList = []
    if "compose_containers" in context:
        # Need to merge I new list
        newContainerDataList = context.compose_containers
    newContainerDataList = newContainerDataList + containerDataList

    setattr(context, "compose_containers", newContainerDataList)
    print("")
def buildUrl(context, ipAddress, path):
    """Build an http(s) URL for *path* on *ipAddress*; https is used when
    the scenario carries the TLS tag."""
    scheme = "https" if 'TLS' in context.tags else "http"
    return "{0}://{1}:{2}{3}".format(scheme, ipAddress, CORE_REST_PORT, path)
def currentTime():
    """Return the current local wall-clock time as an "HH:MM:SS" string."""
    return time.strftime("%H:%M:%S")
def getDockerComposeFileArgsFromYamlFile(compose_yaml):
    """Turn a whitespace-separated list of compose files into
    ["-f", file1, "-f", file2, ...] arguments for docker-compose.

    Uses list.extend instead of repeated list concatenation, which
    rebuilt the list on every iteration.
    """
    args = []
    for part in compose_yaml.split():
        args.extend(["-f", part])
    return args
@given(u'we compose "{composeYamlFile}"')
def step_impl(context, composeYamlFile):
    """Bring up the given compose file(s) and record the containers."""
    context.compose_yaml = composeYamlFile
    fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
    context.compose_output, context.compose_error, context.compose_returncode = \
        bdd_test_util.cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["up","--force-recreate", "-d"], expect_success=True)
    assert context.compose_returncode == 0, "docker-compose failed to bring up {0}".format(composeYamlFile)
    parseComposeOutput(context)
    time.sleep(10)              # Should be replaced with a definitive interlock guaranteeing that all peers/membersrvc are ready
@when(u'requesting "{path}" from "{containerName}"')
def step_impl(context, path, containerName):
    """GET *path* from the named container; stores the response on context."""
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, path)
    print("Requesting path = {0}".format(request_url))
    resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
    assert resp.status_code == 200, "Failed to GET url %s: %s" % (request_url,resp.text)
    context.response = resp
    print("")
@then(u'I should get a JSON response containing "{attribute}" attribute')
def step_impl(context, attribute):
    """Assert the (possibly dotted) *attribute* exists in the JSON response."""
    getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
@then(u'I should get a JSON response containing no "{attribute}" attribute')
def step_impl(context, attribute):
    """Assert the (possibly dotted) *attribute* is absent from the response.

    Fix: the original raised AssertionError for "attribute found" inside
    the same try whose except swallowed AssertionError, so this step could
    never fail. The failure assertion now lives outside the try block.
    """
    found = True
    try:
        getAttributeFromJSON(attribute, context.response.json(), "")
    except AssertionError:
        found = False
        print("Attribute not found as was expected.")
    assert not found, "Attribute found in response (%s)" %(attribute)
def getAttributeFromJSON(attribute, jsonObject, msg):
    """Look up a dotted *attribute* path (e.g. "result.message") in
    *jsonObject*, asserting each level exists with message *msg*."""
    return getHierarchyAttributesFromJSON(attribute.split("."), jsonObject, msg)
def getHierarchyAttributesFromJSON(attributes, jsonObject, msg):
    """Walk the *attributes* key list down into *jsonObject*, asserting
    each key exists (failure message *msg*); returns the final value."""
    current = jsonObject
    for attr in attributes:
        assert attr in current, msg
        current = current[attr]
    return current
def formatStringToCompare(value):
    """Stringify *value*, swapping double quotes for single quotes, since
    double quotes cannot be escaped inside step attribute parameters."""
    return str(value).replace('"', "'")
@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Assert *attribute* in the JSON response equals *expectedValue*
    (compared as quote-normalized strings)."""
    foundValue = getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
    assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue)
@then(u'I should get a JSON response with array "{attribute}" contains "{expectedValue}" elements')
def step_impl(context, attribute, expectedValue):
    """Assert the array at *attribute* has exactly *expectedValue* elements."""
    foundValue = getAttributeFromJSON(attribute, context.response.json(), "Attribute not found in response (%s)" %(attribute))
    assert (len(foundValue) == int(expectedValue)), "For attribute %s, expected array of size (%s), instead found (%s)" % (attribute, expectedValue, len(foundValue))
# The same sleep step is registered for given/when/then phrasings.
@given(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    time.sleep(float(seconds))

@when(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    time.sleep(float(seconds))

@then(u'I wait "{seconds}" seconds')
def step_impl(context, seconds):
    time.sleep(float(seconds))
def getChaincodeTypeValue(chainLang):
    """Map a chaincode language name to its numeric ChaincodeSpec type.

    Unknown languages fall back to 1 (GOLANG), matching the original
    if/elif chain's default; a dict lookup replaces the chain.
    """
    return {
        "GOLANG": 1,
        "JAVA": 4,
        "NODE": 2,
        "CAR": 3,
        "UNDEFINED": 0,
    }.get(chainLang, 1)
@when(u'I deploy lang chaincode "{chaincodePath}" of "{chainLang}" with ctor "{ctor}" to "{containerName}"')
def step_impl(context, chaincodePath, chainLang, ctor, containerName):
    """Deploy a chaincode written in *chainLang* via the REST /chaincode endpoint."""
    print("Printing chaincode language " + chainLang)

    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("Requesting path = {0}".format(request_url))
    args = []
    if 'table' in context:
       # There is ctor arguments
       args = context.table[0].cells

    # Create a ChaincodeSpec structure
    chaincodeSpec = {
        "type": getChaincodeTypeValue(chainLang),
        "chaincodeID": {
            "path" : chaincodePath,
            "name" : ""
        },
        "ctorMsg":  {
            "function" : ctor,
            "args" : args
        },
    }
    if 'userName' in context:
        chaincodeSpec["secureContext"] = context.userName

    chaincodeOpPayload = createChaincodeOpPayload("deploy", chaincodeSpec)

    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    # The deploy response message carries the generated chaincode name.
    chaincodeName = resp.json()['result']['message']
    chaincodeSpec['chaincodeID']['name'] = chaincodeName
    context.chaincodeSpec = chaincodeSpec
    print(json.dumps(chaincodeSpec, indent=4))
    print("")
@when(u'I deploy chaincode "{chaincodePath}" with ctor "{ctor}" to "{containerName}"')
def step_impl(context, chaincodePath, ctor, containerName):
    """Deploy a GOLANG chaincode via the REST /chaincode endpoint."""
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("Requesting path = {0}".format(request_url))
    args = []
    if 'table' in context:
       # There is ctor arguments
       args = context.table[0].cells
    typeGolang = 1

    # Create a ChaincodeSpec structure
    chaincodeSpec = {
        "type": typeGolang,
        "chaincodeID": {
            "path" : chaincodePath,
            "name" : ""
        },
        "ctorMsg":  {
            "function" : ctor,
            "args" : args
        },
        #"secureContext" : "binhn"
    }
    if 'userName' in context:
        chaincodeSpec["secureContext"] = context.userName
    if 'metadata' in context:
        chaincodeSpec["metadata"] = context.metadata

    chaincodeOpPayload = createChaincodeOpPayload("deploy", chaincodeSpec)

    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    # The deploy response message carries the generated chaincode name.
    chaincodeName = resp.json()['result']['message']
    chaincodeSpec['chaincodeID']['name'] = chaincodeName
    context.chaincodeSpec = chaincodeSpec
    print(json.dumps(chaincodeSpec, indent=4))
    print("")
@then(u'I should have received a chaincode name')
def step_impl(context):
    """Verify a non-empty chaincode name was returned by a prior deploy,
    and record it as the current transactionID.

    Fix: the fallback branch called `fail(...)`, a name never defined in
    this module, so a missing spec raised NameError instead of a proper
    test failure; use a failing assert instead.
    """
    if 'chaincodeSpec' in context:
        assert context.chaincodeSpec['chaincodeID']['name'] != ""
        # Set the current transactionID to the name passed back
        context.transactionID = context.chaincodeSpec['chaincodeID']['name']
    elif 'grpcChaincodeSpec' in context:
        assert context.grpcChaincodeSpec.chaincodeID.name != ""
        # Set the current transactionID to the name passed back
        context.transactionID = context.grpcChaincodeSpec.chaincodeID.name
    else:
        assert False, 'chaincodeSpec not in context'
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" with "{idGenAlg}"')
def step_impl(context, chaincodeName, functionName, containerName, idGenAlg):
    """Invoke with an explicit transaction-ID generation algorithm."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    invokeChaincode(context, "invoke", functionName, containerName, idGenAlg)
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}" "{times}" times')
def step_impl(context, chaincodeName, functionName, containerName, times):
    """Invoke the deployed chaincode repeatedly, once per requested iteration."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    for i in range(int(times)):
        invokeChaincode(context, "invoke", functionName, containerName)
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" with attributes "{attrs}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, attrs, containerName):
    """Invoke the deployed chaincode with a comma-separated list of certificate attributes."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert attrs, "attrs were not specified"
    invokeChaincode(context, "invoke", functionName, containerName, None, attrs.split(","))
@when(u'I invoke chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Invoke the deployed chaincode on the named peer."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    invokeChaincode(context, "invoke", functionName, containerName)
@when(u'I invoke master chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Invoke a chaincode identified by explicit name (not the deployed spec in context)."""
    invokeMasterChaincode(context, "invoke", chaincodeName, functionName, containerName)
@then(u'I should have received a transactionID')
def step_impl(context):
    """Verify the previous invoke recorded a non-empty transaction ID."""
    assert 'transactionID' in context, 'transactionID not found in context'
    assert context.transactionID != ""
@when(u'I unconditionally query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Query the deployed chaincode on the named peer."""
    invokeChaincode(context, "query", functionName, containerName)
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on "{containerName}"')
def step_impl(context, chaincodeName, functionName, containerName):
    """Query the deployed chaincode on the named peer."""
    invokeChaincode(context, "query", functionName, containerName)
def createChaincodeOpPayload(method, chaincodeSpec):
    """Build the JSON-RPC request envelope expected by the /chaincode REST endpoint."""
    return {
        "jsonrpc": JSONRPC_VERSION,
        "method": method,
        "params": chaincodeSpec,
        "id": 1,
    }
def invokeChaincode(context, devopsFunc, functionName, containerName, idGenAlg=None, attributes=None):
    """Invoke or query the chaincode recorded in context.chaincodeSpec on the named peer.

    devopsFunc is "invoke" or "query"; attributes is an optional list of
    certificate attribute names (whitespace is stripped from each).
    """
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    # BUG FIX: the old default `attributes=[]` was a shared mutable default and
    # was also mutated in place below; use None as the sentinel instead.
    if attributes is None:
        attributes = []
    # Update the chaincodeSpec ctorMsg for invoke
    args = []
    if 'table' in context:
        # There is ctor arguments
        args = context.table[0].cells
    attributes = [attr.strip() for attr in attributes]
    context.chaincodeSpec['ctorMsg']['function'] = functionName
    context.chaincodeSpec['ctorMsg']['args'] = args
    context.chaincodeSpec['attributes'] = attributes

    # If idGenAlg is passed we must use the deprecated devops API, because that
    # parameter cannot be expressed through the newer /chaincode API.
    if idGenAlg is not None:
        invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg)
    else:
        invokeUsingChaincodeService(context, devopsFunc, functionName, containerName)
def invokeUsingChaincodeService(context, devopsFunc, functionName, containerName):
    """POST the prepared chaincodeSpec to the peer's /chaincode REST endpoint."""
    payload = createChaincodeOpPayload(devopsFunc, context.chaincodeSpec)
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("{0} POSTing path = {1}".format(currentTime(), request_url))
    print("Using attributes {0}".format(context.chaincodeSpec['attributes']))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'},
                         data=json.dumps(payload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    # Record the transaction ID when the peer returned one.
    body = resp.json()
    if 'result' in body:
        result = body['result']
        if 'message' in result:
            context.transactionID = result['message']
def invokeUsingDevopsService(context, devopsFunc, functionName, containerName, idGenAlg):
    """POST the chaincodeSpec to the deprecated /devops endpoint (supports idGenerationAlg)."""
    invocationSpec = {
        "chaincodeSpec" : context.chaincodeSpec
    }
    if idGenAlg is not None:
        invocationSpec['idGenerationAlg'] = idGenAlg
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/devops/{0}".format(devopsFunc))
    print("{0} POSTing path = {1}".format(currentTime(), request_url))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'},
                         data=json.dumps(invocationSpec), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    # Record the transaction ID when the devops API returned one.
    body = resp.json()
    if 'message' in body:
        context.transactionID = body['message']
def invokeMasterChaincode(context, devopsFunc, chaincodeName, functionName, containerName):
    """Invoke a chaincode addressed by explicit name rather than the deployed spec in context."""
    # Ctor arguments come from the scenario table, when present.
    args = []
    if 'table' in context:
        args = context.table[0].cells
    # Chaincode implementation language: 1 == golang.
    typeGolang = 1
    chaincodeSpec = {
        "type": typeGolang,
        "chaincodeID": {
            "name" : chaincodeName
        },
        "ctorMsg": {
            "function" : functionName,
            "args" : args
        }
    }
    if 'userName' in context:
        chaincodeSpec["secureContext"] = context.userName
    chaincodeOpPayload = createChaincodeOpPayload(devopsFunc, chaincodeSpec)
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/chaincode")
    print("{0} POSTing path = {1}".format(currentTime(), request_url))
    resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), verify=False)
    assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
    context.response = resp
    print("RESULT from {0} of chaincode from peer {1}".format(functionName, containerName))
    print(json.dumps(context.response.json(), indent = 4))
    # Record the transaction ID when one was returned.
    if 'result' in resp.json():
        result = resp.json()['result']
        if 'message' in result:
            transactionID = result['message']
            context.transactionID = transactionID
@then(u'I wait "{seconds}" seconds for chaincode to build')
def step_impl(context, seconds):
    """Sleep while the chaincode container image builds.

    When the feature is tagged chaincodeImagesUpToDate the wait is cut to a
    small default, since the image is assumed to be cached already.
    """
    reducedWaitTime = 4
    imagesUpToDate = 'chaincodeImagesUpToDate' in context.tags
    if imagesUpToDate:
        print("Assuming images are up to date, sleeping for {0} seconds instead of {1} in scenario {2}".format(reducedWaitTime, seconds, context.scenario.name))
    time.sleep(float(reducedWaitTime) if imagesUpToDate else float(seconds))
@then(u'I wait "{seconds}" seconds for transaction to be committed to block on "{containerName}"')
def step_impl(context, seconds, containerName):
    """Fetch the current transaction from the named peer to confirm it committed.

    NOTE(review): despite its name this step does not actually wait; the
    `seconds` argument is unused — confirm against the feature files.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    ipAddress = bdd_test_util.ipFromContainerNamePart(containerName, context.compose_containers)
    request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
    print("{0} GETing path = {1}".format(currentTime(), request_url))
    resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
    # BUG FIX: this is a GET request; the failure message previously said POST.
    assert resp.status_code == 200, "Failed to GET from %s: %s" %(request_url, resp.text)
    context.response = resp
def multiRequest(context, seconds, containerDataList, pathBuilderFunc):
    """Poll each listed container's URL (built by pathBuilderFunc) until success or timeout.

    Returns nothing; the last response per container is kept in a local map
    that is reported if the deadline is exceeded.
    """
    # Build map of "containerName" : response
    respMap = {container.containerName: None for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds=int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, pathBuilderFunc(context, container))
        # Loop until success, hard failure, or time exceeded.
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            respMap[container.containerName] = resp
            # BUG FIX: the original loop never broke out on success and never
            # slept between attempts, so it busy-waited until maxTime and then
            # always raised. Mirror the retry pattern of the sibling
            # "wait up to" steps: retry on 404, stop on 200, fail otherwise.
            if resp.status_code == 404:
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            # while/else: only reached when the deadline expired without break.
            raise Exception("Max time exceeded waiting for multiRequest with current response map = {0}".format(respMap))
@then(u'I wait up to "{seconds}" seconds for transaction to be committed to all peers')
def step_impl(context, seconds):
    """Poll every running peer until the current transaction is visible or the deadline passes.

    Retries on 404 (transaction not yet committed), stops on 200, and fails
    immediately on any other status code.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in context.compose_containers}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in context.compose_containers:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                # Success, continue
                respMap[container.containerName] = 200
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            # while/else: only reached when the deadline expired without break.
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
@then(u'I check the transaction ID if it is "{tUUID}"')
def step_impl(context, tUUID):
    """Check that the recorded transaction ID equals the expected UUID."""
    assert 'transactionID' in context, "transactionID not found in context"
    assert context.transactionID == tUUID, "transactionID is not tUUID"
def getContainerDataValuesFromContext(context, aliases, callback):
    """Return callback(containerData) for the first container matching each alias.

    BUG FIX (docs only): the old docstring claimed this returns an IP address;
    it actually returns whatever the supplied callback extracts from the
    matching container data. Aliases with no matching container are silently
    skipped. Matching is by container-name prefix: "<cwd basename>_<alias>".
    """
    assert 'compose_containers' in context, "compose_containers not found in context"
    values = []
    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
    for namePart in aliases:
        for containerData in context.compose_containers:
            if containerData.containerName.startswith(containerNamePrefix + namePart):
                values.append(callback(containerData))
                break
    return values
@then(u'I wait up to "{seconds}" seconds for transaction to be committed to peers')
def step_impl(context, seconds):
    """Poll the peers named in the scenario table until the transaction is visible or the deadline passes.

    Retries on 404 (transaction not yet committed), stops on 200, and fails
    immediately on any other status code.
    """
    assert 'transactionID' in context, "transactionID not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    aliases =  context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    # Build map of "containerName" : resp.statusCode
    respMap = {container.containerName:0 for container in containerDataList}
    # Set the max time before stopping attempts
    maxTime = datetime.now() + timedelta(seconds = int(seconds))
    for container in containerDataList:
        ipAddress = container.ipAddress
        request_url = buildUrl(context, ipAddress, "/transactions/{0}".format(context.transactionID))
        # Loop unless failure or time exceeded
        while (datetime.now() < maxTime):
            print("{0} GETing path = {1}".format(currentTime(), request_url))
            resp = requests.get(request_url, headers={'Accept': 'application/json'}, verify=False)
            if resp.status_code == 404:
                # Pause then try again
                respMap[container.containerName] = 404
                time.sleep(1)
                continue
            elif resp.status_code == 200:
                # Success, continue
                respMap[container.containerName] = 200
                break
            else:
                raise Exception("Error requesting {0}, returned result code = {1}".format(request_url, resp.status_code))
        else:
            # while/else: only reached when the deadline expired without break.
            raise Exception("Max time exceeded waiting for transactions with current response map = {0}".format(respMap))
    print("Result of request to all peers = {0}".format(respMap))
    print("")
@then(u'I should get a rejection message in the listener after stopping it')
def step_impl(context):
    """Stop the background block-listener and verify exactly one rejection was reported."""
    assert "eventlistener" in context, "no eventlistener is started"
    context.eventlistener.terminate()
    output = context.eventlistener.stdout.read()
    rejection = "Received rejected transaction"
    assert rejection in output, "no rejection message was found"
    assert output.count(rejection) == 1, "only one rejection message should be found"
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" on all peers')
def step_impl(context, chaincodeName, functionName):
    """Issue the same chaincode query against every running peer and collect the responses."""
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Ctor arguments come from the scenario table, when present.
    queryArgs = context.table[0].cells if 'table' in context else []
    context.chaincodeSpec['ctorMsg']['function'] = functionName
    context.chaincodeSpec['ctorMsg']['args'] = queryArgs
    # POST the same payload to each peer's /chaincode endpoint.
    payload = createChaincodeOpPayload("query", context.chaincodeSpec)
    responses = []
    for container in context.compose_containers:
        request_url = buildUrl(context, container.ipAddress, "/chaincode")
        print("{0} POSTing path = {1}".format(currentTime(), request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'},
                             data=json.dumps(payload), verify=False)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        responses.append(resp)
    context.responses = responses
@when(u'I unconditionally query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
def step_impl(context, chaincodeName, functionName, value):
    """Query the listed peers with a single-value argument; collect responses even on errors."""
    query_common(context, chaincodeName, functionName, value, False)
@when(u'I query chaincode "{chaincodeName}" function name "{functionName}" with value "{value}" on peers')
def step_impl(context, chaincodeName, functionName, value):
    """Query the listed peers with a single-value argument; fail the step on any non-200."""
    query_common(context, chaincodeName, functionName, value, True)
def query_common(context, chaincodeName, functionName, value, failOnError):
    """Query the deployed chaincode on each peer named in the scenario table.

    Each peer is queried under its own enrollment identity taken from
    context.peerToSecretMessage. When failOnError is False, non-200 responses
    are collected in context.responses instead of failing the step.
    """
    assert 'chaincodeSpec' in context, "chaincodeSpec not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    assert 'peerToSecretMessage' in context, "peerToSecretMessage map not found in context"
    aliases = context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    # Update the chaincodeSpec ctorMsg for invoke
    context.chaincodeSpec['ctorMsg']['function'] = functionName
    context.chaincodeSpec['ctorMsg']['args'] = [value]
    # Invoke the POST
    # Make deep copy of chaincodeSpec as we will be changing the SecurityContext per call.
    chaincodeOpPayload = createChaincodeOpPayload("query", copy.deepcopy(context.chaincodeSpec))
    responses = []
    for container in containerDataList:
        # Change the SecurityContext per call
        chaincodeOpPayload['params']["secureContext"] = context.peerToSecretMessage[container.composeService]['enrollId']
        print("Container {0} enrollID = {1}".format(container.containerName, container.getEnv("CORE_SECURITY_ENROLLID")))
        request_url = buildUrl(context, container.ipAddress, "/chaincode")
        print("{0} POSTing path = {1}".format(currentTime(), request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(chaincodeOpPayload), timeout=30, verify=False)
        if failOnError:
            assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        print("RESULT from {0} of chaincode from peer {1}".format(functionName, container.containerName))
        print(json.dumps(resp.json(), indent = 4))
        responses.append(resp)
    context.responses = responses
@then(u'I should get a JSON response from all peers with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Check every previously collected response carries the expected attribute value."""
    assert 'responses' in context, "responses not found in context"
    for reply in context.responses:
        found = getAttributeFromJSON(attribute, reply.json(), "Attribute not found in response (%s)" %(attribute))
        assert formatStringToCompare(found) == expectedValue, "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, found)
@then(u'I should get a JSON response from peers with "{attribute}" = "{expectedValue}"')
def step_impl(context, attribute, expectedValue):
    """Check each per-peer response carries the expected attribute value."""
    assert 'responses' in context, "responses not found in context"
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    for reply in context.responses:
        found = getAttributeFromJSON(attribute, reply.json(), "Attribute not found in response (%s)" %(attribute))
        assert formatStringToCompare(found) == expectedValue, "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, found)
@given(u'I register with CA supplying username "{userName}" and secret "{secret}" on peers')
def step_impl(context, userName, secret):
    """Register/login the given enrollment credentials with each peer listed in the table.

    Also records the username in context and, if a chaincodeSpec already
    exists, switches its secureContext to the new user.
    """
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers) not found in context"
    # Get list of IPs to login to
    aliases =  context.table.headings
    containerDataList = bdd_test_util.getContainerDataValuesFromContext(context, aliases, lambda containerData: containerData)
    secretMsg = {
        "enrollId": userName,
        "enrollSecret" : secret
    }
    # Login to each container specified
    for containerData in containerDataList:
        request_url = buildUrl(context, containerData.ipAddress, "/registrar")
        print("{0} POSTing path = {1}".format(currentTime(), request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'}, data=json.dumps(secretMsg), verify=False)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        context.response = resp
        print("message = {0}".format(resp.json()))
        # Create new User entry
        bdd_test_util.registerUser(context, secretMsg, containerData.composeService)
    # Store the username in the context
    context.userName = userName
    # if we already have the chaincodeSpec, change secureContext
    if 'chaincodeSpec' in context:
        context.chaincodeSpec["secureContext"] = context.userName
@given(u'I use the following credentials for querying peers')
def step_impl(context):
    """Register the per-peer enrollment credentials listed in the scenario table.

    Stores a peer -> secret-message map in context.peerToSecretMessage for use
    by later query steps.
    """
    assert 'compose_containers' in context, "compose_containers not found in context"
    assert 'table' in context, "table (of peers, username, secret) not found in context"
    peerToSecretMessage = {}
    # Login to each container specified using username and secret
    for row in context.table.rows:
        peer = row['peer']
        secretMsg = {
            "enrollId": row['username'],
            "enrollSecret" : row['secret']
        }
        ipAddress = bdd_test_util.ipFromContainerNamePart(peer, context.compose_containers)
        request_url = buildUrl(context, ipAddress, "/registrar")
        print("POSTing to service = {0}, path = {1}".format(peer, request_url))
        resp = requests.post(request_url, headers={'Content-type': 'application/json'},
                             data=json.dumps(secretMsg), verify=False)
        assert resp.status_code == 200, "Failed to POST to %s: %s" %(request_url, resp.text)
        context.response = resp
        print("message = {0}".format(resp.json()))
        peerToSecretMessage[peer] = secretMsg
    context.peerToSecretMessage = peerToSecretMessage
@given(u'I stop peers')
def step_impl(context):
    """Stop the docker-compose services listed in the scenario table."""
    compose_op(context, "stop")
@given(u'I start a listener')
def step_impl(context):
    """Start the fabric block-listener binary in the background, watching for rejections."""
    gopath = os.environ.get('GOPATH')
    assert gopath is not None, "Please set GOPATH properly!"
    listener = os.path.join(gopath, "src/github.com/hyperledger/fabric/build/docker/bin/block-listener")
    assert os.path.isfile(listener), "Please build the block-listener binary!"
    bdd_test_util.start_background_process(context, "eventlistener", [listener, "-listen-to-rejections"] )
@given(u'I start peers')
def step_impl(context):
    """Start the docker-compose services listed in the scenario table."""
    compose_op(context, "start")
@given(u'I pause peers')
def step_impl(context):
    """Pause the docker-compose services listed in the scenario table."""
    compose_op(context, "pause")
@given(u'I unpause peers')
def step_impl(context):
    """Unpause the docker-compose services listed in the scenario table."""
    compose_op(context, "unpause")
def compose_op(context, op):
    """Run a docker-compose operation (start/stop/pause/unpause) on the peers in the table.

    Services that are stopped or paused are removed from
    context.compose_containers; otherwise the container list is re-parsed from
    the compose output.
    """
    assert 'table' in context, "table (of peers) not found in context"
    assert 'compose_yaml' in context, "compose_yaml not found in context"

    fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
    services =  context.table.headings
    # Loop through services and start/stop them, and modify the container data list if successful.
    for service in services:
       context.compose_output, context.compose_error, context.compose_returncode = \
           bdd_test_util.cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + [op, service], expect_success=True)
       # BUG FIX: the failure message formatted {0} twice, printing the op name
       # where the service name belongs.
       assert context.compose_returncode == 0, "docker-compose failed to {0} {1}".format(op, service)
       if op == "stop" or op == "pause":
           context.compose_containers = [containerData for containerData in context.compose_containers if containerData.composeService != service]
       else:
           parseComposeOutput(context)
       print("After {0}ing, the container service list is = {1}".format(op, [containerData.composeService for containerData in context.compose_containers]))
# ---------------------------------------------------------------------------
from contextlib import suppress
from functools import update_wrapper
from weakref import WeakSet
from django.apps import apps
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils.text import capfirst
from django.utils.translation import gettext as _, gettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.i18n import JavaScriptCatalog
# Weak registry of every AdminSite ever instantiated (populated in
# AdminSite.__init__); a WeakSet so sites can still be garbage-collected.
all_sites = WeakSet()
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered more than once."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite.unregister() when a model was never registered."""
    pass
class AdminSite:
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = gettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = gettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = gettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
    def __init__(self, name='admin'):
        """Create an admin site; *name* becomes its URL instance namespace."""
        self._registry = {}  # model_class class -> admin_class instance
        self.name = name
        # Enabled admin actions; _global_actions additionally keeps actions
        # that were later disabled, so get_action() can still find them.
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
        # Track every site instance (weakly) in the module-level registry.
        all_sites.add(self)
def check(self, app_configs):
"""
Run the system checks on all ModelAdmins, except if they aren't
customized at all.
"""
if app_configs is None:
app_configs = apps.get_app_configs()
app_configs = set(app_configs) # Speed up lookups below
errors = []
modeladmins = (o for o in self._registry.values() if o.__class__ is not ModelAdmin)
for modeladmin in modeladmins:
if modeladmin.model._meta.app_config in app_configs:
errors.extend(modeladmin.check())
return errors
def register(self, model_or_iterable, admin_class=None, **options):
"""
Register the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, use ModelAdmin (the default admin
options). If keyword arguments are given -- e.g., list_display --
apply them as options to the admin class.
If a model is already registered, raise AlreadyRegistered.
If a model is abstract, raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured(
'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregister the given model(s).
If a model isn't already registered, raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.
        """
        # Simple membership test against the model -> ModelAdmin registry.
        return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
    def disable_action(self, name):
        """
        Disable a globally-registered action. Raise KeyError for invalid names.
        """
        # Only removed from the enabled set; _global_actions keeps the action
        # retrievable via get_action().
        del self._actions[name]
    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raise KeyError for invalid names.
        """
        # _global_actions also holds actions removed via disable_action().
        return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        # A live dict view; disabling an action is reflected immediately.
        return self._actions.items()
    @property
    def empty_value_display(self):
        # Placeholder shown for empty/None field values across this site.
        return self._empty_value_display

    @empty_value_display.setter
    def empty_value_display(self, empty_value_display):
        self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Return True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.

        You'll want to use this from within ``AdminSite.get_urls()``:

            class MyAdminSite(AdminSite):

                def get_urls(self):
                    from django.conf.urls import url

                    urls = super().get_urls()
                    urls += [
                        url(r'^my_view/$', self.admin_view(some_view))
                    ]
                    return urls

        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                # Special-case logout: redirect to the index rather than
                # bouncing an unauthenticated user into a login loop.
                if request.path == reverse('admin:logout', current_app=self.name):
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """Build the full urlpatterns list for this admin site.

        Includes the site-wide views (index, login/logout, password change,
        JS i18n catalog, view-on-site redirect), one URL prefix per registered
        model, and an app_index pattern for every app label seen.
        """
        from django.conf.urls import url, include
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views

        def wrap(view, cacheable=False):
            # Defer the admin_view wrapping to call time so subclasses that
            # override admin_view are honored.
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)

        # Admin-site-wide views.
        urlpatterns = [
            url(r'^$', wrap(self.index), name='index'),
            url(r'^login/$', self.login, name='login'),
            url(r'^logout/$', wrap(self.logout), name='logout'),
            url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
            url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
                name='view_on_site'),
        ]

        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in self._registry.items():
            urlpatterns += [
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)

        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                url(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns
    @property
    def urls(self):
        # 3-tuple consumed by django.urls.include():
        # (urlpatterns, application namespace, instance namespace).
        return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Return a dictionary of variables to put in the template context for
*every* page in the admin site.
For sites running on a subpath, use the SCRIPT_NAME value if site_url
hasn't been customized.
"""
script_name = request.META['SCRIPT_NAME']
site_url = script_name if self.site_url == '/' and script_name else self.site_url
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
}
    def password_change(self, request, extra_context=None):
        """
        Handle the "change password" task -- both form display and validation.
        """
        from django.contrib.admin.forms import AdminPasswordChangeForm
        from django.contrib.auth.views import PasswordChangeView
        url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'form_class': AdminPasswordChangeForm,
            'success_url': url,
            'extra_context': dict(self.each_context(request), **(extra_context or {})),
        }
        if self.password_change_template is not None:
            defaults['template_name'] = self.password_change_template
        # current_app lets url reversal inside the view resolve to this site.
        request.current_app = self.name
        return PasswordChangeView.as_view(**defaults)(request)
def password_change_done(self, request, extra_context=None):
"""
Display the "success" page after a password change.
"""
from django.contrib.auth.views import PasswordChangeDoneView
defaults = {
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
request.current_app = self.name
return PasswordChangeDoneView.as_view(**defaults)(request)
def i18n_javascript(self, request, extra_context=None):
    """
    Display the i18n JavaScript that the Django admin requires.

    `extra_context` is unused but present for consistency with the other
    admin views.
    """
    view = JavaScriptCatalog.as_view(packages=['django.contrib.admin'])
    return view(request)
@never_cache
def logout(self, request, extra_context=None):
    """
    Log out the user for the given HttpRequest.

    This should *not* assume the user is already logged in.
    """
    from django.contrib.auth.views import LogoutView
    context = dict(
        self.each_context(request),
        # The user is still authenticated while this view runs, so the
        # has_permission value from each_context() must be forced off.
        has_permission=False,
        **(extra_context or {})
    )
    view_kwargs = {'extra_context': context}
    # Honor a site-level template override when one is configured.
    if self.logout_template is not None:
        view_kwargs['template_name'] = self.logout_template
    request.current_app = self.name
    return LogoutView.as_view(**view_kwargs)(request)
@never_cache
def login(self, request, extra_context=None):
    """
    Display the login form for the given HttpRequest.

    On GET from an already-authenticated, authorized user, redirect to the
    admin index instead of showing the form again.
    """
    if request.method == 'GET' and self.has_permission(request):
        # Already logged-in, redirect to admin index
        index_path = reverse('admin:index', current_app=self.name)
        return HttpResponseRedirect(index_path)
    from django.contrib.auth.views import LoginView
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level,
    # and django.contrib.admin.forms eventually imports User.
    from django.contrib.admin.forms import AdminAuthenticationForm
    context = dict(
        self.each_context(request),
        title=_('Log in'),
        app_path=request.get_full_path(),
        username=request.user.get_username(),
    )
    # Default the post-login redirect to the admin index unless the caller
    # supplied one via the query string or the posted form.
    if (REDIRECT_FIELD_NAME not in request.GET and
            REDIRECT_FIELD_NAME not in request.POST):
        context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
    context.update(extra_context or {})
    defaults = {
        'extra_context': context,
        # Site-level form/template overrides win over the admin defaults.
        'authentication_form': self.login_form or AdminAuthenticationForm,
        'template_name': self.login_template or 'admin/login.html',
    }
    request.current_app = self.name
    return LoginView.as_view(**defaults)(request)
def _build_app_dict(self, request, label=None):
    """
    Build the app dictionary. The optional `label` parameter filters models
    of a specific app.

    Returns a dict mapping app_label -> {name, app_label, app_url,
    has_module_perms, models} when `label` is None; with a label it returns
    that single app entry, or None when nothing is visible to the user.
    """
    app_dict = {}

    if label:
        # Restrict the registry to models belonging to the requested app.
        models = {
            m: m_a for m, m_a in self._registry.items()
            if m._meta.app_label == label
        }
    else:
        models = self._registry

    for model, model_admin in models.items():
        app_label = model._meta.app_label

        has_module_perms = model_admin.has_module_permission(request)
        if not has_module_perms:
            continue

        perms = model_admin.get_model_perms(request)

        # Check whether user has any perm for this module.
        # If so, add the module to the model_list.
        if True not in perms.values():
            continue

        info = (app_label, model._meta.model_name)
        model_dict = {
            'name': capfirst(model._meta.verbose_name_plural),
            'object_name': model._meta.object_name,
            'perms': perms,
        }
        # Only expose admin URLs the user can actually use; an unregistered
        # URL name is silently skipped rather than breaking the page.
        if perms.get('change'):
            with suppress(NoReverseMatch):
                model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
        if perms.get('add'):
            with suppress(NoReverseMatch):
                model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)

        if app_label in app_dict:
            app_dict[app_label]['models'].append(model_dict)
        else:
            # First model seen for this app: create the app entry.
            app_dict[app_label] = {
                'name': apps.get_app_config(app_label).verbose_name,
                'app_label': app_label,
                'app_url': reverse(
                    'admin:app_list',
                    kwargs={'app_label': app_label},
                    current_app=self.name,
                ),
                'has_module_perms': has_module_perms,
                'models': [model_dict],
            }

    if label:
        return app_dict.get(label)
    return app_dict
def get_app_list(self, request):
    """
    Return the registered apps visible to this user, ordered for display.

    Apps are sorted case-insensitively by name; each app's models are
    sorted by their (already capitalized) names.
    """
    app_list = sorted(
        self._build_app_dict(request).values(),
        key=lambda app: app['name'].lower(),
    )
    for app in app_list:
        app['models'].sort(key=lambda model: model['name'])
    return app_list
@never_cache
def index(self, request, extra_context=None):
    """
    Display the main admin index page, which lists all of the installed
    apps that have been registered in this site.
    """
    context = dict(
        self.each_context(request),
        title=self.index_title,
        app_list=self.get_app_list(request),
    )
    context.update(extra_context or {})
    request.current_app = self.name
    template = self.index_template or 'admin/index.html'
    return TemplateResponse(request, template, context)
def app_index(self, request, app_label, extra_context=None):
    """
    Display the index page for a single app: the list of its models.

    Raises Http404 when the app is unknown or the user cannot see any of
    its models.
    """
    app_dict = self._build_app_dict(request, app_label)
    if not app_dict:
        raise Http404('The requested admin page does not exist.')
    # Sort the models alphabetically within each app.
    app_dict['models'].sort(key=lambda x: x['name'])
    app_name = apps.get_app_config(app_label).verbose_name
    context = dict(
        self.each_context(request),
        title=_('%(app)s administration') % {'app': app_name},
        app_list=[app_dict],
        app_label=app_label,
    )
    context.update(extra_context or {})
    request.current_app = self.name
    # Allow a per-app template override before falling back to the generic one.
    return TemplateResponse(request, self.app_index_template or [
        'admin/%s/app_index.html' % app_label,
        'admin/app_index.html'
    ], context)
# The default AdminSite instance used in the common case (referenced as
# ``django.contrib.admin.site``). Instantiate AdminSite in your own code to
# create a custom admin site.
site = AdminSite()
| |
## A script for finding every cox coefficient and pvalue for every miRNA in KIRC Tier 3 data downloaded Jan. 6th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
# Repository root: four directory levels above this script's location.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# NOTE(review): file handles in this script are generally never closed;
# harmless for a one-shot analysis script, but `with open(...)` would be tidier.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_kirc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two header/description rows that follow the column-name row.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    # Only the LAST appended entry is compared, i.e. repeat listings are
    # assumed to be consecutive in the file -- TODO confirm for this dataset.
    # The regex accepts only a non-negative integer day count; anything else
    # (blank, '[Not Available]', ...) leaves the entry untouched.
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
# Map the TCGA tumor grade codes onto ordinal integers for the regression.
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
# Encode sex as a 0/1 covariate.
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','clinical','nationwidechildrens.org_clinical_patient_kirc.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
# Skip the two header/description rows that follow the column-name row.
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        # Rows whose grade or sex code is not in the dicts, or whose age is
        # non-numeric, raise (KeyError/ValueError) and are dropped entirely.
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        # NOTE(review): bare except deliberately skips rows with incomplete
        # covariates, but it would also hide unexpected errors.
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Hoist the patient-id list out of the loop instead of rebuilding it for
## every comparison (the original rebuilt it up to three times per patient,
## making the merge O(n^2) in list constructions).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## The id list is extended as entries are appended so duplicate patient ids
## within `clinical` are still added only once (matches the original, which
## re-scanned the growing new_clinical on every iteration).
new_ids=[j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mirna files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
f.close()
#### 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
    ##normalized files were used
    if 'isoform.quantification' in i[0]:
        ## The 4th dash-separated barcode field carries the sample type code
        ## plus a vial letter (e.g. '01A'); '01' is a primary tumor.
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient-level barcode from the first three fields
            ## (simpler, equivalent form of the original zip/join trick).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is deprecated (and removed in Python 3); `in` is the
    ## equivalent, portable membership test.
    if i[0] in TCGA_to_mirna:
        ## The miRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[miRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
## A list of lists of miRNAs is constructed, the order of miRNA lists is same as the clinical_and_files data
## The order of mirnas within the lists is defined by me (they are sorted).
## I use my reannonated read counts derived from the isoform files.
## Data structure: [[mirnas for patient 1], [mirnas for patient 2], ....]
f=open(os.path.join(BASE_DIR,'mirna','mirna_list.txt'))
mirna_list=[i.strip() for i in f]
mirnas=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        # Open the reannotated counts file paired with this isoform file.
        f=open(os.path.join(BASE_DIR,'tcga_data','KIRC','mirna',j.split('.txt')[0]+'new.txt'))
        # The inner comprehension reuses the name `i` for file lines; that is
        # harmless here because nothing later in this loop body reads the
        # outer `i`, and the outer loop rebinds it on the next iteration.
        mirna_dict={mirna:counts for mirna,counts in [[i.split()[0],float(i.strip().split()[-1])] for i in f]}
        # Missing miRNAs default to 0 so every patient vector has the same
        # length and ordering (that of mirna_list).
        temp.append([[mirna,mirna_dict.get(mirna,0)] for mirna in mirna_list])
    ## In the case that the patient only contained 1 primary tumor miRNA file.
    if len(temp)==1:
        mirnas.append(temp[0])
    ## If the patient contained more than 1 primary tumor miRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        mirnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want mirnas that meet an expression cutoff
## A cutoff of .5 reads per million mirna mapped and no more than a fourth of the patients containing no expression was chosen
# [[]]*n creates n references to ONE shared list; that is safe here only
# because each slot is rebound below (final_mirnas[index] = ... + [kk]),
# never mutated in place.
final_mirnas=[[]]*len(mirnas)
for i in range(len(mirnas[0])):
    temp=[]
    for j in mirnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    # Keep this miRNA when fewer than 25% of patients have zero counts and
    # its median normalized expression exceeds 0.5.
    if count<len(mirnas)/4.0 and median>.5:
        for index, kk in enumerate(temp):
            final_mirnas[index]=final_mirnas[index]+[kk]
## This will write the final mirnas to a file (1-20 MB) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mirna','cox','KIRC','final_mirnas.txt'),'w')
for i in final_mirnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the mirnas in final_mirnas
## Map vital status onto the 0/1 event indicator coxph expects.
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
mirnas=[] ##This list tracks the mirna names
for i in range(len(final_mirnas[0])):
    kaplan=[]
    mirnas.append(final_mirnas[0][i][0])
    for k,j in zip(clinical_and_files,final_mirnas): ## These lists contain the clinical information and miRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the mirna values for the current mirna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['mirna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Grade dummy (indicator) variables; grade is column 2 of each kaplan
    ## row. Written as comprehensions instead of four near-identical loops.
    grade1=[1 if ii[2]==1 else 0 for ii in kaplan]
    grade2=[1 if ii[2]==2 else 0 for ii in kaplan]
    grade3=[1 if ii[2]==3 else 0 for ii in kaplan]
    grade4=[1 if ii[2]==4 else 0 for ii in kaplan]
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['grade4']=ro.IntVector(grade4)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ mirna + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## BUGFIX: initialize before parsing so a miRNA whose 'mirna' row cannot
    ## be found in the coxph printout records 'NA' instead of silently
    ## reusing the previous iteration's values (or raising NameError on the
    ## very first iteration).
    coeff='NA'
    pvalue='NA'
    # Parse the string of the result with python for the mirna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='mirna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank printout lines split to [], so [0] fails
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with mirna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mirna','cox','KIRC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(mirnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| |
'''
The models and Cassandra serializers for the Game and
Requests models to be used in the Game, Requests, Received, and
Sent APIs.
@author: Andy Oberlin, Jake Gregg
'''
from cassa import CassaModel
from django.db import models
import pycassa
from django.conf import settings
import uuid
from rest_framework import serializers
from uuid import UUID
# Connection pool for the 'games' keyspace, shared by every ColumnFamily in
# this module. (The Django model fields below are faked onto Cassandra.)
POOL = pycassa.ConnectionPool('games', server_list=settings.CASSANDRA_NODES)
class Game(CassaModel):
    '''
    The Game model to support the API.
    '''
    table = pycassa.ColumnFamily(POOL, 'game')

    game_id = models.TextField(primary_key=True)
    leader_id = models.TextField()
    current_round_id = models.TextField()
    date_created = models.DateTimeField()
    last_modified = models.DateTimeField()
    winning_score = models.IntegerField()
    deck = models.TextField()

    @staticmethod
    def fromMap(mapRep):
        '''
        Builds a Game from a plain dict of column values.
        '''
        return Game(**mapRep)

    @staticmethod
    def fromCassa(cassRep):
        '''
        Builds a Game from the (row key, columns) tuple Cassandra returns.
        '''
        attrs = {col: val for col, val in cassRep[1].iteritems()}
        attrs['game_id'] = str(cassRep[0])
        return Game.fromMap(attrs)

    @staticmethod
    def get(game_id=None):
        '''
        Looks up a single Game by id; returns None when no id is given.
        '''
        return Game.getByID(game_id) if game_id else None

    @staticmethod
    def getByID(game_id):
        '''
        Fetches the Game for the given id (string ids are converted to UUID).
        @param game_id: The uuid of the game.
        '''
        key = game_id if isinstance(game_id, uuid.UUID) else uuid.UUID(game_id)
        return Game.fromCassa((str(key), Game.table.get(key)))

    def save(self):
        '''
        Inserts/overwrites this Game row in Cassandra; a fresh time-based
        uuid becomes the row key when no id is set yet.
        '''
        if self.game_id:
            row_key = uuid.UUID(self.game_id)
        else:
            row_key = uuid.uuid1()
        Game.table.insert(row_key, CassaGameSerializer(self).data)
        self.game_id = str(row_key)
class CassaGameSerializer(serializers.ModelSerializer):
    '''
    Serializes a Game into the dict handed to Cassandra, converting string
    ids to UUID values on the way in.
    '''
    def transform_leader_id(self, obj, value):
        # Pass UUIDs through untouched; parse anything else.
        if isinstance(value, UUID):
            return value
        return UUID(value)

    class Meta:
        model = Game
        fields = ('winning_score', 'leader_id', 'current_round_id', 'date_created', 'last_modified', 'deck')
class GameMember(CassaModel):
    '''
    The Game Member model to support the API.
    '''
    table = pycassa.ColumnFamily(POOL, 'game_member')

    game_member_id = models.TextField(primary_key=True)
    user_id = models.TextField()
    game_id = models.TextField()
    date_created = models.DateTimeField()
    last_modified = models.DateTimeField()
    status = models.IntegerField()
    score = models.IntegerField()

    @staticmethod
    def fromMap(mapRep):
        '''
        Builds a GameMember from a plain dict of column values.
        '''
        return GameMember(**mapRep)

    @staticmethod
    def fromCassa(cassRep):
        '''
        Builds a GameMember from the (row key, columns) tuple Cassandra returns.
        '''
        attrs = {col: val for col, val in cassRep[1].iteritems()}
        attrs['game_member_id'] = str(cassRep[0])
        return GameMember.fromMap(attrs)

    @staticmethod
    def get(game_member_id=None):
        '''
        Looks up a single GameMember by id; returns None when no id is given.
        '''
        return GameMember.getByID(game_member_id) if game_member_id else None

    @staticmethod
    def filter(game_id=None, user_id=None):
        '''
        Filters members by game or by user (game takes priority).
        '''
        if game_id:
            return GameMember.filterByGame(game_id)
        if user_id:
            return GameMember.filterByUser(user_id)

    @staticmethod
    def getByID(game_member_id):
        '''
        Fetches the GameMember for the given id (string ids are converted).
        @param game_member_id: The uuid of the game member.
        '''
        key = game_member_id if isinstance(game_member_id, uuid.UUID) else uuid.UUID(game_member_id)
        return GameMember.fromCassa((str(key), GameMember.table.get(key)))

    @staticmethod
    def filterByGame(game_id):
        '''
        Gets every member of the given game via the game_id secondary index.
        '''
        key = game_id if isinstance(game_id, uuid.UUID) else uuid.UUID(game_id)
        expr = pycassa.create_index_expression('game_id', key)
        clause = pycassa.create_index_clause([expr])
        rows = GameMember.table.get_indexed_slices(clause)
        return [GameMember.fromCassa(row) for row in rows]

    @staticmethod
    def filterByUser(user_id):
        '''
        Gets every membership of the given user via the user_id secondary index.
        '''
        key = user_id if isinstance(user_id, uuid.UUID) else uuid.UUID(user_id)
        expr = pycassa.create_index_expression('user_id', key)
        clause = pycassa.create_index_clause([expr])
        rows = GameMember.table.get_indexed_slices(clause)
        return [GameMember.fromCassa(row) for row in rows]

    def save(self):
        '''
        Inserts/overwrites this row in Cassandra; a fresh time-based uuid
        becomes the row key when no id is set yet.
        '''
        if self.game_member_id:
            row_key = uuid.UUID(self.game_member_id)
        else:
            row_key = uuid.uuid1()
        GameMember.table.insert(row_key, CassaGameMemberSerializer(self).data)
        self.game_member_id = str(row_key)
class CassaGameMemberSerializer(serializers.ModelSerializer):
    '''
    Serializes a GameMember into the dict handed to Cassandra, converting
    string ids to UUID values on the way in.
    '''
    def transform_user_id(self, obj, value):
        # Pass UUIDs through untouched; parse anything else.
        if isinstance(value, UUID):
            return value
        return UUID(value)

    def transform_game_id(self, obj, value):
        if isinstance(value, UUID):
            return value
        return UUID(value)

    class Meta:
        model = GameMember
        fields = ('user_id', 'game_id', 'status', 'date_created', 'last_modified', 'score')
class Round(CassaModel):
    '''
    The Round model to support the API.
    '''
    table = pycassa.ColumnFamily(POOL, 'round')

    round_id = models.TextField(primary_key=True)
    selector_id = models.TextField()
    selection_id = models.TextField()
    phrase_card_id = models.TextField()
    game_id = models.TextField()
    date_created = models.DateTimeField()
    last_modified = models.DateTimeField()

    @staticmethod
    def fromMap(mapRep):
        '''
        Builds a Round from a plain dict of column values.
        '''
        return Round(**mapRep)

    @staticmethod
    def fromCassa(cassRep):
        '''
        Builds a Round from the (row key, columns) tuple Cassandra returns.
        '''
        attrs = {col: val for col, val in cassRep[1].iteritems()}
        attrs['round_id'] = str(cassRep[0])
        return Round.fromMap(attrs)

    @staticmethod
    def get(round_id=None):
        '''
        Looks up a single Round by id; returns None when no id is given.
        '''
        return Round.getByID(round_id) if round_id else None

    @staticmethod
    def getByID(round_id):
        '''
        Fetches the Round for the given id (string ids are converted).
        @param round_id: The uuid of the round.
        '''
        key = round_id if isinstance(round_id, uuid.UUID) else uuid.UUID(round_id)
        return Round.fromCassa((str(key), Round.table.get(key)))

    def save(self):
        '''
        Inserts/overwrites this Round row in Cassandra; a fresh time-based
        uuid becomes the row key when no id is set yet.
        '''
        if self.round_id:
            row_key = uuid.UUID(self.round_id)
        else:
            row_key = uuid.uuid1()
        Round.table.insert(row_key, CassaRoundSerializer(self).data)
        self.round_id = str(row_key)
class CassaRoundSerializer(serializers.ModelSerializer):
    '''
    Serializes a Round into the dict handed to Cassandra, converting string
    ids to UUID values on the way in.
    '''
    def transform_selector_id(self, obj, value):
        # Pass UUIDs through untouched; parse anything else.
        if isinstance(value, UUID):
            return value
        return UUID(value)

    def transform_game_id(self, obj, value):
        if isinstance(value, UUID):
            return value
        return UUID(value)

    def transform_phrase_card_id(self, obj, value):
        if isinstance(value, UUID):
            return value
        return UUID(value)

    class Meta:
        model = Round
        fields = ('selector_id', 'selection_id', 'game_id', 'phrase_card_id', 'date_created', 'last_modified')
class Nomination(CassaModel):
    '''
    The Nomination model to support the API.
    '''
    table = pycassa.ColumnFamily(POOL, 'nomination')

    nomination_id = models.TextField(primary_key=True)
    round_id = models.TextField()
    nominator_id = models.TextField()
    nomination_card_id = models.TextField()
    date_created = models.DateTimeField()
    last_modified = models.DateTimeField()

    @staticmethod
    def fromMap(mapRep):
        '''
        Creates a Nomination object from a map object with the properties.
        '''
        member = Nomination(**mapRep)
        return member

    @staticmethod
    def fromCassa(cassRep):
        '''
        Creates a Nomination object from the tuple return from Cassandra.
        '''
        mapRep = {key : val for key, val in cassRep[1].iteritems()}
        mapRep['nomination_id'] = str(cassRep[0])
        return Nomination.fromMap(mapRep)

    @staticmethod
    def get(nomination_id=None):
        '''
        Method for getting a Nomination by its id.
        '''
        if nomination_id:
            return Nomination.getByID(nomination_id)
        return None

    @staticmethod
    def filter(round_id=None, nominator_id=None):
        '''
        Filters nominations by round or by nominator (round takes priority).
        '''
        if round_id:
            return Nomination.filterByRound(round_id)
        if nominator_id:
            return Nomination.filterByUser(nominator_id)
        return None

    @staticmethod
    def getByID(nomination_id):
        '''
        Gets the Nomination given an ID.
        @param nomination_id: The uuid of the nomination.
        '''
        if not isinstance(nomination_id, uuid.UUID):
            nomination_id = uuid.UUID(nomination_id)
        return Nomination.fromCassa((str(nomination_id), Nomination.table.get(nomination_id)))

    @staticmethod
    def filterByRound(round_id):
        '''
        Gets the nominations by the round.
        '''
        if not isinstance(round_id, uuid.UUID):
            round_id = uuid.UUID(round_id)
        expr = pycassa.create_index_expression('round_id', round_id)
        clause = pycassa.create_index_clause([expr])
        ans = list(Nomination.table.get_indexed_slices(clause))
        return [Nomination.fromCassa(cassRep) for cassRep in ans]

    @staticmethod
    def filterByUser(nominator_id):
        '''
        Gets the nominations made by the given nominator.

        BUGFIX: filter() already called Nomination.filterByUser, but the
        method did not exist and raised AttributeError. Implemented here
        against the 'nominator_id' secondary index, mirroring
        GameMember.filterByUser.
        '''
        if not isinstance(nominator_id, uuid.UUID):
            nominator_id = uuid.UUID(nominator_id)
        expr = pycassa.create_index_expression('nominator_id', nominator_id)
        clause = pycassa.create_index_clause([expr])
        ans = list(Nomination.table.get_indexed_slices(clause))
        return [Nomination.fromCassa(cassRep) for cassRep in ans]

    def save(self):
        '''
        Saves this Nomination to Cassandra; a fresh time-based uuid becomes
        the row key when no id is set yet.
        '''
        nomination_id = uuid.uuid1() if not self.nomination_id else uuid.UUID(self.nomination_id)
        Nomination.table.insert(nomination_id, CassaNominationSerializer(self).data)
        self.nomination_id = str(nomination_id)
class CassaNominationSerializer(serializers.ModelSerializer):
    '''
    Serializes a Nomination into the dict handed to Cassandra, converting
    string ids to UUID values on the way in.
    '''
    def transform_round_id(self, obj, value):
        # Pass UUIDs through untouched; parse anything else.
        if isinstance(value, UUID):
            return value
        return UUID(value)

    def transform_nominator_id(self, obj, value):
        if isinstance(value, UUID):
            return value
        return UUID(value)

    def transform_nomination_card_id(self, obj, value):
        if isinstance(value, UUID):
            return value
        return UUID(value)

    class Meta:
        model = Nomination
        fields = ('round_id', 'nominator_id', 'nomination_card_id', 'date_created', 'last_modified')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''A FlickrAPI interface.
The main functionality can be found in the `flickrapi.FlickrAPI`
class.
See `the FlickrAPI homepage`_ for more info.
.. _`the FlickrAPI homepage`: http://stuvel.eu/projects/flickrapi
'''
__version__ = '1.4.3'
__all__ = ('FlickrAPI', 'IllegalArgumentException', 'FlickrError',
'CancelUpload', 'XMLNode', 'set_log_level', '__version__')
__author__ = u'Sybren St\u00fcvel'.encode('utf-8')
# Copyright (c) 2007 by the respective coders, see
# http://www.stuvel.eu/projects/flickrapi
#
# This code is subject to the Python licence, as can be read on
# http://www.python.org/download/releases/2.5.2/license/
#
# For those without an internet connection, here is a summary. When this
# summary clashes with the Python licence, the latter will be applied.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import urllib
import urllib2
import os.path
import logging
import copy
import webbrowser
# Smartly import hashlib and fall back on md5
try: from hashlib import md5
except ImportError: from md5 import md5
from flickrapi.tokencache import TokenCache, SimpleTokenCache, \
LockingTokenCache
from flickrapi.xmlnode import XMLNode
from flickrapi.multipart import Part, Multipart, FilePart
from flickrapi.exceptions import *
from flickrapi.cache import SimpleCache
from flickrapi import reportinghttp
# Module-wide logger, default level INFO. The @debug decorator below drops
# the whole module to DEBUG when it is used.
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
#LOG.setLevel(logging.DEBUG)
def make_utf8(dictionary):
    '''Encodes all Unicode strings in the dictionary to UTF-8. Converts
    all other objects to regular strings.

    Returns a copy of the dictionary, doesn't touch the original.
    '''
    encoded = {}
    for key, value in dictionary.iteritems():
        if isinstance(value, unicode):
            encoded[key] = value.encode('utf-8')
        else:
            encoded[key] = str(value)
    return encoded
def debug(method):
    '''Method decorator for debugging method calls.

    Using this automatically sets the log level to DEBUG.
    '''
    LOG.setLevel(logging.DEBUG)

    def traced(*args, **kwargs):
        # Log the call, run it, then log what came back.
        LOG.debug("Call: %s(%s, %s)" % (method.__name__, args,
            kwargs))
        result = method(*args, **kwargs)
        LOG.debug("\tResult: %s" % result)
        return result

    return traced
# REST parsers, {format: parser_method, ...}. Fill by using the
# @rest_parser(format) function decorator
rest_parsers = {}

def rest_parser(format):
    '''Method decorator, use this to mark a function as the parser for
    REST as returned by Flickr.
    '''
    def register(method):
        # Record the parser in the module registry, leave the method as-is.
        rest_parsers[format] = method
        return method

    return register
def require_format(required_format):
    '''Method decorator, raises a ValueError when the decorated method
    is called if the default format is not set to ``required_format``.
    '''
    def decorator(method):
        def decorated(self, *args, **kwargs):
            # Guard clause: refuse to run under any other response format.
            if self.default_format != required_format:
                msg = 'Function %s requires that you use ' \
                    'ElementTree ("etree") as the communication format, ' \
                    'while the current format is set to "%s".'
                raise ValueError(msg % (method.func_name, self.default_format))

            # Format matches; forward the call unchanged.
            return method(self, *args, **kwargs)
        return decorated
    return decorator
class FlickrAPI(object):
"""Encapsulates Flickr functionality.
Example usage::
flickr = flickrapi.FlickrAPI(api_key)
photos = flickr.photos_search(user_id='73509078@N00', per_page='10')
sets = flickr.photosets_getList(user_id='73509078@N00')
"""
flickr_host = "api.flickr.com"
flickr_rest_form = "/services/rest/"
flickr_auth_form = "/services/auth/"
flickr_upload_form = "/services/upload/"
flickr_replace_form = "/services/replace/"
def __init__(self, api_key, secret=None, username=None,
        token=None, format='etree', store_token=True,
        cache=False):
    """Construct a new FlickrAPI instance for a given API key
    and secret.

    api_key
        The API key as obtained from Flickr.

    secret
        The secret belonging to the API key.

    username
        Used to identify the appropriate authentication token for a
        certain user.

    token
        If you already have an authentication token, you can give
        it here. It won't be stored on disk by the FlickrAPI instance.

    format
        The response format. Use either "xmlnode" or "etree" to get a parsed
        response, or use any response format supported by Flickr to get an
        unparsed response from method calls. It's also possible to pass the
        ``format`` parameter on individual calls.

    store_token
        Disables the on-disk token cache if set to False (default is True).
        Use this to ensure that tokens aren't read nor written to disk, for
        example in web applications that store tokens in cookies.

    cache
        Enables in-memory caching of FlickrAPI calls - set to ``True`` to
        use. If you don't want to use the default settings, you can
        instantiate a cache yourself too:

        >>> f = FlickrAPI(api_key='123')
        >>> f.cache = SimpleCache(timeout=5, max_entries=100)
    """
    self.api_key = api_key
    self.secret = secret
    self.default_format = format
    # Cache of dynamically built method handlers, keyed per call signature.
    self.__handler_cache = {}

    if token:
        # Use a memory-only token cache
        self.token_cache = SimpleTokenCache()
        self.token_cache.token = token
    elif not store_token:
        # Use an empty memory-only token cache
        self.token_cache = SimpleTokenCache()
    else:
        # Use a real token cache
        self.token_cache = TokenCache(api_key, username)

    if cache:
        self.cache = SimpleCache()
    else:
        # No response caching; every API call goes over the wire.
        self.cache = None
def __repr__(self):
    '''Human-readable representation naming the API key in use.'''
    description = '[FlickrAPI for key "%s"]' % self.api_key
    return description

__str__ = __repr__
def trait_names(self):
    '''Returns a list of method names as supported by the Flickr
    API. Used for tab completion in IPython.

    Returns None when the reflection call to Flickr fails.
    '''
    try:
        rsp = self.reflection_getMethods(format='etree')
    except FlickrError:
        return None

    def tr(name):
        '''Translates Flickr names to something that can be called
        here.

        >>> tr(u'flickr.photos.getInfo')
        u'photos_getInfo'
        '''
        # Drop the leading 'flickr.' (7 chars), then dots -> underscores.
        return name[7:].replace('.', '_')

    return [tr(m.text) for m in rsp.getiterator('method')]
@rest_parser('xmlnode')
def parse_xmlnode(self, rest_xml):
    '''Parses a REST XML response from Flickr into an XMLNode object.

    Raises FlickrError when Flickr reports a non-ok status.
    '''
    parsed = XMLNode.parse(rest_xml, store_xml=True)
    if parsed['stat'] != 'ok':
        err = parsed.err[0]
        raise FlickrError(u'Error: %(code)s: %(msg)s' % err)
    return parsed
@rest_parser('etree')
def parse_etree(self, rest_xml):
    '''Parses a REST XML response from Flickr into an ElementTree object.

    Raises FlickrError when the response status is not 'ok'.
    '''
    try:
        import xml.etree.ElementTree as ElementTree
    except ImportError:
        # For Python 2.4 compatibility:
        try:
            import elementtree.ElementTree as ElementTree
        except ImportError:
            raise ImportError("You need to install "
                "ElementTree for using the etree format")

    rsp = ElementTree.fromstring(rest_xml)
    if rsp.attrib['stat'] == 'ok':
        return rsp

    # On error Flickr includes an <err> element carrying 'code' and
    # 'msg' attributes.
    err = rsp.find('err')
    raise FlickrError(u'Error: %(code)s: %(msg)s' % err.attrib)
@rest_parser('json')
def parse_json(self, json_string):
    '''Parses a REST JSON response from Flickr into a dict object.

    Flickr wraps JSON responses in a 'jsonFlickrApi(...)' callback;
    the wrapper is stripped before parsing.

    Raises FlickrError when Flickr reports a non-ok status.
    '''
    try:
        import json
    except ImportError:
        # Fixed: the original message concatenated to "...importjson...".
        raise ImportError("You need to be able to import "
                          "json for using the json format")

    # Strip the leading 'jsonFlickrApi(' (14 chars) and trailing ')'.
    payload = json_string[14:-1]
    rsp = json.loads(payload)
    if rsp['stat'] == 'ok':
        return rsp
    err = rsp['err']
    raise FlickrError(u'Error: %(code)s: %(msg)s' % err)
def sign(self, dictionary):
    """Calculate the flickr signature for a set of params.

    data
        a hash of all the params and values to be hashed, e.g.
        ``{"api_key":"AAAA", "auth_token":"TTTT", "key":
        u"value".encode('utf-8')}``

    Returns the hexadecimal MD5 digest Flickr expects as ``api_sig``.
    """
    # Signature is MD5(secret + key1 + value1 + key2 + value2 + ...)
    # with keys in sorted order.
    data = [self.secret]
    for key in sorted(dictionary.keys()):
        data.append(key)
        datum = dictionary[key]
        # Values must already be UTF-8 byte strings; a unicode object
        # would make the hash depend on an implicit encoding.
        if isinstance(datum, unicode):
            raise IllegalArgumentException("No Unicode allowed, "
                    "argument %s (%r) should have been UTF-8 by now"
                    % (key, datum))
        data.append(datum)

    md5_hash = md5(''.join(data))
    return md5_hash.hexdigest()
def encode_and_sign(self, dictionary):
    '''URL encodes the data in the dictionary, and signs it using the
    given secret, if a secret was given.
    '''
    # Flickr requires UTF-8 encoded parameter values before signing.
    utf8_params = make_utf8(dictionary)
    if self.secret:
        utf8_params['api_sig'] = self.sign(utf8_params)
    return urllib.urlencode(utf8_params)
def __getattr__(self, attrib):
    """Handle all the regular Flickr API calls.

    Example::

        flickr.auth_getFrob(api_key="AAAAAA")
        etree = flickr.photos_getInfo(photo_id='1234')
        etree = flickr.photos_getInfo(photo_id='1234', format='etree')
        xmlnode = flickr.photos_getInfo(photo_id='1234', format='xmlnode')
        json = flickr.photos_getInfo(photo_id='1234', format='json')
    """
    # Refuse to act as a proxy for unimplemented special methods
    if attrib.startswith('_'):
        raise AttributeError("No such attribute '%s'" % attrib)

    # Construct the method name and see if it's cached
    method = "flickr." + attrib.replace("_", ".")
    if method in self.__handler_cache:
        return self.__handler_cache[method]

    def handler(**args):
        '''Dynamically created handler for a Flickr API call'''

        if self.token_cache.token and not self.secret:
            raise ValueError("Auth tokens cannot be used without "
                             "API secret")

        # Set some defaults. Note these are evaluated per call, so the
        # current cached token is always used.
        defaults = {'method': method,
                    'auth_token': self.token_cache.token,
                    'api_key': self.api_key,
                    'format': self.default_format}
        args = self.__supply_defaults(args, defaults)

        return self.__wrap_in_parser(self.__flickr_call,
                                     parse_format=args['format'], **args)

    handler.method = method
    # Cache the handler so repeated attribute lookups reuse it.
    self.__handler_cache[method] = handler
    return handler
def __supply_defaults(self, args, defaults):
    '''Returns a new dictionary containing ``args``, augmented with defaults
    from ``defaults``.

    Defaults can be overridden, or completely removed by setting the
    appropriate value in ``args`` to ``None``.

    >>> f = FlickrAPI('123')
    >>> f._FlickrAPI__supply_defaults(
    ...  {'foo': 'bar', 'baz': None, 'token': None},
    ...  {'baz': 'foobar', 'room': 'door'})
    {'foo': 'bar', 'room': 'door'}
    '''
    result = dict(args)

    # Fill in defaults for parameters that weren't passed at all.
    for key, default_value in defaults.iteritems():
        if key not in result:
            result[key] = default_value

    # A None value (passed or defaulted) removes the parameter; None
    # can't be sent to Flickr anyway. keys() is a list in Python 2, so
    # deleting while iterating over it is safe.
    for key in result.keys():
        if result[key] is None:
            del result[key]

    return result
def __flickr_call(self, **kwargs):
    '''Performs a Flickr API call with the given arguments. The method name
    itself should be passed as the 'method' parameter.

    Returns the unparsed data from Flickr::

        data = self.__flickr_call(method='flickr.photos.getInfo',
            photo_id='123', format='rest')
    '''
    LOG.debug("Calling %s" % kwargs)
    post_data = self.encode_and_sign(kwargs)

    # Return value from cache if available. Fetch only once instead of
    # performing the lookup twice (test + return) as before.
    if self.cache:
        cached_reply = self.cache.get(post_data)
        if cached_reply:
            return cached_reply

    url = "https://" + self.flickr_host + self.flickr_rest_form
    flicksocket = urllib2.urlopen(url, post_data)
    reply = flicksocket.read()
    flicksocket.close()

    # Store in cache, if we have one
    if self.cache is not None:
        self.cache.set(post_data, reply)

    return reply
def __wrap_in_parser(self, wrapped_method, parse_format, *args, **kwargs):
    '''Wraps a method call in a parser.

    The parser will be looked up by the ``parse_format`` specifier. If there
    is a parser and ``kwargs['format']`` is set, it's set to ``rest``, and
    the response of the method is parsed before it's returned.
    '''
    # Find the parser, and set the format to rest if we're supposed to
    # parse it.
    if parse_format in rest_parsers and 'format' in kwargs:
        kwargs['format'] = 'rest'
    # The JSON parser expects Flickr's JSON wrapper, so the request must
    # actually ask for the json format rather than rest.
    if parse_format == "json":
        kwargs['format'] = 'json'

    LOG.debug('Wrapping call %s(self, %s, %s)' % (wrapped_method, args,
        kwargs))
    data = wrapped_method(*args, **kwargs)

    # Just return if we have no parser
    if parse_format not in rest_parsers:
        return data

    # Return the parsed data
    parser = rest_parsers[parse_format]
    return parser(self, data)
def auth_url(self, perms, frob):
    """Return the authorization URL to get a token.

    This is the URL the app will launch a browser toward if it
    needs a new token.

    perms
        "read", "write", or "delete"
    frob
        picked up from an earlier call to FlickrAPI.auth_getFrob()
    """
    params = {
        "api_key": self.api_key,
        "frob": frob,
        "perms": perms,
    }
    encoded = self.encode_and_sign(params)
    return "http://%s%s?%s" % (self.flickr_host,
                               self.flickr_auth_form,
                               encoded)
def web_login_url(self, perms):
    '''Returns the web login URL to forward web users to.

    perms
        "read", "write", or "delete"
    '''
    params = {
        "api_key": self.api_key,
        "perms": perms,
    }
    encoded = self.encode_and_sign(params)
    return "http://%s%s?%s" % (self.flickr_host,
                               self.flickr_auth_form,
                               encoded)
def __extract_upload_response_format(self, kwargs):
    '''Returns the response format given in kwargs['format'], or
    the default format if there is no such key.

    If kwargs contains 'format', it is removed from kwargs.

    If the format isn't compatible with Flickr's upload response
    type, a FlickrError exception is raised.
    '''
    format = kwargs.get('format', self.default_format)

    # Uploads only return Flickr's own XML, so the format must either be
    # plain 'rest' or one we can parse ourselves.
    supported = format == 'rest' or format in rest_parsers
    if not supported:
        raise FlickrError('Format %s not supported for uploading '
                          'photos' % format)

    # The format shouldn't be used in the request to Flickr.
    if 'format' in kwargs:
        del kwargs['format']

    return format
def upload(self, filename, callback=None, **kwargs):
    """Upload a file to flickr.

    Be extra careful you spell the parameters correctly, or you will
    get a rather cryptic "Invalid Signature" error on the upload!

    Supported parameters:

    filename
        name of a file to upload
    callback
        method that gets progress reports
    title
        title of the photo
    description
        description a.k.a. caption of the photo
    tags
        space-delimited list of tags, ``'''tag1 tag2 "long
        tag"'''``
    is_public
        "1" or "0" for a public resp. private photo
    is_friend
        "1" or "0" whether friends can see the photo while it's
        marked as private
    is_family
        "1" or "0" whether family can see the photo while it's
        marked as private
    content_type
        Set to "1" for Photo, "2" for Screenshot, or "3" for Other.
    hidden
        Set to "1" to keep the photo in global search results, "2"
        to hide from public searches.
    format
        The response format. You can only choose between the
        parsed responses or 'rest' for plain REST.

    The callback method should take two parameters:
    ``def callback(progress, done)``

    Progress is a number between 0 and 100, and done is a boolean
    that's true only when the upload is done.
    """
    # Uploading and replacing share the same multipart-form machinery;
    # only the target form URL differs.
    return self.__upload_to_form(self.flickr_upload_form,
                                 filename,
                                 callback,
                                 **kwargs)
def replace(self, filename, photo_id, callback=None, **kwargs):
    """Replace an existing photo.

    Supported parameters:

    filename
        name of a file to upload
    photo_id
        the ID of the photo to replace
    callback
        method that gets progress reports
    format
        The response format. You can only choose between the
        parsed responses or 'rest' for plain REST. Defaults to the
        format passed to the constructor.

    The callback parameter has the same semantics as described in the
    ``upload`` function.
    """
    # Guard clause: a missing photo_id would silently upload a new photo.
    if not photo_id:
        raise IllegalArgumentException("photo_id must be specified")

    params = dict(kwargs)
    params['photo_id'] = photo_id
    return self.__upload_to_form(self.flickr_replace_form,
                                 filename,
                                 callback,
                                 **params)
def __upload_to_form(self, form_url, filename, callback, **kwargs):
    '''Uploads a photo - can be used to either upload a new photo
    or replace an existing one.

    form_url must be either ``FlickrAPI.flickr_replace_form`` or
    ``FlickrAPI.flickr_upload_form``.

    Requires an authentication token to be present in the token cache.
    '''
    if not filename:
        raise IllegalArgumentException("filename must be specified")
    if not self.token_cache.token:
        raise IllegalArgumentException("Authentication is required")

    # Figure out the response format (also removes 'format' from kwargs
    # so it isn't sent to Flickr).
    format = self.__extract_upload_response_format(kwargs)

    # Update the arguments with the ones the user won't have to supply
    arguments = {'auth_token': self.token_cache.token,
                 'api_key': self.api_key}
    arguments.update(kwargs)

    # Convert to UTF-8 if an argument is an Unicode string; signing
    # below requires byte strings.
    kwargs = make_utf8(arguments)

    if self.secret:
        kwargs["api_sig"] = self.sign(kwargs)
    url = "http://%s%s" % (self.flickr_host, form_url)

    # construct POST data
    body = Multipart()

    for arg, value in kwargs.iteritems():
        part = Part({'name': arg}, value)
        body.attach(part)

    # The file itself is attached last, under the fixed field name 'photo'.
    filepart = FilePart({'name': 'photo'}, filename, 'image/jpeg')
    body.attach(filepart)

    return self.__wrap_in_parser(self.__send_multipart, format,
                                 url, body, callback)
def __send_multipart(self, url, body, progress_callback=None):
    '''Sends a Multipart object to an URL.

    Returns the resulting unparsed XML from Flickr.
    '''
    LOG.debug("Uploading to %s" % url)
    request = urllib2.Request(url)
    request.add_data(str(body))

    (header, value) = body.header()
    request.add_header(header, value)

    if not progress_callback:
        # Just use urllib2 if there is no progress callback
        # function
        response = urllib2.urlopen(request)
        return response.read()

    def __upload_callback(percentage, done, seen_header=[False]):
        '''Filters out the progress report on the HTTP header'''
        # NOTE: seen_header is a mutable default on purpose; it acts as
        # per-closure state shared between calls.

        # Call the user's progress callback when we've filtered
        # out the HTTP header
        if seen_header[0]:
            return progress_callback(percentage, done)

        # Remember the first time we hit 'done'.
        if done:
            seen_header[0] = True

    response = reportinghttp.urlopen(request, __upload_callback)
    return response.read()
def validate_frob(self, frob, perms):
    '''Lets the user validate the frob by launching a browser to
    the Flickr website.
    '''
    url = self.auth_url(perms, frob)
    try:
        browser = webbrowser.get()
    except webbrowser.Error:
        # Fall back to the $BROWSER environment variable when the
        # standard browser lookup fails; re-raise if that isn't set.
        if 'BROWSER' not in os.environ:
            raise
        browser = webbrowser.GenericBrowser(os.environ['BROWSER'])
    browser.open(url, True, True)
def get_token_part_one(self, perms="read", auth_callback=None):
    """Get a token either from the cache, or make a new one from
    the frob.

    This first attempts to find a token in the user's token cache
    on disk. If that token is present and valid, it is returned by
    the method.

    If that fails (or if the token is no longer valid based on
    flickr.auth.checkToken) a new frob is acquired. If an auth_callback
    method has been specified it will be called. Otherwise the frob is
    validated by having the user log into flickr (with a browser).

    To get a proper token, follow these steps:
        - Store the result value of this method call
        - Give the user a way to signal the program that he/she
          has authorized it, for example show a button that can be
          pressed.
        - Wait for the user to signal the program that the
          authorization was performed, but only if there was no
          cached token.
        - Call flickrapi.get_token_part_two(...) and pass it the
          result value you stored.

    The newly minted token is then cached locally for the next
    run.

    perms
        "read", "write", or "delete"
    auth_callback
        method to be called if authorization is needed. When not
        passed, ``self.validate_frob(...)`` is called. You can
        call this method yourself from the callback method too.
        If authorization should be blocked, pass
        ``auth_callback=False``.

        The auth_callback method should take ``(frob, perms)`` as
        parameters.

    An example::

        (token, frob) = flickr.get_token_part_one(perms='write')
        if not token: raw_input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))

    Also take a look at ``authenticate_console(perms)``.
    """
    # Check our auth_callback parameter for correctness before we
    # do anything
    authenticate = self.validate_frob
    if auth_callback is not None:
        if hasattr(auth_callback, '__call__'):
            # use the provided callback function
            authenticate = auth_callback
        elif auth_callback is False:
            # Authorization is explicitly blocked by the caller.
            authenticate = None
        else:
            # Any non-callable non-False value is invalid
            raise ValueError('Invalid value for auth_callback: %s'
                             % auth_callback)

    # see if we have a saved token
    token = self.token_cache.token
    frob = None

    # see if it's valid
    if token:
        LOG.debug("Trying cached token '%s'" % token)
        try:
            rsp = self.auth_checkToken(auth_token=token, format='xmlnode')

            # see if we have enough permissions; a token with fewer
            # permissions than requested is discarded.
            tokenPerms = rsp.auth[0].perms[0].text
            if tokenPerms == "read" and perms != "read": token = None
            elif tokenPerms == "write" and perms == "delete": token = None
        except FlickrError:
            LOG.debug("Cached token invalid")
            self.token_cache.forget()
            token = None

    # get a new token if we need one
    if not token:
        # If we can't authenticate, it's all over.
        if not authenticate:
            raise FlickrError('Authentication required but '
                              'blocked using auth_callback=False')

        # get the frob
        LOG.debug("Getting frob for new token")
        rsp = self.auth_getFrob(auth_token=None, format='xmlnode')
        frob = rsp.frob[0].text

        authenticate(frob, perms)

    return (token, frob)
def get_token_part_two(self, token_and_frob):
    """Part two of getting a token, see ``get_token_part_one(...)`` for details."""
    # Callers pass the (token, frob) pair returned by part one.
    (token, frob) = token_and_frob

    if token:
        # A valid token was already obtained; just remember it.
        LOG.debug("get_token_part_two: no need, token already there")
        self.token_cache.token = token
        return token

    LOG.debug("get_token_part_two: getting a new token for frob '%s'" % frob)
    return self.get_token(frob)
def get_token(self, frob):
    '''Gets the token given a certain frob. Used by ``get_token_part_two`` and
    by the web authentication method.
    '''
    # Exchange the frob for a fresh token.
    response = self.auth_getToken(frob=frob, auth_token=None, format='xmlnode')
    new_token = response.auth[0].token[0].text
    LOG.debug("get_token: new token '%s'" % new_token)

    # Remember the token so the next run can skip authentication.
    self.token_cache.token = new_token
    return new_token
def authenticate_console(self, perms='read', auth_callback=None):
    '''Performs the authentication, assuming a console program.

    Gets the token, if needed starts the browser and waits for the user to
    press ENTER before continuing.

    See ``get_token_part_one(...)`` for an explanation of the
    parameters.
    '''
    token, frob = self.get_token_part_one(perms, auth_callback)
    if not token:
        raw_input("Press ENTER after you authorized this program")
    self.get_token_part_two((token, frob))
@require_format('etree')
def __data_walker(self, method, **params):
    '''Calls 'method' with page=0, page=1 etc. until the total
    number of pages has been visited. Yields the photos
    returned.

    Assumes that ``method(page=n, **params).findall('*/photos')``
    results in a list of photos, and that the toplevel element of
    the result contains a 'pages' attribute with the total number
    of pages.
    '''
    page = 1
    total = 1   # We don't know that yet, update when needed

    while page <= total:
        # Fetch a single page of photos
        LOG.debug('Calling %s(page=%i of %i, %s)' %
                  (method.func_name, page, total, params))
        rsp = method(page=page, **params)

        # The first child carries the 'pages' attribute; refresh the
        # total on every iteration in case it changes mid-walk.
        photoset = rsp.getchildren()[0]
        total = int(photoset.get('pages'))

        photos = rsp.findall('*/photo')

        # Yield each photo
        for photo in photos:
            yield photo

        # Ready to get the next page
        page += 1
@require_format('etree')
def walk_set(self, photoset_id, per_page=50, **kwargs):
    '''walk_set(self, photoset_id, per_page=50, ...) -> \
            generator, yields each photo in a single set.

    :Parameters:
        photoset_id
            the photoset ID
        per_page
            the number of photos that are fetched in one call to
            Flickr.

    Other arguments can be passed, as documented in the
    flickr.photosets.getPhotos_ API call in the Flickr API
    documentation, except for ``page`` because all pages will be
    returned eventually.

    .. _flickr.photosets.getPhotos:
        http://www.flickr.com/services/api/flickr.photosets.getPhotos.html

    Uses the ElementTree format, incompatible with other formats.
    '''
    # Delegate the actual paging to the generic data walker.
    return self.__data_walker(self.photosets_getPhotos,
                              photoset_id=photoset_id,
                              per_page=per_page,
                              **kwargs)
@require_format('etree')
def walk(self, per_page=50, **kwargs):
    '''walk(self, user_id=..., tags=..., ...) -> generator, \
            yields each photo in a search query result

    Accepts the same parameters as flickr.photos.search_ API call,
    except for ``page`` because all pages will be returned
    eventually.

    .. _flickr.photos.search:
        http://www.flickr.com/services/api/flickr.photos.search.html

    Also see `walk_set`.
    '''
    # Delegate the actual paging to the generic data walker.
    return self.__data_walker(self.photos_search,
                              per_page=per_page,
                              **kwargs)
def set_log_level(level):
    '''Sets the log level of the logger used by the FlickrAPI module.

    >>> import flickrapi
    >>> import logging
    >>> flickrapi.set_log_level(logging.INFO)
    '''
    # Imported here to avoid a circular import at module load time.
    import flickrapi.tokencache

    # Adjust both this module's logger and the token cache's logger.
    LOG.setLevel(level)
    flickrapi.tokencache.LOG.setLevel(level)
# Run this module's doctests when executed directly.
# (Python 2 print-statement syntax.)
if __name__ == "__main__":
    print "Running doctests"
    import doctest
    doctest.testmod()
    print "Tests OK"
| |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The workflow to manipulate an AlertGroup.
We want to separate the workflow from data model. So workflow
is used to implement all AlertGroup's state transitions.
A typical workflow includes these steps:
- Associate anomalies to an AlertGroup
- Update related issues
- Trigger auto-triage if necessary
- Trigger auto-bisection if necessary
- Manage an AlertGroup's lifecycle
`AlertGroupWorkflow(group).Process()` is enough for most of use cases.
But it provides the ability to mock any input and any service, which makes
testing easier and we can have a more predictable behaviour.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import datetime
import itertools
import jinja2
import logging
import os
import six
from google.appengine.ext import ndb
from dashboard import pinpoint_request
from dashboard import sheriff_config_client
from dashboard import revision_info_client
from dashboard.common import file_bug
from dashboard.common import utils
from dashboard.models import alert_group
from dashboard.models import anomaly
from dashboard.models import subscription
from dashboard.services import crrev_service
from dashboard.services import gitiles_service
from dashboard.services import issue_tracker_service
from dashboard.services import pinpoint_service
# Templates used for rendering issue contents
_TEMPLATE_LOADER = jinja2.FileSystemLoader(
    searchpath=os.path.join(os.path.dirname(os.path.realpath(__file__))))
_TEMPLATE_ENV = jinja2.Environment(loader=_TEMPLATE_LOADER)
# Issue title is rendered from an inline template; all other templates
# are loaded from .j2 files next to this module.
_TEMPLATE_ISSUE_TITLE = jinja2.Template(
    '[{{ group.subscription_name }}]: '
    '{{ regressions|length }} regressions in {{ group.name }}')
_TEMPLATE_ISSUE_CONTENT = _TEMPLATE_ENV.get_template(
    'alert_groups_bug_description.j2')
_TEMPLATE_ISSUE_COMMENT = _TEMPLATE_ENV.get_template(
    'alert_groups_bug_comment.j2')
_TEMPLATE_REOPEN_COMMENT = _TEMPLATE_ENV.get_template('reopen_issue_comment.j2')
_TEMPLATE_AUTO_BISECT_COMMENT = _TEMPLATE_ENV.get_template(
    'auto_bisect_comment.j2')
_TEMPLATE_GROUP_WAS_MERGED = _TEMPLATE_ENV.get_template(
    'alert_groups_merge_bug_comment.j2')

# Waiting 7 days to gather more potential alerts. Just choose a long
# enough time and all alerts arrive after archived shouldn't be silent
# merged.
_ALERT_GROUP_ACTIVE_WINDOW = datetime.timedelta(days=7)

# (2020-05-01) Only ~62% issues' alerts are triggered in one hour.
# But we don't want to wait all these long tail alerts finished.
# 20 minutes are enough for a single bot.
#
# SELECT APPROX_QUANTILES(diff, 100) as percentiles
# FROM (
#   SELECT TIMESTAMP_DIFF(MAX(timestamp), MIN(timestamp), MINUTE) as diff
#   FROM chromeperf.chromeperf_dashboard_data.anomalies
#   WHERE 'Chromium Perf Sheriff' IN UNNEST(subscription_names)
#         AND bug_id IS NOT NULL AND timestamp > '2020-03-01'
#   GROUP BY bug_id
# )
_ALERT_GROUP_TRIAGE_DELAY = datetime.timedelta(minutes=20)

# The score is based on overall 60% reproduction rate of pinpoint bisection.
_ALERT_GROUP_DEFAULT_SIGNAL_QUALITY_SCORE = 0.6
class SignalQualityScore(ndb.Model):
  """Datastore model holding a signal quality score and its update time."""
  # Reproduction-rate-based score (see
  # _ALERT_GROUP_DEFAULT_SIGNAL_QUALITY_SCORE for the fallback value).
  score = ndb.FloatProperty()
  updated_time = ndb.DateTimeProperty()
class InvalidPinpointRequest(Exception):
  """Raised when a pinpoint request is invalid."""
  pass
class AlertGroupWorkflow(object):
"""Workflow used to manipulate the AlertGroup.
Workflow will assume the group passed from caller is same as the group in
datastore. It may update the group in datastore multiple times during the
process.
"""
class Config(
    collections.namedtuple('WorkflowConfig',
                           ('active_window', 'triage_delay'))):
  """Workflow timing knobs: archive window and delay before triage."""
  __slots__ = ()
class GroupUpdate(
    collections.namedtuple('GroupUpdate',
                           ('now', 'anomalies', 'issue', 'canonical_group'))):
  """Input snapshot for Process(): timestamp, anomalies, issue json and
  the canonical AlertGroup (if any)."""
  __slots__ = ()

  def __new__(cls, now, anomalies, issue, canonical_group=None):
    # canonical_group defaults to None so 3-argument callers keep working.
    return super(AlertGroupWorkflow.GroupUpdate,
                 cls).__new__(cls, now, anomalies, issue, canonical_group)
class BenchmarkDetails(
    collections.namedtuple('BenchmarkDetails',
                           ('name', 'owners', 'regressions', 'info_blurb'))):
  """Per-benchmark data used when composing issue updates."""
  __slots__ = ()
class BugUpdateDetails(
    collections.namedtuple('BugUpdateDetails',
                           ('components', 'cc', 'labels'))):
  """Components, cc list and labels to apply to a monorail issue update."""
  __slots__ = ()
def __init__(
    self,
    group,
    config=None,
    sheriff_config=None,
    issue_tracker=None,
    pinpoint=None,
    crrev=None,
    gitiles=None,
    revision_info=None,
    service_account=None,
):
  """Initializes the workflow for a single AlertGroup.

  Every service argument is injectable for testing; each one falls back
  to the production client/module when not supplied.
  """
  self._group = group
  self._config = config or self.Config(
      active_window=_ALERT_GROUP_ACTIVE_WINDOW,
      triage_delay=_ALERT_GROUP_TRIAGE_DELAY,
  )
  self._sheriff_config = (
      sheriff_config or sheriff_config_client.GetSheriffConfigClient())
  self._issue_tracker = issue_tracker or _IssueTracker()
  self._pinpoint = pinpoint or pinpoint_service
  self._crrev = crrev or crrev_service
  self._gitiles = gitiles or gitiles_service
  self._revision_info = revision_info or revision_info_client
  # NOTE: this is a callable returning the account email, not the email
  # itself (it is invoked as self._service_account() later).
  self._service_account = service_account or utils.ServiceAccountEmail
def _FindCanonicalGroup(self, issue):
  """Finds the canonical issue group if any.

  Args:
    issue: Monorail API issue json. If the issue has any comments the json
      should contain additional 'comments' key with the list of Monorail API
      comments jsons.

  Returns:
    AlertGroup object or None if the issue is not duplicate, canonical issue
    has no corresponding group or duplicate chain forms a loop.
  """
  if issue.get('status') != issue_tracker_service.STATUS_DUPLICATE:
    return None

  # Take the most recent 'mergedInto' update; it names the issue this one
  # was marked a duplicate of.
  merged_into = None
  latest_id = 0
  for comment in issue.get('comments', []):
    if comment['updates'].get('mergedInto') and comment['id'] >= latest_id:
      merged_into = int(comment['updates'].get('mergedInto'))
      latest_id = comment['id']
  if not merged_into:
    return None
  logging.info('Found canonical issue for the groups\' issue: %d',
               merged_into)

  query = alert_group.AlertGroup.query(
      alert_group.AlertGroup.active == True,
      # It is impossible to merge bugs from different projects in monorail.
      # So the canonical group bug is guaranteed to have the same project.
      alert_group.AlertGroup.bug.project == self._group.bug.project,
      alert_group.AlertGroup.bug.bug_id == merged_into)
  query_result = query.fetch(limit=1)
  if not query_result:
    return None

  canonical_group = query_result[0]
  visited = set()
  # Follow the chain of canonical groups to its end, guarding against
  # loops along the way.
  while canonical_group.canonical_group:
    visited.add(canonical_group.key)
    next_group_key = canonical_group.canonical_group
    # Visited check is just precaution.
    # If it is true - the system previously failed to prevent loop creation.
    if next_group_key == self._group.key or next_group_key in visited:
      logging.warning(
          'Alert group auto merge failed. Found a loop while '
          'searching for a canonical group for %r', self._group)
      return None
    canonical_group = next_group_key.get()

  logging.info('Found canonical group: %s', canonical_group.key.string_id())
  return canonical_group
def _FindDuplicateGroups(self):
  """Fetches active alert groups marked as duplicates of this group."""
  duplicates_query = alert_group.AlertGroup.query(
      alert_group.AlertGroup.active == True,
      alert_group.AlertGroup.canonical_group == self._group.key)
  return duplicates_query.fetch()
def _FindRelatedAnomalies(self, groups):
  """Fetches all anomalies belonging to any of the given groups."""
  group_keys = [g.key for g in groups]
  anomalies_query = anomaly.Anomaly.query(
      anomaly.Anomaly.groups.IN(group_keys))
  return anomalies_query.fetch()
def _PrepareGroupUpdate(self):
  """Prepares default input for the workflow Process

  Returns:
    GroupUpdate object that contains list of related anomalies,
    Monorail API issue json and canonical AlertGroup if any.
  """
  duplicate_groups = self._FindDuplicateGroups()
  # Anomalies of duplicate groups are processed together with this
  # group's own anomalies.
  anomalies = self._FindRelatedAnomalies([self._group] + duplicate_groups)
  now = datetime.datetime.utcnow()
  issue = None
  canonical_group = None
  # Only groups at or past triage have an associated issue to fetch.
  if self._group.status in {
      self._group.Status.triaged, self._group.Status.bisected,
      self._group.Status.closed
  }:
    issue = self._issue_tracker.GetIssue(
        self._group.bug.bug_id, project=self._group.bug.project)
    # GetIssueComments doesn't work with empty project id so we have to
    # manually replace it with 'chromium'.
    issue['comments'] = self._issue_tracker.GetIssueComments(
        self._group.bug.bug_id, project=self._group.bug.project or 'chromium')
    canonical_group = self._FindCanonicalGroup(issue)
  return self.GroupUpdate(now, anomalies, issue, canonical_group)
def Process(self, update=None):
  """Process the workflow.

  The workflow promises to only depend on the provided update and injected
  dependencies. The workflow steps will always be reproducible if all the
  inputs are the same.

  Process will always update the group and store once the steps have
  completed.

  The update argument can be a prepared GroupUpdate instance or None (if
  None, then Process will prepare the update itself).

  Returns the key for the associated group when the workflow was
  initialized.
  """
  logging.info('Processing workflow for group %s', self._group.key)
  update = update or self._PrepareGroupUpdate()
  logging.info('%d anomalies', len(update.anomalies))

  # TODO(crbug.com/1240370): understand why Datastore query may return empty
  # anomalies list.
  if (not update.anomalies and self._group.anomalies
      and self._group.group_type != alert_group.AlertGroup.Type.reserved):
    logging.error('No anomalies detected. Skipping this run.')
    return self._group.key

  # Annotate every anomaly with its subscription-driven automation flags
  # before we start processing the group.
  for a in update.anomalies:
    subscriptions, _ = self._sheriff_config.Match(
        a.test.string_id(), check=True)
    a.subscriptions = subscriptions
    matching_subs = [
        s for s in subscriptions if s.name == self._group.subscription_name
    ]
    a.auto_triage_enable = any(s.auto_triage_enable for s in matching_subs)
    if a.auto_triage_enable:
      logging.info('auto_triage_enable for %s due to subscription: %s',
                   a.test.string_id(),
                   [s.name for s in matching_subs if s.auto_triage_enable])
    a.auto_merge_enable = any(s.auto_merge_enable for s in matching_subs)
    if a.auto_merge_enable:
      logging.info('auto_merge_enable for %s due to subscription: %s',
                   a.test.string_id(),
                   [s.name for s in matching_subs if s.auto_merge_enable])
    a.auto_bisect_enable = any(s.auto_bisect_enable for s in matching_subs)
    a.relative_delta = (
        abs(a.absolute_delta / float(a.median_before_anomaly))
        if a.median_before_anomaly != 0. else float('Inf'))

  added = self._UpdateAnomalies(update.anomalies)

  if update.issue:
    group_merged = self._UpdateCanonicalGroup(update.anomalies,
                                              update.canonical_group)
    self._UpdateStatus(update.issue)
    self._UpdateAnomaliesIssues(update.anomalies, update.canonical_group)
    # Current group is a duplicate.
    if self._group.canonical_group is not None:
      if group_merged:
        logging.info('Merged group %s into group %s',
                     self._group.key.string_id(),
                     update.canonical_group.key.string_id())
        self._FileDuplicatedNotification(update.canonical_group)
      self._UpdateDuplicateIssue(update.anomalies, added)
      assert (self._group.status == self._group.Status.closed), (
          'The issue is closed as duplicate (\'state\' is \'closed\'). '
          'However the groups\' status doesn\'t match the issue status')
    elif self._UpdateIssue(update.issue, update.anomalies, added):
      # Only operate on alert group if nothing updated to prevent flooding
      # monorail if some operations keep failing.
      return self._CommitGroup()

  group = self._group
  if group.updated + self._config.active_window <= update.now:
    self._Archive()
  elif group.created + self._config.triage_delay <= update.now and (
      group.status in {group.Status.untriaged}):
    logging.info('created: %s, triage_delay: %s, now: %s, status: %s',
                 group.created, self._config.triage_delay, update.now,
                 group.status)
    self._TryTriage(update.now, update.anomalies)
  elif group.status in {group.Status.triaged}:
    self._TryBisect(update)
  return self._CommitGroup()
def _CommitGroup(self):
  """Persists the group to Datastore; returns its key."""
  return self._group.put()
def _UpdateAnomalies(self, anomalies):
  """Replaces the group's anomaly keys; returns anomalies new to the group."""
  previous_keys = self._group.anomalies
  newly_added = [a for a in anomalies if a.key not in previous_keys]
  self._group.anomalies = [a.key for a in anomalies]
  return newly_added
def _UpdateStatus(self, issue):
  """Syncs the group's status with the monorail issue state."""
  issue_is_closed = issue.get('state') == 'closed'
  if issue_is_closed:
    self._group.status = self._group.Status.closed
  elif self._group.status == self._group.Status.closed:
    # The issue was reopened externally; fall back to triaged.
    self._group.status = self._group.Status.triaged
def _UpdateCanonicalGroup(self, anomalies, canonical_group):
  """Points this group at its canonical group (or detaches it).

  Returns True only when the group was newly merged into canonical_group.
  """
  # If canonical_group is None, self._group will be separated from its
  # canonical group. Since we only rely on _group.canonical_group for
  # determining duplicate status, setting canonical_group to None will
  # separate the groups. Anomalies that were added to the canonical group
  # during the merged period can't be removed.
  if canonical_group is None:
    self._group.canonical_group = None
    return False
  # Only merge groups if there is at least one anomaly that allows merge.
  if (self._group.canonical_group != canonical_group.key
      and any(a.auto_merge_enable for a in anomalies)):
    self._group.canonical_group = canonical_group.key
    return True
  return False
def _UpdateAnomaliesIssues(self, anomalies, canonical_group):
  """Assigns bug/project ids to auto-triaged anomalies and persists them."""
  for a in anomalies:
    if not a.auto_triage_enable:
      continue
    if canonical_group is not None and a.auto_merge_enable:
      # Mergeable anomalies point at the canonical group's bug.
      a.project_id = canonical_group.project_id
      a.bug_id = canonical_group.bug.bug_id
    elif a.bug_id is None:
      # Never overwrite an anomaly's existing bug assignment.
      a.project_id = self._group.project_id
      a.bug_id = self._group.bug.bug_id

  # Write back bug_id to anomalies. We can't do it when anomaly is
  # found because group may be updating at the same time.
  ndb.put_multi(anomalies)
def _UpdateIssue(self, issue, anomalies, added):
  """Update the status of the monorail issue.

  Returns True if the issue was changed.
  """
  # Check whether all the anomalies associated have been marked recovered.
  if all(a.recovered for a in anomalies if not a.is_improvement):
    if issue.get('state') == 'open':
      self._CloseBecauseRecovered()
    return True

  new_regressions, subscriptions = self._GetRegressions(added)
  all_regressions, _ = self._GetRegressions(anomalies)

  # Only update issue if there is at least one regression
  if not new_regressions:
    return False

  # Scan comments newest-first; the most recent status-changing comment
  # decides whether the service account (auto-bisect) closed the issue.
  closed_by_pinpoint = False
  for c in sorted(
      issue.get('comments') or [], key=lambda c: c["id"], reverse=True):
    if c.get('updates', {}).get('status') in ('WontFix', 'Fixed', 'Verified',
                                              'Invalid', 'Duplicate', 'Done'):
      closed_by_pinpoint = (c.get('author') == self._service_account())
      break

  has_new_regression = any(a.auto_bisect_enable
                           for a in anomalies
                           if not a.is_improvement and not a.recovered)

  # Reopen only when the auto-bisect account closed the issue and a new,
  # unrecovered regression appeared; otherwise just post an update.
  if (issue.get('state') == 'closed' and closed_by_pinpoint
      and has_new_regression):
    self._ReopenWithNewRegressions(all_regressions, new_regressions,
                                   subscriptions)
  else:
    self._FileNormalUpdate(all_regressions, new_regressions, subscriptions)

  return True
def _UpdateDuplicateIssue(self, anomalies, added):
  """Post an update on the issue this group was merged into as a duplicate."""
  newly_added, subscriptions = self._GetRegressions(added)
  every_regression, _ = self._GetRegressions(anomalies)
  # Nothing worth reporting unless at least one new regression arrived.
  if not newly_added:
    return
  self._FileNormalUpdate(
      every_regression,
      newly_added,
      subscriptions,
      new_regression_notification=False)
def _CloseBecauseRecovered(self):
  """Close the group's issue because every regression has recovered."""
  self._issue_tracker.AddBugComment(
      self._group.bug.bug_id,
      'All regressions for this issue have been marked recovered; closing.',
      status='WontFix',
      # Pass labels as a list for consistency with every other AddBugComment
      # call site in this module (a bare string risks being iterated
      # character-by-character by list-handling code).
      labels=['Chromeperf-Auto-Closed'],
      project=self._group.project_id,
      send_email=False,
  )
def _ReopenWithNewRegressions(self, all_regressions, added, subscriptions):
  """Reopen the issue, reporting the regressions that arrived since closing."""
  components, cc, _ = self._ComputeBugUpdate(subscriptions, added)
  reopen_comment = _TEMPLATE_REOPEN_COMMENT.render(self._GetTemplateArgs(added))
  new_title = _TEMPLATE_ISSUE_TITLE.render(
      self._GetTemplateArgs(all_regressions))
  self._issue_tracker.AddBugComment(
      self._group.bug.bug_id,
      reopen_comment,
      summary=new_title,
      components=components,
      labels=['Chromeperf-Auto-Reopened'],
      status='Unconfirmed',
      cc_list=cc,
      project=self._group.project_id,
      send_email=False,
  )
def _FileNormalUpdate(self,
                      all_regressions,
                      added,
                      subscriptions,
                      new_regression_notification=True):
  """Post a routine update (title, labels, cc, optional comment) on the issue."""
  title = _TEMPLATE_ISSUE_TITLE.render(self._GetTemplateArgs(all_regressions))
  # Render a comment only when the caller wants the new regressions announced.
  body = (_TEMPLATE_ISSUE_COMMENT.render(self._GetTemplateArgs(added))
          if new_regression_notification else None)
  components, cc, labels = self._ComputeBugUpdate(subscriptions, added)
  self._issue_tracker.AddBugComment(
      self._group.bug.bug_id,
      body,
      summary=title,
      labels=labels,
      cc_list=cc,
      components=components,
      project=self._group.project_id,
      send_email=False,
  )
def _FileDuplicatedNotification(self, canonical_group):
  """Leave a comment noting that this group was merged into another group."""
  merged_note = _TEMPLATE_GROUP_WAS_MERGED.render({
      'group': self._group,
      'canonical_group': canonical_group,
  })
  self._issue_tracker.AddBugComment(
      self._group.bug.bug_id,
      merged_note,
      project=self._group.project_id,
      send_email=False,
  )
def _GetRegressions(self, anomalies):
  """Split anomalies into unrecovered regressions plus their subscriptions.

  Returns a (regressions, subscriptions) pair; subscriptions are
  de-duplicated by name, with later anomalies' entries winning.
  """
  regressions = []
  subscriptions_by_name = {}
  for anomaly_entity in anomalies:
    # Debug logging for
    # https://bugs.chromium.org/p/chromium/issues/detail?id=1223401 — detect
    # a subscription's auto_triage_enable flag being replaced by a
    # same-named subscription from a different anomaly.
    for sub in anomaly_entity.subscriptions:
      previous = subscriptions_by_name.get(sub.name)
      if previous is not None and (
          sub.auto_triage_enable != previous.auto_triage_enable):
        logging.warning('altered merged auto_triage_enable: %s', sub.name)
    subscriptions_by_name.update(
        {s.name: s for s in anomaly_entity.subscriptions})
    if not anomaly_entity.is_improvement and not anomaly_entity.recovered:
      regressions.append(anomaly_entity)
  return (regressions, list(subscriptions_by_name.values()))
@classmethod
def _GetBenchmarksFromRegressions(cls, regressions):
  """Group regressions by benchmark, collecting owner emails and info blurbs."""
  by_name = {}
  for regression in regressions:
    benchmark_name = regression.benchmark_name
    if benchmark_name not in by_name:
      # First sighting of this benchmark: capture its ownership details.
      owner_emails = []
      blurb = None
      if regression.ownership:
        owner_emails = regression.ownership.get('emails') or []
        blurb = regression.ownership.get('info_blurb') or ''
      by_name[benchmark_name] = cls.BenchmarkDetails(
          benchmark_name, list(set(owner_emails)), list(), blurb)
    by_name[benchmark_name].regressions.append(regression)
  return list(by_name.values())
def _ComputeBugUpdate(self, subscriptions, regressions):
  """Derive the issue components, cc list and labels for an update."""
  component_set = set()
  cc_set = set()
  label_set = {'Chromeperf-Auto-Triaged'}
  for sub in subscriptions:
    component_set.update(sub.bug_components)
    cc_set.update(sub.bug_cc_emails)
    label_set.update(sub.bug_labels)
  component_set |= self._GetComponentsFromRegressions(regressions)
  components = list(component_set)
  cc = list(cc_set)
  labels = list(label_set)
  # We layer on some default labels if they don't conflict with any of the
  # provided ones.
  if not any(l.startswith('Pri-') for l in labels):
    labels.append('Pri-2')
  if not any(l.startswith('Type-') for l in labels):
    labels.append('Type-Bug-Regression')
  if any(s.visibility == subscription.VISIBILITY.INTERNAL_ONLY
         for s in subscriptions):
    labels = list(set(labels) | {'Restrict-View-Google'})
  return self.BugUpdateDetails(components, cc, labels)
@staticmethod
def _GetComponentsFromRegressions(regressions):
  """Collect the set of issue components named in the regressions' ownership."""
  found = []
  for regression in regressions:
    component = regression.ownership and regression.ownership.get('component')
    if not component:
      continue
    # Ownership may carry either a single component or a list of them; only
    # the first entry of a list is used.
    if isinstance(component, list):
      found.append(component[0])
    else:
      found.append(component)
  return set(found)
def _GetTemplateArgs(self, regressions):
  """Build the context dict used to render issue titles and comments.

  Note: sorts `regressions` in place by descending relative delta.
  """
  regressions.sort(key=lambda r: r.relative_delta, reverse=True)
  return {
      # Current AlertGroup used for rendering templates
      'group': self._group,
      # Performance regressions sorted by relative difference
      'regressions': regressions,
      # Benchmarks occurring in the regressions: names, owners, info blurbs.
      'benchmarks': self._GetBenchmarksFromRegressions(regressions),
      # Parse the real unit (remove suffixes like smallerIsBetter).
      'parse_unit': lambda s: (s or '').rsplit('_', 1)[0],
  }
def _Archive(self):
  """Mark this alert group as inactive."""
  self._group.active = False
def _TryTriage(self, now, anomalies):
  """File an issue for this group and link its anomalies to the new bug."""
  bug, anomalies = self._FileIssue(anomalies)
  if not bug:
    return
  # Update the issue associated with this group, before we continue.
  self._group.bug = bug
  self._group.updated = now
  self._group.status = self._group.Status.triaged
  self._CommitGroup()
  # Link the bug to auto-triage enabled anomalies.
  for a in anomalies:
    if a.bug_id is None and a.auto_triage_enable:
      a.project_id = bug.project
      a.bug_id = bug.bug_id
  ndb.put_multi(anomalies)
def _AssignIssue(self, regression):
  """Try to assign the group's issue to the author of the culprit CL.

  Returns True when a culprit commit could be resolved, False otherwise.
  """
  culprit = file_bug.GetCommitInfoForAlert(regression, self._crrev,
                                           self._gitiles)
  if not culprit:
    return False
  assert self._group.bug is not None
  file_bug.AssignBugToCLAuthor(
      self._group.bug.bug_id,
      culprit,
      self._issue_tracker,
      labels=['Chromeperf-Auto-Assigned'],
      project=self._group.project_id)
  return True
def _TryBisect(self, update):
  """Attempt to start an auto-bisection for the group's best regression."""
  # Respect the manual opt-out label on the issue.
  if (update.issue
      and 'Chromeperf-Auto-BisectOptOut' in update.issue.get('labels')):
    return
  try:
    regressions, _ = self._GetRegressions(update.anomalies)
    regression = self._SelectAutoBisectRegression(regressions)
    # Do nothing if none of the regressions should be auto-bisected.
    if regression is None:
      return
    # We'll only bisect a range if the range covers more than one point.
    if regression.start_revision == regression.end_revision:
      # At this point we've decided that the range of the commits is a single
      # point, so we don't bother bisecting.
      if not self._AssignIssue(regression):
        self._UpdateWithBisectError(
            update.now, 'Cannot find assignee for regression at %s.' %
            (regression.end_revision,))
      else:
        self._group.updated = update.now
        self._group.status = self._group.Status.bisected
        self._CommitGroup()
      return
    job_id = self._StartPinpointBisectJob(regression)
  except InvalidPinpointRequest as error:
    self._UpdateWithBisectError(update.now, error)
    return
  # Update the issue associated with this group, before we continue.
  self._group.bisection_ids.append(job_id)
  self._group.updated = update.now
  self._group.status = self._group.Status.bisected
  self._CommitGroup()
  self._issue_tracker.AddBugComment(
      self._group.bug.bug_id,
      _TEMPLATE_AUTO_BISECT_COMMENT.render(
          {'test': utils.TestPath(regression.test)}),
      labels=['Chromeperf-Auto-Bisected'],
      project=self._group.project_id,
      send_email=False,
  )
  # Record the job on the regression so the same range isn't bisected twice.
  regression.pinpoint_bisects.append(job_id)
  regression.put()
def _FileIssue(self, anomalies):
  """File a new issue for the group's regressions.

  Returns (BugInfo, anomalies) on success, (None, []) otherwise.
  """
  regressions, subscriptions = self._GetRegressions(anomalies)
  # Only file an issue if there is at least one regression.
  # We can't use subscriptions' auto_triage_enable here because it's
  # merged across anomalies.
  if not any(r.auto_triage_enable for r in regressions):
    return None, []
  auto_triage_regressions = []
  for r in regressions:
    if r.auto_triage_enable:
      auto_triage_regressions.append(r)
  logging.info('auto_triage_enabled due to %s', auto_triage_regressions)
  template_args = self._GetTemplateArgs(regressions)
  # _GetTemplateArgs sorted by descending relative delta, so [0] is the
  # largest regression; use its range for the revision info section.
  top_regression = template_args['regressions'][0]
  template_args['revision_infos'] = self._revision_info.GetRangeRevisionInfo(
      top_regression.test,
      top_regression.start_revision,
      top_regression.end_revision,
  )
  # Rendering issue's title and content
  title = _TEMPLATE_ISSUE_TITLE.render(template_args)
  description = _TEMPLATE_ISSUE_CONTENT.render(template_args)
  # Fetching issue labels, components and cc from subscriptions and owner
  components, cc, labels = self._ComputeBugUpdate(subscriptions, regressions)
  logging.info('Creating a new issue for AlertGroup %s', self._group.key)
  response = self._issue_tracker.NewBug(
      title,
      description,
      labels=labels,
      components=components,
      cc=cc,
      project=self._group.project_id)
  if 'error' in response:
    logging.warning('AlertGroup file bug failed: %s', response['error'])
    return None, []
  # Update the issue associated with this group, before we continue.
  return alert_group.BugInfo(
      project=self._group.project_id,
      bug_id=response['bug_id'],
  ), anomalies
def _StartPinpointBisectJob(self, regression):
  """Kick off a Pinpoint bisection for `regression` and return its job id.

  Raises InvalidPinpointRequest when the request is rejected or malformed.
  """
  try:
    response = self._pinpoint.NewJob(self._NewPinpointRequest(regression))
  except pinpoint_request.InvalidParamsError as e:
    six.raise_from(
        InvalidPinpointRequest('Invalid pinpoint request: %s' % (e,)), e)
  if 'jobId' not in response:
    raise InvalidPinpointRequest('Start pinpoint bisection failed: %s' %
                                 (response,))
  return response.get('jobId')
def _SelectAutoBisectRegression(self, regressions):
  """Pick the single best regression candidate for auto-bisection.

  Returns None when no regression qualifies.
  """
  # Select valid regressions for bisection:
  # 1. auto_bisect_enable
  # 2. has a valid bug_id
  # 3. hasn't started a bisection
  # 4. is not a summary metric (has story)
  regressions = [
      r for r in regressions or []
      if (r.auto_bisect_enable and r.bug_id > 0
          and not set(r.pinpoint_bisects) & set(self._group.bisection_ids)
          and r.test.get().unescaped_story_name)
  ]
  if not regressions:
    return None
  max_regression = None
  max_count = 0
  # Fetch each test's SignalQuality score; tests without a stored score fall
  # back to the default below.
  scores = ndb.get_multi(
      ndb.Key(
          'SignalQuality',
          utils.TestPath(r.test),
          'SignalQualityScore',
          '0',
      ) for r in regressions)
  scores_dict = {s.key.parent().string_id(): s.score for s in scores if s}
  def MaxRegression(x, y):
    # Return the "bigger" of two regressions: finite relative deltas are
    # preferred over infinite ones; ties are broken by (score, delta).
    if x is None or y is None:
      return x or y
    get_score = lambda a: scores_dict.get(
        utils.TestPath(a.test),
        _ALERT_GROUP_DEFAULT_SIGNAL_QUALITY_SCORE)
    if x.relative_delta == float('Inf'):
      if y.relative_delta == float('Inf'):
        return max(x, y, key=lambda a: (get_score(a), a.absolute_delta))
      return y
    if y.relative_delta == float('Inf'):
      return x
    return max(x, y, key=lambda a: (get_score(a), a.relative_delta))
  bot_name = lambda r: r.bot_name
  # Prefer the bot with the most regressions; within that bot's group, pick
  # the best single regression (>= keeps the last bot on ties).
  for _, rs in itertools.groupby(
      sorted(regressions, key=bot_name), key=bot_name):
    count = 0
    group_max = None
    for r in rs:
      count += 1
      group_max = MaxRegression(group_max, r)
    if count >= max_count:
      max_count = count
      max_regression = MaxRegression(max_regression, group_max)
  return max_regression
def _NewPinpointRequest(self, alert):
  """Build a Pinpoint bisection request for `alert`; None if unsupported."""
  start_git_hash = pinpoint_request.ResolveToGitHash(
      alert.start_revision, alert.benchmark_name, crrev=self._crrev)
  end_git_hash = pinpoint_request.ResolveToGitHash(
      alert.end_revision, alert.benchmark_name, crrev=self._crrev)
  # Pinpoint also requires you specify which isolate target to run the
  # test, so we derive that from the suite name. Eventually, this would
  # ideally be stored in a SparseDiagnostic but for now we can guess. Also,
  # Pinpoint only currently works well with Telemetry targets, so we only
  # run benchmarks that are not explicitly denylisted.
  target = pinpoint_request.GetIsolateTarget(alert.bot_name,
                                             alert.benchmark_name)
  if not target:
    return None
  magnitude = alert.median_after_anomaly - alert.median_before_anomaly
  return pinpoint_service.MakeBisectionRequest(
      test=alert.test.get(),
      commit_range=pinpoint_service.CommitRange(
          start=start_git_hash, end=end_git_hash),
      issue=anomaly.Issue(
          project_id=self._group.bug.project,
          issue_id=self._group.bug.bug_id,
      ),
      comparison_mode='performance',
      target=target,
      comparison_magnitude=magnitude,
      name='Auto-Bisection on %s/%s' % (alert.bot_name, alert.benchmark_name),
      priority=10,
      tags={
          'test_path': utils.TestPath(alert.test),
          'alert': alert.key.urlsafe(),
          'auto_bisection': 'true',
      },
  )
def _UpdateWithBisectError(self, now, error, labels=None):
  """Mark the group bisected and report the bisection failure on the issue."""
  self._group.updated = now
  self._group.status = self._group.Status.bisected
  self._CommitGroup()
  message = ('Auto-Bisection failed with the following message:\n\n'
             '%s\n\nNot retrying' % (error,))
  self._issue_tracker.AddBugComment(
      self._group.bug.bug_id,
      message,
      labels=labels if labels else ['Chromeperf-Auto-NeedsAttention'],
      project=self._group.project_id)
def _IssueTracker():
  """Get a cached IssueTracker instance."""
  # pylint: disable=protected-access
  # The client is memoized on the function object itself.
  client = getattr(_IssueTracker, '_client', None)
  if client is None:
    client = issue_tracker_service.IssueTrackerService(
        utils.ServiceAccountHttp())
    _IssueTracker._client = client
  return client
| |
import math
import struct
import time
# Single-byte command opcodes sent to the instrument.
CMD_POLL_MEASUREMENT = ord(b'p')
CMD_START_MEASUREMENT = ord(b's')
CMD_DGEN_OPTIONS = 0x80
CMD_APPLY_DGEN_OPTIONS = 0x81
CMD_RESET_INSTRUMENT = 0xA0
CMD_QUERY_INSTRUMENT = 0xA1
CMD_PROTOCOL_SET_BINARY = 0xF0
# Digital-generator output modes.
DGEN_MODE_ALWAYS_0 = 0
DGEN_MODE_ALWAYS_1 = 1
DGEN_MODE_PWM = 2
# Measurement type selectors.
MEASUREMENT_PULSE_COUNT = 0x01
MEASUREMENT_PERIOD = 0x02
MEASUREMENT_INTERVAL = 0x04
MEASUREMENT_FREQ_RATIO = 0x05
# Reply packet tags.
INFO_RESULT_CODE = 0x10
INFO_MEASUREMENT_DATA = 0x20
INFO_INSTRUMENT_INFO = 0xA1
# When True, print result codes and instrument info during communication.
DEBUG = False
# Seconds to wait for a reply packet.
TIMEOUT = 1
# Firmware protocol version this driver expects.
VERSION = 1106
class PacketIO:
    """Framed packet transport over a byte stream (e.g. a serial port).

    Wire format: 1-byte tag, 1-byte payload length, then the payload.
    """

    def __init__(self, stream):
        self.stream = stream
        self.rxBytes = b''
        # Only needed for Nucleo work-around
        self.totalRxBytes = 0

    def awaitPacket(self):
        """Return the next packet, or raise if none arrives within TIMEOUT."""
        start = time.time()
        while time.time() < start + TIMEOUT:
            packet = self.receivePacket()
            if packet:
                return packet
            else:
                # NOTE(review): this break gives up after a single failed
                # read instead of polling until TIMEOUT; presumably the
                # underlying stream's own read timeout provides the waiting.
                break
        raise Exception('awaitPacket timed out')

    def flush(self):
        self.stream.flush()

    def flushInput(self):
        """Drain any pending input bytes from the stream."""
        A_BIG_NUMBER = 10000
        self.stream.read(A_BIG_NUMBER)
        while self.stream.in_waiting:
            self.stream.read(self.stream.in_waiting)

    def receivePacket(self):
        """Return (tag, payload) or None when a full packet isn't available.

        The header is first peeked (left in the buffer) so that a partial
        packet can be retried; header + payload are consumed together once
        the whole packet has arrived.
        """
        header = self.recvall(2, False)
        if header:
            tag, length = struct.unpack('<BB', header)
            rx = self.recvall(2 + length, True)
            if rx:
                return (tag, rx[2:])
        return None

    def recvall(self, count, removeFromBuffer):
        """Return `count` buffered bytes, reading more if needed; None if short.

        When removeFromBuffer is False the bytes stay queued for later calls.
        """
        if len(self.rxBytes) < count:
            need = count - len(self.rxBytes)
            new_bytes = self.stream.read(need)
            if self.totalRxBytes == 0:
                # Throw away spurious 0x00s caused by poor hardware design
                # on Nucleo kits.
                while len(new_bytes) and new_bytes[0] == 0:
                    new_bytes = new_bytes[1:]
            self.rxBytes += new_bytes
            self.totalRxBytes += len(new_bytes)
        if len(self.rxBytes) >= count:
            to_return = self.rxBytes[0:count]
            if removeFromBuffer:
                self.rxBytes = self.rxBytes[count:]
            return to_return
        return None

    def sendPacket(self, tag, data):
        """Frame `data` under `tag` and write it out; flushes the stream."""
        if data is None:
            data = b''
        header = struct.pack('<BB', tag, len(data))
        self.stream.write(header + data)
        self.stream.flush()

    def sendPacketAndExpectResultCode(self, tag, data):
        """Send a packet and return the result code byte from the reply.

        Raises when the reply is not an INFO_RESULT_CODE packet.
        """
        self.sendPacket(tag, data)
        reply_tag, reply_data = self.awaitPacket()
        if reply_tag == INFO_RESULT_CODE and len(reply_data) >= 1:
            # Index the first byte instead of struct.unpack('<B', reply_data):
            # unpack requires the buffer to be exactly 1 byte long and would
            # raise struct.error on any longer reply despite the >= 1 guard.
            return reply_data[0]
        raise Exception('Unexpected response packet')
class FrequencyMeasurementFunction:
    """Frequency (pulse-count) measurement on a connected instrument."""

    def __init__(self, instrument):
        self.instrument = instrument

    def measure_frequency(self, gate_time=1):
        """Count pulses for `gate_time` seconds; return the frequency in Hz."""
        gate_time_ms = int(gate_time * 1000)
        assert gate_time_ms >= 1
        raw = self.instrument.doMeasurement(
            MEASUREMENT_PULSE_COUNT, struct.pack('<I', gate_time_ms))
        pulse_count, = struct.unpack('<I', raw)
        return pulse_count / gate_time

    def get_frequency_range(self):
        """Return the (min, max) measurable frequency in Hz."""
        return (0, self.instrument.get_f_cpu() / 2 - 1)

    def suggest_gate_time(self, frequency, desired_relative_error):
        """Suggest a gate time in seconds for the desired relative error."""
        max_gate_time = 25  # FIXME: this needs to be queried from device
        # for 16-bit timer: max_gate_time = 2^32 / f_cpu
        # for 32-bit timer: max_gate_time = 2^64 / f_cpu
        # However, when using nearly full range, decomposition into
        # <prescaler, cycles> may be non-trivial!
        return min(max(1 / frequency / desired_relative_error, 0.001),
                   max_gate_time)
class FrequencyRatioMeasurementFunction:
    """Frequency-ratio measurement between the instrument's two inputs."""

    def __init__(self, instrument):
        self.instrument = instrument

    def measure_frequency_ratio(self, num_periods: int):
        """Return the measured frequency ratio.

        TODO: document num_periods.
        """
        raw = self.instrument.doMeasurement(
            MEASUREMENT_FREQ_RATIO, struct.pack('<I', num_periods))
        fixed_point_ratio, = struct.unpack('<Q', raw)
        # The device reports the ratio with 16 fractional fixed-point bits.
        return fixed_point_ratio * 2.0**-16
class PeriodMeasurementFunction:
    """Period measurement on a connected instrument."""

    def __init__(self, instrument):
        self.instrument = instrument

    def measure_period(self, num_periods: int = 1):
        """Measure the input period (averaged over `num_periods`) in seconds."""
        assert num_periods > 0
        raw = self.instrument.doMeasurement(
            MEASUREMENT_PERIOD, struct.pack('<I', num_periods))
        # The reply carries two 64-bit values; only the first (the period in
        # fixed-point CPU ticks, 32 fractional bits) is used here.
        period_ticks, _ = struct.unpack('<QQ', raw)
        return period_ticks / self.instrument.get_f_cpu() * (2.0 ** -32)

    def get_frequency_range(self):
        """Return the (min, max) measurable frequency in Hz."""
        return (0.02, self.instrument.get_f_cpu() / 2 - 1)

    def suggest_num_periods(self, period, desired_relative_error):
        """Suggest how many periods to average for the desired relative error."""
        return math.ceil(
            1 / self.instrument.get_f_cpu() / period / desired_relative_error)
class PhaseMeasurementFunction:
    """Period-and-phase measurement between the instrument's two inputs."""

    def __init__(self, instrument):
        self.instrument = instrument

    def measure_period_and_phase(self, num_periods: int = 1):
        """Return (period_seconds, phase_degrees) between the two channels."""
        assert num_periods > 0
        # Both channels are measured on the rising edge.
        ch1_falling = 0
        ch2_falling = 0
        raw = self.instrument.doMeasurement(
            MEASUREMENT_INTERVAL,
            struct.pack('<BB', ch1_falling, ch2_falling))
        period_ticks, interval_ticks = struct.unpack('<II', raw)
        period = period_ticks / self.instrument.get_f_cpu()
        phase = 360 * interval_ticks / period_ticks
        return period, phase
class PwmChannel:
    """One PWM output channel of the instrument's digital generator."""
    def __init__(self, instrument, chan: int):
        self.instrument = instrument
        self.chan = chan
    def set_frequency(self, frequency_hz: float, phase_deg: float = 0, duty: float = 0.5):
        """Program this channel to emit a PWM signal.

        :param frequency_hz: target frequency in Hz.
        :param phase_deg: phase offset in degrees; negative values are
            normalized into [0, 360).
        :param duty: duty cycle in [0, 1].
        """
        assert frequency_hz >= 0
        assert duty >= 0 and duty <= 1
        # Period expressed in CPU clock ticks.
        period = self.instrument.get_f_cpu() / frequency_hz
        pulse_width = period * duty
        while phase_deg < 0:
            phase_deg += 360
        # Find the smallest prescaler that fits the period into 16 bits.
        prescaler = 1
        prescaled = round(period)
        while prescaled >= 65535:
            prescaler += 1
            prescaled = round(period / prescaler)
        request = struct.pack('<HHHHHH',
                              self.chan,
                              DGEN_MODE_PWM,
                              prescaler - 1,
                              prescaled - 1,
                              math.ceil(pulse_width / prescaler),
                              round(phase_deg * period / prescaler / 360))
        rc = self.instrument.io.sendPacketAndExpectResultCode(CMD_DGEN_OPTIONS, request)
        if DEBUG:
            print('rc=', rc)
        rc = self.instrument.io.sendPacketAndExpectResultCode(CMD_APPLY_DGEN_OPTIONS, None)
        if DEBUG:
            print('rc=', rc)
        # NOTE(review): the actual achieved frequency/duty/phase after integer
        # rounding is not reported back to the caller (the Qt code this was
        # apparently ported from computed and emitted those values).
class Instrument:
    """High-level driver for the counter/generator instrument."""

    def __init__(self, io):
        """Attach to a PacketIO transport, switch the device to the binary
        protocol, query instrument info and reset the instrument state."""
        self.io = io
        # Enter binary protocol
        self.io.sendPacket(CMD_PROTOCOL_SET_BINARY, None)
        self.io.flushInput()
        self.info = self.query_instrument_info()
        self.io.sendPacket(CMD_RESET_INSTRUMENT, None)

    @staticmethod
    def open_serial_port(*args, **kwargs):
        """Open a serial port and return an Instrument attached to it."""
        import serial
        return Instrument(PacketIO(serial.Serial(*args, **kwargs)))

    def awaitMeasurementResult(self, which):
        """Poll measurement `which` until it completes.

        Returns the raw result payload; raises on a non-zero result code.
        """
        while True:
            payload = struct.pack('<B', which)
            self.io.sendPacket(CMD_POLL_MEASUREMENT, payload)
            reply_tag, reply_data = self.io.awaitPacket()
            if reply_tag == INFO_MEASUREMENT_DATA and len(reply_data) >= 1:
                # reply_data[0] is rc, always >= 1; the measurement payload
                # follows it.
                return reply_data[1:]
            elif reply_tag == INFO_RESULT_CODE and len(reply_data) >= 1:
                # Index the first byte; struct.unpack('<B', reply_data) would
                # raise struct.error if the reply carried any extra bytes.
                rc = reply_data[0]
                if rc == 0:
                    # Measurement still in progress; keep polling.
                    continue
                raise Exception(f"Measurement error: RESULT_CODE {rc}")

    def doMeasurement(self, which, request):
        """Start measurement `which` with `request` payload; await its result."""
        RESULT_CODE_OK = 1
        payload = struct.pack('<B', which) + request
        rc = self.io.sendPacketAndExpectResultCode(CMD_START_MEASUREMENT,
                                                   payload)
        assert rc == RESULT_CODE_OK
        return self.awaitMeasurementResult(which)

    def get_frequency_measurement_function(self):
        return FrequencyMeasurementFunction(self)

    def get_frequency_ratio_measurement_function(self):
        return FrequencyRatioMeasurementFunction(self)

    def get_f_cpu(self):
        """Return the instrument's timer clock frequency in Hz."""
        return self.info['f_cpu']

    def get_period_measurement_function(self):
        return PeriodMeasurementFunction(self)

    def get_phase_measurement_function(self):
        return PhaseMeasurementFunction(self)

    def get_pwm_channel(self, chan: int):
        """Return PWM channel `chan` (valid channels are 0..2)."""
        assert chan >= 0 and chan <= 2
        return PwmChannel(self, chan)

    def query_instrument_info(self):
        """Query board id, firmware version and clock frequency.

        Raises when the firmware protocol version is unsupported.
        """
        self.io.sendPacket(CMD_QUERY_INSTRUMENT, None)
        reply_tag, reply_data = self.io.awaitPacket()
        assert reply_tag == INFO_INSTRUMENT_INFO
        board_id, fw_ver, f_cpu, timebase_source = struct.unpack(
            '<HHIB', reply_data)
        if DEBUG:
            print(f"board_id={board_id:04X}, fw_ver={fw_ver}, f_cpu={f_cpu}")
        if fw_ver != VERSION:
            raise Exception("Firmware version not supported")
        return dict(f_cpu=f_cpu)
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
from swift import gettext_ as _
import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, ismount, \
rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY
hubs.use_hub(get_hub())
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
    def __init__(self, conf, logger=None):
        """
        :param conf: configuration object obtained from ConfigParser
        :param logger: logging object
        """
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='object-replicator')
        self.devices_dir = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.bind_ip = conf.get('bind_ip', '0.0.0.0')
        self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
        # With servers_per_port enabled there is no single replication port.
        self.port = None if self.servers_per_port else \
            int(conf.get('bind_port', 6000))
        self.concurrency = int(conf.get('concurrency', 1))
        self.stats_interval = int(conf.get('stats_interval', '300'))
        self.ring_check_interval = int(conf.get('ring_check_interval', 15))
        self.next_check = time.time() + self.ring_check_interval
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.partition_times = []
        # 'run_pause' is the legacy spelling of 'interval'.
        self.interval = int(conf.get('interval') or
                            conf.get('run_pause') or 30)
        self.rsync_timeout = int(conf.get('rsync_timeout', 900))
        self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
        self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
        self.rsync_compress = config_true_value(
            conf.get('rsync_compress', 'no'))
        self.rsync_module = conf.get('rsync_module', '').rstrip('/')
        if not self.rsync_module:
            self.rsync_module = '{replication_ip}::object'
            if config_true_value(conf.get('vm_test_mode', 'no')):
                self.logger.warn('Option object-replicator/vm_test_mode is '
                                 'deprecated and will be removed in a future '
                                 'version. Update your configuration to use '
                                 'option object-replicator/rsync_module.')
                self.rsync_module += '{replication_port}'
        self.http_timeout = int(conf.get('http_timeout', 60))
        self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = os.path.join(self.recon_cache_path, "object.recon")
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.node_timeout = float(conf.get('node_timeout', 10))
        # Resolve 'rsync' or 'ssync' to the corresponding bound method.
        self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.default_headers = {
            'Content-Length': '0',
            'user-agent': 'object-replicator %s' % os.getpid()}
        self.rsync_error_log_line_length = \
            int(conf.get('rsync_error_log_line_length', 0))
        self.handoffs_first = config_true_value(conf.get('handoffs_first',
                                                         False))
        self.handoff_delete = config_auto_int_value(
            conf.get('handoff_delete', 'auto'), 0)
        if any((self.handoff_delete, self.handoffs_first)):
            self.logger.warn('Handoff only mode is not intended for normal '
                             'operation, please disable handoffs_first and '
                             'handoff_delete before the next '
                             'normal rebalance')
        self._diskfile_mgr = DiskFileManager(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _get_my_replication_ips(self):
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
    # Just exists for doc anchor point
    def sync(self, node, job, suffixes, *args, **kwargs):
        """
        Synchronize local suffix directories from a partition with a remote
        node.

        :param node: the "dev" entry for the remote node to sync with
        :param job: information about the partition being synced
        :param suffixes: a list of suffixes which need to be pushed

        :returns: boolean and dictionary, boolean indicating success or failure
        """
        # Delegates to the method selected by the 'sync_method' config
        # option (rsync or ssync).
        return self.sync_method(node, job, suffixes, *args, **kwargs)
    def load_object_ring(self, policy):
        """
        Make sure the policy's rings are loaded.

        :param policy: the StoragePolicy instance
        :returns: appropriate ring object
        """
        # Ring loading is delegated to the policy object itself.
        policy.load_ring(self.swift_dir)
        return policy.object_ring
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
ret_val = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if not ret_val:
self.logger.info(result)
else:
self.logger.error(result)
if ret_val:
error_line = _('Bad rsync return code: %(ret)d <- %(args)s') % \
{'args': str(args), 'ret': ret_val}
if self.rsync_error_log_line_length:
error_line = error_line[:self.rsync_error_log_line_length]
self.logger.error(error_line)
elif results:
self.logger.info(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
else:
self.logger.debug(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
]
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
return self._rsync(args) == 0, {}
    def ssync(self, node, job, suffixes, remote_check_objs=None):
        """
        Uses ssync to implement the sync method.

        :param remote_check_objs: optional collection of object hashes for
            the remote node to check; forwarded to the ssync Sender.
        :returns: boolean and dictionary, as described by :py:meth:`sync`
        """
        return ssync_sender.Sender(
            self, node, job, suffixes, remote_check_objs)()
    def check_ring(self, object_ring):
        """
        Check to see if the ring has been updated.

        At most one real check is performed per ring_check_interval.

        :param object_ring: the ring to check
        :returns: False if the ring has changed since the last check,
                  True if it is safe to continue with the current ring
        """
        if time.time() > self.next_check:
            self.next_check = time.time() + self.ring_check_interval
            if object_ring.has_changed():
                return False
        return True
    def update_deleted(self, job):
        """
        High-level method that replicates a single partition that doesn't
        belong on this node.

        Syncs the partition's suffixes to every node in ``job['nodes']``,
        issues a REPLICATE request to each node that synced successfully, and
        finally removes the local handoff data (the whole partition, or just
        the synced objects when using ssync) once enough syncs succeeded.

        :param job: a dict containing info about the partition to be replicated
        """
        def tpool_get_suffixes(path):
            # Suffix dirs have names exactly 3 characters long; ignore
            # anything else found in the partition directory.
            return [suff for suff in os.listdir(path)
                    if len(suff) == 3 and isdir(join(path, suff))]
        self.replication_count += 1
        self.logger.increment('partition.delete.count.%s' % (job['device'],))
        headers = dict(self.default_headers)
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        failure_devs_info = set()
        begin = time.time()
        try:
            responses = []
            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
            synced_remote_regions = {}
            delete_objs = None
            if suffixes:
                for node in job['nodes']:
                    self.stats['rsync'] += 1
                    kwargs = {}
                    # When using ssync and this region has already been
                    # synced, tell the sender which objects to re-check.
                    if node['region'] in synced_remote_regions and \
                            self.conf.get('sync_method', 'rsync') == 'ssync':
                        kwargs['remote_check_objs'] = \
                            synced_remote_regions[node['region']]
                    # candidates is a dict(hash=>timestamp) of objects
                    # for deletion
                    success, candidates = self.sync(
                        node, job, suffixes, **kwargs)
                    if success:
                        with Timeout(self.http_timeout):
                            conn = http_connect(
                                node['replication_ip'],
                                node['replication_port'],
                                node['device'], job['partition'], 'REPLICATE',
                                '/' + '-'.join(suffixes), headers=headers)
                            conn.getresponse().read()
                        if node['region'] != job['region']:
                            synced_remote_regions[node['region']] = viewkeys(
                                candidates)
                    else:
                        failure_devs_info.add((node['replication_ip'],
                                               node['device']))
                    responses.append(success)
                # Only objects synced to every remote region are safe to
                # delete locally (set intersection across regions).
                for region, cand_objs in synced_remote_regions.items():
                    if delete_objs is None:
                        delete_objs = cand_objs
                    else:
                        delete_objs = delete_objs & cand_objs
            if self.handoff_delete:
                # delete handoff if we have had handoff_delete successes
                delete_handoff = len([resp for resp in responses if resp]) >= \
                    self.handoff_delete
            else:
                # delete handoff if all syncs were successful
                delete_handoff = len(responses) == len(job['nodes']) and \
                    all(responses)
            if delete_handoff:
                self.stats['remove'] += 1
                if (self.conf.get('sync_method', 'rsync') == 'ssync' and
                        delete_objs is not None):
                    self.logger.info(_("Removing %s objects"),
                                     len(delete_objs))
                    _junk, error_paths = self.delete_handoff_objs(
                        job, delete_objs)
                    # if replication works for a hand-off device and it failed,
                    # the remote devices which are target of the replication
                    # from the hand-off device will be marked. Because cleanup
                    # after replication failed means replicator needs to
                    # replicate again with the same info.
                    if error_paths:
                        failure_devs_info.update(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in job['nodes']])
                else:
                    self.delete_partition(job['path'])
            elif not suffixes:
                self.delete_partition(job['path'])
        except (Exception, Timeout):
            self.logger.exception(_("Error syncing handoff partition"))
        finally:
            target_devs_info = set([(target_dev['replication_ip'],
                                     target_dev['device'])
                                    for target_dev in job['nodes']])
            self.stats['success'] += len(target_devs_info - failure_devs_info)
            self._add_failure_stats(failure_devs_info)
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.delete.timing', begin)
    def delete_partition(self, path):
        """Remove an entire partition tree at *path* via tpool.execute."""
        self.logger.info(_("Removing partition: %s"), path)
        tpool.execute(shutil.rmtree, path)
def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception(
"Unexpected error trying to cleanup suffix dir:%r",
suffix_dir)
return success_paths, error_paths
    def update(self, job):
        """
        High-level method that replicates a single partition.

        Compares local suffix hashes against each target node's hashes (via a
        REPLICATE request), syncs any mismatched suffixes, then asks the
        remote to rehash them.

        :param job: a dict containing info about the partition to be replicated
        """
        self.replication_count += 1
        self.logger.increment('partition.update.count.%s' % (job['device'],))
        headers = dict(self.default_headers)
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        target_devs_info = set()
        failure_devs_info = set()
        begin = time.time()
        try:
            # Hash the local partition; do a full listdir only on every
            # 10th call to keep the common case cheap.
            hashed, local_hash = tpool_reraise(
                self._diskfile_mgr._get_hashes, job['path'],
                do_listdir=(self.replication_count % 10) == 0,
                reclaim_age=self.reclaim_age)
            self.suffix_hash += hashed
            self.logger.update_stats('suffix.hashes', hashed)
            attempts_left = len(job['nodes'])
            synced_remote_regions = set()
            random.shuffle(job['nodes'])
            # Primary nodes first, then extra (handoff) nodes if the
            # primaries error out.
            nodes = itertools.chain(
                job['nodes'],
                job['policy'].object_ring.get_more_nodes(
                    int(job['partition'])))
            while attempts_left > 0:
                # If this throws StopIteration it will be caught way below
                node = next(nodes)
                target_devs_info.add((node['replication_ip'], node['device']))
                attempts_left -= 1
                # if we have already synced to this remote region,
                # don't sync again on this replication pass
                if node['region'] in synced_remote_regions:
                    continue
                try:
                    with Timeout(self.http_timeout):
                        resp = http_connect(
                            node['replication_ip'], node['replication_port'],
                            node['device'], job['partition'], 'REPLICATE',
                            '', headers=headers).getresponse()
                    if resp.status == HTTP_INSUFFICIENT_STORAGE:
                        self.logger.error(_('%(ip)s/%(device)s responded'
                                            ' as unmounted'), node)
                        # An unmounted device doesn't consume an attempt.
                        attempts_left += 1
                        failure_devs_info.add((node['replication_ip'],
                                               node['device']))
                        continue
                    if resp.status != HTTP_OK:
                        self.logger.error(_("Invalid response %(resp)s "
                                            "from %(ip)s"),
                                          {'resp': resp.status,
                                           'ip': node['replication_ip']})
                        failure_devs_info.add((node['replication_ip'],
                                               node['device']))
                        continue
                    remote_hash = pickle.loads(resp.read())
                    del resp
                    suffixes = [suffix for suffix in local_hash if
                                local_hash[suffix] !=
                                remote_hash.get(suffix, -1)]
                    if not suffixes:
                        self.stats['hashmatch'] += 1
                        continue
                    # Recalculate hashes for the mismatched suffixes in case
                    # they changed since the initial hashing above.
                    hashed, recalc_hash = tpool_reraise(
                        self._diskfile_mgr._get_hashes,
                        job['path'], recalculate=suffixes,
                        reclaim_age=self.reclaim_age)
                    self.logger.update_stats('suffix.hashes', hashed)
                    local_hash = recalc_hash
                    suffixes = [suffix for suffix in local_hash if
                                local_hash[suffix] !=
                                remote_hash.get(suffix, -1)]
                    self.stats['rsync'] += 1
                    success, _junk = self.sync(node, job, suffixes)
                    # Ask the remote end to rehash the suffixes we just sent.
                    with Timeout(self.http_timeout):
                        conn = http_connect(
                            node['replication_ip'], node['replication_port'],
                            node['device'], job['partition'], 'REPLICATE',
                            '/' + '-'.join(suffixes),
                            headers=headers)
                        conn.getresponse().read()
                    if not success:
                        failure_devs_info.add((node['replication_ip'],
                                               node['device']))
                    # add only remote region when replicate succeeded
                    if success and node['region'] != job['region']:
                        synced_remote_regions.add(node['region'])
                    self.suffix_sync += len(suffixes)
                    self.logger.update_stats('suffix.syncs', len(suffixes))
                except (Exception, Timeout):
                    failure_devs_info.add((node['replication_ip'],
                                           node['device']))
                    self.logger.exception(_("Error syncing with node: %s") %
                                          node)
            self.suffix_count += len(local_hash)
        except (Exception, Timeout):
            failure_devs_info.update(target_devs_info)
            self.logger.exception(_("Error syncing partition"))
        finally:
            self.stats['success'] += len(target_devs_info - failure_devs_info)
            self._add_failure_stats(failure_devs_info)
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.update.timing', begin)
    def stats_line(self):
        """
        Logs various stats for the currently running replication pass.

        Emits progress/rate/ETA, suffix hash/sync percentages and partition
        timing percentiles once at least one partition has been processed;
        otherwise logs that nothing has been replicated yet.
        """
        if self.replication_count:
            # Guard against a zero elapsed time on very fast passes.
            elapsed = (time.time() - self.start) or 0.000001
            rate = self.replication_count / elapsed
            self.logger.info(
                _("%(replicated)d/%(total)d (%(percentage).2f%%)"
                  " partitions replicated in %(time).2fs (%(rate).2f/sec, "
                  "%(remaining)s remaining)"),
                {'replicated': self.replication_count, 'total': self.job_count,
                 'percentage': self.replication_count * 100.0 / self.job_count,
                 'time': time.time() - self.start, 'rate': rate,
                 'remaining': '%d%s' % compute_eta(self.start,
                                                   self.replication_count,
                                                   self.job_count)})
            if self.suffix_count:
                self.logger.info(
                    _("%(checked)d suffixes checked - "
                      "%(hashed).2f%% hashed, %(synced).2f%% synced"),
                    {'checked': self.suffix_count,
                     'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
                     'synced': (self.suffix_sync * 100.0) / self.suffix_count})
                self.partition_times.sort()
                # min / median / max of per-partition durations.
                self.logger.info(
                    _("Partition times: max %(max).4fs, "
                      "min %(min).4fs, med %(med).4fs"),
                    {'max': self.partition_times[-1],
                     'min': self.partition_times[0],
                     'med': self.partition_times[
                         len(self.partition_times) // 2]})
        else:
            self.logger.info(
                _("Nothing replicated for %s seconds."),
                (time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
for coro in list(self.run_pool.coroutines_running):
try:
coro.kill(GreenletExit)
except GreenletExit:
pass
    def heartbeat(self):
        """
        Loop that runs in the background during replication. It periodically
        logs progress.

        Runs forever; it is spawned and later killed by replicate().
        """
        while True:
            eventlet.sleep(self.stats_interval)
            self.stats_line()
    def detect_lockups(self):
        """
        In testing, the pool.waitall() call very occasionally failed to return.
        This is an attempt to make sure the replicator finishes its replication
        pass in some eventuality.

        If no partitions were completed since the last wake-up, every running
        coroutine is killed so the pass can terminate.
        """
        while True:
            eventlet.sleep(self.lockup_timeout)
            # No progress since the last check means we are likely stuck.
            if self.replication_count == self.last_replication_count:
                self.logger.error(_("Lockup detected.. killing live coros."))
                self.kill_coros()
            self.last_replication_count = self.replication_count
    def build_replication_jobs(self, policy, ips, override_devices=None,
                               override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy.

        :param policy: the storage policy whose ring/devices to walk
        :param ips: list of local IPs used to identify local devices
        :param override_devices: if set, only build jobs for these devices
        :param override_partitions: if set, only build jobs for these
            partitions
        :returns: list of job dicts
        """
        jobs = []
        self.all_devs_info.update(
            [(dev['replication_ip'], dev['device'])
             for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        # Walk only devices in this ring that live on this node (and match
        # any device override).
        for local_dev in [dev for dev in policy.object_ring.devs
                          if (dev
                              and is_local_device(ips,
                                                  self.port,
                                                  dev['replication_ip'],
                                                  dev['replication_port'])
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                self._add_failure_stats(
                    [(failure_dev['replication_ip'],
                      failure_dev['device'])
                     for failure_dev in policy.object_ring.devs
                     if failure_dev])
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue
                part_nodes = None
                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(
                        int(partition))
                    nodes = [node for node in part_nodes
                             if node['id'] != local_dev['id']]
                    # delete is True when this device is not one of the
                    # partition's primary nodes (a handoff partition).
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy=policy,
                             partition=partition,
                             region=local_dev['region']))
                except ValueError:
                    if part_nodes:
                        self._add_failure_stats(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in nodes])
                    else:
                        self._add_failure_stats(
                            [(failure_dev['replication_ip'],
                              failure_dev['device'])
                             for failure_dev in policy.object_ring.devs
                             if failure_dev])
                    continue
        return jobs
    def collect_jobs(self, override_devices=None, override_partitions=None,
                     override_policies=None):
        """
        Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc to be rsynced.

        :param override_devices: if set, only jobs on these devices
            will be returned
        :param override_partitions: if set, only jobs on these partitions
            will be returned
        :param override_policies: if set, only jobs in these storage
            policies will be returned
        """
        jobs = []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            # Only replication-type storage policies are handled here.
            if policy.policy_type == REPL_POLICY:
                if (override_policies is not None and
                        str(policy.idx) not in override_policies):
                    continue
                # ensure rings are loaded for policy
                self.load_object_ring(policy)
                jobs += self.build_replication_jobs(
                    policy, ips, override_devices=override_devices,
                    override_partitions=override_partitions)
        # Randomize the job order across passes.
        random.shuffle(jobs)
        if self.handoffs_first:
            # Move the handoff parts to the front of the list
            jobs.sort(key=lambda job: not job['delete'])
        self.job_count = len(jobs)
        return jobs
    def replicate(self, override_devices=None, override_partitions=None,
                  override_policies=None):
        """
        Run a replication pass.

        Collects jobs, dispatches each to update()/update_deleted() on a
        green pool, and aborts the pass if a ring change is detected.

        :param override_devices: if set, restrict the pass to these devices
        :param override_partitions: if set, restrict the pass to these
            partitions
        :param override_policies: if set, restrict the pass to these policies
        """
        self.start = time.time()
        self.suffix_count = 0
        self.suffix_sync = 0
        self.suffix_hash = 0
        self.replication_count = 0
        self.last_replication_count = -1
        self.partition_times = []
        self.my_replication_ips = self._get_my_replication_ips()
        self.all_devs_info = set()
        stats = eventlet.spawn(self.heartbeat)
        lockup_detector = eventlet.spawn(self.detect_lockups)
        eventlet.sleep()  # Give spawns a cycle
        current_nodes = None
        try:
            self.run_pool = GreenPool(size=self.concurrency)
            jobs = self.collect_jobs(override_devices=override_devices,
                                     override_partitions=override_partitions,
                                     override_policies=override_policies)
            for job in jobs:
                current_nodes = job['nodes']
                if override_devices and job['device'] not in override_devices:
                    continue
                if override_partitions and \
                        job['partition'] not in override_partitions:
                    continue
                dev_path = join(self.devices_dir, job['device'])
                if self.mount_check and not ismount(dev_path):
                    self._add_failure_stats([(failure_dev['replication_ip'],
                                              failure_dev['device'])
                                             for failure_dev in job['nodes']])
                    self.logger.warn(_('%s is not mounted'), job['device'])
                    continue
                if not self.check_ring(job['policy'].object_ring):
                    self.logger.info(_("Ring change detected. Aborting "
                                       "current replication pass."))
                    return
                try:
                    if isfile(job['path']):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning(
                            'Removing partition directory '
                            'which was a file: %s', job['path'])
                        os.remove(job['path'])
                        continue
                except OSError:
                    continue
                if job['delete']:
                    self.run_pool.spawn(self.update_deleted, job)
                else:
                    self.run_pool.spawn(self.update, job)
            current_nodes = None
            with Timeout(self.lockup_timeout):
                self.run_pool.waitall()
        except (Exception, Timeout):
            # Attribute the failure to the job being dispatched, or to every
            # device if we weren't in the middle of a job.
            if current_nodes:
                self._add_failure_stats([(failure_dev['replication_ip'],
                                          failure_dev['device'])
                                         for failure_dev in current_nodes])
            else:
                self._add_failure_stats(self.all_devs_info)
            self.logger.exception(_("Exception in top-level replication loop"))
            self.kill_coros()
        finally:
            stats.kill()
            lockup_detector.kill()
            self.stats_line()
            self.stats['attempted'] = self.replication_count
    def run_once(self, *args, **kwargs):
        """
        Run a single replication pass and dump recon stats on completion.

        Optional ``devices``, ``partitions`` and ``policies`` kwargs (CSV
        strings) restrict what the pass replicates.
        """
        self._zero_stats()
        self.logger.info(_("Running object replicator in script mode."))
        override_devices = list_from_csv(kwargs.get('devices'))
        override_partitions = list_from_csv(kwargs.get('partitions'))
        override_policies = list_from_csv(kwargs.get('policies'))
        # Normalize empty overrides to None so replicate() treats them as
        # "no restriction".
        if not override_devices:
            override_devices = None
        if not override_partitions:
            override_partitions = None
        if not override_policies:
            override_policies = None
        self.replicate(
            override_devices=override_devices,
            override_partitions=override_partitions,
            override_policies=override_policies)
        total = (time.time() - self.stats['start']) / 60
        self.logger.info(
            _("Object replication complete (once). (%.02f minutes)"), total)
        # Only record recon stats for full (unrestricted) passes.
        if not (override_partitions or override_devices):
            replication_last = time.time()
            dump_recon_cache({'replication_stats': self.stats,
                              'replication_time': total,
                              'replication_last': replication_last,
                              'object_replication_time': total,
                              'object_replication_last': replication_last},
                             self.rcache, self.logger)
    def run_forever(self, *args, **kwargs):
        """
        Run full replication passes in a loop, dumping recon stats and
        sleeping for the configured interval between passes.
        """
        self.logger.info(_("Starting object replicator in daemon mode."))
        # Run the replicator continually
        while True:
            self._zero_stats()
            self.logger.info(_("Starting object replication pass."))
            # Run the replicator
            self.replicate()
            total = (time.time() - self.stats['start']) / 60
            self.logger.info(
                _("Object replication complete. (%.02f minutes)"), total)
            replication_last = time.time()
            dump_recon_cache({'replication_stats': self.stats,
                              'replication_time': total,
                              'replication_last': replication_last,
                              'object_replication_time': total,
                              'object_replication_last': replication_last},
                             self.rcache, self.logger)
            self.logger.debug('Replication sleeping for %s seconds.',
                              self.interval)
            sleep(self.interval)
| |
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.dnsimple import DnsimpleClientNotFound, DnsimpleProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
class TestDnsimpleProvider(TestCase):
    """
    Tests for DnsimpleProvider using requests_mock for HTTP-level cases and
    Mock objects for the client-level apply cases.
    """
    # Expected zone state, loaded once from the YAML fixture config.
    expected = Zone('unit.tests.', [])
    source = YamlProvider('test', join(dirname(__file__), 'config'))
    source.populate(expected)
    # Our test suite differs a bit, add our NS and remove the simple one
    expected.add_record(Record.new(expected, 'under', {
        'ttl': 3600,
        'type': 'NS',
        'values': [
            'ns1.unit.tests.',
            'ns2.unit.tests.',
        ]
    }))
    for record in list(expected.records):
        if record.name == 'sub' and record._type == 'NS':
            expected._remove_record(record)
            break
    def test_populate(self):
        """populate() behavior: auth errors, 404s, fixtures and caching."""
        provider = DnsimpleProvider('test', 'token', 42)
        # Bad auth
        with requests_mock() as mock:
            mock.get(ANY, status_code=401,
                     text='{"message": "Authentication failed"}')
            with self.assertRaises(Exception) as ctx:
                zone = Zone('unit.tests.', [])
                provider.populate(zone)
            self.assertEquals('Unauthorized', ctx.exception.message)
        # General error
        with requests_mock() as mock:
            mock.get(ANY, status_code=502, text='Things caught fire')
            with self.assertRaises(HTTPError) as ctx:
                zone = Zone('unit.tests.', [])
                provider.populate(zone)
            self.assertEquals(502, ctx.exception.response.status_code)
        # Non-existant zone doesn't populate anything
        with requests_mock() as mock:
            mock.get(ANY, status_code=404,
                     text='{"message": "Domain `foo.bar` not found"}')
            zone = Zone('unit.tests.', [])
            provider.populate(zone)
            self.assertEquals(set(), zone.records)
        # No diffs == no changes
        with requests_mock() as mock:
            base = 'https://api.dnsimple.com/v2/42/zones/unit.tests/' \
                'records?page='
            with open('tests/fixtures/dnsimple-page-1.json') as fh:
                mock.get('{}{}'.format(base, 1), text=fh.read())
            with open('tests/fixtures/dnsimple-page-2.json') as fh:
                mock.get('{}{}'.format(base, 2), text=fh.read())
            zone = Zone('unit.tests.', [])
            provider.populate(zone)
            self.assertEquals(14, len(zone.records))
            changes = self.expected.changes(zone, provider)
            self.assertEquals(0, len(changes))
        # 2nd populate makes no network calls/all from cache
        again = Zone('unit.tests.', [])
        provider.populate(again)
        self.assertEquals(14, len(again.records))
        # bust the cache
        del provider._zone_records[zone.name]
        # test handling of invalid content
        with requests_mock() as mock:
            with open('tests/fixtures/dnsimple-invalid-content.json') as fh:
                mock.get(ANY, text=fh.read())
            zone = Zone('unit.tests.', [])
            provider.populate(zone)
            # Invalid values come through as empty record values.
            self.assertEquals(set([
                Record.new(zone, '', {
                    'ttl': 3600,
                    'type': 'SSHFP',
                    'values': []
                }),
                Record.new(zone, '_srv._tcp', {
                    'ttl': 600,
                    'type': 'SRV',
                    'values': []
                }),
                Record.new(zone, 'naptr', {
                    'ttl': 600,
                    'type': 'NAPTR',
                    'values': []
                }),
            ]), zone.records)
    def test_apply(self):
        """apply() behavior: domain creation, record create/update/delete."""
        provider = DnsimpleProvider('test', 'token', 42)
        resp = Mock()
        resp.json = Mock()
        provider._client._request = Mock(return_value=resp)
        # non-existant domain, create everything
        resp.json.side_effect = [
            DnsimpleClientNotFound,  # no zone in populate
            DnsimpleClientNotFound,  # no domain during apply
        ]
        plan = provider.plan(self.expected)
        # No root NS, no ignored
        n = len(self.expected.records) - 2
        self.assertEquals(n, len(plan.changes))
        self.assertEquals(n, provider.apply(plan))
        provider._client._request.assert_has_calls([
            # created the domain
            call('POST', '/domains', data={'name': 'unit.tests'}),
            # created at least one of the record with expected data
            call('POST', '/zones/unit.tests/records', data={
                'content': '20 30 foo-1.unit.tests.',
                'priority': 10,
                'type': 'SRV',
                'name': '_srv._tcp',
                'ttl': 600
            }),
        ])
        # expected number of total calls
        self.assertEquals(26, provider._client._request.call_count)
        provider._client._request.reset_mock()
        # delete 1 and update 1
        provider._client.records = Mock(return_value=[
            {
                'id': 11189897,
                'name': 'www',
                'content': '1.2.3.4',
                'ttl': 300,
                'type': 'A',
            },
            {
                'id': 11189898,
                'name': 'www',
                'content': '2.2.3.4',
                'ttl': 300,
                'type': 'A',
            },
            {
                'id': 11189899,
                'name': 'ttl',
                'content': '3.2.3.4',
                'ttl': 600,
                'type': 'A',
            }
        ])
        # Domain exists, we don't care about return
        resp.json.side_effect = ['{}']
        wanted = Zone('unit.tests.', [])
        wanted.add_record(Record.new(wanted, 'ttl', {
            'ttl': 300,
            'type': 'A',
            'value': '3.2.3.4'
        }))
        plan = provider.plan(wanted)
        self.assertEquals(2, len(plan.changes))
        self.assertEquals(2, provider.apply(plan))
        # recreate for update, and deletes for the 2 parts of the other
        provider._client._request.assert_has_calls([
            call('POST', '/zones/unit.tests/records', data={
                'content': '3.2.3.4',
                'type': 'A',
                'name': 'ttl',
                'ttl': 300
            }),
            call('DELETE', '/zones/unit.tests/records/11189899'),
            call('DELETE', '/zones/unit.tests/records/11189897'),
            call('DELETE', '/zones/unit.tests/records/11189898')
        ], any_order=True)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict
micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
# NOTE: ``tags`` is injected by the Sphinx runtime when conf.py is executed;
# it is not imported here.
tags.add('port_' + micropy_port)
# Map of port id -> human-readable name, in display order.
ports = OrderedDict((
    ('unix', 'unix'),
    ('pyboard', 'the pyboard'),
    ('wipy', 'the WiPy'),
    ('esp8266', 'the ESP8266'),
))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
# URL template: '<prefix>/en/<version>/<port-or-file>'.
url_pattern = '%s/en/%%s/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
    'port':micropy_port,
    'port_name':ports[micropy_port],
    'port_version':micropy_version,
    'all_ports':[
        (port_id, url_pattern % (micropy_version, port_id))
        for port_id, port_name in ports.items()
    ],
    'all_versions':[
        (ver, url_pattern % (ver, micropy_port))
        for ver in micropy_all_versions
    ],
    'downloads':[
        ('PDF', url_pattern % (micropy_version, 'micropython-%s.pdf' % micropy_port)),
    ],
}
# Specify a custom master document based on the port name
master_doc = micropy_port + '_' + 'index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. The sphinx_selective_exclude extensions drive the per-port
# exclusions configured at the bottom of this file.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx_selective_exclude.modindex_exclude',
    'sphinx_selective_exclude.eager_only',
    'sphinx_selective_exclude.search_auto_exclude',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
# (master_doc is set above, derived from the selected port.)
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014-2016, Damien P. George and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
# The full version, including alpha/beta/rc tags.
release = '1.8.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Extended with the other ports' trees at the bottom of this file.)
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        # sphinx_rtd_theme isn't installed locally; fall back to the
        # builtin default theme. (Was a bare ``except:``, which would also
        # have swallowed SystemExit/KeyboardInterrupt.)
        html_theme = 'default'
        html_theme_path = ['.']
else:
    html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# (Theme and theme path are selected above based on the environment.)
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names. topindex.html consumes the html_context dict set above.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'MicroPython.tex', 'MicroPython Documentation',
   'Damien P. George and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'micropython', 'MicroPython Documentation',
     ['Damien P. George and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'MicroPython', 'MicroPython Documentation',
   'Damien P. George and contributors', 'MicroPython', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Append the other ports' specific folders/files to the exclude pattern
exclude_patterns.extend(
    other_port + '*' for other_port in ports if other_port != micropy_port)
# Modules that only exist on particular ports.
modules_port_specific = {
    'pyboard': ['pyb'],
    'wipy': ['wipy'],
    'esp8266': ['esp'],
}
# Hide the other ports' modules from the module index.
modindex_exclude = [
    module
    for port_name, port_modules in modules_port_specific.items()
    if port_name != micropy_port
    for module in port_modules
]
# Exclude extra modules per port
extra_modindex_excludes = {
    'esp8266': ['cmath', 'select'],
    'wipy': ['cmath'],
}
modindex_exclude += extra_modindex_excludes.get(micropy_port, [])
| |
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.externals.six import u
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn import datasets
# Shared fixtures for all scaler/normalizer tests in this module.
iris = datasets.load_iris()

# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
# Per-feature random shift and scale so every column has a distinct
# mean/variance.  The order of these rng calls fixes the generated data.
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
# Single-row and single-column slices, both as arrays and as plain lists,
# used to exercise the scalers' 1-sample / 1-feature edge cases.
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
    """Return *a* densified: sparse matrices become dense arrays,
    anything else is passed through unchanged."""
    return a.toarray() if hasattr(a, "toarray") else a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
                        n_samples_seen):
    """Check the incremental sample count after consuming batch *i*."""
    if batch_stop == n:
        # Final (possibly short) batch: i full chunks plus its own length.
        expected = i * chunk_size + (batch_stop - batch_start)
    else:
        # Interior batch: i + 1 full chunks have been consumed so far.
        expected = (i + 1) * chunk_size
    assert_equal(expected, n_samples_seen)
def test_polynomial_features():
    # Test Polynomial Features
    # Degree-3 expansion of one feature: columns [1, x, x^2, x^3].
    X1 = np.arange(6)[:, np.newaxis]
    P1 = np.hstack([np.ones_like(X1),
                    X1, X1 ** 2, X1 ** 3])
    deg1 = 3

    # Degree-2 expansion of two features: all monomials x1^i * x2^j, i+j <= 2.
    X2 = np.arange(6).reshape((3, 2))
    x1 = X2[:, :1]
    x2 = X2[:, 1:]
    P2 = np.hstack([x1 ** 0 * x2 ** 0,
                    x1 ** 1 * x2 ** 0,
                    x1 ** 0 * x2 ** 1,
                    x1 ** 2 * x2 ** 0,
                    x1 ** 1 * x2 ** 1,
                    x1 ** 0 * x2 ** 2])
    deg2 = 2

    for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
        P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
        assert_array_almost_equal(P_test, P)

        # Without bias the constant column (first column of P) is dropped.
        P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
        assert_array_almost_equal(P_test, P[:, 1:])

    # interaction_only keeps only monomials of distinct features:
    # columns [1, x1, x2, x1*x2] of P2.  NOTE: X is still bound to X2
    # here, left over from the last loop iteration.
    interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
    X_poly = interact.fit_transform(X)
    assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])

    assert_equal(interact.powers_.shape, (interact.n_output_features_,
                                          interact.n_input_features_))
def test_polynomial_feature_names():
    """Feature names follow the 'x0^i x1^j' pattern and honour custom
    (including non-ASCII) input feature names."""
    X = np.arange(30).reshape(10, 3)
    poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
    feature_names = poly.get_feature_names()
    assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
                        'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
                       feature_names)

    poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
                        'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
                        'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
                        'b c^2', 'c^3'], feature_names)

    # test some unicode
    # BUG FIX: the original used u"\u0001F40D", which is the control
    # character U+0001 followed by the literal text "F40D".  The snake
    # emoji U+1F40D lies outside the BMP and needs the 8-digit \U escape.
    poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
    feature_names = poly.get_feature_names([u"\U0001F40D", u"\u262E",
                                            u"\u05D0"])
    assert_array_equal([u"1", u"\U0001F40D", u"\u262E", u"\u05D0"],
                       feature_names)
def test_standard_scaler_1d():
    # Test scaling of dataset along single axis
    # BUG FIX: the original listed X_list_1row twice; the intended fourth
    # case is X_list_1col, which is defined at module level and was
    # otherwise never used.
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=True)

        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done

        if _check_dim_1axis(X) == 1:
            # Single sample: mean_ is the row itself and scale_ collapses
            # to ones, so the scaled output is all zeros.
            assert_almost_equal(scaler.mean_, X.ravel())
            assert_almost_equal(scaler.scale_, np.ones(n_features))
            assert_array_almost_equal(X_scaled.mean(axis=0),
                                      np.zeros_like(n_features))
            assert_array_almost_equal(X_scaled.std(axis=0),
                                      np.zeros_like(n_features))
        else:
            # Single feature: output must have zero mean and unit std.
            assert_almost_equal(scaler.mean_, X.mean())
            assert_almost_equal(scaler.scale_, X.std())
            assert_array_almost_equal(X_scaled.mean(axis=0),
                                      np.zeros_like(n_features))
            assert_array_almost_equal(X_scaled.mean(axis=0), .0)
            assert_array_almost_equal(X_scaled.std(axis=0), 1.)
        assert_equal(scaler.n_samples_seen_, X.shape[0])

        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)

    # Constant feature: zero variance makes scale_ fall back to 1 so the
    # output is exactly zero everywhere.
    X = np.ones(5).reshape(5, 1)
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_almost_equal(scaler.mean_, 1.)
    assert_almost_equal(scaler.scale_, 1.)
    assert_array_almost_equal(X_scaled.mean(axis=0), .0)
    assert_array_almost_equal(X_scaled.std(axis=0), .0)
    assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
    """scale() on 1-d input: zero mean, unit std; no-op when both off."""
    values = [1., 3., 5., 0.]
    for X in (values, np.array(values)):
        X_scaled = scale(X)
        assert_array_almost_equal(X_scaled.mean(), 0.0)
        assert_array_almost_equal(X_scaled.std(), 1.0)
        # Disabling both centering and scaling leaves the data untouched.
        assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
    """Test numerical stability of scaling"""
    # np.log(1e-5) is taken because of its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
    x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
        # This does not raise a warning as the number of samples is too low
        # to trigger the problem in recent numpy
        x_scaled = assert_no_warnings(scale, x)
        assert_array_almost_equal(scale(x), np.zeros(8))
    else:
        w = "standard deviation of the data is probably very close to 0"
        x_scaled = assert_warns_message(UserWarning, w, scale, x)
        assert_array_almost_equal(x_scaled, np.zeros(8))

    # with 2 more samples, the std computation run into numerical issues:
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    w = "standard deviation of the data is probably very close to 0"
    x_scaled = assert_warns_message(UserWarning, w, scale, x)
    assert_array_almost_equal(x_scaled, np.zeros(10))

    # Tiny but non-degenerate values scale to ~zero without warning.
    x = np.ones(10, dtype=np.float64) * 1e-100
    x_small_scaled = assert_no_warnings(scale, x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))

    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.ones(10, dtype=np.float64) * 1e100
    w = "Dataset may contain too large values"
    x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)

    x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
                                          with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
    # Test scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    n_features = 5
    n_samples = 4
    X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always of zero

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_equal(scaler.n_samples_seen_, n_samples)

    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    # The constant column keeps std 0; the others are standardized to 1.
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
    assert_true(X_scaled is not X)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    # Row-wise scaling through the function interface (axis=1).
    X_scaled = scale(X, axis=1, with_std=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
    # Check that the data hasn't been modified
    assert_true(X_scaled is not X)

    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is X)

    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
    """Zero entries of a scale vector are replaced by 1, on a copy."""
    original = np.array([0, 1, 2, 3])
    patched = _handle_zeros_in_scale(original, copy=True)
    # The zero was replaced in the copy only; the input is untouched.
    assert_false(original[0] == patched[0])
    assert_array_equal(original, np.array([0, 1, 2, 3]))
    assert_array_equal(patched, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
    # Test if partial_fit run over many batches of size 1 and 50
    # gives the same results as fit
    X = X_2d
    n = X.shape[0]

    for chunk_size in [1, 2, 50, n, n + 42]:
        # Test mean at the end of the process
        scaler_batch = MinMaxScaler().fit(X)

        scaler_incr = MinMaxScaler()
        for batch in gen_batches(n_samples, chunk_size):
            scaler_incr = scaler_incr.partial_fit(X[batch])

        # All learned statistics must match the single-shot fit.
        assert_array_almost_equal(scaler_batch.data_min_,
                                  scaler_incr.data_min_)
        assert_array_almost_equal(scaler_batch.data_max_,
                                  scaler_incr.data_max_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.data_range_,
                                  scaler_incr.data_range_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)

        # Test std after 1 step
        batch0 = slice(0, chunk_size)
        scaler_batch = MinMaxScaler().fit(X[batch0])
        scaler_incr = MinMaxScaler().partial_fit(X[batch0])

        assert_array_almost_equal(scaler_batch.data_min_,
                                  scaler_incr.data_min_)
        assert_array_almost_equal(scaler_batch.data_max_,
                                  scaler_incr.data_max_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.data_range_,
                                  scaler_incr.data_range_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)

        # Test std until the end of partial fits, and
        scaler_batch = MinMaxScaler().fit(X)
        scaler_incr = MinMaxScaler()  # Clean estimator
        for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            # Sample counter must track exactly how much data was consumed.
            assert_correct_incr(i, batch_start=batch.start,
                                batch_stop=batch.stop, n=n,
                                chunk_size=chunk_size,
                                n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
    # Test if partial_fit run over many batches of size 1 and 50
    # gives the same results as fit
    X = X_2d
    n = X.shape[0]

    for chunk_size in [1, 2, 50, n, n + 42]:
        # Test mean at the end of the process
        scaler_batch = StandardScaler(with_std=False).fit(X)

        scaler_incr = StandardScaler(with_std=False)
        for batch in gen_batches(n_samples, chunk_size):
            scaler_incr = scaler_incr.partial_fit(X[batch])

        assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
        assert_equal(scaler_batch.var_, scaler_incr.var_)  # Nones
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)

        # Test std after 1 step
        batch0 = slice(0, chunk_size)
        scaler_incr = StandardScaler().partial_fit(X[batch0])
        if chunk_size == 1:
            # A single sample has zero variance, so scale_ falls back to 1.
            assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
                                      scaler_incr.var_)
            assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
                                      scaler_incr.scale_)
        else:
            assert_array_almost_equal(np.var(X[batch0], axis=0),
                                      scaler_incr.var_)
            assert_array_almost_equal(np.std(X[batch0], axis=0),
                                      scaler_incr.scale_)  # no constants

        # Test std until the end of partial fits, and
        scaler_batch = StandardScaler().fit(X)
        scaler_incr = StandardScaler()  # Clean estimator
        for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            assert_correct_incr(i, batch_start=batch.start,
                                batch_stop=batch.stop, n=n,
                                chunk_size=chunk_size,
                                n_samples_seen=scaler_incr.n_samples_seen_)

        assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significative errors
    # for large datasets with values of large magniture
    rng = np.random.RandomState(0)
    n_features = 2
    n_samples = 100
    offsets = rng.uniform(-1e15, 1e15, size=n_features)
    scales = rng.uniform(1e3, 1e6, size=n_features)
    X = rng.randn(n_samples, n_features) * scales + offsets

    scaler_batch = StandardScaler().fit(X)
    scaler_incr = StandardScaler()
    # Feed the data one sample at a time — the worst case for the
    # incremental mean/variance update.
    for chunk in X:
        scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))

    # Regardless of abs values, they must not be more diff 6 significant digits
    tol = 10 ** (-6)
    assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
    assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
    assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
    # NOTE Be aware that for much larger offsets std is very unstable (last
    # assert) while mean is OK.

    # Sparse input
    size = (100, 3)
    # NOTE(review): this local `scale` shadows the imported scale() function
    # for the rest of this test body.
    scale = 1e20
    X = rng.randint(0, 2, size).astype(np.float64) * scale
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    for X in [X_csr, X_csc]:
        # with_mean=False is required with sparse input
        scaler = StandardScaler(with_mean=False).fit(X)
        scaler_incr = StandardScaler(with_mean=False)

        for chunk in X:
            # chunk = sparse.csr_matrix(data_chunks)
            scaler_incr = scaler_incr.partial_fit(chunk)

        # Regardless of magnitude, they must not differ more than of 6 digits
        tol = 10 ** (-6)
        assert_true(scaler.mean_ is not None)
        assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
        assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
    """A no-op StandardScaler must keep sparse data sparse and unchanged."""
    dense = np.array([[1.], [0.], [0.], [5.]])
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)

    for X in (sparse.csr_matrix(dense), sparse.csc_matrix(dense)):
        X_null = null_transform.partial_fit(X).transform(X)
        assert_array_equal(X_null.data, X.data)
        # Round-tripping through inverse_transform is also a no-op.
        X_orig = null_transform.inverse_transform(X_null)
        assert_array_equal(X_orig.data, X_null.data)
        assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
    # Check some postconditions after applying partial_fit and transform
    # (the "trasform" typo in the name is kept: it is the public test id).
    X = X_2d[:100, :]

    scaler_incr = StandardScaler()
    for i, batch in enumerate(gen_batches(X.shape[0], 1)):
        # Compare the incremental scaler against a fresh batch fit on
        # all data seen so far.
        X_sofar = X[:(i + 1), :]
        chunks_copy = X_sofar.copy()
        scaled_batch = StandardScaler().fit_transform(X_sofar)

        scaler_incr = scaler_incr.partial_fit(X[batch])
        scaled_incr = scaler_incr.transform(X_sofar)

        assert_array_almost_equal(scaled_batch, scaled_incr)
        assert_array_almost_equal(X_sofar, chunks_copy)  # No change
        right_input = scaler_incr.inverse_transform(scaled_incr)
        assert_array_almost_equal(X_sofar, right_input)

        # var_ and scale_ must stay non-negative throughout.
        zero = np.zeros(X.shape[1])
        epsilon = np.nextafter(0, 1)
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # as less or equal
        assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the Scaler has been already fitted
        assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
    # MinMaxScaler maps every feature onto the requested range and the
    # inverse transform recovers the original data.
    X = iris.data
    scaler = MinMaxScaler()
    # default params
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # raises on invalid range
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
    # Check min max scaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]

    # default params
    # Zero-variance columns (the first two) are mapped to the range's
    # lower bound instead of dividing by zero.
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    X_expected_0_1 = [[0., 0., 0.5],
                      [0., 0., 0.0],
                      [0., 0., 1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # New data may fall outside the fitted [0, 1] range.
    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0., 1., 0.500],
                          [-1., 0., 0.083],
                          [+0., 0., 1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)

    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1., 1., 1.5],
                      [1., 1., 1.0],
                      [1., 1., 2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)

    # function interface
    X_trans = minmax_scale(X)
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans = minmax_scale(X, feature_range=(1, 2))
    assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
    """minmax_scale along axis=1 maps every row onto the [0, 1] range."""
    scaled = minmax_scale(iris.data, axis=1)
    assert_array_almost_equal(scaled.min(axis=1), 0)
    assert_array_almost_equal(scaled.max(axis=1), 1)
def test_min_max_scaler_1d():
    # Test scaling of dataset along single axis
    # BUG FIX: the original listed X_list_1row twice; the intended fourth
    # case is X_list_1col (defined at module level, otherwise unused).
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
        scaler = MinMaxScaler(copy=True)
        X_scaled = scaler.fit(X).transform(X)

        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done

        if _check_dim_1axis(X) == 1:
            # A single sample maps to the bottom of the feature range.
            assert_array_almost_equal(X_scaled.min(axis=0),
                                      np.zeros(n_features))
            assert_array_almost_equal(X_scaled.max(axis=0),
                                      np.zeros(n_features))
        else:
            # A single feature spans exactly [0, 1] after scaling.
            assert_array_almost_equal(X_scaled.min(axis=0), .0)
            assert_array_almost_equal(X_scaled.max(axis=0), 1.)
        assert_equal(scaler.n_samples_seen_, X.shape[0])

        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)

    # Constant feature: output must still fall inside [0, 1].
    X = np.ones(5).reshape(5, 1)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_greater_equal(X_scaled.min(), 0.)
    assert_less_equal(X_scaled.max(), 1.)
    assert_equal(scaler.n_samples_seen_, X.shape[0])

    # Function interface
    X_1d = X_1row.ravel()
    min_ = X_1d.min()
    max_ = X_1d.max()
    assert_array_almost_equal((X_1d - min_) / (max_ - min_),
                              minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # Centering sparse data is not supported: fit must raise.
    assert_raises(ValueError, StandardScaler().fit, X_csr)
    assert_raises(ValueError, StandardScaler().fit, X_csc)

    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # Dense and sparse fits must agree on the learned statistics.
    assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csr.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)

    assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csc.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    # NOTE(review): scaler_csr (not scaler_csc) is reused below; harmless
    # since both scalers learned identical statistics — confirm intended.
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that scaler converts integer input to floating
    # for both sparse and dense matrices
    rng = np.random.RandomState(42)
    X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    # The catch_warnings blocks swallow the expected DataConversionWarning
    # emitted when integer input is converted to float.
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler = StandardScaler(with_mean=False).fit(X)
        X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
        X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # Dense and sparse fits must agree on the learned statistics.
    assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csr.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)

    assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csc.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0),
        [0., 1.109, 1.856, 21., 1.559], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(np.float), 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    # NOTE(review): scaler_csr is reused for the csc round-trip; harmless
    # because both scalers hold identical statistics — confirm intended.
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
    # Check that StandardScaler.fit does not change input
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # Even with copy=False, fit() alone must leave the input untouched.
    X_copy = X.copy()
    StandardScaler(copy=False).fit(X)
    assert_array_equal(X, X_copy)

    X_csr_copy = X_csr.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csr)
    assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())

    X_csc_copy = X_csc.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csc)
    assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
    # Requesting centering (with_mean=True) on sparse input must raise,
    # because centering would densify the matrix.
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # check scaling and fit with direct calls on sparse data
    assert_raises(ValueError, scale, X_csr, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)

    assert_raises(ValueError, scale, X_csc, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)

    # check transform and inverse_transform after a fit on a dense array
    scaler = StandardScaler(with_mean=True).fit(X)
    assert_raises(ValueError, scaler.transform, X_csr)
    assert_raises(ValueError, scaler.transform, X_csc)

    X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
    assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)

    X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
    assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
    """scale() must reject inputs containing NaN or infinity."""
    msg = "Input contains NaN, infinity or a value too large"
    for bad_value in (np.nan, np.inf):
        assert_raises_regex(ValueError, msg,
                            scale, [bad_value, 5, 6, 7, 8])
def test_robust_scaler_2d_arrays():
    """Test robust scaling of 2d array along first axis"""
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero

    scaler = RobustScaler()
    X_scaled = scaler.fit(X).transform(X)

    # Columns are centered on their median; the constant column stays zero.
    assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
    # Check RobustScaler on transforming csr matrix with one row
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    single_row = np.array([[0.1, 1., 2., 0., -1.]])
    scaler = RobustScaler(with_centering=False)
    scaler = scaler.fit(X)
    row_trans = scaler.transform(sparse.csr_matrix(single_row))
    # Without centering the transform is a pure division by scale_.
    row_expected = single_row / scaler.scale_
    assert_array_almost_equal(row_trans.toarray(), row_expected)
    row_scaled_back = scaler.inverse_transform(row_trans)
    assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
    # RobustScaler centers each feature on its median and scales by the
    # interquartile range; inverse_transform restores the data.
    X = iris.data
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # After scaling the IQR of every feature is 1.
    q = np.percentile(X_trans, q=(25, 75), axis=0)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_scale_function_without_centering():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)

    X_scaled = scale(X, with_mean=False)
    assert_false(np.any(np.isnan(X_scaled)))

    X_csr_scaled = scale(X_csr, with_mean=False)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    # test csc has same outcome
    X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
    assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())

    # raises value error on axis != 0
    assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)

    assert_array_almost_equal(X_scaled.mean(axis=0),
                              [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # null scale
    X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
    assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
    """robust_scale with axis=1 gives each row median 0 and IQR 1."""
    scaled = robust_scale(iris.data, axis=1)
    assert_array_almost_equal(np.median(scaled, axis=1), 0)
    q25, q75 = np.percentile(scaled, q=(25, 75), axis=1)
    assert_array_almost_equal(q75 - q25, 1)
def test_robust_scaler_zero_variance_features():
    """Check RobustScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)

    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produces by np.percentile
    # using numpy 1.9 Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
    X_expected = [[0., 0., +0.0],
                  [0., 0., -1.0],
                  [0., 0., +1.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 1., +0.],
                      [-1., 0., -0.83333],
                      [+0., 0., +1.66667]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
    """Check MaxAbsScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.3],
         [0., 1., +1.5],
         [0., 0., +0.0]]

    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    # Each column is divided by its max absolute value; the all-zero
    # column is left untouched (scale falls back to 1).
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 2.0, 1.0 / 3.0],
                      [-1., 1.0, 0.0],
                      [+0., 1.0, 1.0]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)

    # function interface
    X_trans = maxabs_scale(X)
    assert_array_almost_equal(X_trans, X_expected)

    # sparse data
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    X_trans_csr = scaler.fit_transform(X_csr)
    X_trans_csc = scaler.fit_transform(X_csc)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans_csr.A, X_expected)
    assert_array_almost_equal(X_trans_csc.A, X_expected)
    X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
    X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
    assert_array_almost_equal(X, X_trans_csr_inv.A)
    assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
    # MaxAbsScaler keys off absolute magnitude, so a large negative
    # entry dominates the scale of its column.
    data = [[0., 1., +0.5, -1.0],
            [0., 1., -0.3, -0.5],
            [0., 1., -100.0, 0.0],
            [0., 0., +0.0, -2.0]]
    expected = [[0., 1., 0.005, -0.5],
                [0., 1., -0.003, -0.25],
                [0., 1., -1.0, 0.0],
                [0., 0., 0.0, -1.0]]
    assert_array_almost_equal(MaxAbsScaler().fit_transform(data), expected)
def test_maxabs_scaler_transform_one_row_csr():
    # A CSR matrix with a single row must fit and transform without error.
    row = sparse.csr_matrix([[0.5, 1., 1.]])
    scaler = MaxAbsScaler().fit(row)
    scaled = scaler.transform(row)
    assert_array_almost_equal(scaled.toarray(),
                              sparse.csr_matrix([[1., 1., 1.]]).toarray())
    # inverse_transform recovers the original row.
    assert_array_almost_equal(row.toarray(),
                              scaler.inverse_transform(scaled).toarray())
@ignore_warnings
def test_deprecation_minmax_scaler():
    # Accessing the old attribute names must emit a DeprecationWarning
    # while still returning the same values as the new trailing-underscore
    # attributes.
    X = np.random.RandomState(0).random_sample((5, 4))
    scaler = MinMaxScaler().fit(X)

    for attr, depr_message in (
            ("data_range", "Attribute data_range will be removed in "
                           "0.19. Use ``data_range_`` instead"),
            ("data_min", "Attribute data_min will be removed in "
                         "0.19. Use ``data_min_`` instead")):
        value = assert_warns_message(DeprecationWarning, depr_message,
                                     getattr, scaler, attr)
        assert_array_equal(value, getattr(scaler, attr))
def test_warning_scaling_integers():
    # Check warning when scaling integer data
    X = np.array([[1, 2, 0],
                  [0, 0, 0]], dtype=np.uint8)
    w = "Data with input dtype uint8 was converted to float64"
    clean_warning_registry()
    # The same conversion warning must be raised by the function
    # interface and by both scaler estimators.
    for scale_func in (scale, StandardScaler().fit, MinMaxScaler().fit):
        assert_warns_message(DataConversionWarning, w, scale_func, X)
def test_maxabs_scaler_1d():
    # Test scaling of dataset along single axis.
    # BUG FIX: the fixture list repeated ``X_list_1row`` twice and never
    # exercised ``X_list_1col``; use each 1-d fixture exactly once.
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
        scaler = MaxAbsScaler(copy=True)
        X_scaled = scaler.fit(X).transform(X)

        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done

        if _check_dim_1axis(X) == 1:
            # One sample: every feature is scaled to +/-1.
            assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
                                      np.ones(n_features))
        else:
            # One feature: its max absolute value becomes 1.
            assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
        assert_equal(scaler.n_samples_seen_, X.shape[0])

        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)

    # Constant feature
    X = np.ones(5).reshape(5, 1)
    scaler = MaxAbsScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
    assert_equal(scaler.n_samples_seen_, X.shape[0])

    # function interface
    X_1d = X_1row.ravel()
    max_abs = np.abs(X_1d).max()
    assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
    # Test if partial_fit run over many batches of size 1 and 50
    # gives the same results as fit
    X = X_2d[:100, :]
    n = X.shape[0]
    for chunk_size in [1, 2, 50, n, n + 42]:
        # Test mean at the end of the process
        scaler_batch = MaxAbsScaler().fit(X)
        # Three incremental scalers, fed dense, CSR and CSC batches.
        scaler_incr = MaxAbsScaler()
        scaler_incr_csr = MaxAbsScaler()
        scaler_incr_csc = MaxAbsScaler()
        for batch in gen_batches(n, chunk_size):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            X_csr = sparse.csr_matrix(X[batch])
            scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
            X_csc = sparse.csc_matrix(X[batch])
            scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
        # After all batches, the incremental statistics must match the
        # single-shot fit for every input format.
        assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
        assert_array_almost_equal(scaler_batch.max_abs_,
                                  scaler_incr_csr.max_abs_)
        assert_array_almost_equal(scaler_batch.max_abs_,
                                  scaler_incr_csc.max_abs_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_equal(scaler_batch.n_samples_seen_,
                     scaler_incr_csr.n_samples_seen_)
        assert_equal(scaler_batch.n_samples_seen_,
                     scaler_incr_csc.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
        assert_array_almost_equal(scaler_batch.transform(X),
                                  scaler_incr.transform(X))
        # Test std after 1 step
        batch0 = slice(0, chunk_size)
        scaler_batch = MaxAbsScaler().fit(X[batch0])
        scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
        assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.transform(X),
                                  scaler_incr.transform(X))
        # Test std until the end of partial fits, and
        scaler_batch = MaxAbsScaler().fit(X)
        scaler_incr = MaxAbsScaler()  # Clean estimator
        for i, batch in enumerate(gen_batches(n, chunk_size)):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            # n_samples_seen_ must track exactly how many rows were fed in.
            assert_correct_incr(i, batch_start=batch.start,
                                batch_stop=batch.stop, n=n,
                                chunk_size=chunk_size,
                                n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
    """Rows must end up with unit l1 norm; an all-zero row stays zero."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l1', copy=True)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        X_norm1 = toarray(X_norm)

        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is X)
        X_norm2 = toarray(X_norm)

        for X_norm in (X_norm1, X_norm2):
            row_sums = np.abs(X_norm).sum(axis=1)
            for i in range(3):
                assert_almost_equal(row_sums[i], 1.0)
            assert_almost_equal(row_sums[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # BUG FIX: use the l1 norm (this is the l1 test) and recompute the
        # row sums of the freshly normalized matrix -- the original used
        # norm='l2' and asserted a stale ``row_sums`` left over from the
        # previous loop, so it did not actually test these inputs.
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
    """Rows must end up with unit l2 norm; an all-zero row stays zero."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        copying = Normalizer(norm='l2', copy=True)
        out_copy = copying.transform(X)
        assert_true(out_copy is not X)
        result_copy = toarray(out_copy)

        inplace = Normalizer(norm='l2', copy=False)
        out_inplace = inplace.transform(X)
        assert_true(out_inplace is X)
        result_inplace = toarray(out_inplace)

        for result in (result_copy, result_inplace):
            for i in range(3):
                assert_almost_equal(la.norm(result[i]), 1.0)
            assert_almost_equal(la.norm(result[3]), 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
    """Rows must end up with unit max norm; an all-zero row stays zero."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='max', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='max', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            row_maxs = X_norm.max(axis=1)
            for i in range(3):
                assert_almost_equal(row_maxs[i], 1.0)
            assert_almost_equal(row_maxs[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # BUG FIX: use norm='max' (this is the max-norm test) and recompute
        # the row maxima of the freshly normalized matrix -- the original
        # used norm='l2' and asserted a stale ``row_maxs`` from the previous
        # loop, so it did not actually test these inputs.
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
    # Test normalize function
    # Only tests functionality not used by the tests for Normalizer.
    X = np.random.RandomState(37).randn(3, 2)
    # Normalizing rows is the same as normalizing the transposed columns.
    assert_array_equal(normalize(X, copy=False),
                       normalize(X.T, axis=0, copy=False).T)
    # Invalid axis or norm names are rejected.
    assert_raises(ValueError, normalize, [[0]], axis=2)
    assert_raises(ValueError, normalize, [[0]], norm='l3')

    rs = np.random.RandomState(0)
    X_dense = rs.randn(10, 5)
    X_sparse = sparse.csr_matrix(X_dense)
    ones = np.ones((10))
    for X in (X_dense, X_sparse):
        for dtype in (np.float32, np.float64):
            for norm in ('l1', 'l2'):
                X = X.astype(dtype)
                X_norm = normalize(X, norm=norm)
                # The dtype must be preserved by normalization.
                assert_equal(X_norm.dtype, dtype)

                X_norm = toarray(X_norm)
                # l1: sum of |x| per row is 1; l2: sum of x^2 per row is 1.
                row_sums = (np.abs(X_norm).sum(axis=1) if norm == 'l1'
                            else (X_norm ** 2).sum(axis=1))
                assert_array_almost_equal(row_sums, ones)
def test_binarizer():
    # Thresholding behaviour of Binarizer across input container types.
    X_ = np.array([[1, 0, 5], [2, 3, -1]])
    for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):

        X = init(X_.copy())

        # threshold=2.0: only values strictly above 2 become 1.
        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        X_bin = binarizer.transform(X)
        # The output sparseness matches the input sparseness.
        assert_equal(sparse.issparse(X), sparse.issparse(X_bin))

        # Default threshold (0.0): strictly positive values become 1.
        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        # copy=False: the input object is reused when possible
        # (list input is always converted, hence copied).
        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        if init is not list:
            assert_true(X_bin is X)

        binarizer = Binarizer(copy=False)
        X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
        X_bin = binarizer.transform(X_float)
        if init is not list:
            assert_true(X_bin is X_float)

        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

    # Negative threshold: everything above -0.5 (incl. zeros) becomes 1.
    binarizer = Binarizer(threshold=-0.5, copy=True)
    for init in (np.array, list):
        X = init(X_.copy())

        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 1)
        assert_equal(np.sum(X_bin == 1), 5)
        X_bin = binarizer.transform(X)

    # Cannot use threshold < 0 for sparse
    assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
    # Test that KernelCenterer is equivalent to StandardScaler
    # in feature space
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # Centering the fit-time Gram matrix must match centering in
    # feature space first and taking inner products afterwards.
    centerer = KernelCenterer()
    assert_array_almost_equal(np.dot(X_fit_centered, X_fit_centered.T),
                              centerer.fit_transform(K_fit))

    # Same equivalence at prediction time on unseen samples.
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    assert_array_almost_equal(np.dot(X_pred_centered, X_fit_centered.T),
                              centerer.transform(K_pred))
def test_fit_transform():
    # fit(X).transform(X) and fit_transform(X) must produce identical
    # output for each transformer.
    X = np.random.RandomState(0).random_sample((5, 4))
    for obj in (StandardScaler(), Normalizer(), Binarizer()):
        assert_array_equal(obj.fit(X).transform(X), obj.fit_transform(X))
def test_deprecation_standard_scaler():
    # ``std_`` is deprecated in favour of ``scale_`` but must still
    # return the same values, with a DeprecationWarning.
    data = np.random.RandomState(0).random_sample((5, 4))
    scaler = StandardScaler().fit(data)
    depr_message = ("Function std_ is deprecated; Attribute ``std_`` will be "
                    "removed in 0.19. Use ``scale_`` instead")
    std_ = assert_warns_message(DeprecationWarning, depr_message, getattr,
                                scaler, "std_")
    assert_array_equal(std_, scaler.scale_)
def test_add_dummy_feature():
    # add_dummy_feature prepends a column of ones to a dense matrix.
    augmented = add_dummy_feature([[1, 0], [0, 1], [0, 1]])
    assert_array_equal(augmented, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
    # COO input keeps its sparse format and gains a leading ones column.
    augmented = add_dummy_feature(sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]))
    assert_true(sparse.isspmatrix_coo(augmented), augmented)
    assert_array_equal(augmented.toarray(),
                       [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
    # CSC input keeps its sparse format and gains a leading ones column.
    augmented = add_dummy_feature(sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]))
    assert_true(sparse.isspmatrix_csc(augmented), augmented)
    assert_array_equal(augmented.toarray(),
                       [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
    # CSR input keeps its sparse format and gains a leading ones column.
    augmented = add_dummy_feature(sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]))
    assert_true(sparse.isspmatrix_csr(augmented), augmented)
    assert_array_equal(augmented.toarray(),
                       [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
    # Test OneHotEncoder's fit and transform.
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder()
    # discover max values automatically
    X_trans = enc.fit_transform(X).toarray()
    assert_equal(X_trans.shape, (2, 5))
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       [[0., 1., 0., 1., 1.],
                        [1., 0., 1., 0., 1.]])

    # max value given as 3
    enc = OneHotEncoder(n_values=4)
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 4 * 3))
    assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])

    # max value given per feature
    enc = OneHotEncoder(n_values=[3, 2, 2])
    X = [[1, 0, 1], [0, 1, 1]]
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 3 + 2 + 2))
    assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that testing with larger feature works:
    X = np.array([[2, 0, 1], [0, 1, 1]])
    enc.transform(X)

    # test that an error is raised when out of bounds:
    X_too_large = [[0, 2, 1], [0, 1, 1]]
    assert_raises(ValueError, enc.transform, X_too_large)
    # BUG FIX: raw string -- ``\[`` is an invalid escape sequence in a
    # plain string literal (the resulting regex text is identical).
    error_msg = r"unknown categorical feature present \[2\] during transform."
    assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
    assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)

    # test that error is raised when wrong number of features
    assert_raises(ValueError, enc.transform, X[:, :-1])
    # test that error is raised when wrong number of features in fit
    # with prespecified n_values
    assert_raises(ValueError, enc.fit, X[:, :-1])
    # test exception on wrong init param
    assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)

    enc = OneHotEncoder()
    # test negative input to fit
    assert_raises(ValueError, enc.fit, [[0], [-1]])
    # test negative input to transform
    enc.fit([[0], [1]])
    assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
    # With sparse=False the encoder must return a dense array with the
    # same layout as the sparse variant.
    X = [[3, 2, 1], [0, 1, 1]]
    encoder = OneHotEncoder(sparse=False)
    encoded = encoder.fit_transform(X)  # n_values discovered automatically
    assert_equal(encoded.shape, (2, 5))
    assert_array_equal(encoder.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(encoder.feature_indices_, [0, 4, 7, 9])
    # check outcome
    assert_array_equal(encoded,
                       np.array([[0., 1., 0., 1., 1.],
                                 [1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
    # Apply Binarizer through _transform_selected to both the dense and
    # the CSR form of X and verify the selected columns were binarized.
    for M in (X, sparse.csr_matrix(X)):
        result = toarray(_transform_selected(M, Binarizer().transform, sel))
        assert_array_equal(result, X_expected)
def test_transform_selected():
    X = [[3, 2, 1], [0, 1, 1]]

    # Binarize only the first column, selected by index or by mask.
    first_col = [[1, 2, 1], [0, 1, 1]]
    _check_transform_selected(X, first_col, [0])
    _check_transform_selected(X, first_col, [True, False, False])

    # Binarize every column, through several equivalent selectors.
    all_cols = [[1, 1, 1], [0, 1, 1]]
    for sel in ([0, 1, 2], [True, True, True], "all"):
        _check_transform_selected(X, all_cols, sel)

    # An empty selection leaves the input untouched.
    _check_transform_selected(X, X, [])
    _check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
    # Fit an encoder restricted to the categorical features ``cat`` on X,
    # then apply it to both X and X2.
    enc = OneHotEncoder(categorical_features=cat)
    return enc.fit_transform(X), enc.transform(X2)
def _check_one_hot(X, X2, cat, n_features):
    # Encoding with a boolean mask and with the equivalent index array
    # must produce identical shapes and contents.
    ind = np.where(cat)[0]
    A, B = _run_one_hot(X, X2, cat)  # mask form
    C, D = _run_one_hot(X, X2, ind)  # index form

    # Check shape
    for mat, n_rows in ((A, 2), (B, 1), (C, 2), (D, 1)):
        assert_equal(mat.shape, (n_rows, n_features))

    # Check that mask and indices give the same results
    assert_array_equal(toarray(A), toarray(C))
    assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X2 = np.array([[1, 1, 1]])
    # (mask, expected number of output features); edge cases included:
    # none categorical (passthrough) and all categorical.
    for cat, n_features in (([True, False, False], 4),
                            ([False, False, False], 3),
                            ([True, True, True], 5)):
        _check_one_hot(X, X2, cat, n_features)
def test_one_hot_encoder_unknown_transform():
    X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
    y = np.array([[4, 1, 1]])

    # handle_unknown='error': categories unseen at fit time must raise.
    oh = OneHotEncoder(handle_unknown='error')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)

    # handle_unknown='ignore': unseen categories are silently dropped.
    oh = OneHotEncoder(handle_unknown='ignore')
    oh.fit(X)
    assert_array_equal(oh.transform(y).toarray(),
                       np.array([[0., 0., 0., 0., 1., 0., 0.]]))

    # Any other handle_unknown value is rejected at transform time.
    oh = OneHotEncoder(handle_unknown='42')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
    X = iris.data
    X_2d = X[:, :2]

    # Scalers that have a partial_fit method
    for scaler in (StandardScaler(with_mean=False, with_std=False),
                   MinMaxScaler(),
                   MaxAbsScaler()):
        scaler.fit_transform(X)
        # Refitting with a different shape must reset the internal state
        # instead of failing.
        scaler.fit_transform(X_2d)
| |
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# Glidein creation module Classes and functions needed to
# handle dictionary files
#
import os,os.path,string,copy
import cgWConsts,cWConsts
import cWDictFile
import pwd
import shutil
from glideinwms.lib import condorPrivsep
# Login name of the user this process is running as.
MY_USERNAME=pwd.getpwuid(os.getuid())[0]
# values are (group_name)
class MonitorGroupDictFile(cWDictFile.DictFile):
    """Dict file listing the monitoring groups an entry belongs to.

    Each value is a 1-tuple ``(group_name,)``; the serialized form is a
    ``<monitorgroups>`` XML fragment. (Python 2 module.)
    """

    def file_header(self,want_comments):
        # Opening XML tag, optionally preceded by an explanatory comment.
        if want_comments:
            return ("<!-- This entry is part of following monitoring groups -->\n") + ("<monitorgroups>")
        else:
            return ("<monitorgroups>")

    def file_footer(self,want_comments):
        # Close the XML fragment opened in file_header.
        return ("</monitorgroups>")

    # key can be None
    # in that case it will be composed out of value
    def add(self,key,val,allow_overwrite=0):
        # Validate that val is a 1-element list/tuple before storing.
        if not (type(val) in (type(()),type([]))):
            raise RuntimeError, "Values '%s' not a list or tuple"%val
        if len(val)!=1:
            raise RuntimeError, "Values '%s' not (group_name)"%str(val)
        if key is None:
            # Derive the key from the value's repr.
            key="%s"%val
        return cWDictFile.DictFile.add(self,key,val,allow_overwrite)

    def add_extended(self,
                     group_name,
                     allow_overwrite=0):
        # Convenience wrapper: build the 1-tuple and derive the key.
        # NOTE(review): ``allow_overwrite`` is accepted but not forwarded
        # to ``add`` -- confirm whether that is intentional.
        self.add(None,(group_name,))

    def format_val(self,key,want_comments):
        # One XML element per group.
        return " <monitorgroup group_name=\"%s\">"%(self.vals[key][0],)

    def parse_val(self,line):
        # NOTE(review): expects 4 whitespace-separated fields, which does
        # not match the XML emitted by format_val (looks copied from
        # InfoSysDictFile) -- confirm before relying on round-tripping.
        if len(line)==0:
            return # ignore empty lines
        if line[0]=='#':
            return # ignore comments
        arr=line.split(None,3)
        if len(arr)==0:
            return # empty line
        if len(arr)!=4:
            raise RuntimeError,"Not a valid var line (expected 4, found %i elements): '%s'"%(len(arr),line)
        key=arr[-1]
        return self.add(key,arr[:-1])
# values are (Type,System,Ref)
class InfoSysDictFile(cWDictFile.DictFile):
    """Dict file describing the information systems an entry reports to.

    Each value is a 3-tuple ``(Type, System, Ref)``; the key is an ID
    derived from the value when not given explicitly. (Python 2 module.)
    """

    def file_header(self,want_comments):
        # Column-header comment describing the on-disk format.
        if want_comments:
            return (cWDictFile.DictFile.file_header(self,want_comments)+"\n"+
                    ("# %s \t%30s \t%s \t\t%s\n"%('Type','Server','Ref','ID'))+
                    ("#"*78))
        else:
            return None

    # key can be None
    # in that case it will be composed out of value
    def add(self,key,val,allow_overwrite=0):
        # Validate that val is a 3-element list/tuple before storing.
        if not (type(val) in (type(()),type([]))):
            raise RuntimeError, "Values '%s' not a list or tuple"%val
        if len(val)!=3:
            raise RuntimeError, "Values '%s' not (Type,System,Ref)"%str(val)
        if key is None:
            # e.g. "type://system/ref"
            key="%s://%s/%s"%val
        return cWDictFile.DictFile.add(self,key,val,allow_overwrite)

    def add_extended(self,
                     infosys_type,
                     server_name,
                     ref_str,
                     allow_overwrite=0):
        # Convenience wrapper taking the three fields separately.
        # NOTE(review): ``allow_overwrite`` is accepted but not forwarded
        # to ``add`` -- confirm whether that is intentional.
        self.add(None,(infosys_type,server_name,ref_str))

    def format_val(self,key,want_comments):
        # Tab-separated line: Type, Server, Ref, then the ID (key) last.
        return "%s \t%30s \t%s \t\t%s"%(self.vals[key][0],self.vals[key][1],self.vals[key][2],key)

    def parse_val(self,line):
        # Inverse of format_val: the last field is the key, the first
        # three fields form the value tuple.
        if len(line)==0:
            return # ignore empty lines
        if line[0]=='#':
            return # ignore comments
        arr=line.split(None,3)
        if len(arr)==0:
            return # empty line
        if len(arr)!=4:
            raise RuntimeError,"Not a valid var line (expected 4, found %i elements): '%s'"%(len(arr),line)
        key=arr[-1]
        return self.add(key,arr[:-1])
class CondorJDLDictFile(cWDictFile.DictFile):
    """Dict file holding a Condor JDL (submit description) file.

    Lines are ``key = value`` pairs; the trailing ``Queue`` directive is
    kept in ``jobs_in_cluster`` instead of the dictionary itself.
    """

    def __init__(self,dir,fname,sort_keys=False,order_matters=False,jobs_in_cluster=None,
                 fname_idx=None):      # if none, use fname
        cWDictFile.DictFile.__init__(self,dir,fname,sort_keys,order_matters,fname_idx)
        self.jobs_in_cluster=jobs_in_cluster

    def file_footer(self,want_comments):
        # The JDL always ends with a Queue directive, optionally with a count.
        if self.jobs_in_cluster is None:
            return "Queue"
        else:
            return "Queue %s"%self.jobs_in_cluster

    def format_val(self,key,want_comments):
        # The sentinel value emits the bare key (e.g. for flags).
        if self.vals[key] == "##PRINT_KEY_ONLY##":
            return "%s" % key
        else:
            return "%s = %s"%(key,self.vals[key])

    def parse_val(self,line):
        if line[0]=='#':
            return # ignore comments
        arr=line.split(None,2)
        if len(arr)==0:
            return # empty line
        if arr[0]=='Queue':
            # this is the final line
            if len(arr)==1:
                # default
                self.jobs_in_cluster=None
            else:
                self.jobs_in_cluster=arr[1]
            return

        if len(arr) <= 2:
            return self.add(arr[0],"") # key = <empty> or placeholder for env variable
        else:
            return self.add(arr[0],arr[2])

    def is_equal(self,other,         # other must be of the same class
                 compare_dir=False,compare_fname=False,
                 compare_keys=None): # if None, use order_matters
        if self.jobs_in_cluster==other.jobs_in_cluster:
            # BUG FIX: the unbound base-class call was missing ``self``,
            # which shifted every argument by one position (``other`` was
            # passed as ``self``, ``compare_dir`` as ``other``, ...).
            return cWDictFile.DictFile.is_equal(self,other,compare_dir,compare_fname,compare_keys)
        else:
            return False
################################################
#
# Functions that create default dictionaries
#
################################################
# internal, do not use from outside the module
def get_common_dicts(submit_dir,stage_dir):
    """Build the dictionaries shared by the main and entry levels.

    Mutable ones (attrs, params) live in submit_dir; the staged,
    timestamped ones (description, consts, vars, untar_cfg, file_list,
    signature) live in stage_dir.
    """
    common_dicts={'attrs':cWDictFile.ReprDictFile(submit_dir,cgWConsts.ATTRS_FILE),
                  'description':cWDictFile.DescriptionDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.DESCRIPTION_FILE),fname_idx=cWConsts.DESCRIPTION_FILE),
                  'consts':cWDictFile.StrDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.CONSTS_FILE),fname_idx=cWConsts.CONSTS_FILE),
                  'params':cWDictFile.ReprDictFile(submit_dir,cgWConsts.PARAMS_FILE),
                  'vars':cWDictFile.VarsDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.VARS_FILE),fname_idx=cWConsts.VARS_FILE),
                  'untar_cfg':cWDictFile.StrDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.UNTAR_CFG_FILE),fname_idx=cWConsts.UNTAR_CFG_FILE),
                  'file_list':cWDictFile.FileDictFile(stage_dir,cWConsts.insert_timestr(cWConsts.FILE_LISTFILE),fname_idx=cWConsts.FILE_LISTFILE),
                  "signature":cWDictFile.SHA1DictFile(stage_dir,cWConsts.insert_timestr(cWConsts.SIGNATURE_FILE),fname_idx=cWConsts.SIGNATURE_FILE)}
    # make the description point at the files just declared
    refresh_description(common_dicts)
    return common_dicts
def get_main_dicts(submit_dir,stage_dir):
    """Build the dictionary set for the main (global) glidein level."""
    main_dicts=get_common_dicts(submit_dir,stage_dir)
    main_dicts['summary_signature']=cWDictFile.SummarySHA1DictFile(submit_dir,cWConsts.SUMMARY_SIGNATURE_FILE)
    main_dicts['glidein']=cWDictFile.StrDictFile(submit_dir,cgWConsts.GLIDEIN_FILE)
    main_dicts['frontend_descript']=cWDictFile.ReprDictFile(submit_dir,cgWConsts.FRONTEND_DESCRIPT_FILE)
    main_dicts['after_file_list']=cWDictFile.FileDictFile(stage_dir,cWConsts.insert_timestr(cgWConsts.AFTER_FILE_LISTFILE),fname_idx=cgWConsts.AFTER_FILE_LISTFILE)
    return main_dicts
def get_entry_dicts(entry_submit_dir,entry_stage_dir,entry_name):
    """Build the dictionary set for a single entry.

    NOTE(review): ``entry_name`` is currently unused in this body --
    presumably kept for interface symmetry; confirm before removing.
    """
    entry_dicts=get_common_dicts(entry_submit_dir,entry_stage_dir)
    entry_dicts['job_descript']=cWDictFile.StrDictFile(entry_submit_dir,cgWConsts.JOB_DESCRIPT_FILE)
    entry_dicts['infosys']=InfoSysDictFile(entry_submit_dir,cgWConsts.INFOSYS_FILE)
    entry_dicts['mongroup']=MonitorGroupDictFile(entry_submit_dir,cgWConsts.MONITOR_CONFIG_FILE)
    return entry_dicts
################################################
#
# Functions that load dictionaries
#
################################################
# internal, do not use from outside the module
def load_common_dicts(dicts, # update in place
                      description_el):
    """Load the shared dictionaries from disk, in dependency order."""
    # first submit dir ones (mutable)
    dicts['params'].load()
    dicts['attrs'].load()
    # now the ones keyed in the description
    dicts['signature'].load(fname=description_el.vals2['signature'])
    dicts['file_list'].load(fname=description_el.vals2['file_list'])
    file_el=dicts['file_list']
    # all others are keyed in the file_list
    dicts['consts'].load(fname=file_el[cWConsts.CONSTS_FILE][0])
    dicts['vars'].load(fname=file_el[cWConsts.VARS_FILE][0])
    dicts['untar_cfg'].load(fname=file_el[cWConsts.UNTAR_CFG_FILE][0])
def load_main_dicts(main_dicts): # update in place
    """Load the main-level dictionaries from disk."""
    main_dicts['glidein'].load()
    main_dicts['frontend_descript'].load()
    # summary_signature has keys for description
    main_dicts['summary_signature'].load()
    # load the description
    main_dicts['description'].load(fname=main_dicts['summary_signature']['main'][1])
    # all others are keyed in the description
    main_dicts['after_file_list'].load(fname=main_dicts['description'].vals2['after_file_list'])
    load_common_dicts(main_dicts,main_dicts['description'])
def load_entry_dicts(entry_dicts, # update in place
                     entry_name,summary_signature):
    """Load the entry-level dictionaries from disk."""
    try:
        entry_dicts['infosys'].load()
    except RuntimeError:
        pass # ignore errors, this is optional
    entry_dicts['job_descript'].load()
    # load the description (name from summary_signature)
    entry_dicts['description'].load(fname=summary_signature[cgWConsts.get_entry_stage_dir("",entry_name)][1])
    # all others are keyed in the description
    load_common_dicts(entry_dicts,entry_dicts['description'])
############################################################
#
# Functions that create data out of the existing dictionary
#
############################################################
def refresh_description(dicts): # update in place
    """Point the description dictionary at the current file names."""
    description_dict=dicts['description']
    description_dict.add(dicts['signature'].get_fname(),"signature",allow_overwrite=True)
    # after_file_list only exists at the main level
    for k in ('file_list','after_file_list'):
        if dicts.has_key(k):
            description_dict.add(dicts[k].get_fname(),k,allow_overwrite=True)
def refresh_file_list(dicts,is_main, # update in place
                      files_set_readonly=True,files_reset_changed=True):
    """Re-register the consts/vars/untar_cfg content into the file list.

    NOTE(review): ``is_main`` is unused in this body -- presumably kept
    for symmetry with the callers; confirm before removing.
    """
    file_dict=dicts['file_list']
    file_dict.add(cWConsts.CONSTS_FILE,(dicts['consts'].get_fname(),"regular","TRUE","CONSTS_FILE",dicts['consts'].save_into_str(set_readonly=files_set_readonly,reset_changed=files_reset_changed)),allow_overwrite=True)
    file_dict.add(cWConsts.VARS_FILE,(dicts['vars'].get_fname(),"regular","TRUE","CONDOR_VARS_FILE",dicts['vars'].save_into_str(set_readonly=files_set_readonly,reset_changed=files_reset_changed)),allow_overwrite=True)
    file_dict.add(cWConsts.UNTAR_CFG_FILE,(dicts['untar_cfg'].get_fname(),"regular","TRUE","UNTAR_CFG_FILE",dicts['untar_cfg'].save_into_str(set_readonly=files_set_readonly,reset_changed=files_reset_changed)),allow_overwrite=True)
# dictionaries must have been written to disk before using this
def refresh_signature(dicts): # update in place
    """Recompute SHA1 signatures for saved dictionaries and listed files."""
    signature_dict=dicts['signature']
    for k in ('consts','vars','untar_cfg','file_list','after_file_list','description'):
        if dicts.has_key(k):
            signature_dict.add_from_file(dicts[k].get_filepath(),allow_overwrite=True)
    # add signatures of all the files linked in the lists
    for k in ('file_list','after_file_list'):
        if dicts.has_key(k):
            filedict=dicts[k]
            for fname in filedict.get_immutable_files():
                signature_dict.add_from_file(os.path.join(filedict.dir,fname),allow_overwrite=True)
################################################
#
# Functions that save dictionaries
#
################################################
# internal, do not use from outside the module
def save_common_dicts(dicts, # will update in place, too
                      is_main,
                      set_readonly=True):
    """Save the shared dictionaries to disk, in dependency order."""
    # make sure description is up to date
    refresh_description(dicts)
    # save the immutable ones
    for k in ('description',):
        dicts[k].save(set_readonly=set_readonly)
    # Load files into the file list
    # 'consts','untar_cfg','vars' will be loaded
    refresh_file_list(dicts,is_main)
    # save files in the file lists
    for k in ('file_list','after_file_list'):
        if dicts.has_key(k):
            dicts[k].save_files(allow_overwrite=True)
    # then save the lists
    for k in ('file_list','after_file_list'):
        if dicts.has_key(k):
            dicts[k].save(set_readonly=set_readonly)
    # calc and save the signatures (must happen after everything is on disk)
    refresh_signature(dicts)
    dicts['signature'].save(set_readonly=set_readonly)
    # finally save the mutable one(s)
    dicts['params'].save(set_readonly=set_readonly)
    dicts['attrs'].save(set_readonly=set_readonly)
# must be invoked after all the entries have been saved
def save_main_dicts(main_dicts, # will update in place, too
                    set_readonly=True):
    """Save the main-level dictionaries and record them in summary_signature."""
    main_dicts['glidein'].save(set_readonly=set_readonly)
    main_dicts['frontend_descript'].save(set_readonly=set_readonly)
    save_common_dicts(main_dicts,True,set_readonly=set_readonly)
    summary_signature=main_dicts['summary_signature']
    summary_signature.add_from_file(key="main",filepath=main_dicts['signature'].get_filepath(),fname2=main_dicts['description'].get_fname(),allow_overwrite=True)
    summary_signature.save(set_readonly=set_readonly)
def save_entry_dicts(entry_dicts, # will update in place, too
                     entry_name,summary_signature, # update in place
                     set_readonly=True):
    """Save the entry-level dictionaries and record them in summary_signature.

    Note: the caller is responsible for saving summary_signature itself
    (done in save_main_dicts after all entries have been saved).
    """
    entry_dicts['mongroup'].save(set_readonly=set_readonly)
    entry_dicts['infosys'].save(set_readonly=set_readonly)
    entry_dicts['job_descript'].save(set_readonly=set_readonly)
    save_common_dicts(entry_dicts,False,set_readonly=set_readonly)
    summary_signature.add_from_file(key=cgWConsts.get_entry_stage_dir("",entry_name),filepath=entry_dicts['signature'].get_filepath(),fname2=entry_dicts['description'].get_fname(),allow_overwrite=True)
################################################
#
# Functions that reuse dictionaries
#
################################################
def reuse_simple_dict(dicts,other_dicts,key,compare_keys=None):
    """Adopt other_dicts[key] when it matches dicts[key].

    Returns True when the old dictionary was reused (marked unchanged
    and read-only), False when the two differ.
    """
    if not dicts[key].is_equal(other_dicts[key], compare_dir=True,
                               compare_fname=False,
                               compare_keys=compare_keys):
        return False
    # Identical content: keep the previous object, frozen and unchanged.
    reused = copy.deepcopy(other_dicts[key])
    reused.changed = False
    reused.set_readonly(True)
    dicts[key] = reused
    return True
def reuse_file_dict(dicts,other_dicts,key):
    """Reuse a file-based dictionary: reclaim unchanged files, then compare."""
    dicts[key].reuse(other_dicts[key])
    return reuse_simple_dict(dicts, other_dicts, key)
def reuse_common_dicts(dicts, other_dicts, is_main, all_reused):
    """Try to reuse the dictionaries common to main and entries.

    Returns True only if all the immutable dictionaries could be reused.
    The mutable ones (attrs, params) are reused when possible but do not
    affect the returned value.
    """
    # check the simple immutable dictionaries first
    for key in ('consts', 'untar_cfg', 'vars'):
        if not reuse_simple_dict(dicts, other_dicts, key):
            all_reused = False
    # the file names may have changed, so rebuild the file lists
    refresh_file_list(dicts, is_main)
    # then check the file-based dictionaries
    for key in ('file_list', 'after_file_list'):
        if key in dicts:
            if not reuse_file_dict(dicts, other_dicts, key):
                all_reused = False
    if all_reused:
        # description and signature only track the other files,
        # so they change iff the others change
        for key in ('description', 'signature'):
            dicts[key] = copy.deepcopy(other_dicts[key])
            dicts[key].changed = False
            dicts[key].set_readonly(True)
    # finally try the mutable ones; their outcome is ignored
    for key in ('attrs', 'params'):
        reuse_simple_dict(dicts, other_dicts, key)
    return all_reused
def reuse_main_dicts(main_dicts, other_main_dicts):
    """Reuse as many main dictionaries as possible.

    Returns True when all the common immutable dictionaries were reused.
    """
    # the main-only immutable dictionaries; their outcome is ignored
    reuse_simple_dict(main_dicts, other_main_dicts, 'glidein')
    reuse_simple_dict(main_dicts, other_main_dicts, 'frontend_descript')
    # will not try to reuse the summary_signature... being in submit_dir
    # can be rewritten and it is not worth the pain to try to prevent it
    return reuse_common_dicts(main_dicts, other_main_dicts, True, True)
def reuse_entry_dicts(entry_dicts, other_entry_dicts, entry_name):
    """Reuse as many entry dictionaries as possible.

    Returns True when all the common immutable dictionaries were reused.
    """
    # the entry-only immutable dictionaries; their outcome is ignored
    reuse_simple_dict(entry_dicts, other_entry_dicts, 'job_descript')
    reuse_simple_dict(entry_dicts, other_entry_dicts, 'infosys')
    return reuse_common_dicts(entry_dicts, other_entry_dicts, False, True)
################################################
#
# Handle dicts as Classes
#
################################################
################################################
#
# Support classes
#
################################################
###########################################
# Privsep support classes
class clientDirSupport(cWDictFile.simpleDirSupport):
    """Create/delete a directory owned by a (possibly different) user.

    When the owner is the current user, plain os/shutil calls are used;
    otherwise the operations go through condorPrivsep, either with the
    dedicated mkdir/rmtree helpers (privsep_mkdir=True) or by executing
    /bin/mkdir, /bin/chmod and /bin/rm as the target user.
    """
    def __init__(self,user,dir,dir_name,privsep_mkdir=False):
        # user          - account that must own the directory
        # dir           - full path of the directory to manage
        # dir_name      - human-readable name, used in error messages
        # privsep_mkdir - if True, use condorPrivsep.mkdir/rmtree instead
        #                 of executing /bin/mkdir and /bin/rm
        cWDictFile.simpleDirSupport.__init__(self,dir,dir_name)
        self.user=user
        self.privsep_mkdir=privsep_mkdir
    def create_dir(self,fail_if_exists=True):
        """Create self.dir; return True if created, False if it already
        existed (and fail_if_exists is False).

        Raises RuntimeError when the parent is missing, when the directory
        already exists and fail_if_exists is True, or when creation fails.
        """
        base_dir=os.path.dirname(self.dir)
        if not os.path.isdir(base_dir):
            raise RuntimeError,"Missing base %s directory %s."%(self.dir_name,base_dir)
        if os.path.isdir(self.dir):
            if fail_if_exists:
                raise RuntimeError,"Cannot create %s dir %s, already exists."%(self.dir_name,self.dir)
            else:
                return False # already exists, nothing to do
        if self.user==MY_USERNAME:
            # keep it simple, if possible
            try:
                os.mkdir(self.dir)
            except OSError,e:
                raise RuntimeError,"Failed to create %s dir: %s"%(self.dir_name,e)
        elif self.privsep_mkdir:
            try:
                # use privsep mkdir, as requested
                condorPrivsep.mkdir(base_dir,os.path.basename(self.dir),self.user)
                # with condor 7.9.4 a permissions change is required
                condorPrivsep.execute(self.user,base_dir,'/bin/chmod',['chmod','0755',self.dir],stdout_fname=None)
            except condorPrivsep.ExeError, e:
                raise RuntimeError,"Failed to create %s dir (user %s): %s"%(self.dir_name,self.user,e)
            except:
                # condorPrivsep may raise non-ExeError exceptions; report generically
                raise RuntimeError,"Failed to create %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
        else:
            try:
                # use the execute command
                # do not use the mkdir one, as we do not need root privileges
                condorPrivsep.execute(self.user,base_dir,'/bin/mkdir',['mkdir',self.dir],stdout_fname=None)
                # with condor 7.9.4 a permissions change is required
                condorPrivsep.execute(self.user,base_dir,'/bin/chmod',['chmod','0755',self.dir],stdout_fname=None)
            except condorPrivsep.ExeError, e:
                raise RuntimeError,"Failed to create %s dir (user %s): %s"%(self.dir_name,self.user,e)
            except:
                raise RuntimeError,"Failed to create %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
        return True
    def delete_dir(self):
        """Recursively remove self.dir.

        Raises RuntimeError when the parent is missing or removal fails.
        """
        base_dir=os.path.dirname(self.dir)
        if not os.path.isdir(base_dir):
            raise RuntimeError,"Missing base %s directory %s!"%(self.dir_name,base_dir)
        if self.user==MY_USERNAME:
            # keep it simple, if possible
            shutil.rmtree(self.dir)
        elif self.privsep_mkdir:
            try:
                # use privsep rmtree, as requested
                condorPrivsep.rmtree(base_dir,os.path.basename(self.dir))
            except condorPrivsep.ExeError, e:
                raise RuntimeError,"Failed to remove %s dir (user %s): %s"%(self.dir_name,self.user,e)
            except:
                raise RuntimeError,"Failed to remove %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
        else:
            try:
                # use the execute command
                # do not use the rmtree one, as we do not need root privileges
                condorPrivsep.execute(self.user,base_dir,'/bin/rm',['rm','-fr',self.dir],stdout_fname=None)
            except condorPrivsep.ExeError, e:
                raise RuntimeError,"Failed to remove %s dir (user %s): %s"%(self.dir_name,self.user,e)
            except:
                raise RuntimeError,"Failed to remove %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
class chmodClientDirSupport(clientDirSupport):
    """clientDirSupport variant that creates the directory with an explicit
    permission mode (chmod) instead of the default 0755.

    NOTE(review): unlike the parent, create_dir here has no privsep_mkdir
    branch; non-local users always go through condorPrivsep.execute.
    """
    def __init__(self,user,dir,chmod,dir_name):
        # chmod - numeric mode (e.g. 0700) to apply to the new directory
        clientDirSupport.__init__(self,user,dir,dir_name)
        self.chmod=chmod
    def create_dir(self,fail_if_exists=True):
        """Create self.dir with mode self.chmod; return True if created,
        False if it already existed (and fail_if_exists is False).

        Raises RuntimeError when the parent is missing, when the directory
        already exists and fail_if_exists is True, or when creation fails.
        """
        base_dir=os.path.dirname(self.dir)
        if not os.path.isdir(base_dir):
            raise RuntimeError,"Missing base %s directory %s."%(self.dir_name,base_dir)
        if os.path.isdir(self.dir):
            if fail_if_exists:
                raise RuntimeError,"Cannot create %s dir %s, already exists."%(self.dir_name,self.dir)
            else:
                return False # already exists, nothing to do
        if self.user==MY_USERNAME:
            # keep it simple, if possible
            # NOTE(review): os.mkdir's mode is filtered by the umask, so the
            # resulting permissions may be stricter than self.chmod - confirm
            try:
                os.mkdir(self.dir,self.chmod)
            except OSError,e:
                raise RuntimeError,"Failed to create %s dir: %s"%(self.dir_name,e)
        else:
            try:
                # use the execute command
                # do not use the mkdir one, as we do not need root privileges
                condorPrivsep.execute(self.user,base_dir,'/bin/mkdir',['mkdir',self.dir],stdout_fname=None)
                # with condor 7.9.4 a permissions change is required
                condorPrivsep.execute(self.user,base_dir,'/bin/chmod',['chmod',"0%o"%self.chmod,self.dir],stdout_fname=None)
            except condorPrivsep.ExeError, e:
                raise RuntimeError,"Failed to create %s dir (user %s): %s"%(self.dir_name,self.user,e)
            except:
                raise RuntimeError,"Failed to create %s dir (user %s): Unknown privsep error"%(self.dir_name,self.user)
        return True
###########################################
# Support classes used my Main
class baseClientDirSupport(cWDictFile.multiSimpleDirSupport):
    """Client dir support that also auto-creates the per-user base directory
    when it does not exist yet."""
    def __init__(self, user, dir, dir_name='client'):
        cWDictFile.multiSimpleDirSupport.__init__(self, (), dir_name)
        self.user = user
        self.base_dir = os.path.dirname(dir)
        if not os.path.isdir(self.base_dir):
            # the user base directory does not exist yet; create it
            # automatically to make life easier for the factory admins
            self.add_dir_obj(clientDirSupport(user, self.base_dir,
                                              "base %s" % dir_name,
                                              privsep_mkdir=True))
        self.add_dir_obj(clientDirSupport(user, dir, dir_name))
class clientSymlinksSupport(cWDictFile.multiSimpleDirSupport):
    """Create user_<name> symlinks pointing at the per-user client dirs."""
    def __init__(self, user_dirs, work_dir, symlink_base_subdir, dir_name):
        self.symlink_base_dir = os.path.join(work_dir, symlink_base_subdir)
        cWDictFile.multiSimpleDirSupport.__init__(self, (self.symlink_base_dir,), dir_name)
        for user, target_dir in user_dirs.items():
            link_path = os.path.join(self.symlink_base_dir, "user_%s" % user)
            self.add_dir_obj(cWDictFile.symlinkSupport(target_dir, link_path, dir_name))
###########################################
# Support classes used my Entry
class clientLogDirSupport(clientDirSupport):
    """clientDirSupport specialized for the per-entry client log directory."""
    def __init__(self, user, log_dir, dir_name='clientlog'):
        clientDirSupport.__init__(self, user, log_dir, dir_name)
class clientProxiesDirSupport(chmodClientDirSupport):
def __init__(self,user,proxies_dir,proxiesdir_name="clientproxies"):
chmodClientDirSupport.__init__(self,user,proxies_dir,0700,proxiesdir_name)
################################################
#
# This Class contains the main dicts
#
################################################
class glideinMainDicts(cWDictFile.fileMainDicts):
    """Holds the main (non-entry) glidein dictionaries."""
    def __init__(self,
                 work_dir, stage_dir,
                 workdir_name,
                 log_dir,
                 client_log_dirs, client_proxies_dirs):
        cWDictFile.fileMainDicts.__init__(self, work_dir, stage_dir, workdir_name,
                                          False,  # simple_work_dir=False
                                          log_dir)
        self.client_log_dirs = client_log_dirs
        for user, user_log_dir in client_log_dirs.items():
            self.add_dir_obj(baseClientDirSupport(user, user_log_dir, 'clientlog'))
        self.client_proxies_dirs = client_proxies_dirs
        for user, user_proxies_dir in client_proxies_dirs.items():
            self.add_dir_obj(baseClientDirSupport(user, user_proxies_dir, 'clientproxies'))
        # make them easier to find; create symlinks in work/client_log
        # and work/client_proxies
        self.add_dir_obj(clientSymlinksSupport(client_log_dirs, work_dir,
                                               'client_log', 'clientlog'))
        self.add_dir_obj(clientSymlinksSupport(client_proxies_dirs, work_dir,
                                               'client_proxies', 'clientproxies'))

    ######################################
    # Redefine methods needed by parent
    def load(self):
        """Load all main dictionaries from disk."""
        load_main_dicts(self.dicts)

    def save(self, set_readonly=True):
        """Save all main dictionaries, optionally marking them read-only."""
        save_main_dicts(self.dicts, set_readonly=set_readonly)

    def reuse(self, other):
        """Reuse as much of 'other' (same class) as possible."""
        cWDictFile.fileMainDicts.reuse(self, other)
        reuse_main_dicts(self.dicts, other.dicts)

    ####################
    # Internal
    ####################
    def get_daemon_log_dir(self, base_dir):
        return os.path.join(base_dir, "factory")

    # Child must overwrite this
    def get_main_dicts(self):
        return get_main_dicts(self.work_dir, self.stage_dir)
################################################
#
# This Class contains the entry dicts
#
################################################
class glideinEntryDicts(cWDictFile.fileSubDicts):
    """Holds the dictionaries of a single entry."""
    def __init__(self, base_work_dir, base_stage_dir, sub_name,
                 summary_signature, workdir_name,
                 base_log_dir, base_client_log_dirs, base_client_proxies_dirs):
        cWDictFile.fileSubDicts.__init__(self, base_work_dir, base_stage_dir, sub_name,
                                         summary_signature, workdir_name,
                                         False,  # simple_work_dir=False
                                         base_log_dir)
        for user, base_log in base_client_log_dirs.items():
            self.add_dir_obj(clientLogDirSupport(
                user, cgWConsts.get_entry_userlog_dir(base_log, sub_name)))
        for user, base_proxies in base_client_proxies_dirs.items():
            self.add_dir_obj(clientProxiesDirSupport(
                user, cgWConsts.get_entry_userproxies_dir(base_proxies, sub_name)))

    ######################################
    # Redefine methods needed by parent
    def load(self):
        """Load all entry dictionaries from disk."""
        load_entry_dicts(self.dicts, self.sub_name, self.summary_signature)

    def save(self, set_readonly=True):
        """Save all entry dictionaries and record them in summary_signature."""
        save_entry_dicts(self.dicts, self.sub_name, self.summary_signature,
                         set_readonly=set_readonly)

    def save_final(self, set_readonly=True):
        # nothing to do at the end of the save cycle
        pass

    def reuse(self, other):
        """Reuse as much of 'other' (same class) as possible."""
        cWDictFile.fileSubDicts.reuse(self, other)
        reuse_entry_dicts(self.dicts, other.dicts, self.sub_name)

    ####################
    # Internal
    ####################
    def get_sub_work_dir(self, base_dir):
        return cgWConsts.get_entry_submit_dir(base_dir, self.sub_name)

    def get_sub_log_dir(self, base_dir):
        return cgWConsts.get_entry_log_dir(base_dir, self.sub_name)

    def get_sub_stage_dir(self, base_dir):
        return cgWConsts.get_entry_stage_dir(base_dir, self.sub_name)

    def get_sub_dicts(self):
        return get_entry_dicts(self.work_dir, self.stage_dir, self.sub_name)

    def reuse_nocheck(self, other):
        reuse_entry_dicts(self.dicts, other.dicts, self.sub_name)
################################################
#
# This Class contains both the main and
# the entry dicts
#
################################################
class glideinDicts(cWDictFile.fileDicts):
    """Container holding both the main dicts and the per-entry dicts."""
    def __init__(self, work_dir, stage_dir, log_dir,
                 client_log_dirs, client_proxies_dirs,
                 entry_list=None,
                 workdir_name='submit'):
        # entry_list=None avoids the mutable-default-argument pitfall the
        # original entry_list=[] had (one shared list across all instances
        # created without an explicit entry_list); passing a list behaves
        # exactly as before
        if entry_list is None:
            entry_list = []
        self.client_log_dirs = client_log_dirs
        self.client_proxies_dirs = client_proxies_dirs
        cWDictFile.fileDicts.__init__(self, work_dir, stage_dir, entry_list, workdir_name,
                                      False,  # simple_work_dir=False
                                      log_dir)

    ###########
    # PRIVATE
    ###########
    ######################################
    # Redefine methods needed by parent
    def new_MainDicts(self):
        """Factory for the main-dicts object."""
        return glideinMainDicts(self.work_dir, self.stage_dir, self.workdir_name,
                                self.log_dir,
                                self.client_log_dirs, self.client_proxies_dirs)

    def new_SubDicts(self, sub_name):
        """Factory for the dicts object of a single entry."""
        return glideinEntryDicts(self.work_dir, self.stage_dir, sub_name,
                                 self.main_dicts.get_summary_signature(),
                                 self.workdir_name, self.log_dir,
                                 self.client_log_dirs, self.client_proxies_dirs)

    def get_sub_name_from_sub_stage_dir(self, sign_key):
        """Map a signature key (an entry stage dir) back to the entry name."""
        return cgWConsts.get_entry_name_from_entry_stage_dir(sign_key)
| |
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id$ '''
__doc__='Test script for reportlab.tables'
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.graphics.charts.linecharts import HorizontalLineChart
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin
from reportlab.graphics.charts.barcharts import VerticalBarChart
styleSheet = getSampleStyleSheet()
def getTable():
    """Build the small sales demo table used by the style tests."""
    data = (('', 'North', 'South', 'East', 'West'),
            ('Quarter 1', 100, 200, 300, 400),
            ('Quarter 2', 100, 400, 600, 800),
            ('Total', 300, 600, 900, '1,200'))
    col_widths = (72, 36, 36, 36, 36)
    row_heights = (24, 16, 16, 18)
    return Table(data, col_widths, row_heights)
def makeStyles():
    """Return five TableStyles with cumulatively increasing decoration."""
    # every style starts from the same three base commands
    styles = []
    for _ in range(5):
        styles.append(TableStyle([('ALIGN', (1,1), (-1,-1), 'RIGHT'),
                                  ('ALIGN', (0,0), (-1,0), 'CENTRE'),
                                  ('HREF', (0,0), (0,0), 'www.google.com'),
                                  ]))
    # style i>=1 gains a grid, i>=2 a header underline, i>=3 a footer overline;
    # the per-style command order matches the original (GRID, LINEBELOW, LINEABOVE)
    for idx, style in enumerate(styles):
        if idx >= 1:
            style.add('GRID', (0,0), (-1,-1), 0.25, colors.black)
        if idx >= 2:
            style.add('LINEBELOW', (0,0), (-1,0), 2, colors.black)
        if idx >= 3:
            style.add('LINEABOVE', (0, -1), (-1,-1), 2, colors.black)
    # the last style also underlines the numeric part of the footer in grey
    styles[-1].add('LINEBELOW', (1,-1), (-1, -1), 2, (0.5, 0.5, 0.5))
    return styles
def run():
    """Build test_platypus_tables.pdf: alignment comparisons between plain
    text and Paragraph cells, followed by the cumulative styles from
    makeStyles() applied to the demo table."""
    doc = SimpleDocTemplate(outputfile('test_platypus_tables.pdf'), pagesize=(8.5*inch, 11*inch), showBoundary=1)
    lst = []
    from reportlab import Version
    styNormal = styleSheet['Normal']
    # pink background makes the Paragraph cell extents visible in the output
    styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.pink)
    styH1 = styleSheet['Heading1']
    lst.append(Paragraph("First, a test of how tables align their content...", styH1))
    lst.append(Paragraph("""Generated with version %s""" % Version,
                         styNormal))
    lst.append(Paragraph("""In release 2.3, cells with plain text positioned their
text differently to cells with Paragraphs using the
same font. Hopefully now they are back on the same baseline""",
                         styNormal))
    ts1 = TableStyle([
        ('ALIGN', (0,0), (-1,0), 'RIGHT'),
        ('BACKGROUND', (0,0), (-1,0), colors.lightgrey),
        ('VALIGN', (0,0), (-1,-1), 'TOP'),
        ('GRID', (0,0), (-1,-1), 0.25, colors.black),
        ])
    # mix plain strings and Paragraphs in the same rows to compare baselines
    t1 = Table([
        ('plain text','plain text','shortpara','plain text', 'long para'),
        ('Text','more text', Paragraph('Is this para level?', styBackground), 'Back to text', Paragraph('Short para again', styBackground)),
        ('Text',
         'more text',
         Paragraph('Is this level?', styBackground),
         'This is plain\ntext with line breaks\nto compare against\nthe para on right',
         Paragraph('Long paragraph we expect to wrap over several lines accurately', styBackground)),
        ])
    t1.setStyle(ts1)
    lst.append(t1)
    lst.append(Spacer(0,10))
    lst.append(Paragraph("Now we make a table with just one cell containing a string...note how the text sits low", styNormal))
    tsGrid = TableStyle([
        ('GRID', (0,0), (-1,-1), 0.25, colors.black),
        ])
    lst.append(Table([['One cell of plain text']], style=tsGrid, colWidths=[200]))
    lst.append(Spacer(0,10))
    lst.append(Paragraph("Now we make a table with just one cell containing a para...should be same position. Note that the overall bounding box is an approximation and lies - it always did.", styNormal))
    lst.append(Table([[Paragraph('One cell containing a paragraph. ÄÉ∫', styBackground)]], style=tsGrid, colWidths=[200]))
    lst.append(Spacer(0,10))
    lst.append(Paragraph("Paragraphs jumped up post 2.1. Ideally they should align the same.", styNormal))
    lst.append(Spacer(0,30))
    lst.append(Paragraph("Now for all the tests we had before. See also the much longer test_platypus_tables_2.pdf, which for reasons unknown was split into a separate file generated by the same script", styNormal))
    # one demo table per style from makeStyles()
    styles = makeStyles()
    for style in styles:
        t = getTable()
        t.setStyle(style)
##        print '--------------'
##        for rowstyle in t._cellstyles:
##            for s in rowstyle:
##                print s.alignment
        lst.append(t)
        lst.append(Spacer(0,12))
    doc.build(lst)
class TableBarChart(_DrawingEditorMixin, Drawing):
    """A small vertical bar chart drawing, sized to sit inside a table cell."""
    def __init__(self, width=400, height=200, *args, **kw):
        Drawing.__init__(self, width, height, *args, **kw)
        # shrink to cell-friendly dimensions after the base init
        self.width = 136
        self.height = 140
        self._add(self, VerticalBarChart(), name='chart', validate=None, desc=None)
        chart = self.chart
        chart.y = 20
        chart.width = self.width - 21
        chart.height = self.height - 24
        chart.categoryAxis.categoryNames = ['Spring', 'Summer', 'Autumn', 'Winter']
        chart.categoryAxis.labels.fontSize = 7
def old_tables_test():
from reportlab.lib.units import inch, cm
from reportlab.platypus.flowables import Image, PageBreak, Spacer, XBox
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.flowables import Preformatted
from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.tables import GRID_STYLE, BOX_STYLE, LABELED_GRID_STYLE, COLORED_GRID_STYLE, LIST_STYLE, LongTable
rowheights = (24, 16, 16, 16, 16)
rowheights2 = (24, 16, 16, 16, 30)
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
data2 = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats\nLarge', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
lst = []
lst.append(Paragraph("Tables", styleSheet['Heading1']))
lst.append(Paragraph(__doc__, styleSheet['BodyText']))
lst.append(Paragraph("The Tables (shown in different styles below) were created using the following code:", styleSheet['BodyText']))
lst.append(Preformatted("""
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
rowheights = (24, 16, 16, 16, 16)
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159,
888, '1,298', 832, 453, '1,344','2,843')
)
t = Table(data, colwidths, rowheights)
""", styleSheet['Code'], dedent=4))
lst.append(Paragraph("""
You can then give the Table a TableStyle object to control its format. The first TableStyle used was
created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
GRID_STYLE = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
lst.append(Paragraph("""
TableStyles are created by passing in a list of commands. There are two types of commands - line commands
and cell formatting commands. In all cases, the first three elements of a command are the command name,
the starting cell and the ending cell.
""", styleSheet['BodyText']))
lst.append(Paragraph("""
Line commands always follow this with the weight and color of the desired lines. Colors can be names,
or they can be specified as a (R,G,B) tuple, where R, G and B are floats and (0,0,0) is black. The line
command names are: GRID, BOX, OUTLINE, INNERGRID, LINEBELOW, LINEABOVE, LINEBEFORE
and LINEAFTER. BOX and OUTLINE are equivalent, and GRID is the equivalent of applying both BOX and
INNERGRID.
""", styleSheet['BodyText']))
lst.append(Paragraph("""
Cell formatting commands are:
""", styleSheet['BodyText']))
lst.append(Paragraph("""
FONT - takes fontname, fontsize and (optional) leading.
""", styleSheet['Definition']))
lst.append(Paragraph("""
TEXTCOLOR - takes a color name or (R,G,B) tuple.
""", styleSheet['Definition']))
lst.append(Paragraph("""
ALIGNMENT (or ALIGN) - takes one of LEFT, RIGHT, CENTRE (or CENTER) or DECIMAL.
""", styleSheet['Definition']))
lst.append(Paragraph("""
LEFTPADDING - defaults to 6.
""", styleSheet['Definition']))
lst.append(Paragraph("""
RIGHTPADDING - defaults to 6.
""", styleSheet['Definition']))
lst.append(Paragraph("""
BOTTOMPADDING - defaults to 3.
""", styleSheet['Definition']))
lst.append(Paragraph("""
A tablestyle is applied to a table by calling Table.setStyle(tablestyle).
""", styleSheet['BodyText']))
t = Table(data, colwidths, rowheights)
t.setStyle(GRID_STYLE)
lst.append(PageBreak())
lst.append(Paragraph("This is GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
t = Table(data, colwidths, rowheights)
t.setStyle(BOX_STYLE)
lst.append(Paragraph("This is BOX_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
BOX_STYLE = TableStyle(
[('BOX', (0,0), (-1,-1), 0.50, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
t.setStyle(LABELED_GRID_STYLE)
lst.append(Paragraph("This is LABELED_GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
t = Table(data2, colwidths, rowheights2)
t.setStyle(LABELED_GRID_STYLE)
lst.append(Paragraph("This is LABELED_GRID_STYLE ILLUSTRATES EXPLICIT LINE SPLITTING WITH NEWLINE (different heights and data)\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
LABELED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.black),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
lst.append(PageBreak())
t = Table(data, colwidths, rowheights)
t.setStyle(COLORED_GRID_STYLE)
lst.append(Paragraph("This is COLORED_GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
COLORED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.red),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
t.setStyle(LIST_STYLE)
lst.append(Paragraph("This is LIST_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
LIST_STYLE = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
ts = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 3, colors.green,'butt'),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.white,'butt'),
('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))]
)
t.setStyle(ts)
lst.append(Paragraph("This is a custom style\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
ts = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 3, colors.green,'butt'),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.white,'butt'),
('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))]
)
""", styleSheet['Code']))
data = (
('', 'Jan\nCold', 'Feb\n', 'Mar\n','Apr\n','May\n', 'Jun\nHot', 'Jul\n', 'Aug\nThunder', 'Sep\n', 'Oct\n', 'Nov\n', 'Dec\n'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
c = list(colwidths)
c[0] = None
c[8] = None
t = Table(data, c, [None]+list(rowheights[1:]))
t.setStyle(LIST_STYLE)
lst.append(Paragraph("""
This is a LIST_STYLE table with the first rowheight set to None ie automatic.
The top row cells are split at a newline '\\n' character. The first and August
column widths were also set to None.
""", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
This demonstrates a number of features useful in financial statements. The first is decimal alignment;
with ALIGN=DECIMAL the numbers align on the points; and the points are aligned based on
the RIGHTPADDING, which is usually 3 points so you should set it higher. The second is multiple lines;
one can specify double or triple lines and control the separation if desired. Finally, the coloured
negative numbers were (we regret to say) done in the style; we don't have a way to conditionally
format numbers based on value yet.
""", styleSheet['BodyText']))
t = Table([[u'Corporate Assets','Amount'],
['Fixed Assets','1,234,567.89'],
['Company Vehicle','1,234.8901'],
['Petty Cash','42'],
[u'Intellectual Property\u00ae','(42,078,231.56)'],
['Overdraft','(12,345)'],
['Boardroom Flat Screen','60 inches'],
['Net Position','Deep Sh*t.Really']
],
[144,72])
ts = TableStyle(
[#first the top row
('ALIGN', (1,1), (-1,-1), 'CENTER'),
('LINEABOVE', (0,0), (-1,0), 1, colors.purple),
('LINEBELOW', (0,0), (-1,0), 1, colors.purple),
('FONT', (0,0), (-1,0), 'Times-Bold'),
#bottom row has a line above, and two lines below
('LINEABOVE', (0,-1), (-1,-1), 1, colors.purple), #last 2 are count, sep
('LINEBELOW', (0,-1), (-1,-1), 0.5, colors.purple, 1, None, None, 4,1),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.red),
('FONT', (0,-1), (-1,-1), 'Times-Bold'),
#numbers column
('ALIGN', (1,1), (-1,-1), 'DECIMAL'),
('RIGHTPADDING', (1,1), (-1,-1), 36),
('TEXTCOLOR', (1,4), (1,4), colors.red),
#red cell
]
)
t.setStyle(ts)
lst.append(t)
lst.append(Spacer(36,36))
lst.append(Paragraph("""
The red numbers should be aligned LEFT & BOTTOM, the blue RIGHT & TOP
and the green CENTER & MIDDLE.
""", styleSheet['BodyText']))
XY = [['X00y', 'X01y', 'X02y', 'X03y', 'X04y'],
['X10y', 'X11y', 'X12y', 'X13y', 'X14y'],
['X20y', 'X21y', 'X22y', 'X23y', 'X24y'],
['X30y', 'X31y', 'X32y', 'X33y', 'X34y']]
t=Table(XY, 5*[0.6*inch], 4*[0.6*inch])
t.setStyle([('ALIGN',(1,1),(-2,-2),'LEFT'),
('TEXTCOLOR',(1,1),(-2,-2),colors.red),
('VALIGN',(0,0),(1,-1),'TOP'),
('ALIGN',(0,0),(1,-1),'RIGHT'),
('TEXTCOLOR',(0,0),(1,-1),colors.blue),
('ALIGN',(0,-1),(-1,-1),'CENTER'),
('VALIGN',(0,-1),(-1,-1),'MIDDLE'),
('TEXTCOLOR',(0,-1),(-1,-1),colors.green),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(t)
data = [('alignment', 'align\012alignment'),
('bulletColor', 'bulletcolor\012bcolor'),
('bulletFontName', 'bfont\012bulletfontname'),
('bulletFontSize', 'bfontsize\012bulletfontsize'),
('bulletIndent', 'bindent\012bulletindent'),
('firstLineIndent', 'findent\012firstlineindent'),
('fontName', 'face\012fontname\012font'),
('fontSize', 'size\012fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent\012lindent'),
('rightIndent', 'rightindent\012rindent'),
('spaceAfter', 'spaceafter\012spacea'),
('spaceBefore', 'spacebefore\012spaceb'),
('textColor', 'fg\012textcolor\012color')]
t = Table(data)
t.setStyle([
('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(t)
t = Table([ ('Attribute', 'Synonyms'),
('alignment', 'align, alignment'),
('bulletColor', 'bulletcolor, bcolor'),
('bulletFontName', 'bfont, bulletfontname'),
('bulletFontSize', 'bfontsize, bulletfontsize'),
('bulletIndent', 'bindent, bulletindent'),
('firstLineIndent', 'findent, firstlineindent'),
('fontName', 'face, fontname, font'),
('fontSize', 'size, fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent, lindent'),
('rightIndent', 'rightindent, rindent'),
('spaceAfter', 'spaceafter, spacea'),
('spaceBefore', 'spacebefore, spaceb'),
('textColor', 'fg, textcolor, color')])
t.repeatRows = 1
t.setStyle([
('FONT',(0,0),(-1,1),'Times-Bold',10,12),
('FONT',(0,1),(-1,-1),'Courier',8,8),
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.green),
('BACKGROUND', (0, 1), (-1, -1), colors.pink),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('ALIGN', (0, 1), (0, -1), 'LEFT'),
('ALIGN', (-1, 1), (-1, -1), 'RIGHT'),
('FONT', (0, 0), (-1, 0), 'Times-Bold', 12),
('ALIGN', (1, 1), (1, -1), 'CENTER'),
])
lst.append(t)
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 5,6),
('GRID', (0,0), (-1,-1), 0.25, colors.blue),]))
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 10,12),
('GRID', (0,0), (-1,-1), 0.25, colors.black),]))
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 20,24),
('GRID', (0,0), (-1,-1), 0.25, colors.red),]))
lst.append(PageBreak())
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
])
lst.append(Paragraph("Illustrating splits: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(PageBreak())
data= [['00', '01', '02', '03', '04'],
['', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '', '33', '34']]
sty=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('SPAN',(2,2),(2,3)),
]
t=Table(data,style=sty)
lst.append(Paragraph("Illustrating splits with spans: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
data= [['00', '01', '02', '03', '04'],
['', '11', '12', '13', ''],
['20', '21', '22', '23', '24'],
['30', '31', '', '33', ''],
['40', '41', '', '43', '44']]
sty=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND',(-2,1),(-1,1),colors.palegreen),
('SPAN',(-2,1),(-1,1)),
('BACKGROUND',(-2,3),(-1,3),colors.yellow),
('SPAN',(-2,3),(-1,3)),
('BACKGROUND', (2, 3), (2, 4), colors.orange),
('SPAN',(2,3),(2,4)),
]
t=Table(data,style=sty,repeatRows=2)
lst.append(Paragraph("Illustrating splits with spans and repeatRows: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
if 1:
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(PageBreak())
from reportlab.lib.testutils import testsFolder
I = Image(os.path.join(os.path.dirname(testsFolder),'tools','pythonpoint','demos','leftlogo.gif'))
I.drawHeight = 1.25*inch*I.drawHeight / I.drawWidth
I.drawWidth = 1.25*inch
#I.drawWidth = 9.25*inch #uncomment to see better messaging
P = Paragraph("<para align=center spaceb=3>The <b>ReportLab Left <font color=red>Logo</font></b> Image</para>", styleSheet["BodyText"])
B = TableBarChart()
BP = Paragraph("<para align=center spaceb=3>A bar chart in a cell.</para>", styleSheet["BodyText"])
data= [['A', 'B', 'C', Paragraph("<b>A pa<font color=red>r</font>a<i>graph</i></b><super><font color=yellow>1</font></super>",styleSheet["BodyText"]), 'D'],
['00', '01', '02', [I,P], '04'],
['10', '11', '12', [I,P], '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34'],
['40', '41', '42', [B,BP], '44']]
t=Table(data,style=[('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('BOX',(0,0),(-1,-1),2,colors.black),
('GRID',(0,0),(-1,-1),0.5,colors.black),
('VALIGN',(3,0),(3,0),'BOTTOM'),
('BACKGROUND',(3,0),(3,0),colors.limegreen),
('BACKGROUND',(3,1),(3,1),colors.khaki),
('ALIGN',(3,1),(3,1),'CENTER'),
('BACKGROUND',(3,2),(3,2),colors.beige),
('ALIGN',(3,2),(3,2),'LEFT'),
])
t._argW[3]=1.5*inch
lst.append(t)
# now for an attempt at column spanning.
lst.append(PageBreak())
data= [['A', 'BBBBB', 'C', 'D', 'E'],
['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
sty = [
('ALIGN',(0,0),(-1,-1),'CENTER'),
('VALIGN',(0,0),(-1,-1),'TOP'),
('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
#span 'BBBB' across middle 3 cells in top row
('SPAN',(1,0),(3,0)),
#now color the first cell in this range only,
#i.e. the one we want to have spanned. Hopefully
#the range of 3 will come out khaki.
('BACKGROUND',(1,0),(1,0),colors.khaki),
('SPAN',(0,2),(-1,2)),
#span 'AAA'down entire left column
('SPAN',(0,0), (0, 1)),
('BACKGROUND',(0,0),(0,0),colors.cyan),
('LINEBELOW', (0,'splitlast'), (-1,'splitlast'), 1, colors.white,'butt'),
]
t=Table(data,style=sty, colWidths = [20] * 5, rowHeights = [20]*5)
lst.append(t)
# now for an attempt at percentage widths
lst.append(Spacer(18,18))
lst.append(Paragraph("This table has colWidths=5*['14%']!", styleSheet['BodyText']))
t=Table(data,style=sty, colWidths = ['14%'] * 5, rowHeights = [20]*5)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph("This table has colWidths=['14%','10%','19%','22%','*']!", styleSheet['BodyText']))
t=Table(data,style=sty, colWidths = ['14%','10%','19%','22%','*'], rowHeights = [20]*5)
lst.append(t)
# Mike's test example
lst.append(Spacer(18,18))
lst.append(Paragraph('Mike\'s Spanning Example', styleSheet['Heading1']))
data= [[Paragraph('World Domination: The First Five Years', styleSheet['BodyText']), ''],
[Paragraph('World <font color="green">Domination</font>: The First Five Years', styleSheet['BodyText']),''],
[Paragraph('World Domination: The First Five Years', styleSheet['BodyText']), ''],
]
t=Table(data, style=[('SPAN',(0,0),(1,0)),('SPAN',(0,1),(1,1)),('SPAN',(0,2),(1,2)),], colWidths = [3*cm,8*cm], rowHeights = [None]*3)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph('Mike\'s Non-spanning Example', styleSheet['Heading1']))
data= [[Paragraph('World Domination: The First Five Years', styleSheet['BodyText'])],
[Paragraph('World <font color="magenta">Domination</font>: The First Five Years', styleSheet['BodyText'])],
[Paragraph('World Domination: The First Five Years', styleSheet['BodyText'])],
]
t=Table(data, style=[], colWidths = [11*cm], rowHeights = [None]*3)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph('xpre example', styleSheet['Heading1']))
data= [ [
XPreformatted('Account Details', styleSheet['Heading3']),
'', XPreformatted('Client Details', styleSheet['Heading3']),
], #end of row 0
]
t=Table(data, style=[], colWidths = [80,230.0,80], rowHeights = [None]*1)
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph('Trying colour cycling in background', styleSheet['Heading1']))
lst.append(Paragraph("This should alternate pale blue and uncolored by row", styleSheet['BodyText']))
data= [['001', '01', '02', '03', '04', '05'],
['002', '01', '02', '03', '04', '05'],
['003', '01', '02', '03', '04', '05'],
['004', '01', '02', '03', '04', '05'],
['005', '01', '02', '03', '04', '05'],
['006', '01', '02', '03', '04', '05'],
['007', '01', '02', '03', '04', '05'],
['008', '01', '02', '03', '04', '05'],
['009', '01', '02', '03', '04', '05'],
['010', '01', '02', '03', '04', '05'],
]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('ROWBACKGROUNDS', (0, 0), (-1, -1), (0xD0D0FF, None)),
])
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("And this should pale blue, pale pink and None by column", styleSheet['BodyText']))
data= [['001', '01', '02', '03', '04', '05'],
['002', '01', '02', '03', '04', '05'],
['003', '01', '02', '03', '04', '05'],
['004', '01', '02', '03', '04', '05'],
['005', '01', '02', '03', '04', '05'],
['006', '01', '02', '03', '04', '05'],
['007', '01', '02', '03', '04', '05'],
['008', '01', '02', '03', '04', '05'],
['009', '01', '02', '03', '04', '05'],
['010', '01', '02', '03', '04', '05'],
]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('COLBACKGROUNDS', (0, 0), (-1, -1), (0xD0D0FF, 0xFFD0D0, None)),
])
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("This spanning example illustrates automatic removal of grids and lines in spanned cells!", styleSheet['BodyText']))
lst.append(Spacer(0,6))
data= [['Top\nLeft', '', '02', '03', '04', '05', '06', '07'],
['', '', '12', 'Span (3,1) (6,2)', '','','','17'],
['20', '21', '22', '', '','','','27'],
['30', '31', '32', '33', '34','35','36','37'],
['40', 'In The\nMiddle', '', '', '44','45','46','47'],
['50', '', '', '', '54','55','56','57'],
['60', '', '', '','64', '65', 'Bottom\nRight', ''],
['70', '71', '72', '73','74', '75', '', '']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('BACKGROUND',(0,0),(1,1),colors.palegreen),
('SPAN',(0,0),(1,1)),
('BACKGROUND',(-2,-2),(-1,-1), colors.pink),
('SPAN',(-2,-2),(-1,-1)),
('SPAN',(1,4),(3,6)),
('BACKGROUND',(1,4),(3,6), colors.lightblue),
('SPAN',(3,1),(6,2)),
('BACKGROUND',(3,1),(6,2), colors.peachpuff),
('VALIGN',(3,1),(6,2),'TOP'),
('LINEABOVE', (0,2),(-1,2), 1, colors.black, 0, None, None, 2, 2),
('LINEBEFORE', (3,0),(3,-1), 1, colors.black, 0, None, None, 2, 2),
])
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("und jetzt noch eine Tabelle mit 5000 Zeilen:", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
]
data = [[str(i), Paragraph("xx "* (i%10), styleSheet["BodyText"]), Paragraph("blah "*(i%40), styleSheet["BodyText"])] for i in xrange(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
lst.append(t)
#Yuan Hong's bug tester
lst.append(PageBreak())
lst.append(Paragraph('Yian Hong\'s Bug Case (should not blow up)', styleSheet['Heading2']))
data = ([['Col1', 'Col2', 'Col3', 'Col4', 'Col5']]+
[['01', Paragraph('This is cell one that contains a paragraph.', styleSheet['Normal']), '02', '03', '04']
for i in xrange(50)])
t = Table(data, ['20%']*5, repeatRows=1)
t.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('SPAN', (0,50), (-2,50)),
]))
lst.append(t)
lst.append(PageBreak())
#Volker Haas' example extended
#the optimal row heights are the solution of an LP similar to
#
#Objective function
# min: 3*h0+3*h1+3*h2+2*h3;
#
#constraints
# h0>=12;
# h1>=12;
# h2>=12;
# h3>=12;
# h0+h1+h2>=48;
# h0+h1>=12;
# h2+h3>=60;
#
#the solution H=[12,12,24,36]
def makeTable(x,y):
return Table([
['00', '01', '02', '03', '04', '05\nline2\nline3\nline4'],
['', '11', '12', x, '',''],
['20', '21', y, '23', '24',''],
['30', '31', '', '33', '34','35'],
],
style=[
('TOPPADDING',(0,0),(-1,-1),0),
('BOTTOMPADDING',(0,0),(-1,-1),0),
('RIGHTPADDING',(0,0),(-1,-1),0),
('LEFTPADDING',(0,0),(-1,-1),0),
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('SPAN',(2,2),(2,3)),
('SPAN',(3,1),(4,1)),
('SPAN',(5,0),(5,2)),
])
p_style= ParagraphStyle('Normal')
lst.append(makeTable(
Paragraph('This is a string',p_style),
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
lst.append(Spacer(10,10))
lst.append(makeTable(
XPreformatted('This is a string',p_style),
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
lst.append(Spacer(10,10))
lst.append(makeTable(
'This is a string',
'22\nblub\nasfd\nafd\nasdfs',
))
lst.append(Spacer(10,10))
lst.append(makeTable(
'This is a string',
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
SimpleDocTemplate(outputfile('test_platypus_tables_2.pdf'), showBoundary=1).build(lst)
class TablesTestCase(unittest.TestCase):
    """Exercise table layout by building complete PDF documents."""

    def test0(self):
        """Build the main document full of tables."""
        run()

    def test1(self):
        """Build the legacy tables document."""
        old_tables_test()
def makeSuite():
    """Return a unittest suite containing the table test cases."""
    suite_classes = (TablesTestCase,)
    return makeSuiteForClasses(*suite_classes)
#noruntests
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| |
import ctypes # used for accessing the dynamic library
import graph_partitioning.partitioners.utils as putils # used for some of the utilities functions
class LibScotch(putils.CLibInterface):
    """ctypes bindings for the SCOTCH graph partitioning shared library.

    Wraps allocation/initialisation of the three opaque SCOTCH objects
    (architecture, graph, strategy) and the graph-to-architecture mapping
    entry points.  Boolean-returning helpers translate SCOTCH's
    0-on-success C convention into True/False.
    """

    def __init__(self, libraryPath=None):
        super().__init__(libraryPath=libraryPath)

    def _getDefaultLibPath(self):
        """Return the default filesystem path of the SCOTCH shared library."""
        return putils.defaultSCOTCHLibraryPath()

    def _loadLibraryFunctions(self):
        """Declare ctypes prototypes for every SCOTCH function used here."""
        # *****************
        # structures & data
        # *****************

        # Opaque SCOTCH handle types; sized as double[128] blobs
        # (matches the sizing used by the SCOTCH public headers).
        self.SCOTCH_Arch = ctypes.c_double * 128
        self.SCOTCH_Graph = ctypes.c_double * 128
        self.SCOTCH_Strat = ctypes.c_double * 128

        # Live SCOTCH data objects (e.g. self.graph = self.SCOTCH_Graph()).
        self.architecture = None
        self.graph = None
        self.strategy = None

        self.SCOTCH_version = self.clib.SCOTCH_version
        self.SCOTCH_version.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]

        # SCOTCH_archAlloc
        self.SCOTCH_archAlloc = self.clib.SCOTCH_archAlloc

        # SCOTCH_archInit
        self.SCOTCH_archInit = self.clib.SCOTCH_archInit
        self.SCOTCH_archInit.argtypes = [ctypes.POINTER(self.SCOTCH_Arch)]

        # SCOTCH_archExit
        self.SCOTCH_archExit = self.clib.SCOTCH_archExit
        self.SCOTCH_archExit.argtypes = [ctypes.POINTER(self.SCOTCH_Arch)]

        # SCOTCH_archCmplt - builds a complete-graph target architecture
        self.SCOTCH_archCmplt = self.clib.SCOTCH_archCmplt
        self.SCOTCH_archCmplt.argtypes = [ctypes.POINTER(self.SCOTCH_Arch), ctypes.c_int]

        # SCOTCH_graphAlloc
        self.SCOTCH_graphAlloc = self.clib.SCOTCH_graphAlloc

        # SCOTCH_graphInit
        self.SCOTCH_graphInit = self.clib.SCOTCH_graphInit
        self.SCOTCH_graphInit.argtypes = [ctypes.POINTER(self.SCOTCH_Graph)]

        # SCOTCH_graphExit
        self.SCOTCH_graphExit = self.clib.SCOTCH_graphExit
        self.SCOTCH_graphExit.argtypes = [ctypes.POINTER(self.SCOTCH_Graph)]

        # SCOTCH_graphCheck
        self.SCOTCH_graphCheck = self.clib.SCOTCH_graphCheck
        self.SCOTCH_graphCheck.argtypes = [ctypes.POINTER(self.SCOTCH_Graph)]

        # SCOTCH_graphBuild
        self.SCOTCH_graphBuild = self.clib.SCOTCH_graphBuild
        self.SCOTCH_graphBuild.argtypes = [
            ctypes.POINTER(self.SCOTCH_Graph), ctypes.c_int, ctypes.c_int,
            ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
            ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p
        ]

        # SCOTCH_stratAlloc
        self.SCOTCH_stratAlloc = self.clib.SCOTCH_stratAlloc

        # SCOTCH_stratInit / SCOTCH_stratExit
        self.SCOTCH_stratInit = self.clib.SCOTCH_stratInit
        self.SCOTCH_stratInit.argtypes = [ctypes.POINTER(self.SCOTCH_Strat)]
        self.SCOTCH_stratExit = self.clib.SCOTCH_stratExit
        self.SCOTCH_stratExit.argtypes = [ctypes.POINTER(self.SCOTCH_Strat)]

        # Strategy configuration
        self.SCOTCH_stratGraphMap = self.clib.SCOTCH_stratGraphMap
        self.SCOTCH_stratGraphMap.argtypes = [ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_char_p]
        self.SCOTCH_stratGraphMapBuild = self.clib.SCOTCH_stratGraphMapBuild
        self.SCOTCH_stratGraphMapBuild.argtypes = [ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_int, ctypes.c_int, ctypes.c_double]

        # MAPPING functions
        self.SCOTCH_graphMap = self.clib.SCOTCH_graphMap
        self.SCOTCH_graphMap.argtypes = [ctypes.POINTER(self.SCOTCH_Graph), ctypes.POINTER(self.SCOTCH_Arch), ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_void_p]
        self.SCOTCH_graphMapFixed = self.clib.SCOTCH_graphMapFixed
        self.SCOTCH_graphMapFixed.argtypes = [ctypes.POINTER(self.SCOTCH_Graph), ctypes.POINTER(self.SCOTCH_Arch), ctypes.POINTER(self.SCOTCH_Strat), ctypes.c_void_p]

    def isLoaded(self):
        """Return True when the SCOTCH shared library has been loaded."""
        return self.clib is not None

    def version(self):
        """Return the library version as a 'major.relative.patch' string."""
        major_ptr = ctypes.c_int(0)
        relative_ptr = ctypes.c_int(0)
        patch_ptr = ctypes.c_int(0)
        self.SCOTCH_version(major_ptr, relative_ptr, patch_ptr)
        return "{}.{}.{}".format(major_ptr.value, relative_ptr.value, patch_ptr.value)

    def createSCOTCHArch(self):
        """Create and initialise the target architecture. True on success."""
        self.architecture = self.SCOTCH_Arch()
        return self.SCOTCH_archInit(self.architecture) == 0

    def deleteSCOTCHStrat(self):
        """Free the SCOTCH strategy object and drop the Python reference."""
        self.SCOTCH_stratExit(self.strategy)
        del self.strategy
        self.strategy = None

    def deleteSCOTCHArch(self):
        """Free the SCOTCH architecture object and drop the Python reference."""
        self.SCOTCH_archExit(self.architecture)
        del self.architecture
        self.architecture = None

    def populatePartitionArchitecture(self, numPartitions):
        """Configure the architecture as a complete graph of numPartitions parts.

        Returns False when the architecture has not been created or
        numPartitions is not an int (previously the non-int case fell
        through and returned None).
        """
        if self.architecture is None:
            return False
        if not isinstance(numPartitions, int):
            return False
        return self.SCOTCH_archCmplt(self.architecture, numPartitions) == 0

    def createSCOTCHGraph(self):
        """Create and initialise an empty SCOTCH graph. True on success."""
        self.graph = self.SCOTCH_Graph()
        return self.SCOTCH_graphInit(self.graph) == 0

    def buildSCOTCHGraphFromData(self, scotchData):
        """Populate self.graph from a ScotchGraphArrays-like container.

        scotchData must expose baseval, vertnbr, edgenbr and the arrays
        _verttab, _velotab, _edgetab, _edlotab (and optionally _vlbltab
        for vertex labels).  Returns True on success.
        """
        if self.graph is None:
            if not self.createSCOTCHGraph():
                return False
        if scotchData._vlbltab is None:
            status = self.SCOTCH_graphBuild(
                self.graph, scotchData.baseval, scotchData.vertnbr,
                scotchData._verttab.ctypes, 0, scotchData._velotab.ctypes, 0,
                scotchData.edgenbr, scotchData._edgetab.ctypes,
                scotchData._edlotab.ctypes)
        else:
            # vertex label array supplied
            status = self.SCOTCH_graphBuild(
                self.graph, scotchData.baseval, scotchData.vertnbr,
                scotchData._verttab.ctypes, 0, scotchData._velotab.ctypes,
                scotchData._vlbltab.ctypes, scotchData.edgenbr,
                scotchData._edgetab.ctypes, scotchData._edlotab.ctypes)
        return status == 0

    def deleteSCOTCHGraph(self):
        """Free the SCOTCH graph object and drop the Python reference."""
        # TODO write test for this
        self.SCOTCH_graphExit(self.graph)
        del self.graph
        self.graph = None

    def scotchGraphValid(self):
        """Ask SCOTCH to verify the consistency of the current graph."""
        # TODO write test for this
        return self.SCOTCH_graphCheck(self.graph) == 0

    def createStrategy(self):
        """Create and initialise a fresh strategy object. True on success."""
        self.strategy = self.SCOTCH_Strat()
        return self.SCOTCH_stratInit(self.strategy) == 0

    def setStrategyGraphMapBuild(self, straval, partitionNbr, kbalval=0.1):
        """Configure the strategy for mapping with the given flags/imbalance."""
        return self.SCOTCH_stratGraphMapBuild(self.strategy, straval, partitionNbr, kbalval) == 0

    def setStrategyFlags(self, strategyFlags):
        """Apply a SCOTCH strategy string to the current strategy object."""
        if not isinstance(strategyFlags, str):
            strategyFlags = ''
        # SCOTCH expects a C char*, so encode the Python string to bytes.
        return self.SCOTCH_stratGraphMap(self.strategy, strategyFlags.encode('utf-8')) == 0

    def createSCOTCHGraphMapStrategy(self, strategyFlags):
        """Create a strategy and configure it from a flag string in one step."""
        self.strategy = self.SCOTCH_Strat()
        if self.SCOTCH_stratInit(self.strategy) != 0:
            return False
        # reuse setStrategyFlags rather than duplicating its body here
        return self.setStrategyFlags(strategyFlags)

    def graphMap(self, parttab):
        """Run the mapping; partition assignments are written into parttab."""
        return self.SCOTCH_graphMap(self.graph, self.architecture, self.strategy, parttab.ctypes) == 0

    def graphMapFixed(self, parttab):
        """Run the mapping honouring fixed vertices given in parttab."""
        return self.SCOTCH_graphMapFixed(self.graph, self.architecture, self.strategy, parttab.ctypes) == 0
| |
# -*- coding: utf-8 -*-
# czifile.py
# Copyright (c) 2013-2014, Christoph Gohlke
# Copyright (c) 2013-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read image and metadata from Carl Zeiss(r) ZISRAW (CZI) files.
CZI is the native image file format of the ZEN(r) software by the Carl Zeiss
Microscopy GmbH. It stores multidimensional images and metadata from
microscopy experiments.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2013.12.04
Requirements
------------
* `CPython 2.7 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Scipy 0.13 <http://www.scipy.org>`_
* `Tifffile.py 2013.11.03 <http://www.lfd.uci.edu/~gohlke/>`_
* `Czifile.pyx 2013.12.04 <http://www.lfd.uci.edu/~gohlke/>`_
(for decoding JpegXrFile and JpgFile images)
Revisions
---------
2013.12.04
Decode JpegXrFile and JpgFile via _czifile extension module.
Attempt to reconstruct tiled mosaic images.
2013.11.20
Initial release.
Notes
-----
The API is not stable yet and might change between revisions.
The file format design specification [1] is confidential and the licence
agreement does not permit to write data into CZI files.
Only a subset of the 2012 specification is implemented in the initial release.
Specifically, multifile images are not yet supported.
Tested on Windows with a few example files only.
References
----------
(1) ZISRAW (CZI) File Format Design specification Release Version 1.1 for
ZEN 2012. DS_ZISRAW-FileFormat_Rel_ZEN2012.doc (Confidential)
Documentation can be requested at
<http://microscopy.zeiss.com/microscopy/en_us/downloads/zen.html>
(2) CZI The File Format for the Microscope | ZEISS International
<http://microscopy.zeiss.com/microscopy/en_us/products/microscope-software/
zen-2012/czi.html>
Examples
--------
>>> with CziFile('test.czi') as czi:
... image = czi.asarray()
>>> image.shape
(3, 3, 3, 250, 200, 3)
>>> image[0, 0, 0, 0, 0]
array([10, 10, 10], dtype=uint8)
"""
from __future__ import division, print_function, absolute_import
import sys
import os
import re
import uuid
import struct
import warnings
import tempfile
try:
from lxml import etree
except ImportError:
from xml.etree import cElementTree as etree
import numpy
from scipy.ndimage.interpolation import zoom
from .tifffile import decodelzw, lazyattr, stripnull
try:
import _czifile
_have_czifile = True
except ImportError:
_have_czifile = False
warnings.warn(
"failed to import the optional _czifile C extension module.\n"
"Decoding of JXR and JPG encoded images will be unavailable.\n"
"Czifile.pyx can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2013.12.04'
__docformat__ = 'restructuredtext en'
__all__ = 'imread', 'CziFile'
def imread(filename, *args, **kwargs):
    """Return image data from CZI file as numpy array.

    Extra positional and keyword arguments are forwarded to
    CziFile.asarray().

    Examples
    --------
    >>> image = imread('test.czi')
    >>> image.shape
    (3, 3, 3, 250, 200, 3)
    >>> image.dtype
    dtype('uint8')
    """
    czi = CziFile(filename)
    try:
        return czi.asarray(*args, **kwargs)
    finally:
        czi.close()
class CziFile(object):
    """Carl Zeiss Image (CZI) file.

    Attributes
    ----------
    header : FileHeaderSegment
        Global file metadata such as file version and GUID.
    metadata : etree.ElementTree.Element
        Global image metadata in UTF-8 encoded XML format.

    All attributes are read-only.
    """

    def __init__(self, arg, multifile=True, filesize=None, detectmosaic=True):
        """Open CZI file and read header.

        Raise ValueError if file is not a ZISRAW file.

        Parameters
        ----------
        arg : str or file handle
            File name or open file handle.
        multifile : bool
            If True (default), the master file of a multifile CZI file
            will be opened if applicable.
        filesize : int
            Size of file if arg is a file handle pointing to an
            embedded CZI file.
        detectmosaic : bool
            If True (default), mosaic images will be reconstructed from
            SubBlocks with a tile index.

        Notes
        -----
        CziFile instances created from file name must be closed using the
        'close' method, which is automatically called when using the
        'with' statement.
        """
        self._fh = FileHandle(arg, size=filesize)
        try:
            # the first ten bytes of every CZI file identify the format
            if self._fh.read(10) != b'ZISRAWFILE':
                raise ValueError("not a CZI file")
            self.header = Segment(self._fh, 0).data()
        except Exception:
            # do not leak the handle when header parsing fails
            self._fh.close()
            raise
        # NOTE(review): 'basestring' means this branch is Python 2 only
        if multifile and self.header.file_part and isinstance(arg, basestring):
            # open master file instead
            self._fh.close()
            name, _ = match_filename(arg)
            self._fh = FileHandle(name)
            self.header = Segment(self._fh, 0).data()
            assert(self.header.primary_file_guid == self.header.file_guid)
            assert(self.header.file_part == 0)
        if self.header.update_pending:
            warnings.warn("file is pending update")
        self._filter_mosaic = detectmosaic

    def segments(self, kind=None):
        """Return iterator over Segment data of specified kind.

        Parameters
        ----------
        kind : bytestring or sequence thereof
            Segment id(s) as listed in SEGMENT_ID.
            If None (default), all segments are returned.
        """
        fpos = 0
        while True:
            self._fh.seek(fpos)
            try:
                segment = Segment(self._fh)
            except SegmentNotFoundError:
                # no further readable segment headers: end of scan
                break
            if (kind is None) or (segment.sid in kind):
                yield segment.data()
            # jump to the next segment header
            fpos = segment.data_offset + segment.allocated_size

    @lazyattr
    def metadata(self):
        """Return data from MetadataSegment as xml.ElementTree root Element.

        Return None if no Metadata segment is found.
        """
        if self.header.metadata_position:
            # fast path: the file header records where the metadata lives
            segment = Segment(self._fh, self.header.metadata_position)
            if segment.sid == MetadataSegment.SID:
                return etree.fromstring(segment.data().data())
        warnings.warn("Metadata segment not found")
        # fall back to scanning all segments for the first metadata segment
        try:
            metadata = next(self.segments(MetadataSegment.SID))
            return etree.fromstring(metadata.data())
        except StopIteration:
            pass

    @lazyattr
    def subblock_directory(self):
        """Return list of all DirectoryEntryDV in file.

        Use SubBlockDirectorySegment if exists, else find SubBlockSegments.
        """
        if self.header.directory_position:
            segment = Segment(self._fh, self.header.directory_position)
            if segment.sid == SubBlockDirectorySegment.SID:
                return segment.data().entries
        warnings.warn("SubBlockDirectory segment not found")
        # fall back to a linear scan of all sub-block segments
        return list(segment.directory_entry for segment in
                    self.segments(SubBlockSegment.SID))

    @lazyattr
    def attachment_directory(self):
        """Return list of all AttachmentEntryA1 in file.

        Use AttachmentDirectorySegment if exists, else find AttachmentSegments.
        """
        if self.header.attachment_directory_position:
            segment = Segment(self._fh,
                              self.header.attachment_directory_position)
            if segment.sid == AttachmentDirectorySegment.SID:
                return segment.data().entries
        warnings.warn("AttachmentDirectory segment not found")
        # fall back to a linear scan of all attachment segments
        return list(segment.attachment_entry for segment in
                    self.segments(AttachmentSegment.SID))

    def subblocks(self):
        """Return iterator over all SubBlock segments in file."""
        for entry in self.subblock_directory:
            yield entry.data_segment()

    def attachments(self):
        """Return iterator over all Attachment segments in file."""
        for entry in self.attachment_directory:
            yield entry.data_segment()

    def save_attachments(self, directory=None):
        """Save all attachments to files in 'directory' (created if needed)."""
        if directory is None:
            directory = self._fh.filename + '.attachments'
        if not os.path.exists(directory):
            os.makedirs(directory)
        for attachment in self.attachments():
            attachment.save(directory=directory)

    @lazyattr
    def filtered_subblock_directory(self):
        """Return sorted list of DirectoryEntryDV if mosaic, else all."""
        if not self._filter_mosaic:
            return self.subblock_directory
        filtered = [directory_entry
                    for directory_entry in self.subblock_directory
                    if directory_entry.mosaic_index is not None]
        if not filtered:
            # no tiles carry a mosaic index: not a mosaic image
            return self.subblock_directory
        # order tiles by mosaic index so they compose deterministically
        return list(sorted(filtered, key=lambda x: x.mosaic_index))

    @lazyattr
    def shape(self):
        """Return shape of image data in file."""
        # per sub-block end index along every dimension except tile axis 'M'
        shape = [[dim.start + dim.size
                  for dim in directory_entry.dimension_entries
                  if dim.dimension != b'M']
                 for directory_entry in self.filtered_subblock_directory]
        shape = numpy.max(shape, axis=0)
        # shift by the minimum start index; start has a trailing 0 appended
        shape = tuple(i-j for i, j in zip(shape, self.start[:-1]))
        dtype = self.filtered_subblock_directory[0].dtype
        # append the per-pixel sample shape (e.g. (3,) for BGR), else (1,)
        sampleshape = numpy.dtype(dtype).shape
        shape = shape + (sampleshape if sampleshape else (1,))
        return shape

    @lazyattr
    def start(self):
        """Return minimum start indices per dimension of sub images in file."""
        start = [[dim.start
                  for dim in directory_entry.dimension_entries
                  if dim.dimension != b'M']
                 for directory_entry in self.filtered_subblock_directory]
        start = tuple(numpy.min(start, axis=0)) + (0,)
        return start

    @lazyattr
    def axes(self):
        """Return axes of image data in file."""
        return self.filtered_subblock_directory[0].axes

    @lazyattr
    def dtype(self):
        """Return dtype of image data in file."""
        # subblock data can be of different pixel type
        dtype = self.filtered_subblock_directory[0].dtype[-2:]
        for directory_entry in self.filtered_subblock_directory:
            dtype = numpy.promote_types(dtype, directory_entry.dtype[-2:])
        return dtype

    def asarray(self, bgr2rgb=False, resize=True, order=1):
        """Return image data from file(s) as numpy array.

        Parameters
        ----------
        bgr2rgb : bool
            If True, exchange red and blue samples if applicable.
        resize : bool
            If True (default), resize sub/supersampled subblock data.
        order : int
            The order of spline interpolation used to resize sub/supersampled
            subblock data. Default is 1 (bilinear).
        """
        image = numpy.zeros(self.shape, self.dtype)
        for directory_entry in self.filtered_subblock_directory:
            subblock = directory_entry.data_segment()
            tile = subblock.data(bgr2rgb=bgr2rgb, resize=resize, order=order)
            # place the tile at its start position relative to the global start
            index = [slice(i-j, i-j+k) for i, j, k in
                     zip(directory_entry.start, self.start, tile.shape)]
            try:
                image[index] = tile
            except ValueError as e:
                # tile does not fit (e.g. inconsistent sizes): warn, keep going
                warnings.warn(str(e))
        return image

    def close(self):
        """Close the underlying file handle."""
        self._fh.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __str__(self):
        return '\n '.join((
            self._fh.name.capitalize(),
            "(Carl Zeiss Image File)",
            str(self.header),
            "MetadataSegment",
            str(self.axes),
            str(self.shape),
            str(self.dtype),
            str(etree.tostring(self.metadata))))
class FileHandle(object):
    """Binary file handle that can handle a file within a file.

    Only binary read, seek, tell and close are supported on embedded files.

    Attributes
    ----------
    name : str
        file name
    path : str
        Absolute path to file.

    All attributes are read-only.
    """

    def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
        """Initialize file handle from file name or another file handle.

        Parameters
        ----------
        arg : str, File, or FileHandle
            File name or open file handle.
        mode : str
            File open mode in case 'arg' is filename.
        name : str
            Optional name of file in case 'arg' is file handle.
        offset : int
            Optional start position of file in the file. By default the
            current file position is used as offset.
        size : int
            Optional size of file in the file. By default the number of
            bytes from the current file position to the end of the file
            is used.
        """
        if isinstance(arg, basestring):  # file name
            filename = os.path.abspath(arg)
            self.path, self.name = os.path.split(filename)
            self._fh = open(filename, mode)
            self._close = True  # we opened it, so close() really closes it
            if offset is None:
                offset = 0
        elif isinstance(arg, FileHandle):
            # embedded file: share the wrapped handle, compose the offsets
            if offset is None:
                offset = arg.tell()
            else:
                offset = arg._offset + offset
            self._fh = arg._fh
            self._close = False
            if name:
                self.name = name
            else:
                # synthesize a name like "base@offset.ext"
                name, ext = os.path.splitext(arg.name)
                self.name = "%s@%i%s" % (name, offset, ext)
            self.path = arg.path
        else:  # file handle
            if offset is None:
                offset = arg.tell()
            self._fh = arg
            self._close = False
            self.name = name if name else self._fh.name
            self.path = ''
        self._offset = offset
        if size is not None:
            # explicit size; otherwise computed lazily in __getattr__
            self.size = size

    @property
    def filename(self):
        """Return the absolute path of the file."""
        return os.path.join(self.path, self.name)

    def read(self, size=-1):
        """Read up to 'size' bytes; -1 on an embedded file reads its full size."""
        if size < 0 and self._offset:
            size = self.size
        return self._fh.read(size)

    def fromfile(self, dtype, count=-1, sep=""):
        """Read 'count' items of 'dtype' from the file as a numpy array."""
        return numpy.fromfile(self._fh, dtype, count, sep)

    def tell(self):
        """Return position relative to the start of the embedded file."""
        return self._fh.tell() - self._offset

    def seek(self, offset, whence=0):
        """Seek relative to the embedded file's start (or end for whence=2)."""
        if self._offset:
            if whence == 0:
                # absolute seek: shift by the embedded file's start offset
                self._fh.seek(self._offset + offset, whence)
                return
            elif whence == 2:
                # NOTE(review): '- offset' is suspicious here; with the usual
                # whence=2 convention (offset <= 0) one would expect
                # '+ offset'. Verify against callers before relying on it.
                self._fh.seek(self._offset + self.size - offset, 0)
                return
        self._fh.seek(offset, whence)

    def close(self):
        """Close the underlying file handle, but only if this object opened it."""
        if self._close:
            self._fh.close()

    def __getattr__(self, name):
        # lazily compute and cache 'size' on first access
        if name == 'size':
            self._fh.seek(self._offset, 2)
            self.size = self._fh.tell()
            return self.size
        # delegate all other attributes to the wrapped file object
        return getattr(self._fh, name)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
class Segment(object):
    """A single ZISRAW segment header plus access to its payload."""

    __slots__ = 'sid', 'allocated_size', 'used_size', 'data_offset', '_fh'

    def __init__(self, fh, fpos=None):
        """Read a segment header at fpos (or the current position) of fh."""
        if fpos is not None:
            fh.seek(fpos)
        header = fh.read(32)
        try:
            sid, allocated, used = struct.unpack('<16sqq', header)
        except struct.error:
            raise SegmentNotFoundError("can not read ZISRAW segment")
        self.sid = stripnull(sid)
        self.allocated_size = allocated
        self.used_size = used
        if self.sid not in SEGMENT_ID:
            if not self.sid.startswith(b'ZISRAW'):
                raise SegmentNotFoundError("not a ZISRAW segment")
            warnings.warn("unknown segment type %s" % self.sid)
        self.data_offset = fh.tell()
        self._fh = fh

    def data(self):
        """Read the payload and return it as the matching *Segment instance."""
        self._fh.seek(self.data_offset)
        segment_class = SEGMENT_ID.get(self.sid, UnknownSegment)
        return segment_class(self._fh)

    def __str__(self):
        return "Segment %s %i of %i" % (
            self.sid, self.used_size, self.allocated_size)
class SegmentNotFoundError(Exception):
    """Raised when a file position does not contain a valid ZISRAW segment."""
class FileHeaderSegment(object):
    """ZISRAWFILE file header segment data.

    Contains global file metadata such as file version and GUID.
    """

    __slots__ = ('version', 'primary_file_guid', 'file_guid',
                 'file_part', 'directory_position', 'metadata_position',
                 'update_pending', 'attachment_directory_position')

    SID = b'ZISRAWFILE'

    def __init__(self, fh):
        """Unpack the 80 byte file header from the current position of fh."""
        fields = struct.unpack('<iiii16s16siqqiq', fh.read(80))
        (major, minor, _reserved1, _reserved2, primary_guid, guid,
         self.file_part, self.directory_position, self.metadata_position,
         update_pending, self.attachment_directory_position) = fields
        self.version = (major, minor)
        self.update_pending = bool(update_pending)
        self.primary_file_guid = uuid.UUID(bytes=primary_guid)
        self.file_guid = uuid.UUID(bytes=guid)

    def __str__(self):
        lines = ["%s %s" % (name, str(getattr(self, name)))
                 for name in FileHeaderSegment.__slots__]
        return "FileHeaderSegment\n " + "\n ".join(lines)
class MetadataSegment(object):
    """ZISRAWMETADATA segment data.
    Contains global image metadata in UTF-8 encoded XML format.
    """
    __slots__ = 'xml_size', 'attachment_size', 'xml_offset', '_fh'
    SID = b'ZISRAWMETADATA'
    def __init__(self, fh):
        """Read sizes from the header; defer reading the XML itself."""
        self.xml_size, self.attachment_size = struct.unpack('<ii', fh.read(8))
        fh.seek(248, 1)  # spare
        self.xml_offset = fh.tell()
        self._fh = fh
    def data(self, raw=False):
        """Read XML from file and return as unicode string.

        If 'raw' is True, return the undecoded bytes instead.
        """
        self._fh.seek(self.xml_offset)
        data = self._fh.read(self.xml_size)
        if raw:
            return data
        # normalize Windows/Mac line endings before decoding
        data = data.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
        return unicode(data, 'utf-8')
    def __str__(self):
        return "MetadataSegment\n %s" % self.data()
class SubBlockSegment(object):
    """ZISRAWSUBBLOCK segment data.
    Contains XML metadata, optional attachments, and homogeneous,
    contiguous pixel data.
    """
    __slots__ = ('metadata_size', 'attachment_size', 'data_size',
                 'directory_entry', 'data_offset', '_fh')
    SID = b'ZISRAWSUBBLOCK'
    def __init__(self, fh):
        """Read sub block header and directory entry; defer pixel data."""
        (self.metadata_size,
         self.attachment_size,
         self.data_size,
         ) = struct.unpack('<iiq', fh.read(16))
        self.directory_entry = DirectoryEntryDV(fh)
        #fh.seek(max(240 - self.directory_entry.storage_size, 0), 1)  # fill
        #self.metadata = unicode(fh.read(self.metadata_size), 'utf-8')
        # pixel data starts after the fill bytes (the header is padded out
        # to a minimum size) and after the XML metadata block
        self.data_offset = fh.tell()
        self.data_offset += max(240 - self.directory_entry.storage_size, 0)
        self.data_offset += self.metadata_size
        self._fh = fh
    def metadata(self):
        """Read metadata from file and return as XML string."""
        # metadata immediately precedes the pixel data
        self._fh.seek(self.data_offset - self.metadata_size)
        return unicode(self._fh.read(self.metadata_size), 'utf-8')
    def data(self, raw=False, bgr2rgb=True, resize=True, order=1):
        """Read image data from file and return as numpy array.

        Parameters
        ----------
        raw : bool
            If True, return the undecoded bytes.
        bgr2rgb : bool
            If True, swap the first and third samples of 3/4-sample data.
        resize : bool
            If True, resample subsampled data up to the logical ``shape``.
        order : int
            Spline interpolation order passed to ``zoom`` for resampling.
        """
        self._fh.seek(self.data_offset)
        if raw:
            return self._fh.read(self.data_size)
        elif self.compression:
            if self.compression not in DECOMPRESS:
                raise ValueError("compression unknown or not supported")
            # TODO: test this
            data = self._fh.read(self.data_size)
            data = DECOMPRESS[self.compression](data)
            if self.compression == 2:
                # LZW
                data = numpy.fromstring(data, self.dtype)
        else:
            # uncompressed: read pixels directly from the file handle
            dtype = numpy.dtype(self.dtype)
            data = self._fh.fromfile(dtype, self.data_size // dtype.itemsize)
        data = data.reshape(self.stored_shape)
        if self.stored_shape == self.shape or not resize:
            # no resampling needed; optionally swap BGR(A) <-> RGB(A)
            if bgr2rgb and self.stored_shape[-1] in (3, 4):
                tmp = data[..., 0].copy()
                data[..., 0] = data[..., 2]
                data[..., 2] = tmp
            return data
        # sub / supersampling
        factors = [j / i for i, j in zip(self.stored_shape, self.shape)]
        # snap factors within 0.01% of 1.0 to exactly 1.0
        factors = [(1.0 if abs(1.0-f) < 0.0001 else f) for f in factors]
        shape = list(self.stored_shape)
        # remove leading dimensions with factor 1.0 for speed
        for factor in factors:
            if factor != 1.0:
                break
            shape = shape[1:]
            factors = factors[1:]
        data.shape = shape
        # resize RGB components separately for speed
        if shape[-1] in (3, 4) and factors[-1] == 1.0:
            factors = factors[:-1]
            old = data
            data = numpy.empty(self.shape, self.dtype[-2:])
            for i in range(shape[-1]):
                # map BGR(A) component index to RGB(A) while resampling
                j = {0: 2, 1: 1, 2: 0, 3: 3}[i] if bgr2rgb else i
                data[..., i] = zoom(old[..., j], zoom=factors, order=order)
        else:
            data = zoom(data, zoom=factors, order=order)
            data.shape = self.shape
        return data
    def attachments(self):
        """Read optional attachments from file and return as bytes."""
        if self.attachment_size < 1:
            return b''
        # attachments follow directly after the pixel data
        self._fh.seek(self.data_offset + self.data_size)
        return self._fh.read(self.attachment_size)
    def __getattr__(self, name):
        """Directly access DirectoryEntryDV attributes."""
        return getattr(self.directory_entry, name)
    def __str__(self):
        return "SubBlockSegment\n %s\n %s" % (
            self.metadata(), str(self.directory_entry))
class DirectoryEntryDV(object):
    """Directory Entry - Schema DV."""
    #__slots__ = ('file_position', 'file_part', 'compression', 'pyramid_type',
    #             'dimension_entries', 'dtype', '_fh')
    @staticmethod
    def read_file_position(fh):
        """Return file position of associated SubBlock segment."""
        # read only the needed fields, then skip the dimension entries
        (schema_type,
         file_position,
         dimensions_count,
         ) = struct.unpack('<2s4xq14xi', fh.read(32))
        fh.seek(dimensions_count * 20, 1)  # 20 bytes per DimensionEntryDV1
        assert(schema_type == b'DV')
        return file_position
    def __init__(self, fh):
        """Read 32 byte entry header plus dimension entries from file."""
        (schema_type,
         pixel_type,
         self.file_position,
         self.file_part,  # reserved
         self.compression,
         self.pyramid_type,  # internal
         reserved1,
         reserved2,
         dimensions_count,
         ) = struct.unpack('<2siqiiBB4si', fh.read(32))
        if schema_type != b'DV':
            raise ValueError("not a DirectoryEntryDV")
        self.dtype = PIXEL_TYPE[pixel_type]
        # reverse dimension_entries to match C contiguous data
        self.dimension_entries = list(reversed(
            [DimensionEntryDV1(fh) for _ in range(dimensions_count)]))
        self._fh = fh
    @lazyattr
    def storage_size(self):
        # bytes this entry occupies on disk: header + dimension entries
        return 32 + len(self.dimension_entries) * 20
    @lazyattr
    def pixel_type(self):
        # reverse lookup: dtype string back to the CZI pixel type name
        return PIXEL_TYPE[self.dtype]
    @lazyattr
    def axes(self):
        # axes characters without the mosaic dimension; '0' denotes samples
        axes = b''.join(dim.dimension for dim in self.dimension_entries
                        if dim.dimension != b'M')
        return axes + b'0'
    @lazyattr
    def shape(self):
        # logical shape (mosaic excluded) plus trailing sample dimension
        shape = tuple(dim.size for dim in self.dimension_entries
                      if dim.dimension != b'M')
        sampleshape = numpy.dtype(self.dtype).shape
        return shape + (sampleshape if sampleshape else (1,))
    @lazyattr
    def start(self):
        # start index per axis, matching ``axes``/``shape`` ordering
        start = tuple(dim.start for dim in self.dimension_entries
                      if dim.dimension != b'M')
        return start + (0,)
    @lazyattr
    def stored_shape(self):
        # shape as stored in the file; may differ from ``shape`` if subsampled
        shape = tuple(dim.stored_size for dim in self.dimension_entries
                      if dim.dimension != b'M')
        sampleshape = numpy.dtype(self.dtype).shape
        return shape + (sampleshape if sampleshape else (1,))
    @lazyattr
    def mosaic_index(self):
        # index of this sub block within a mosaic; None if not part of one
        for dim in self.dimension_entries:
            if dim.dimension == b'M':
                return dim.start
    def data_segment(self):
        """Read and return SubBlockSegment at file_position."""
        return Segment(self._fh, self.file_position).data()
    def __str__(self):
        return "DirectoryEntryDV\n %s %s %s %s\n %s" % (
            COMPRESSION.get(self.compression, self.compression),
            self.pixel_type, self.axes, str(self.shape),
            "\n ".join(str(d) for d in self.dimension_entries))
class DimensionEntryDV1(object):
    """Dimension Entry - Schema DV.

    Describes one dimension of a sub block: start index, logical size,
    physical start coordinate, and stored (possibly subsampled) size.
    """
    __slots__ = 'dimension', 'start', 'size', 'start_coordinate', 'stored_size'
    def __init__(self, fh):
        # <4siifi: 4 byte NUL-padded dimension character, start index,
        # logical size, float start coordinate, stored size
        (self.dimension,
         self.start,
         self.size,
         self.start_coordinate,
         stored_size
         ) = struct.unpack('<4siifi', fh.read(20))
        self.dimension = stripnull(self.dimension)
        # a stored size of 0 means the data is not subsampled
        self.stored_size = stored_size if stored_size else self.size
    def __str__(self):
        return "DimensionEntryDV1 %s %i %i %f %i" % (
            self.dimension, self.start, self.size,
            self.start_coordinate, self.stored_size)
class SubBlockDirectorySegment(object):
    """ZISRAWDIRECTORY segment data.
    Contains entries of any kind, currently only DirectoryEntryDV.
    """
    __slots__ = 'entries',
    SID = b'ZISRAWDIRECTORY'
    @staticmethod
    def file_positions(fh):
        """Return list of file positions of associated SubBlock segments."""
        entry_count = struct.unpack('<i', fh.read(4))[0]
        fh.seek(124, 1)  # reserved
        # read only the positions, skipping the full entry contents
        return tuple(DirectoryEntryDV.read_file_position(fh)
                     for _ in range(entry_count))
    def __init__(self, fh):
        """Read all directory entries from the current file position."""
        entry_count = struct.unpack('<i', fh.read(4))[0]
        fh.seek(124, 1)  # reserved
        self.entries = tuple(DirectoryEntryDV(fh) for _ in range(entry_count))
    def __len__(self):
        return len(self.entries)
    def __getitem__(self, key):
        return self.entries[key]
    def __iter__(self):
        return iter(self.entries)
    def __str__(self):
        return "SubBlockDirectorySegment\n %s" % (
            "\n ".join(str(e) for e in self.entries))
class AttachmentSegment(object):
    """ZISRAWATTACH segment data.
    Contains binary or text data as specified in attachment_entry.
    """
    __slots__ = 'data_size', 'attachment_entry', 'data_offset', '_fh'
    SID = b'ZISRAWATTACH'
    def __init__(self, fh):
        """Read attachment header and entry; defer reading the data."""
        self.data_size = struct.unpack('<i', fh.read(4))[0]
        fh.seek(12, 1)  # reserved
        self.attachment_entry = AttachmentEntryA1(fh)
        fh.seek(112, 1)  # reserved
        self.data_offset = fh.tell()
        self._fh = fh
    def save(self, filename=None, directory='.'):
        """Save attachment to file in directory."""
        self._fh.seek(self.data_offset)
        if not filename:
            # default to a name derived from the attachment entry
            filename = self.attachment_entry.filename
        filename = os.path.join(directory, filename)
        with open(filename, 'wb') as fh:
            fh.write(self._fh.read(self.data_size))
    def data(self, raw=False):
        """Read embedded file and return content.

        If 'raw' is False (default), try return content according to
        CONTENT_FILE_TYPE, else return raw bytes.
        """
        self._fh.seek(self.data_offset)
        cotype = self.attachment_entry.content_file_type
        if not raw and cotype in CONTENT_FILE_TYPE:
            # dispatch to a type-specific reader (TimeStamps, EventList, ...)
            return CONTENT_FILE_TYPE[cotype](self._fh, filesize=self.data_size)
        else:
            return self._fh.read(self.data_size)
    def __str__(self):
        return "AttachmentSegment\n %s" % self.attachment_entry
class AttachmentEntryA1(object):
    """AttachmentEntry - Schema A1."""
    __slots__ = ('content_guid', 'content_file_type', 'name',
                 'file_position', '_fh')
    @staticmethod
    def read_file_position(fh):
        """Return file position of associated Attachment segment."""
        schema_type, file_position = struct.unpack('<2s10xq', fh.read(20))
        fh.seek(108, 1)  # skip the remainder of the 128 byte entry
        assert(schema_type == b'A1')
        return file_position
    def __init__(self, fh):
        """Read a 128 byte attachment entry from the current position."""
        # <2s10sqi16s8s80s: schema id, reserved, file position, file part,
        # content GUID, content file type, NUL-padded UTF-8 name
        (shema_type,
         reserved,
         self.file_position,
         file_part,  # reserved
         content_guid,
         content_file_type,
         name
         ) = struct.unpack('<2s10sqi16s8s80s', fh.read(128))
        if shema_type != b'A1':
            raise ValueError("not a AttachmentEntryA1")
        self.content_guid = uuid.UUID(bytes=content_guid)
        self.content_file_type = stripnull(content_file_type)
        self.name = unicode(stripnull(name), 'utf-8')
        self._fh = fh
    @property
    def filename(self):
        """Return unique file name for attachment."""
        # the file position makes the name unique within the CZI file
        return "%s@%i.%s" % (self.name, self.file_position,
                             unicode(self.content_file_type, 'utf-8').lower())
    def data_segment(self):
        """Read and return AttachmentSegment at file_position."""
        return Segment(self._fh, self.file_position).data()
    def __str__(self):
        return " ".join(str(i) for i in (
            "AttachmentEntryA1", self.name, self.content_file_type,
            self.content_guid))
class AttachmentDirectorySegment(object):
    """ZISRAWATTDIR segment data. Sequence of AttachmentEntryA1."""
    __slots__ = 'entries',
    SID = b'ZISRAWATTDIR'
    @staticmethod
    def file_positions(fh):
        """Return list of file positions of associated Attachment segments."""
        entry_count = struct.unpack('<i', fh.read(4))[0]
        fh.seek(252, 1)  # reserved
        # read only the positions, skipping the full entry contents
        return tuple(AttachmentEntryA1.read_file_position(fh)
                     for _ in range(entry_count))
    def __init__(self, fh):
        """Read all attachment entries from the current file position."""
        entry_count = struct.unpack('<i', fh.read(4))[0]
        fh.seek(252, 1)  # reserved
        self.entries = tuple(AttachmentEntryA1(fh) for _ in range(entry_count))
    def __len__(self):
        return len(self.entries)
    def __getitem__(self, key):
        return self.entries[key]
    def __iter__(self):
        return iter(self.entries)
    def __str__(self):
        return "AttachmentDirectorySegment\n %s" % (
            "\n ".join(str(i) for i in self.entries))
class DeletedSegment(object):
    """Placeholder for a DELETED segment; its payload carries no usable data."""
    __slots__ = ()
    SID = b'DELETED'
    def __init__(self, fh):
        # Nothing to parse: deleted segments are skipped entirely.
        pass
    def __str__(self):
        return "DeletedSegment"
class UnknownSegment(object):
    """Fallback for segment ids without a registered reader; payload ignored."""
    __slots__ = ()
    def __init__(self, fh):
        # Unrecognized segment types carry no interpretable data here.
        pass
    def __str__(self):
        return "UnknownSegment"
class TimeStamps(object):
    """CZTIMS TimeStamps content schema.
    Contains a sequence of floating point numbers, i.e. seconds relative
    to start time of acquisition.
    """
    __slots__ = 'time_stamps',
    def __init__(self, fh, filesize=None):
        # header: total size in bytes, number of float64 time stamps
        size, number = struct.unpack('<ii', fh.read(8))
        self.time_stamps = struct.unpack('<%id' % number, fh.read(8 * number))
    def __len__(self):
        return len(self.time_stamps)
    def __getitem__(self, key):
        return self.time_stamps[key]
    def __iter__(self):
        return iter(self.time_stamps)
    def __str__(self):
        return str(self.time_stamps)
class FocusPositions(object):
    """CZFOC FocusPositions content schema.
    Contains a sequence of floating point numbers, i.e. micrometers relative
    to Z start position of acquisition.
    """
    __slots__ = 'positions',
    def __init__(self, fh, filesize=None):
        # header: total size in bytes, number of float64 positions
        size, number = struct.unpack('<ii', fh.read(8))
        self.positions = struct.unpack('<%id' % number, fh.read(8 * number))
    def __len__(self):
        return len(self.positions)
    def __getitem__(self, key):
        return self.positions[key]
    def __iter__(self):
        return iter(self.positions)
    def __str__(self):
        return str(self.positions)
class EventList(object):
    """CZEVL EventList content schema. Sequence of EventListEntry."""
    __slots__ = 'events',
    def __init__(self, fh, filesize=None):
        # header: total size in bytes, number of variable-length entries
        size, number = struct.unpack('<ii', fh.read(8))
        self.events = [EventListEntry(fh) for _ in range(number)]
    def __len__(self):
        return len(self.events)
    def __getitem__(self, key):
        return self.events[key]
    def __iter__(self):
        return iter(self.events)
    def __str__(self):
        return "\n ".join(str(event) for event in self.events)
class EventListEntry(object):
    """EventListEntry content schema.

    A single acquisition event: time (seconds), type code, and description.
    """
    __slots__ = 'time', 'event_type', 'description'
    # map event type codes to names
    EV_TYPE = {0: 'MARKER', 1: 'TIME_CHANGE', 2: 'BLEACH_START',
               3: 'BLEACH_STOP', 4: 'TRIGGER'}
    def __init__(self, fh):
        # <idii: entry size, float64 time, type code, description length
        (size,
         self.time,
         self.event_type,
         description_size,
         ) = struct.unpack('<idii', fh.read(20))
        description = stripnull(fh.read(description_size))
        self.description = unicode(description, 'utf-8')
    def __str__(self):
        return "%s @ %s (%s)" % (EventListEntry.EV_TYPE[self.event_type],
                                 self.time, self.description)
class LookupTables(object):
    """CZLUT LookupTables content schema. Sequence of LookupTableEntry."""
    __slots__ = 'lookup_tables',
    def __init__(self, fh, filesize=None):
        # header: total size in bytes, number of lookup table entries
        size, number = struct.unpack('<ii', fh.read(8))
        self.lookup_tables = [LookupTableEntry(fh) for _ in range(number)]
    def __len__(self):
        return len(self.lookup_tables)
    def __getitem__(self, key):
        return self.lookup_tables[key]
    def __iter__(self):
        return iter(self.lookup_tables)
    def __str__(self):
        return "LookupTables\n %s" % str(self.lookup_tables)
class LookupTableEntry(object):
    """LookupTableEntry content schema. Sequence of ComponentEntry."""
    __slots__ = 'identifier', 'components'
    def __init__(self, fh):
        # <i80si: entry size, 80 byte NUL-padded identifier, component count
        size, identifier, number = struct.unpack('<i80si', fh.read(88))
        self.identifier = unicode(stripnull(identifier), 'utf-8')
        self.components = [ComponentEntry(fh) for _ in range(number)]
    def __len__(self):
        return len(self.components)
    def __getitem__(self, key):
        return self.components[key]
    def __iter__(self):
        return iter(self.components)
    def __str__(self):
        return "LookupTableEntry\n %s\n %s" % (
            self.identifier, "\n ".join(str(i) for i in self.components))
class ComponentEntry(object):
    """ComponentEntry content schema.

    Intensity lookup values for one color component (or interleaved RGB).
    """
    __slots__ = 'component_type', 'intensity'
    # map component type codes to names; -1 means interleaved RGB triples
    CO_TYPE = {-1: 'RGB', 1: 'RED', 2: 'GREEN', 3: 'BLUE'}
    def __init__(self, fh):
        size, self.component_type, number = struct.unpack('<iii', fh.read(12))
        # NOTE(review): ``number`` appears to be a byte count (2 bytes per
        # int16 intensity value) — TODO confirm against the CZI spec
        self.intensity = fh.fromfile(dtype='<i2', count=number//2)
        if self.component_type == -1:
            # interleaved RGB: reshape to (n, 3) triples
            self.intensity = self.intensity.reshape(-1, 3)
    def __str__(self):
        return "ComponentEntry %s %s" % (
            ComponentEntry.CO_TYPE[self.component_type],
            str(self.intensity.shape))
def xml_reader(fh, filesize):
    """Read XML from file and return as xml.ElementTree root Element."""
    # strip trailing NUL padding before decoding and parsing
    xml = unicode(stripnull(fh.read(filesize)), 'utf-8')
    return etree.fromstring(xml)
def match_filename(filename):
    """Return master file name and file part number from CZI file name.

    Multi-file CZI data sets use names of the form ``name(part).czi``;
    the master file is plain ``name.czi`` and has part number 0.

    >>> match_filename('scan(2).czi')
    ('scan.czi', 2)
    >>> match_filename('scan.czi')
    ('scan.czi', 0)
    """
    match = re.search(r'(.*?)(?:\((\d+)\))?\.czi$',
                      filename, re.IGNORECASE).groups()
    name = match[0] + '.czi'
    # groups() always yields two elements; the part group is None for master
    # files, so test the group itself (``len(match) > 1`` was always true
    # and made int(None) raise TypeError for names without a part number).
    part = int(match[1]) if match[1] else 0
    return name, part
def decodejxr(data):
    """Decode JXR data stream into ndarray via temporary file."""
    # _czifile.decodejxr operates on file names, so round-trip through disk
    fd, filename = tempfile.mkstemp(suffix='.jxr')
    with os.fdopen(fd, 'wb') as fh:
        fh.write(data)
    if isinstance(filename, unicode):
        # the extension module expects a bytes file name
        filename = filename.encode('ascii')
    try:
        out = _czifile.decodejxr(filename)
    finally:
        os.remove(filename)  # always clean up the temporary file
    return out
def decodejpg(data):
    """Decode JPG data stream into ndarray using the _czifile extension."""
    return _czifile.decodejpg(data, len(data))
# map Segment.sid to data reader
SEGMENT_ID = {
    FileHeaderSegment.SID: FileHeaderSegment,
    SubBlockDirectorySegment.SID: SubBlockDirectorySegment,
    SubBlockSegment.SID: SubBlockSegment,
    MetadataSegment.SID: MetadataSegment,
    AttachmentSegment.SID: AttachmentSegment,
    AttachmentDirectorySegment.SID: AttachmentDirectorySegment,
    DeletedSegment.SID: DeletedSegment,
}
# map AttachmentEntryA1.content_file_type to attachment reader.
CONTENT_FILE_TYPE = {
    b'CZI': CziFile,
    b'ZISRAW': CziFile,
    b'CZTIMS': TimeStamps,
    b'CZEVL': EventList,
    b'CZLUT': LookupTables,
    b'CZFOC': FocusPositions,
    b'CZEXP': xml_reader,  # Experiment
    b'CZHWS': xml_reader,  # HardwareSetting
    b'CZMVM': xml_reader,  # MultiviewMicroscopy
    # b'CZPML': PalMoleculeList,  # undocumented
    # b'ZIP'
    # b'JPG'
}
# map DirectoryEntryDV.pixeltype to numpy dtypes and back (bidirectional)
PIXEL_TYPE = {
    0: '<u1', 'Gray8': '<u1', '<u1': 'Gray8',
    1: '<u2', 'Gray16': '<u2', '<u2': 'Gray16',
    2: '<f4', 'Gray32Float': '<f4', '<f4': 'Gray32Float',
    3: '<3u1', 'Bgr24': '<3u1', '<3u1': 'Bgr24',
    4: '<3u2', 'Bgr48': '<3u2', '<3u2': 'Bgr48',
    8: '<3f4', 'Bgr96Float': '<3f4', '<3f4': 'Bgr96Float',
    9: '<4u1', 'Bgra32': '<4u1', '<4u1': 'Bgra32',
    10: '<F8', 'Gray64ComplexFloat': '<F8', '<F8': 'Gray64ComplexFloat',
    11: '<3F8', 'Bgr192ComplexFloat': '<3F8', '<3F8': 'Bgr192ComplexFloat',
    12: '<i4', 'Gray32': '<i4', '<i4': 'Gray32',
    13: '<i8', 'Gray64': '<i8', '<i8': 'Gray64',
}
# map dimension character to description
DIMENSIONS = {
    b'0': 'Sample',  # e.g. RGBA
    b'X': 'Width',
    b'Y': 'Height',
    b'C': 'Channel',
    b'Z': 'Slice',  # depth
    b'T': 'Time',
    b'R': 'Rotation',
    b'S': 'Scene',
    b'I': 'Illumination',  # direction
    b'B': 'Block',  # acquisition
    b'M': 'Mosaic',  # tile
    b'H': 'Phase',
    b'V': 'View',
}
# map DirectoryEntryDV.compression to description
COMPRESSION = {
    0: "Uncompressed",
    1: "JpgFile",
    2: "LZW",
    4: "JpegXrFile",
    # 100 and up: camera/system specific RAW data
}
# map DirectoryEntryDV.compression to decompression function
DECOMPRESS = {
    0: lambda x: x,  # uncompressed
    2: decodelzw,  # LZW
}
if _have_czifile:
    # JPEG and JPEG XR codecs require the optional _czifile extension module
    DECOMPRESS[1] = decodejpg
    DECOMPRESS[4] = decodejxr
if sys.version_info[0] > 2:
    # Python 3 compatibility aliases used throughout this module
    unicode = str
    basestring = str, bytes
if __name__ == "__main__":
    import doctest
    numpy.set_printoptions(suppress=True, precision=5)
    doctest.testmod()
| |
import functools
import os
from django.apps import apps
from django.conf import settings
from django.contrib.staticfiles import utils
from django.core.checks import Error
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import (
FileSystemStorage, Storage, default_storage,
)
from django.utils._os import safe_join
from django.utils.functional import LazyObject, empty
from django.utils.module_loading import import_string
# Keeps track of which directories the finders have already searched for static files.
searched_locations = []
class BaseFinder:
    """Abstract base class defining the staticfiles finder interface."""

    def check(self, **kwargs):
        """Hook for system checks; subclasses may override to validate setup."""
        raise NotImplementedError(
            'subclasses may provide a check() method to verify the finder is '
            'configured correctly.'
        )

    def find(self, path, all=False):
        """
        Resolve a relative file path to an absolute one.

        Return only the first matching path when ``all`` is False (the
        default); otherwise return a list of every matching path.
        """
        raise NotImplementedError('subclasses of BaseFinder must provide a find() method')

    def list(self, ignore_patterns):
        """
        Yield two-item (relative path, storage instance) pairs, skipping
        anything matched by the optional ``ignore_patterns``.
        """
        raise NotImplementedError('subclasses of BaseFinder must provide a list() method')
class FileSystemFinder(BaseFinder):
    """
    A static files finder that uses the ``STATICFILES_DIRS`` setting
    to locate files.
    """
    def __init__(self, app_names=None, *args, **kwargs):
        # List of (prefix, root) locations with static files
        self.locations = []
        # Maps dir paths to an appropriate storage instance
        self.storages = {}
        for root in settings.STATICFILES_DIRS:
            if isinstance(root, (list, tuple)):
                # entries may be given as (prefix, root) pairs
                prefix, root = root
            else:
                prefix = ''
            if (prefix, root) not in self.locations:
                self.locations.append((prefix, root))
        for prefix, root in self.locations:
            filesystem_storage = FileSystemStorage(location=root)
            # remember the URL prefix on the storage for later use
            filesystem_storage.prefix = prefix
            self.storages[root] = filesystem_storage
        super().__init__(*args, **kwargs)
    def check(self, **kwargs):
        """Validate the STATICFILES_DIRS setting; return a list of Errors."""
        errors = []
        if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
            errors.append(Error(
                'The STATICFILES_DIRS setting is not a tuple or list.',
                hint='Perhaps you forgot a trailing comma?',
                id='staticfiles.E001',
            ))
        for root in settings.STATICFILES_DIRS:
            if isinstance(root, (list, tuple)):
                prefix, root = root
                if prefix.endswith('/'):
                    errors.append(Error(
                        'The prefix %r in the STATICFILES_DIRS setting must '
                        'not end with a slash.' % prefix,
                        id='staticfiles.E003',
                    ))
            if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
                errors.append(Error(
                    'The STATICFILES_DIRS setting should not contain the '
                    'STATIC_ROOT setting.',
                    id='staticfiles.E002',
                ))
        return errors
    def find(self, path, all=False):
        """
        Look for files in the extra locations as defined in STATICFILES_DIRS.
        """
        matches = []
        for prefix, root in self.locations:
            # record for debugging which roots were searched
            if root not in searched_locations:
                searched_locations.append(root)
            matched_path = self.find_location(root, path, prefix)
            if matched_path:
                if not all:
                    return matched_path
                matches.append(matched_path)
        return matches
    def find_location(self, root, path, prefix=None):
        """
        Find a requested static file in a location and return the found
        absolute path (or ``None`` if no match).
        """
        if prefix:
            # strip the prefix (plus separator) before joining with root
            prefix = '%s%s' % (prefix, os.sep)
            if not path.startswith(prefix):
                return None
            path = path[len(prefix):]
        path = safe_join(root, path)  # rejects paths escaping the root
        if os.path.exists(path):
            return path
    def list(self, ignore_patterns):
        """
        List all files in all locations.
        """
        for prefix, root in self.locations:
            storage = self.storages[root]
            for path in utils.get_files(storage, ignore_patterns):
                yield path, storage
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute.
    """
    storage_class = FileSystemStorage
    source_dir = 'static'
    def __init__(self, app_names=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app names to storage instances
        self.storages = {}
        app_configs = apps.get_app_configs()
        if app_names:
            # restrict handling to the given subset of installed apps
            app_names = set(app_names)
            app_configs = [ac for ac in app_configs if ac.name in app_names]
        for app_config in app_configs:
            app_storage = self.storage_class(
                os.path.join(app_config.path, self.source_dir))
            if os.path.isdir(app_storage.location):
                # only register apps that actually have a static dir
                self.storages[app_config.name] = app_storage
                if app_config.name not in self.apps:
                    self.apps.append(app_config.name)
        super().__init__(*args, **kwargs)
    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        """
        for storage in self.storages.values():
            if storage.exists(''):  # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage
    def find(self, path, all=False):
        """
        Look for files in the app directories.
        """
        matches = []
        for app in self.apps:
            app_location = self.storages[app].location
            # record for debugging which locations were searched
            if app_location not in searched_locations:
                searched_locations.append(app_location)
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches
    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.
        """
        storage = self.storages.get(app)
        if storage:
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
class BaseStorageFinder(BaseFinder):
    """
    A base static files finder to be extended with a custom storage class.
    """
    # Storage class or instance; subclasses must set this (or pass one in).
    storage = None
    def __init__(self, storage=None, *args, **kwargs):
        if storage is not None:
            self.storage = storage
        if self.storage is None:
            raise ImproperlyConfigured("The staticfiles storage finder %r "
                                       "doesn't have a storage class "
                                       "assigned." % self.__class__)
        # Make sure we have a storage instance here.
        if not isinstance(self.storage, (Storage, LazyObject)):
            self.storage = self.storage()
        super().__init__(*args, **kwargs)
    def find(self, path, all=False):
        """
        Look for files in the default file storage, if it's local.
        """
        try:
            self.storage.path('')
        except NotImplementedError:
            # non-local storage backends cannot be searched on disk
            pass
        else:
            if self.storage.location not in searched_locations:
                searched_locations.append(self.storage.location)
            if self.storage.exists(path):
                match = self.storage.path(path)
                if all:
                    match = [match]
                return match
        return []
    def list(self, ignore_patterns):
        """
        List all files of the storage.
        """
        for path in utils.get_files(self.storage, ignore_patterns):
            yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
    """
    A static files finder that uses the default storage backend.
    """
    storage = default_storage
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ``empty`` is used as the getattr default, so only an explicitly
        # falsy base_location triggers the configuration error
        base_location = getattr(self.storage, 'base_location', empty)
        if not base_location:
            raise ImproperlyConfigured("The storage backend of the "
                                       "staticfiles finder %r doesn't have "
                                       "a valid location." % self.__class__)
def find(path, all=False):
    """
    Find a static file with the given path using all enabled finders.

    If ``all`` is ``False`` (default), return the first matching
    absolute path (or ``None`` if no match). Otherwise return a list.
    """
    del searched_locations[:]  # reset the debug record of searched dirs
    collected = []
    for finder in get_finders():
        result = finder.find(path, all=all)
        if result and not all:
            # first-match mode: stop at the first truthy result
            return result
        if isinstance(result, (list, tuple)):
            collected.extend(result)
        else:
            collected.append(result)
    if collected:
        return collected
    # No match.
    return [] if all else None
def get_finders():
    """Yield an instance of every finder enabled in STATICFILES_FINDERS."""
    for dotted_path in settings.STATICFILES_FINDERS:
        yield get_finder(dotted_path)
@functools.lru_cache(maxsize=None)
def get_finder(import_path):
    """
    Import and instantiate the staticfiles finder class named by
    ``import_path`` (a full dotted Python path); instances are cached.
    """
    finder_cls = import_string(import_path)
    if not issubclass(finder_cls, BaseFinder):
        raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
                                   (finder_cls, BaseFinder))
    return finder_cls()
| |
# -*- coding: utf-8 -*-
"""This module provides an implementation of Quasi-Newton methods
(BFGS, sBFGS and l-BFGS).
The Taylor expansion up to second order of a function :math:`f(\\theta_t)`
allows a local quadratic approximation of :math:`f(\\theta_t + d_t)`:
.. math::
f(\\theta_t + d_t) \\approx f(\\theta_t) + d_t^Tf'(\\theta_t) + \\frac{1}{2}d_t^TH_td_t
where the symmetric positive definite matrix :math:`H_t` is the Hessian at :math:`\\theta_t`.
The minimizer :math:`d_t` of this convex quadratic model is:
.. math::
d_t = -H^{-1}f'(\\theta_t).
For large scale problems both computing/storing the Hessian and solving the above linear
system is computationally demanding. Instead of recomputing the Hessian from scratch at every
iteration, quasi-Newton methods utilize successive measurements of the gradient
to build a sufficiently good quadratic model of the objective function. The above formula
is then applied to yield a direction :math:`d_t`. The update done is then of the form
.. math::
\\theta_{t+1} = \\alpha_t d_t + \\theta_t
where :math:`\\alpha_t` is obtained with a line search.
.. note::
The classes presented here are not working with gnumpy.
"""
from __future__ import absolute_import
import warnings
import scipy
import numpy as np
import scipy.linalg
import scipy.optimize
from .base import Minimizer, is_nonzerofinite
from .linesearch import WolfeLineSearch
class Bfgs(Minimizer):
    """BFGS (Broyden-Fletcher-Goldfarb-Shanno) is one of the most well-known
    quasi-Newton methods. The main idea is to iteratively construct an approximate inverse
    Hessian :math:`B^{-1}_t` by a rank-2 update:

    .. math::
        B^{-1}_{t+1} = B^{-1}_t + (1 + \\frac{y_t^TB^{-1}_ty_t}{y_t^Ts_t})\\frac{s_ts_t^T}{s_t^Ty_t} - \\frac{s_ty_t^TB^{-1}_t + B^{-1}_ty_ts_t^T}{s_t^Ty_t},

    where :math:`y_t = f(\\theta_{t+1}) - f(\\theta_{t})` and :math:`s_t = \\theta_{t+1} - \\theta_t`.

    The storage requirements for BFGS scale quadratically with the number of
    variables. For detailed derivations, see [nocedal2006a]_, chapter 6.

    .. [nocedal2006a] Nocedal, J. and Wright, S. (2006),
        Numerical Optimization, 2nd edition, Springer.

    Attributes
    ----------
    wrt : array_like
        Current solution to the problem. Can be given as a first argument to \
        ``.f`` and ``.fprime``.
    f : Callable
        The object function.
    fprime : Callable
        First derivative of the objective function. Returns an array of the \
        same shape as ``.wrt``.
    initial_inv_hessian : array_like
        The initial estimate of the approximate inverse Hessian.
    line_search : LineSearch object.
        Line search object to perform line searches with.
    args : iterable
        Iterator over arguments which ``fprime`` will be called with.
    """

    def __init__(self, wrt, f, fprime, initial_inv_hessian=None,
                 line_search=None, args=None):
        """Create a BFGS object.

        Parameters
        ----------
        wrt : array_like
            Array that represents the solution. Will be operated upon in
            place. ``f`` and ``fprime`` should accept this array as a first argument.
        f : callable
            The objective function.
        fprime : callable
            Callable that given a solution vector as first parameter and *args
            and **kwargs drawn from the iterations ``args`` returns a
            search direction, such as a gradient.
        initial_inv_hessian : array_like
            The initial estimate of the approximate inverse Hessian.
        line_search : LineSearch object.
            Line search object to perform line searches with.
        args : iterable
            Iterator over arguments which ``fprime`` will be called with.
        """
        super(Bfgs, self).__init__(wrt, args=args)
        self.f = f
        self.fprime = fprime
        self.inv_hessian = initial_inv_hessian
        if line_search is not None:
            self.line_search = line_search
        else:
            self.line_search = WolfeLineSearch(wrt, self.f, self.fprime)

    def set_from_info(self, info):
        # NotImplemented is a comparison sentinel, not an exception class;
        # raising it fails with a TypeError. Raise NotImplementedError.
        raise NotImplementedError(
            'nobody has found the time to implement this yet')

    def extended_info(self, **kwargs):
        raise NotImplementedError(
            'nobody has found the time to implement this yet')

    def find_direction(self, grad_m1, grad, step, inv_hessian):
        """Apply the BFGS rank-2 update to ``inv_hessian`` in place and
        return the quasi-Newton direction and an info dict.

        Parameters
        ----------
        grad_m1 : array_like
            Gradient at the previous iterate.
        grad : array_like
            Gradient at the current iterate.
        step : array_like
            Last parameter update :math:`s_t`.
        inv_hessian : array_like
            Current inverse Hessian estimate; updated in place.
        """
        # Use the passed-in estimate; the previous version ignored this
        # parameter and always read self.inv_hessian (the same object when
        # called from __iter__, but inconsistent with Sbfgs.find_direction).
        H = inv_hessian
        grad_diff = grad - grad_m1
        ys = np.inner(grad_diff, step)   # y^T s
        Hy = np.dot(H, grad_diff)        # H y
        yHy = np.inner(grad_diff, Hy)    # y^T H y
        H += (ys + yHy) * np.outer(step, step) / ys ** 2
        H -= (np.outer(Hy, step) + np.outer(step, Hy)) / ys
        direction = -np.dot(H, grad)
        return direction, {'gradient_diff': grad_diff}

    def __iter__(self):
        args, kwargs = next(self.args)
        grad = self.fprime(self.wrt, *args, **kwargs)
        grad_m1 = scipy.zeros(grad.shape)

        # Default to the identity as the initial inverse Hessian estimate.
        if self.inv_hessian is None:
            self.inv_hessian = scipy.eye(grad.shape[0])

        for i, (next_args, next_kwargs) in enumerate(self.args):
            if i == 0:
                # No curvature information yet: fall back to steepest descent.
                direction, info = -grad, {}
            else:
                direction, info = self.find_direction(
                    grad_m1, grad, step, self.inv_hessian)

            if not is_nonzerofinite(direction):
                # TODO: inform the user here.
                break

            step_length = self.line_search.search(
                direction, None, args, kwargs)

            if step_length != 0:
                step = step_length * direction
                self.wrt += step
            else:
                self.logfunc(
                    {'message': 'step length is 0--need to bail out.'})
                break

            # Prepare everything for the next loop.
            args, kwargs = next_args, next_kwargs
            # TODO: not all line searches have .grad!
            # Copy in place so the arrays referenced above stay valid.
            grad_m1[:], grad[:] = grad, self.line_search.grad

            info.update({
                'step_length': step_length,
                'n_iter': i,
                'args': args,
                'kwargs': kwargs,
            })
            yield info
class Sbfgs(Bfgs):
    """Variant of :class:`Bfgs` whose inverse Hessian update rescales the
    previous estimate by :math:`\\gamma = y^Ts / y^THy` before the rank
    updates (see ``find_direction``). Constructor arguments are identical
    to :meth:`Bfgs.__init__`."""

    def __init__(self, wrt, f, fprime, initial_inv_hessian=None,
                 line_search=None, args=None):
        """Create an Sbfgs object; see :meth:`Bfgs.__init__` for parameters."""
        # Pass arguments by keyword: the previous version passed
        # ``line_search`` positionally into the ``initial_inv_hessian``
        # slot, which dropped the given line search and installed a
        # non-array object as the inverse Hessian estimate.
        super(Sbfgs, self).__init__(
            wrt, f, fprime, initial_inv_hessian=initial_inv_hessian,
            line_search=line_search, args=args)

    def set_from_info(self, info):
        # NotImplemented is not an exception class; raise NotImplementedError.
        raise NotImplementedError(
            'nobody has found the time to implement this yet')

    def extended_info(self, **kwargs):
        raise NotImplementedError(
            'nobody has found the time to implement this yet')

    def find_direction(self, grad_m1, grad, step, inv_hessian):
        """Apply the scaled BFGS update to ``inv_hessian`` in place and
        return the quasi-Newton direction and an (empty) info dict."""
        H = inv_hessian
        grad_diff = grad - grad_m1
        ys = np.inner(grad_diff, step)   # y^T s
        Hy = np.dot(H, grad_diff)        # H y
        yHy = np.inner(grad_diff, Hy)    # y^T H y
        gamma = ys / yHy                 # self-scaling factor
        v = scipy.sqrt(yHy) * (step / ys - Hy / yHy)
        # guard against complex values from sqrt of a negative yHy
        v = scipy.real(v)
        H[:] = gamma * (H - np.outer(Hy, Hy) / yHy + np.outer(v, v))
        H += np.outer(step, step) / ys
        direction = -np.dot(H, grad)
        return direction, {}
class Lbfgs(Minimizer):
    """l-BFGS (limited-memory BFGS) is a limited memory variation of the
    well-known BFGS algorithm. The storage requirement for BFGS scale
    quadratically with the number of variables, and thus it tends to be used
    only for smaller problems. Limited-memory BFGS reduces the storage by only
    using the :math:`l` latest updates (factors) in computing the approximate
    Hessian inverse and representing this approximation only implicitly. More
    specifically, it stores the last :math:`l` BFGS update vectors
    :math:`y_t` and :math:`s_t` and uses these to implicitly perform the
    matrix operations of BFGS (see [nocedal2006a]_).

    .. note::
       In order to handle simple box constraints, consider
       ``scipy.optimize.fmin_l_bfgs_b``.

    Attributes
    ----------
    wrt : array_like
        Current solution to the problem. Can be given as a first argument to \
        ``.f`` and ``.fprime``.
    f : Callable
        The object function.
    fprime : Callable
        First derivative of the objective function. Returns an array of the \
        same shape as ``.wrt``.
    initial_hessian_diag : array_like
        The initial estimate of the diagonal of the Hessian.
    n_factors : int
        The number of factors that should be used to implicitly represent the
        inverse Hessian.
    line_search : LineSearch object.
        Line search object to perform line searches with.
    args : iterable
        Iterator over arguments which ``fprime`` will be called with.
    """

    def __init__(self, wrt, f, fprime, initial_hessian_diag=1,
                 n_factors=10, line_search=None,
                 args=None):
        """Create an Lbfgs object.

        Parameters
        ----------
        wrt : array_like
            Current solution to the problem. Can be given as a first argument
            to ``.f`` and ``.fprime``.
        f : Callable
            The object function.
        fprime : Callable
            First derivative of the objective function. Returns an array of
            the same shape as ``.wrt``.
        initial_hessian_diag : array_like
            The initial estimate of the diagonal of the Hessian.
        n_factors : int
            The number of factors that should be used to implicitly represent
            the inverse Hessian.
        line_search : LineSearch object.
            Line search object to perform line searches with.  Defaults to a
            ``WolfeLineSearch`` when not given.
        args : iterable
            Iterator over arguments which ``fprime`` will be called with.
        """
        super(Lbfgs, self).__init__(wrt, args=args)
        self.f = f
        self.fprime = fprime
        self.initial_hessian_diag = initial_hessian_diag
        self.n_factors = n_factors
        if line_search is not None:
            self.line_search = line_search
        else:
            self.line_search = WolfeLineSearch(wrt, self.f, self.fprime)

    def set_from_info(self, info):
        # Fixed: `NotImplemented` is a non-callable constant; raising it
        # produced a TypeError rather than the intended exception.
        raise NotImplementedError('nobody has found the time to implement this yet')

    def extended_info(self, **kwargs):
        raise NotImplementedError('nobody has found the time to implement this yet')

    def find_direction(self, grad_diffs, steps, grad, hessian_diag, idxs):
        """Return the search direction via the l-BFGS two-loop recursion.

        ``grad`` is consumed as passed by the caller (``__iter__`` passes the
        *negated* gradient), so the returned vector is used directly as the
        descent direction.  ``idxs`` maps iteration age to rows of
        ``grad_diffs``/``steps``.
        """
        grad = grad.copy()  # We will change this.
        n_current_factors = len(idxs)
        # TODO: find a good name for this variable.
        rho = scipy.empty(n_current_factors)
        # TODO: vectorize this function
        for i in idxs:
            rho[i] = 1 / scipy.inner(grad_diffs[i], steps[i])
        # TODO: find a good name for this variable as well.
        alpha = scipy.empty(n_current_factors)
        # First loop: walk the factors from newest to oldest.
        for i in idxs[::-1]:
            alpha[i] = rho[i] * scipy.inner(steps[i], grad)
            grad -= alpha[i] * grad_diffs[i]
        z = hessian_diag * grad
        # TODO: find a good name for this variable (surprise!)
        beta = scipy.empty(n_current_factors)
        # Second loop: oldest to newest.
        for i in idxs:
            beta[i] = rho[i] * scipy.inner(grad_diffs[i], z)
            z += steps[i] * (alpha[i] - beta[i])
        return z, {}

    def __iter__(self):
        """Iterate the optimization, yielding an info dict per step."""
        args, kwargs = next(self.args)
        grad = self.fprime(self.wrt, *args, **kwargs)
        grad_m1 = scipy.zeros(grad.shape)
        factor_shape = self.n_factors, self.wrt.shape[0]
        grad_diffs = scipy.zeros(factor_shape)
        steps = scipy.zeros(factor_shape)
        hessian_diag = self.initial_hessian_diag
        step_length = None
        step = scipy.empty(grad.shape)
        grad_diff = scipy.empty(grad.shape)
        # We need to keep track in which order the different statistics
        # from different runs are saved.
        #
        # Why?
        #
        # Each iteration, we save statistics such as the difference between
        # gradients and the actual steps taken. These are then later combined
        # into an approximation of the Hessian. We call them factors. Since we
        # don't want to create a new matrix of factors each iteration, we
        # instead keep track externally, which row of the matrix corresponds
        # to which iteration. `idxs` now is a list which maps its i'th element
        # to the corresponding index for the array. Thus, idx[i] contains the
        # rowindex of the for the (n_factors - i)'th iteration prior to the
        # current one.
        idxs = []
        for i, (next_args, next_kwargs) in enumerate(self.args):
            if i == 0:
                # No curvature information yet: steepest descent.
                direction = -grad
                info = {}
            else:
                sTgd = scipy.inner(step, grad_diff)
                if sTgd > 1E-10:
                    # Don't do an update if this value is too small.
                    # Determine index for the current update.
                    if not idxs:
                        # First iteration.
                        this_idx = 0
                    elif len(idxs) < self.n_factors:
                        # We are not "full" yet. Thus, append the next idxs.
                        this_idx = idxs[-1] + 1
                    else:
                        # we are full and discard the first index.
                        this_idx = idxs.pop(0)
                    idxs.append(this_idx)
                    grad_diffs[this_idx] = grad_diff
                    steps[this_idx] = step
                    hessian_diag = sTgd / scipy.inner(grad_diff, grad_diff)
                direction, info = self.find_direction(
                    grad_diffs, steps, -grad, hessian_diag, idxs)
            if not is_nonzerofinite(direction):
                warnings.warn('search direction is either 0, nan or inf')
                break
            step_length = self.line_search.search(
                direction, None, args, kwargs)
            step[:] = step_length * direction
            if step_length != 0:
                self.wrt += step
            else:
                warnings.warn('step length is 0')
                pass
            # Prepare everything for the next loop.
            args, kwargs = next_args, next_kwargs
            # TODO: not all line searches have .grad!
            grad_m1[:], grad[:] = grad, self.line_search.grad
            grad_diff = grad - grad_m1
            info.update({
                'step_length': step_length,
                'n_iter': i,
                'args': args,
                'kwargs': kwargs,
                'loss': self.line_search.val,
                'gradient': grad,
                'gradient_m1': grad_m1,
            })
            yield info
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import OrderedDict
from contextlib import contextmanager
from textwrap import dedent
from mock import Mock
from pex.package import Package
from twitter.common.collections import OrderedSet
from twitter.common.dirutil.chroot import Chroot
from pants.backend.python.subsystems.python_setup import PythonSetup
from pants.backend.python.tasks.setup_py import SetupPy
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.prep_command import PrepCommand
from pants.build_graph.resources import Resources
from pants.build_graph.target import Target
from pants.fs.archive import TGZ
from pants.util.contextutil import environment_as, temporary_dir, temporary_file
from pants.util.dirutil import safe_mkdir
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
from pants_test.subsystem.subsystem_util import init_subsystem
class SetupPyTestBase(PythonTaskTestBase):
  """Shared fixture for SetupPy task tests: distdir setup and task execution."""

  @classmethod
  def task_type(cls):
    """The task class under test."""
    return SetupPy

  def setUp(self):
    """Point pants' distdir at a 'dist' directory inside the build root."""
    super(SetupPyTestBase, self).setUp()
    self.distdir = os.path.join(self.build_root, 'dist')
    self.set_options(pants_distdir=self.distdir)
    init_subsystem(Target.Arguments)

  @contextmanager
  def run_execute(self, target, recursive=False):
    """Run the SetupPy task against `target` and yield the dists product."""
    self.set_options(recursive=recursive)
    task_context = self.context(target_roots=[target])
    task = self.create_task(task_context)
    task.execute()
    yield task_context.products.get_data(SetupPy.PYTHON_DISTS_PRODUCT)
class TestSetupPyInterpreter(SetupPyTestBase):
  """Checks that setup.py runs against the setuptools/wheel pants installs."""

  class PythonPathInspectableSetupPy(SetupPy):
    # Subclass whose generated setup.py registers a custom distutils command
    # so the test can dump sys.path from inside the setup.py run.
    def _setup_boilerplate(self):
      return dedent("""
      # DO NOT EDIT THIS FILE -- AUTOGENERATED BY PANTS
      # Target: {setup_target}
      from setuptools import setup
      from foo.commands.print_sys_path import PrintSysPath
      setup(
        cmdclass={{'print_sys_path': PrintSysPath}},
        **{setup_dict}
      )
      """)

  @classmethod
  def task_type(cls):
    return cls.PythonPathInspectableSetupPy

  def test_setuptools_version(self):
    """The sys.path seen by setup.py starts with the pinned setuptools/wheel."""
    self.create_file('src/python/foo/__init__.py')
    # A library providing the custom command that writes sys.path to a file.
    self.create_python_library(
      relpath='src/python/foo/commands',
      name='commands',
      source_contents_map={
        'print_sys_path.py': dedent("""
        import os
        import sys
        from setuptools import Command


        class PrintSysPath(Command):
          user_options = []

          def initialize_options(self):
            pass

          def finalize_options(self):
            pass

          def run(self):
            with open(os.path.join(os.path.dirname(__file__), 'sys_path.txt'), 'w') as fp:
              fp.write(os.linesep.join(sys.path))
        """)
      },
    )
    foo = self.create_python_library(
      relpath='src/python/foo',
      name='foo',
      dependencies=[
        'src/python/foo/commands',
      ],
      provides=dedent("""
      setup_py(
        name='foo',
        version='0.0.0',
      )
      """)
    )
    self.set_options(run='print_sys_path')
    # Make sure setup.py can see our custom distutils Command 'print_sys_path'.
    sdist_srcdir = os.path.join(self.distdir, 'foo-0.0.0', 'src')
    with environment_as(PYTHONPATH=sdist_srcdir):
      with self.run_execute(foo):
        with open(os.path.join(sdist_srcdir, 'foo', 'commands', 'sys_path.txt')) as fp:
          def assert_extra(name, expected_version):
            # Each line of the file is one sys.path entry; parse it as a
            # package href and check its name/version.
            package = Package.from_href(fp.readline().strip())
            self.assertEqual(name, package.name)
            self.assertEqual(expected_version, package.raw_version)

          # The 1st two elements of the sys.path should be our custom SetupPyRunner Installer's
          # setuptools and wheel mixins, which should match the setuptools and wheel versions
          # specified by the PythonSetup subsystem.
          init_subsystem(PythonSetup)
          python_setup = PythonSetup.global_instance()
          assert_extra('setuptools', python_setup.setuptools_version)
          assert_extra('wheel', python_setup.wheel_version)
class TestSetupPy(SetupPyTestBase):
  """Tests for SetupPy's reduced-dependency calculation and sdist creation."""

  def setUp(self):
    super(TestSetupPy, self).setUp()
    # Calculator under test: resolves the reduced (exported-targets-only)
    # dependency closure of a target.
    self.dependency_calculator = SetupPy.DependencyCalculator(self.build_graph)

  @classmethod
  def alias_groups(cls):
    """Expose the extra target aliases the BUILD files in these tests use."""
    extra_aliases = BuildFileAliases(targets={'prep_command': PrepCommand,
                                              'resources': Resources,
                                              'target': Target})
    return super(TestSetupPy, cls).alias_groups().merge(extra_aliases)

  def create_dependencies(self, depmap):
    """Create one exported python_library per key of `depmap` and wire deps.

    Returns a dict mapping name -> created target.
    """
    target_map = {}
    for name, deps in depmap.items():
      target_map[name] = self.create_python_library(
        relpath=name,
        name=name,
        provides='setup_py(name="{name}", version="0.0.0")'.format(name=name)
      )
    # Second pass: all targets now exist, so dependency edges can be injected.
    for name, deps in depmap.items():
      target = target_map[name]
      dep_targets = [target_map[name] for name in deps]
      for dep in dep_targets:
        self.build_graph.inject_dependency(target.address, dep.address)
    return target_map

  def assert_requirements(self, target, expected):
    """Check the install_requires derived from `target`'s reduced deps."""
    reduced_dependencies = self.dependency_calculator.reduced_dependencies(target)
    self.assertEqual(SetupPy.install_requires(reduced_dependencies), expected)

  def test_reduced_dependencies_1(self):
    """A linear chain reduces to the direct exported dependency only."""
    # foo -> bar -> baz
    dep_map = OrderedDict(foo=['bar'], bar=['baz'], baz=[])
    target_map = self.create_dependencies(dep_map)
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['foo']),
                     OrderedSet([target_map['bar']]))
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['bar']),
                     OrderedSet([target_map['baz']]))
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['baz']),
                     OrderedSet())
    self.assert_requirements(target_map['foo'], {'bar==0.0.0'})
    self.assert_requirements(target_map['bar'], {'baz==0.0.0'})
    self.assert_requirements(target_map['baz'], set())

  def test_execution_reduced_dependencies_1(self):
    """Non-recursive runs build only the root; recursive builds the closure."""
    dep_map = OrderedDict(foo=['bar'], bar=['baz'], baz=[])
    target_map = self.create_dependencies(dep_map)
    with self.run_execute(target_map['foo'], recursive=False) as created:
      # NOTE(review): comparing a list against created.keys() relies on
      # Python 2 dict.keys() returning a list; under Python 3 a view would
      # never compare equal to a list.
      self.assertEqual([target_map['foo']], created.keys())
    with self.run_execute(target_map['foo'], recursive=True) as created:
      self.assertEqual({target_map['baz'], target_map['bar'], target_map['foo']},
                       set(created.keys()))

  def test_reduced_dependencies_2(self):
    """Both direct deps are kept even when one is reachable via the other."""
    # foo --> baz
    # |        ^
    # v        |
    # bar ----'
    dep_map = OrderedDict(foo=['bar', 'baz'], bar=['baz'], baz=[])
    target_map = self.create_dependencies(dep_map)
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['foo']),
                     OrderedSet([target_map['bar'], target_map['baz']]))
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['bar']),
                     OrderedSet([target_map['baz']]))
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['baz']),
                     OrderedSet())

  def test_reduced_dependencies_diamond(self):
    """Diamond graphs reduce each node to its direct exported deps."""
    #   bar <-- foo --> baz
    #    |               |
    #    `----> bak <----'
    dep_map = OrderedDict(foo=['bar', 'baz'], bar=['bak'], baz=['bak'], bak=[])
    target_map = self.create_dependencies(dep_map)
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['foo']),
                     OrderedSet([target_map['bar'], target_map['baz']]))
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['bar']),
                     OrderedSet([target_map['bak']]))
    self.assertEqual(self.dependency_calculator.reduced_dependencies(target_map['baz']),
                     OrderedSet([target_map['bak']]))
    self.assert_requirements(target_map['foo'], {'bar==0.0.0', 'baz==0.0.0'})
    self.assert_requirements(target_map['bar'], {'bak==0.0.0'})
    self.assert_requirements(target_map['baz'], {'bak==0.0.0'})

  def test_binary_target_injected_into_reduced_dependencies(self):
    """Binaries referenced via with_binaries are folded into the export."""
    foo_bin_dep = self.create_python_library(relpath='foo/dep', name='dep')
    foo_bin = self.create_python_binary(
      relpath='foo/bin',
      name='bin',
      entry_point='foo.bin:foo',
      dependencies=[
        'foo/dep',
      ]
    )
    foo = self.create_python_library(
      relpath='foo',
      name='foo',
      provides=dedent("""
      setup_py(
        name='foo',
        version='0.0.0'
      ).with_binaries(
        foo_binary='foo/bin'
      )
      """)
    )
    self.assertEqual(self.dependency_calculator.reduced_dependencies(foo),
                     OrderedSet([foo_bin, foo_bin_dep]))
    entry_points = dict(SetupPy.iter_entry_points(foo))
    self.assertEqual(entry_points, {'foo_binary': 'foo.bin:foo'})
    with self.run_execute(foo, recursive=False) as created:
      self.assertEqual([foo], created.keys())
    with self.run_execute(foo, recursive=True) as created:
      # The binary and its dep are unexported, so recursion adds nothing.
      self.assertEqual([foo], created.keys())

  def test_binary_target_injected_into_reduced_dependencies_with_provider(self):
    """An exported binary dep stays a requirement instead of being folded in."""
    bar_bin_dep = self.create_python_library(
      relpath='bar/dep',
      name='dep',
      provides=dedent("""
      setup_py(
        name='bar_bin_dep',
        version='0.0.0'
      )
      """)
    )
    bar_bin = self.create_python_binary(
      relpath='bar/bin',
      name='bin',
      entry_point='bar.bin:bar',
      dependencies=[
        'bar/dep'
      ],
    )
    bar = self.create_python_library(
      relpath='bar',
      name='bar',
      provides=dedent("""
      setup_py(
        name='bar',
        version='0.0.0'
      ).with_binaries(
        bar_binary='bar/bin'
      )
      """)
    )
    self.assertEqual(self.dependency_calculator.reduced_dependencies(bar),
                     OrderedSet([bar_bin, bar_bin_dep]))
    self.assert_requirements(bar, {'bar_bin_dep==0.0.0'})
    entry_points = dict(SetupPy.iter_entry_points(bar))
    self.assertEqual(entry_points, {'bar_binary': 'bar.bin:bar'})
    with self.run_execute(bar, recursive=False) as created:
      self.assertEqual([bar], created.keys())
    with self.run_execute(bar, recursive=True) as created:
      self.assertEqual({bar_bin_dep, bar}, set(created.keys()))

  def test_pants_contrib_case(self):
    """Sources owned by an exported dep are stripped; requirements survive."""
    def create_requirement_lib(name):
      # Helper: a python_requirement_library pinning `name`==1.1.1.
      return self.create_python_requirement_library(
        relpath=name,
        name=name,
        requirements=[
          '{}==1.1.1'.format(name)
        ]
      )

    req1 = create_requirement_lib('req1')
    create_requirement_lib('req2')
    req3 = create_requirement_lib('req3')

    self.create_python_library(
      relpath='src/python/pants/base',
      name='base',
      dependencies=[
        'req1',
        'req2',
      ]
    )
    self.create_python_binary(
      relpath='src/python/pants/bin',
      name='bin',
      entry_point='pants.bin.pants_loader:main',
      dependencies=[
        # Should be stripped in reduced_dependencies since pants_packaged provides these sources.
        'src/python/pants/base',
      ]
    )
    pants_packaged = self.create_python_library(
      relpath='src/python/pants',
      name='pants_packaged',
      provides=dedent("""
      setup_py(
        name='pants_packaged',
        version='0.0.0'
      ).with_binaries(
        # Should be stripped in reduced_dependencies since pants_packaged provides this.
        pants_bin='src/python/pants/bin'
      )
      """)
    )
    contrib_lib = self.create_python_library(
      relpath='contrib/lib/src/python/pants/contrib/lib',
      name='lib',
      dependencies=[
        'req3',
        # Should be stripped in reduced_dependencies since pants_packaged provides these sources.
        'src/python/pants/base',
      ]
    )
    contrib_plugin = self.create_python_library(
      relpath='contrib/lib/src/python/pants/contrib',
      name='plugin',
      provides=dedent("""
      setup_py(
        name='contrib',
        version='0.0.0'
      )
      """),
      dependencies=[
        'contrib/lib/src/python/pants/contrib/lib',
        'src/python/pants:pants_packaged',
        'req1'
      ]
    )
    reduced_dependencies = self.dependency_calculator.reduced_dependencies(contrib_plugin)
    self.assertEqual(reduced_dependencies, OrderedSet([contrib_lib, req3, pants_packaged, req1]))

  def test_no_exported(self):
    """Running against a target with no setup_py provides raises TaskError."""
    foo = self.create_python_library(relpath='foo', name='foo')
    with self.assertRaises(TaskError):
      with self.run_execute(foo):
        self.fail('Should not have gotten past run_execute.')

  def test_no_owner(self):
    """A dep with no exported owner anywhere raises NoOwnerError."""
    self.create_python_library(relpath='foo', name='foo')
    exported = self.create_python_library(
      relpath='bar',
      name='bar',
      dependencies=[
        'foo'
      ],
      provides=dedent("""
      setup_py(
        name='bar',
        version='0.0.0'
      )
      """),
    )
    # `foo` is not in `bar`'s address space and has no owner in its own address space.
    with self.assertRaises(self.dependency_calculator.NoOwnerError):
      self.dependency_calculator.reduced_dependencies(exported)

  def test_ambiguous_owner(self):
    """A dep owned by two exported targets raises AmbiguousOwnerError."""
    self.create_python_library(relpath='foo/bar', name='bar')
    self.add_to_build_file('foo', dedent("""
    python_library(
      name='foo1',
      dependencies=[
        'foo/bar'
      ],
      provides=setup_py(
        name='foo1',
        version='0.0.0'
      )
    )
    python_library(
      name='foo2',
      dependencies=[
        'foo/bar'
      ],
      provides=setup_py(
        name='foo2',
        version='0.0.0'
      )
    )
    """))
    with self.assertRaises(self.dependency_calculator.AmbiguousOwnerError):
      self.dependency_calculator.reduced_dependencies(self.target('foo:foo1'))
    with self.assertRaises(self.dependency_calculator.AmbiguousOwnerError):
      self.dependency_calculator.reduced_dependencies(self.target('foo:foo2'))

  @contextmanager
  def extracted_sdist(self, sdist, expected_prefix, collect_suffixes=None):
    """Extract `sdist` and yield (collected file paths, prefix-join helper)."""
    collect_suffixes = collect_suffixes or ('.py',)

    def collect(path):
      # Keep only files whose name ends with one of the wanted suffixes.
      for suffix in collect_suffixes:
        if path.endswith(suffix):
          return True
      return False

    with temporary_dir() as chroot:
      TGZ.extract(sdist, chroot)
      all_py_files = set()
      for root, _, files in os.walk(chroot):
        all_py_files.update(os.path.join(root, f) for f in files if collect(f))

      def as_full_path(p):
        # Resolve `p` relative to the expected sdist prefix dir.
        return os.path.join(chroot, expected_prefix, p)

      yield all_py_files, as_full_path

  def test_resources(self):
    """Resource files wired via a resources() dep land in the sdist."""
    self.create_file(relpath='src/python/monster/j-function.res', contents='196884')
    self.create_file(relpath='src/python/monster/__init__.py', contents='')
    self.create_file(relpath='src/python/monster/research_programme.py',
                     contents='# Look for more off-by-one "errors"!')
    # NB: We have to resort to BUILD files on disk here due to the target ownership algorithm in
    # SetupPy needing to walk ancestors in this case which currently requires BUILD files on disk.
    self.add_to_build_file('src/python/monster', dedent("""
    python_library(
      name='conway',
      sources=['__init__.py', 'research_programme.py'],
      dependencies=[
        ':j-function',
      ],
      provides=setup_py(
        name='monstrous.moonshine',
        version='0.0.0',
      )
    )
    resources(
      name='j-function',
      sources=['j-function.res']
    )
    """))
    conway = self.target('src/python/monster:conway')
    with self.run_execute(conway) as created:
      self.assertEqual([conway], created.keys())
    sdist = created[conway]
    with self.extracted_sdist(sdist=sdist,
                              expected_prefix='monstrous.moonshine-0.0.0',
                              collect_suffixes=('.py', '.res')) as (py_files, path):
      self.assertEqual({path('setup.py'),
                        path('src/monster/__init__.py'),
                        path('src/monster/research_programme.py'),
                        path('src/monster/j-function.res')},
                       py_files)
      with open(path('src/monster/j-function.res')) as fp:
        self.assertEqual('196884', fp.read())

  def test_symlinks_issues_2815(self):
    """Symlinked resources are materialized (not left as links) in the sdist."""
    res = self.create_file(relpath='src/python/monster/j-function.res', contents='196884')
    self.create_link(res, 'src/python/monster/group.res')
    self.create_file(relpath='src/python/monster/__init__.py', contents='')
    self.create_file(relpath='src/python/monster/research_programme.py',
                     contents='# Look for more off-by-one "errors"!')
    # NB: We have to resort to BUILD files on disk here due to the target ownership algorithm in
    # SetupPy needing to walk ancestors in this case which currently requires BUILD files on disk.
    self.add_to_build_file('src/python/monster', dedent("""
    python_library(
      name='conway',
      sources=['__init__.py', 'research_programme.py'],
      dependencies=[
        ':group_res',
      ],
      provides=setup_py(
        name='monstrous.moonshine',
        version='0.0.0',
      )
    )
    resources(
      name='group_res',
      sources=['group.res']
    )
    """))
    conway = self.target('src/python/monster:conway')
    with self.run_execute(conway) as created:
      self.assertEqual([conway], created.keys())
    # Now that we've created the sdist tarball, delete the symlink destination to ensure the
    # unpacked sdist can't get away with unpacking a symlink that happens to have local
    # resolution.
    os.unlink(res)
    sdist = created[conway]
    with self.extracted_sdist(sdist=sdist,
                              expected_prefix='monstrous.moonshine-0.0.0',
                              collect_suffixes=('.py', '.res')) as (py_files, path):
      res_link_path = path('src/monster/group.res')
      self.assertFalse(os.path.islink(res_link_path))
      self.assertEqual({path('setup.py'),
                        path('src/monster/__init__.py'),
                        path('src/monster/research_programme.py'),
                        res_link_path},
                       py_files)
      with open(res_link_path) as fp:
        self.assertEqual('196884', fp.read())

  def test_prep_command_case(self):
    """prep_command deps do not break (or leak into) sdist creation."""
    PrepCommand.add_allowed_goal('compile')
    PrepCommand.add_allowed_goal('test')
    self.add_to_build_file('build-support/thrift',
                           dedent("""
                           prep_command(
                             name='prepare_binary_compile',
                             goals=['compile'],
                             prep_executable='/bin/true',
                           )
                           prep_command(
                             name='prepare_binary_test',
                             goals=['test'],
                             prep_executable='/bin/true',
                           )
                           target(
                             name='prepare_binary',
                             dependencies=[
                               ':prepare_binary_compile',
                               ':prepare_binary_test',
                             ],
                           )
                           """))
    prepare_binary_compile = self.make_target(spec='build-support/thrift:prepare_binary_compile',
                                              target_type=PrepCommand,
                                              prep_executable='/bin/true',
                                              goals=['compile'])
    prepare_binary_test = self.make_target(spec='build-support/thrift:prepare_binary_test',
                                           target_type=PrepCommand,
                                           prep_executable='/bin/true',
                                           goals=['test'])
    self.make_target(spec='build-support/thrift:prepare_binary',
                     dependencies=[
                       prepare_binary_compile,
                       prepare_binary_test
                     ])
    pants = self.create_python_library(
      relpath='src/python/pants',
      name='pants',
      provides="setup_py(name='pants', version='0.0.0')",
      dependencies=[
        'build-support/thrift:prepare_binary'
      ]
    )
    with self.run_execute(pants) as created:
      self.assertEqual([pants], created.keys())
def test_detect_namespace_packages():
  """declares_namespace_package spots pkg_resources namespace declarations."""
  def declares_ns(source):
    # Write `source` to a temp file and ask SetupPy to classify it.
    with temporary_file() as fp:
      fp.write(source)
      fp.flush()
      return SetupPy.declares_namespace_package(fp.name)

  negatives = [
    '',
    'add(1, 2); foo(__name__); self.shoot(__name__)',
    'declare_namespace(bonk)',
  ]
  positives = [
    '__import__("pkg_resources").declare_namespace(__name__)',
    'import pkg_resources; pkg_resources.declare_namespace(__name__)',
    'from pkg_resources import declare_namespace; declare_namespace(__name__)',
  ]
  for stmt in negatives:
    assert not declares_ns(stmt)
  for stmt in positives:
    assert declares_ns(stmt)
@contextmanager
def yield_chroot(packages, namespace_packages, resources):
  """Yield a Chroot mock rooted at a temp dir populated with the given layout.

  Ordinary packages get an empty __init__.py, namespace packages get a
  pkg_resources namespace declaration, and each resource file is written
  with dummy content.
  """
  with temporary_dir() as root:
    def write(package, name, content):
      # Materialize `name` under the package's directory below SOURCE_ROOT.
      pkg_dir = os.path.join(root, SetupPy.SOURCE_ROOT,
                             package.replace('.', os.path.sep))
      file_path = os.path.join(pkg_dir, name)
      safe_mkdir(os.path.dirname(file_path))
      with open(file_path, 'w') as fp:
        fp.write(content)

    for pkg in packages:
      write(pkg, '__init__.py', '')
    for pkg in namespace_packages:
      write(pkg, '__init__.py', '__import__("pkg_resources").declare_namespace(__name__)')
    for pkg, entries in resources.items():
      for entry in entries:
        write(pkg, entry, 'asdfasdf')

    fake_chroot = Mock(spec=Chroot)
    fake_chroot.path.return_value = root
    yield fake_chroot
def test_find_packages():
  """SetupPy.find_packages discovers packages, namespace packages, resources."""
  def assert_single_chroot(packages, namespace_packages, resources):
    # Build a chroot with the given layout and check find_packages echoes it.
    with yield_chroot(packages, namespace_packages, resources) as chroot:
      p, n_p, r = SetupPy.find_packages(chroot)
      assert p == set(packages + namespace_packages)
      assert n_p == set(namespace_packages)
      assert r == dict((k, set(v)) for (k, v) in resources.items())

  # assert both packages and namespace packages work
  assert_single_chroot(['foo'], [], {})
  assert_single_chroot(['foo'], ['foo'], {})

  # assert resources work
  assert_single_chroot(['foo'], [], {'foo': ['blork.dat']})

  resources = {
    'foo': [
      'f0',
      os.path.join('bar', 'baz', 'f1'),
      os.path.join('bar', 'baz', 'f2'),
    ]
  }
  assert_single_chroot(['foo'], [], resources)

  # assert that nearest-submodule is honored
  with yield_chroot(['foo', 'foo.bar'], [], resources) as chroot:
    _, _, r = SetupPy.find_packages(chroot)
    assert r == {
      'foo': {'f0'},
      'foo.bar': {os.path.join('baz', 'f1'), os.path.join('baz', 'f2')}
    }

  # assert that nearest submodule splits on module prefixes
  with yield_chroot(
      ['foo', 'foo.bar'],
      [],
      {'foo.bar1': ['f0']}) as chroot:
    _, _, r = SetupPy.find_packages(chroot)
    assert r == {'foo': {'bar1/f0'}}
def test_nearest_subpackage():
  """nearest_subpackage picks the longest matching package prefix."""
  nearest = SetupPy.nearest_subpackage

  # Degenerate cases: no candidates or no common prefix leave the query as-is.
  assert nearest('foo', []) == 'foo'
  assert nearest('foo', ['foo']) == 'foo'
  assert nearest('foo', ['bar']) == 'foo'

  # Common prefix: the deepest matching package wins, split on '.' boundaries.
  assert nearest('foo.bar', ['foo']) == 'foo'
  assert nearest('foo.bar', ['foo', 'foo.bar']) == 'foo.bar'
  assert nearest('foo.bar.topo', ['foo', 'foo.bar']) == 'foo.bar'
  assert nearest('foo.barization', ['foo', 'foo.bar']) == 'foo'
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test tools package alone which don't fit into other tests."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import collections
import decimal
import inspect
import os.path
import subprocess
import tempfile
import warnings
from pywikibot import tools
from tests import join_xml_data_path
from tests.aspects import (
unittest, require_modules, DeprecationTestCase, TestCase, MetaTestCaseClass
)
from tests.utils import expected_failure_if, add_metaclass
class ContextManagerWrapperTestCase(TestCase):

    """Test that ContextManagerWrapper is working correctly."""

    class DummyClass(object):

        """A dummy class which has some values and a close method."""

        # Class-level attribute; test_wrapper mutates it through the wrapper.
        class_var = 42

        def __init__(self):
            """Create instance with dummy values."""
            self.instance_var = 1337
            self.closed = False

        def close(self):
            """Just store that it has been closed."""
            self.closed = True

    # These tests need no network access.
    net = False

    def test_wrapper(self):
        """Create a test instance and verify the wrapper redirects."""
        obj = self.DummyClass()
        wrapped = tools.ContextManagerWrapper(obj)
        # Attribute access on the wrapper is forwarded to the wrapped object.
        self.assertIs(wrapped.class_var, obj.class_var)
        self.assertIs(wrapped.instance_var, obj.instance_var)
        self.assertIs(wrapped._wrapped, obj)
        self.assertFalse(obj.closed)
        with wrapped as unwrapped:
            self.assertFalse(obj.closed)
            # Entering the context yields the wrapped object itself.
            self.assertIs(unwrapped, obj)
            unwrapped.class_var = 47
        # Leaving the context closes the wrapped object.
        self.assertTrue(obj.closed)
        self.assertEqual(wrapped.class_var, 47)

    def test_exec_wrapper(self):
        """Check that the wrapper permits exceptions."""
        wrapper = tools.ContextManagerWrapper(self.DummyClass())
        self.assertFalse(wrapper.closed)
        with self.assertRaises(ZeroDivisionError):
            with wrapper:
                1 / 0
        # The object is closed even when the with-body raised.
        self.assertTrue(wrapper.closed)
class OpenArchiveTestCase(TestCase):

    """
    Unit test class for tools.

    The tests for open_archive requires that article-pyrus.xml* contain all
    the same content after extraction. The content itself is not important.

    The file article-pyrus.xml_invalid.7z is not a valid 7z file and
    open_archive will fail extracting it using 7za.
    """

    net = False

    @classmethod
    def setUpClass(cls):
        """Define base_file and original_content."""
        super(OpenArchiveTestCase, cls).setUpClass()
        cls.base_file = join_xml_data_path('article-pyrus.xml')
        with open(cls.base_file, 'rb') as f:
            cls.original_content = f.read()

    def _get_content(self, *args, **kwargs):
        """Use open_archive and return content using a with-statement."""
        with tools.open_archive(*args, **kwargs) as f:
            return f.read()

    def test_open_archive_normal(self):
        """Test open_archive with no compression in the standard library."""
        self.assertEqual(self._get_content(self.base_file), self.original_content)

    def test_open_archive_bz2(self):
        """Test open_archive with bz2 compressor in the standard library."""
        self.assertEqual(self._get_content(self.base_file + '.bz2'), self.original_content)
        # use_extension=False still decompresses: the magic bytes decide.
        self.assertEqual(self._get_content(self.base_file + '.bz2', use_extension=False),
                         self.original_content)

    @require_modules('bz2file')
    def test_open_archive_with_bz2file(self):
        """Test open_archive when the bz2file library is used instead of bz2."""
        old_bz2 = tools.bz2
        try:
            # Swap the module tools resolves as `bz2` for the bz2file package.
            tools.bz2 = __import__('bz2file')
            self.assertEqual(self._get_content(self.base_file + '.bz2'),
                             self.original_content)
            self.assertEqual(self._get_content(self.base_file + '.bz2',
                                               use_extension=False),
                             self.original_content)
        finally:
            tools.bz2 = old_bz2

    def test_open_archive_without_bz2(self):
        """Test open_archive when bz2 and bz2file are not available."""
        old_bz2 = tools.bz2
        try:
            # tools stores the ImportError instance when the import failed;
            # simulate that state.
            tools.bz2 = ImportError()
            self.assertRaises(ImportError, self._get_content, self.base_file + '.bz2')
        finally:
            tools.bz2 = old_bz2

    def test_open_archive_gz(self):
        """Test open_archive with gz compressor in the standard library."""
        self.assertEqual(self._get_content(self.base_file + '.gz'), self.original_content)

    def test_open_archive_7z(self):
        """Test open_archive with 7za if installed."""
        try:
            subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
        except OSError:
            raise unittest.SkipTest('7za not installed')
        self.assertEqual(self._get_content(self.base_file + '.7z'), self.original_content)
        self.assertRaises(OSError, self._get_content, self.base_file + '_invalid.7z',
                          use_extension=True)
class OpenCompressedTestCase(OpenArchiveTestCase, DeprecationTestCase):

    """Test opening files with the deprecated open_compressed.

    Reuses all OpenArchiveTestCase tests by overriding _get_content so each
    read goes through open_compressed and asserts one deprecation warning.
    """

    net = False

    def _get_content(self, *args, **kwargs):
        """Use open_compressed and return content using a with-statement."""
        # open_archive default is True, so if it's False it's not the default
        # so use the non-default of open_compressed (which is True)
        if kwargs.get('use_extension') is False:
            kwargs['use_extension'] = True
        with tools.open_compressed(*args, **kwargs) as f:
            content = f.read()
        # NOTE(review): assumes DeprecationTestCase provides INSTEAD (the
        # expected replacement name in the warning) -- confirm in aspects.
        self.assertOneDeprecation(self.INSTEAD)
        return content
class OpenArchiveWriteTestCase(TestCase):

    """Test writing with open_archive."""

    net = False

    @classmethod
    def setUpClass(cls):
        """Define base_file and original_content."""
        super(OpenArchiveWriteTestCase, cls).setUpClass()
        cls.base_file = join_xml_data_path('article-pyrus.xml')
        with open(cls.base_file, 'rb') as f:
            cls.original_content = f.read()

    def _write_content(self, suffix):
        """Round-trip original_content through open_archive and return raw bytes.

        The temp file gets the given suffix so open_archive picks the matching
        compressor; the file descriptor is closed and the file removed in all
        cases.
        """
        # Fixed: create the temp file *before* entering the try block.  In the
        # original code mkstemp was inside the try, so a failure there made
        # the finally clause raise a NameError on the unbound `fh`/`fn`,
        # masking the real error.  If mkstemp fails, there is nothing to
        # clean up.
        fh, fn = tempfile.mkstemp(suffix)
        try:
            with tools.open_archive(fn, 'wb') as f:
                f.write(self.original_content)
            with tools.open_archive(fn, 'rb') as f:
                self.assertEqual(f.read(), self.original_content)
            with open(fn, 'rb') as f:
                return f.read()
        finally:
            os.close(fh)
            os.remove(fn)

    def test_invalid_modes(self):
        """Test various invalid mode configurations."""
        self.assertRaises(ValueError, tools.open_archive,
                          '/dev/null', 'ra')  # two modes besides
        self.assertRaises(ValueError, tools.open_archive,
                          '/dev/null', 'rt')  # text mode
        self.assertRaises(ValueError, tools.open_archive,
                          '/dev/null', 'br')  # binary at front
        self.assertRaises(ValueError, tools.open_archive,
                          '/dev/null', 'wb', False)  # writing without extension

    def test_binary_mode(self):
        """Test that it uses binary mode."""
        with tools.open_archive(self.base_file, 'r') as f:
            self.assertEqual(f.mode, 'rb')
            self.assertIsInstance(f.read(), bytes)

    def test_write_archive_bz2(self):
        """Test writing a bz2 archive."""
        content = self._write_content('.bz2')
        with open(self.base_file + '.bz2', 'rb') as f:
            self.assertEqual(content, f.read())

    def test_write_archive_gz(self):
        """Test writing a gz archive."""
        content = self._write_content('.gz')
        # gzip magic bytes plus the deflate method byte.
        self.assertEqual(content[:3], b'\x1F\x8B\x08')

    def test_write_archive_7z(self):
        """Test writing an archive as a 7z archive."""
        self.assertRaises(NotImplementedError, tools.open_archive,
                          '/dev/null.7z', mode='wb')
class MergeUniqueDicts(TestCase):

    """Test merge_unique_dicts."""

    net = False
    # Fixture dicts with disjoint keys of differing types ('42' is a str,
    # 47/74 are ints) plus their expected merged result.
    dct1 = {'foo': 'bar', '42': 'answer'}
    dct2 = {47: 'Star', 74: 'Trek'}
    dct_both = dct1.copy()
    dct_both.update(dct2)

    def test_single(self):
        """Test that it returns the dict itself when there is only one."""
        self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
        self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)

    def test_multiple(self):
        """Test that it actually merges dicts."""
        self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
                         self.dct_both)
        self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
                         self.dct_both)

    def test_different_type(self):
        """Test that the keys can be different types."""
        # '1' (str) and 1 (int) are distinct keys, so no conflict arises.
        self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
                         {'1': 'str', 1: 'int'})

    def test_conflict(self):
        """Test that it detects conflicts."""
        # A duplicate key must raise ValueError naming the offending key.
        self.assertRaisesRegex(
            ValueError, '42', tools.merge_unique_dicts, self.dct1, **{'42': 'bad'})
        self.assertRaisesRegex(
            ValueError, '42', tools.merge_unique_dicts, self.dct1, self.dct1)
        self.assertRaisesRegex(
            ValueError, '42', tools.merge_unique_dicts, self.dct1, **self.dct1)
def passthrough(x):
    """Return x unchanged (identity; the default key in the dedup tests)."""
    return x
class SkipList(set):

    """Set subclass that reports certain items as already present."""

    skip_list = [1, 3]

    def __contains__(self, item):
        """Pretend skip-listed items are contained without storing them."""
        return (item in self.skip_list
                or super(SkipList, self).__contains__(item))
class ProcessAgainList(set):

    """Set subclass that silently refuses to remember certain items."""

    process_again_list = [1, 3]

    def add(self, item):
        """Drop items on the process-again list; add everything else."""
        if item not in self.process_again_list:
            super(ProcessAgainList, self).add(item)
class ContainsStopList(set):

    """Set subclass whose membership test aborts on certain items."""

    stop_list = []

    def __contains__(self, item):
        """Raise StopIteration for stop-listed items, else defer to set."""
        if item in self.stop_list:
            raise StopIteration
        return super(ContainsStopList, self).__contains__(item)
class AddStopList(set):

    """Set subclass whose add() aborts on certain items."""

    stop_list = []

    def add(self, item):
        """Raise StopIteration for stop-listed items, else add normally."""
        if item in self.stop_list:
            raise StopIteration
        super(AddStopList, self).add(item)
class TestFilterUnique(TestCase):

    """Test filter_unique."""

    net = False

    # Fixture sequences: the same values as ints, strs and Decimals.
    # First-occurrence order after deduplication is 1, 3, 2, 4.
    ints = [1, 3, 2, 1, 2, 1, 2, 4, 2]
    strs = [str(i) for i in ints]
    decs = [decimal.Decimal(i) for i in ints]

    def _test_dedup_int(self, deduped, deduper, key=None):
        """Test filter_unique results for int."""
        if not key:
            key = passthrough
        self.assertEqual(len(deduped), 0)
        self.assertEqual(next(deduper), 1)
        self.assertEqual(next(deduper), 3)
        # Container contents are only predictable for keys that map equal
        # items to the same entry (id() does not, so it is excluded here).
        if key in (hash, passthrough):
            if isinstance(deduped, tools.OrderedDict):
                self.assertEqual(list(deduped.keys()), [1, 3])
            elif isinstance(deduped, collections.Mapping):
                # NOTE(review): collections.Mapping was removed in
                # Python 3.10 (collections.abc.Mapping is the modern
                # spelling) - presumably kept for PY2 compatibility.
                self.assertCountEqual(list(deduped.keys()), [1, 3])
            else:
                self.assertEqual(deduped, set([1, 3]))
        self.assertEqual(next(deduper), 2)
        self.assertEqual(next(deduper), 4)
        if key in (hash, passthrough):
            if isinstance(deduped, tools.OrderedDict):
                self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
            elif isinstance(deduped, collections.Mapping):
                self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
            else:
                self.assertEqual(deduped, set([1, 2, 3, 4]))
        # All remaining input values are duplicates, so the generator ends.
        self.assertRaises(StopIteration, next, deduper)

    def _test_dedup_str(self, deduped, deduper, key=None):
        """Test filter_unique results for str."""
        if not key:
            key = passthrough
        self.assertEqual(len(deduped), 0)
        self.assertEqual(next(deduper), '1')
        self.assertEqual(next(deduper), '3')
        if key in (hash, passthrough):
            if isinstance(deduped, collections.Mapping):
                self.assertEqual(deduped.keys(), [key('1'), key('3')])
            else:
                self.assertEqual(deduped, set([key('1'), key('3')]))
        self.assertEqual(next(deduper), '2')
        self.assertEqual(next(deduper), '4')
        if key in (hash, passthrough):
            if isinstance(deduped, collections.Mapping):
                self.assertEqual(deduped.keys(), [key(i) for i in self.strs])
            else:
                self.assertEqual(deduped, set(key(i) for i in self.strs))
        self.assertRaises(StopIteration, next, deduper)

    def test_set(self):
        """Test filter_unique with a set."""
        deduped = set()
        deduper = tools.filter_unique(self.ints, container=deduped)
        self._test_dedup_int(deduped, deduper)

    def test_dict(self):
        """Test filter_unique with a dict."""
        deduped = dict()
        deduper = tools.filter_unique(self.ints, container=deduped)
        self._test_dedup_int(deduped, deduper)

    def test_OrderedDict(self):
        """Test filter_unique with a OrderedDict."""
        deduped = tools.OrderedDict()
        deduper = tools.filter_unique(self.ints, container=deduped)
        self._test_dedup_int(deduped, deduper)

    def test_int_hash(self):
        """Test filter_unique with ints using hash as key."""
        deduped = set()
        deduper = tools.filter_unique(self.ints, container=deduped, key=hash)
        self._test_dedup_int(deduped, deduper, hash)

    def test_int_id(self):
        """Test filter_unique with ints using id as key."""
        # Small ints are interned in CPython, so id() works as a key here.
        deduped = set()
        deduper = tools.filter_unique(self.ints, container=deduped, key=id)
        self._test_dedup_int(deduped, deduper, id)

    def test_obj(self):
        """Test filter_unique with objects."""
        deduped = set()
        deduper = tools.filter_unique(self.decs, container=deduped)
        self._test_dedup_int(deduped, deduper)

    def test_obj_hash(self):
        """Test filter_unique with objects using hash as key."""
        deduped = set()
        deduper = tools.filter_unique(self.decs, container=deduped, key=hash)
        self._test_dedup_int(deduped, deduper, hash)

    @unittest.expectedFailure
    def test_obj_id(self):
        """Test filter_unique with objects using id as key, which fails."""
        # Two objects which may be equal do not have the same id.
        deduped = set()
        deduper = tools.filter_unique(self.decs, container=deduped, key=id)
        self._test_dedup_int(deduped, deduper, id)

    def test_str(self):
        """Test filter_unique with str."""
        deduped = set()
        deduper = tools.filter_unique(self.strs, container=deduped)
        self._test_dedup_str(deduped, deduper)

    def test_str_hash(self):
        """Test filter_unique with str using hash as key."""
        deduped = set()
        deduper = tools.filter_unique(self.strs, container=deduped, key=hash)
        self._test_dedup_str(deduped, deduper, hash)

    @expected_failure_if(not tools.PY2)
    def test_str_id(self):
        """Test str using id as key fails on Python 3."""
        # str in Python 3 behave like objects.
        deduped = set()
        deduper = tools.filter_unique(self.strs, container=deduped, key=id)
        self._test_dedup_str(deduped, deduper, id)

    def test_for_resumable(self):
        """Test filter_unique is resumable after a for loop."""
        gen2 = tools.filter_unique(self.ints)
        deduped = []
        for item in gen2:
            deduped.append(item)
            if len(deduped) == 3:
                break
        self.assertEqual(deduped, [1, 3, 2])
        # Breaking out of the for loop must not exhaust the generator.
        last = next(gen2)
        self.assertEqual(last, 4)
        self.assertRaises(StopIteration, next, gen2)

    def test_skip(self):
        """Test filter_unique with a container that skips items."""
        deduped = SkipList()
        deduper = tools.filter_unique(self.ints, container=deduped)
        deduped_out = list(deduper)
        self.assertCountEqual(deduped, deduped_out)
        self.assertEqual(deduped, set([2, 4]))

    def test_process_again(self):
        """Test filter_unique with an ignoring container."""
        # Items never added to the container are yielded every time seen.
        deduped = ProcessAgainList()
        deduper = tools.filter_unique(self.ints, container=deduped)
        deduped_out = list(deduper)
        self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4])
        self.assertEqual(deduped, set([2, 4]))

    def test_stop(self):
        """Test filter_unique with an ignoring container."""
        # StopIteration raised from __contains__ ends the generator early.
        deduped = ContainsStopList()
        deduped.stop_list = [2]
        deduper = tools.filter_unique(self.ints, container=deduped)
        deduped_out = list(deduper)
        self.assertCountEqual(deduped, deduped_out)
        self.assertEqual(deduped, set([1, 3]))
        # And it should not resume
        self.assertRaises(StopIteration, next, deduper)
        # StopIteration raised from add() also ends the generator early.
        deduped = AddStopList()
        deduped.stop_list = [4]
        deduper = tools.filter_unique(self.ints, container=deduped)
        deduped_out = list(deduper)
        self.assertCountEqual(deduped, deduped_out)
        self.assertEqual(deduped, set([1, 2, 3]))
        # And it should not resume
        self.assertRaises(StopIteration, next, deduper)
class MetaTestArgSpec(MetaTestCaseClass):

    """Metaclass to create dynamically the tests. Set the net flag to false."""

    def __new__(cls, name, bases, dct):
        """Create a new test case class."""
        def create_test(method):
            # Factory closing over the tested method, so each generated
            # test keeps a reference to its own fixture method.
            def test_method(self):
                """Test getargspec."""
                # all expect at least self and param
                expected = method(1, 2)
                returned = self.getargspec(method)
                self.assertEqual(returned, expected)
                self.assertIsInstance(returned, self.expected_class)
                self.assertNoDeprecation()
            return test_method

        # For every '_method_test_<suffix>' member, generate a matching
        # 'test_method_<suffix>' test method on the class being created.
        # list() snapshots the items because dct is mutated in the loop.
        for attr, tested_method in list(dct.items()):
            if attr.startswith('_method_test_'):
                suffix = attr[len('_method_test_'):]
                cls.add_method(dct, 'test_method_' + suffix,
                               create_test(tested_method),
                               doc_suffix='on {0}'.format(suffix))
        dct['net'] = False
        return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
@add_metaclass
class TestArgSpec(DeprecationTestCase):

    """Test getargspec and ArgSpec from tools."""

    __metaclass__ = MetaTestArgSpec

    # Class of the value getargspec is expected to return.
    expected_class = tools.ArgSpec

    # Each fixture method below returns the argspec tuple expected for its
    # own signature: (args, varargs, varkw, defaults).

    def _method_test_args(self, param):
        """Test method with two positional arguments."""
        return (['self', 'param'], None, None, None)

    def _method_test_kwargs(self, param=42):
        """Test method with one positional and one keyword argument."""
        return (['self', 'param'], None, None, (42,))

    def _method_test_varargs(self, param, *var):
        """Test method with two positional arguments and var args."""
        return (['self', 'param'], 'var', None, None)

    def _method_test_varkwargs(self, param, **var):
        """Test method with two positional arguments and var kwargs."""
        return (['self', 'param'], None, 'var', None)

    def _method_test_vars(self, param, *args, **kwargs):
        """Test method with two positional arguments and both var args."""
        return (['self', 'param'], 'args', 'kwargs', None)

    def getargspec(self, method):
        """Call tested getargspec function."""
        return tools.getargspec(method)
@unittest.skipIf(tools.PYTHON_VERSION >= (3, 6), 'removed in Python 3.6')
class TestPythonArgSpec(TestArgSpec):

    """Test the same tests using Python's implementation."""

    expected_class = inspect.ArgSpec

    def getargspec(self, method):
        """Call inspect's getargspec function."""
        with warnings.catch_warnings():
            # inspect.getargspec is deprecated from Python 3.5 on; silence
            # the warning so it does not trip the deprecation assertions.
            if tools.PYTHON_VERSION >= (3, 5):
                warnings.simplefilter('ignore', DeprecationWarning)
            return inspect.getargspec(method)
if __name__ == '__main__':
    # Swallow SystemExit so running the module directly terminates cleanly.
    try:
        unittest.main()
    except SystemExit:
        pass
| |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import gc
import weakref
import unittest
import threading
import six
import IECore
import Gaffer
import GafferTest
class GraphComponentTest( GafferTest.TestCase ) :
def testName( self ) :
    """Exercise get/setName, fullName and the nameChangedSignal."""
    c = Gaffer.GraphComponent()
    self.assertEqual( c.getName(), "GraphComponent" )
    self.assertEqual( c.fullName(), "GraphComponent" )

    def f( c ) :
        GraphComponentTest.name = c.getName()

    con = c.nameChangedSignal().connect( f, scoped = True )
    GraphComponentTest.name = "xxx"
    c.setName( "newName" )
    self.assertEqual( GraphComponentTest.name, "newName" )
    # slot shouldn't be called this time, as the name
    # doesn't change (it's the same value)
    c.setName( "newName" )
    self.assertEqual( self.name, "newName" )
    self.assertEqual( c.getName(), "newName" )

    child1 = Gaffer.GraphComponent()
    child2 = Gaffer.GraphComponent()
    self.assertEqual( child1.getName(), "GraphComponent" )
    self.assertEqual( child2.getName(), "GraphComponent" )
    self.assertEqual( child1.fullName(), "GraphComponent" )
    self.assertEqual( child2.fullName(), "GraphComponent" )

    c.addChild( child1 )
    self.assertEqual( child1.getName(), "GraphComponent" )
    self.assertEqual( child1.fullName(), "newName.GraphComponent" )

    # A second child with a clashing name is renamed uniquely, and the
    # rename is reported through nameChangedSignal.
    con = child2.nameChangedSignal().connect( f, scoped = True )
    GraphComponentTest.name = "xxx"
    c.addChild( child2 )
    self.assertEqual( child2.getName(), "GraphComponent1" )
    self.assertEqual( child2.fullName(), "newName.GraphComponent1" )
    self.assertEqual( child2.relativeName( None ), "newName.GraphComponent1" )
    self.assertEqual( child2.relativeName( c ), "GraphComponent1" )
    self.assertEqual( GraphComponentTest.name, "GraphComponent1" )
def testParenting( self ) :
    """Test addChild/removeChild and the parent/children queries."""
    parent1 = Gaffer.GraphComponent()
    self.assertIsNone( parent1.parent() )
    self.assertEqual( len( parent1.children() ), 0 )

    child1 = Gaffer.GraphComponent()
    self.assertIsNone( child1.parent() )
    self.assertEqual( len( child1.children() ), 0 )

    parent1.addChild( child1 )
    self.assertIsNone( parent1.parent() )
    self.assertTrue( parent1.getChild( "GraphComponent" ).isSame( child1 ) )
    self.assertTrue( parent1["GraphComponent"].isSame( child1 ) )
    self.assertTrue( child1.parent().isSame( parent1 ) )

    parent1.removeChild( child1 )
    self.assertEqual( parent1.children(), () )
    self.assertEqual( child1.parent(), None )

    # Removing a child that is no longer parented raises.
    self.assertRaises( RuntimeError, parent1.removeChild, child1 )
def testParentingSignals( self ) :
    """Test parentChanged, childAdded and childRemoved signals."""
    parent = Gaffer.GraphComponent()
    child = Gaffer.GraphComponent()

    # Record signal arguments in class attributes for later inspection.
    def f( c, oldParent ) :
        GraphComponentTest.newParent = c.parent()
        GraphComponentTest.oldParent = oldParent

    def ff( p, c ) :
        GraphComponentTest.parenting = ( p, c )

    c1 = child.parentChangedSignal().connect( f, scoped = True )
    c2 = parent.childAddedSignal().connect( ff, scoped = True )

    GraphComponentTest.newParent = None
    GraphComponentTest.oldParent = None
    GraphComponentTest.parenting = None
    parent.addChild( child )
    self.assertTrue( GraphComponentTest.newParent.isSame( parent ) )
    self.assertIsNone( GraphComponentTest.oldParent )
    self.assertTrue( GraphComponentTest.parenting[0].isSame( parent ) )
    self.assertTrue( GraphComponentTest.parenting[1].isSame( child ) )

    GraphComponentTest.newParent = "xxx"
    GraphComponentTest.oldParent = None
    GraphComponentTest.parenting = None
    # Rebinding c2 drops the childAdded connection and listens for
    # childRemoved instead.
    c2 = parent.childRemovedSignal().connect( ff, scoped = True )
    parent.removeChild( child )
    self.assertIsNone( GraphComponentTest.newParent )
    self.assertTrue( GraphComponentTest.oldParent.isSame( parent ) )
    self.assertTrue( GraphComponentTest.parenting[0].isSame( parent ) )
    self.assertTrue( GraphComponentTest.parenting[1].isSame( child ) )
def testReparentingEmitsOnlyOneParentChangedSignal( self ) :
    """Moving a child between parents must emit exactly one parentChanged."""
    p1 = Gaffer.GraphComponent()
    p2 = Gaffer.GraphComponent()
    c = Gaffer.GraphComponent()

    def f( child, previousParent ) :
        GraphComponentTest.newParent = child.parent()
        GraphComponentTest.oldParent = previousParent
        GraphComponentTest.child = child
        GraphComponentTest.numSignals += 1

    GraphComponentTest.newParent = None
    GraphComponentTest.oldParent = None
    GraphComponentTest.child = None
    GraphComponentTest.numSignals = 0

    p1["c"] = c
    c.parentChangedSignal().connect( f, scoped = False )
    # Reparenting under p2 implicitly removes c from p1; only one signal
    # should be emitted for the whole move.
    p2["c"] = c

    self.assertTrue( GraphComponentTest.newParent.isSame( p2 ) )
    self.assertTrue( GraphComponentTest.oldParent.isSame( p1 ) )
    self.assertTrue( GraphComponentTest.child.isSame( c ) )
    self.assertEqual( GraphComponentTest.numSignals, 1 )
def testParentChangedBecauseParentDied( self ) :
    """parentChangedSignal must fire when the parent is destroyed."""
    parent = Gaffer.GraphComponent()
    child = Gaffer.GraphComponent()
    parent["child"] = child

    def f( child, previousParent ) :
        GraphComponentTest.newParent = child.parent()
        GraphComponentTest.previousParent = previousParent

    child.parentChangedSignal().connect( f, scoped = False )

    # Sentinels so we can tell whether the slot actually ran.
    GraphComponentTest.newParent = "XXX"
    GraphComponentTest.previousParent = "XXX"
    w = weakref.ref( parent )
    del parent
    # Make sure the parent really is collected before asserting.
    while gc.collect() :
        pass
    IECore.RefCounted.collectGarbage()
    self.assertEqual( w(), None )
    self.assertIsNone( GraphComponentTest.newParent )
    self.assertIsNone( GraphComponentTest.previousParent )
def testReparentingDoesntSignal( self ) :
    """Adding a child to a parent who already owns that child should do nothing."""
    parent = Gaffer.GraphComponent()
    child = Gaffer.GraphComponent()

    parent.addChild( child )
    self.assertTrue( child.parent().isSame( parent ) )

    GraphComponentTest.numSignals = 0
    def f( a, b=None ) :
        GraphComponentTest.numSignals += 1

    child.parentChangedSignal().connect( f, scoped = False )
    parent.childAddedSignal().connect( f, scoped = False )

    # Re-adding the same child must not emit any signal.
    parent.addChild( child )
    self.assertEqual( GraphComponentTest.numSignals, 0 )
def testMany( self ) :
    """Stress test: construct a large number of components."""
    l = []
    for i in range( 0, 100000 ) :
        l.append( Gaffer.GraphComponent() )
def testDictionarySemantics( self ) :
    """Test __setitem__/__getitem__/__delitem__ and __contains__."""
    # check setitem and getitem
    p = Gaffer.GraphComponent()
    c = Gaffer.GraphComponent()
    p["c"] = c
    self.assertTrue( p.getChild( "c" ).isSame( c ) )
    self.assertTrue( p["c"].isSame( c ) )
    self.assertRaises( KeyError, p.__getitem__, "notAChild" )

    # check that setitem removes items with clashing names
    c2 = Gaffer.GraphComponent()
    p["c"] = c2
    self.assertTrue( p.getChild( "c" ).isSame( c2 ) )
    self.assertTrue( c2.parent().isSame( p ) )
    self.assertIsNone( c.parent() )

    # check delitem
    c3 = Gaffer.GraphComponent()
    p["c3"] = c3
    self.assertTrue( p.getChild( "c3" ).isSame( c3 ) )
    self.assertTrue( p["c3"].isSame( c3 ) )
    self.assertIn( "c3", p )

    del p["c3"]
    self.assertNotIn( "c3", p )

    self.assertRaises( KeyError, p.__delitem__, "xxxx" )
def testUniqueNaming( self ) :
    """Siblings with clashing names get unique numeric suffixes."""
    p = Gaffer.GraphComponent()
    c1 = Gaffer.GraphComponent()
    c2 = Gaffer.GraphComponent()
    c3 = Gaffer.GraphComponent()

    c1.setName( "a" )
    c2.setName( "a" )
    c3.setName( "a" )

    p.addChild( c1 )
    self.assertEqual( c1.getName(), "a" )

    p.addChild( c2 )
    self.assertEqual( c2.getName(), "a1" )

    p.addChild( c3 )
    self.assertEqual( c3.getName(), "a2" )

    # "a1" is already taken, so the next free suffix is used.
    c4 = Gaffer.GraphComponent( "a1" )
    p.addChild( c4 )
    self.assertEqual( c4.getName(), "a3" )

    # Renaming after parenting also uniquifies against siblings.
    c1.setName( "b" )
    c2.setName( "b" )
    c3.setName( "b" )
    c4.setName( "b" )

    self.assertEqual( c1.getName(), "b" )
    self.assertEqual( c2.getName(), "b1" )
    self.assertEqual( c3.getName(), "b2" )
    self.assertEqual( c4.getName(), "b3" )
def testParallelUniqueNaming( self ):
    """Unique naming must be safe when run from many threads at once."""
    # At one point setName was using a non-threadsafe static formatter which would throw
    # exceptions when used from multiple threads
    def f( q ) :
        try:
            g = Gaffer.GraphComponent()
            for i in range( 500 ):
                g.addChild( Gaffer.GraphComponent( "a" ) )
            self.assertEqual( set(g.keys()), set( [ "a" ] + [ "a%i" % i for i in range( 1, 500 ) ] ) )
        except Exception as e:
            # Exceptions can't propagate across threads; ship them back
            # through the queue and re-raise in the main thread below.
            q.put( e )

    threads = []
    q = six.moves.queue.Queue()
    for i in range( 0, 500 ) :
        t = threading.Thread( target = f, args = (q,) )
        t.start()
        threads.append( t )

    for t in threads :
        t.join()

    if not q.empty():
        raise q.get( False )
def testAncestor( self ) :
    """ancestor() finds the nearest ancestor of a given type."""
    a = Gaffer.ApplicationRoot()
    s = Gaffer.ScriptNode()
    a["scripts"]["one"] = s

    n = GafferTest.AddNode()
    s["node"] = n

    self.assertTrue( n.ancestor( Gaffer.ScriptNode ).isSame( s ) )
    self.assertTrue( n.ancestor( Gaffer.ApplicationRoot ).isSame( a ) )
def testCommonAncestor( self ) :
    """commonAncestor() is symmetric in its two operands."""
    a = Gaffer.ApplicationRoot()
    s = Gaffer.ScriptNode()
    a["scripts"]["one"] = s

    s["n1"] = Gaffer.Node()
    s["n2"] = Gaffer.Node()

    self.assertTrue( s["n1"].commonAncestor( s["n2"], Gaffer.ScriptNode ).isSame( s ) )
    self.assertTrue( s["n2"].commonAncestor( s["n1"], Gaffer.ScriptNode ).isSame( s ) )
def testCommonAncestorType( self ) :
    """commonAncestor() honours the optional type constraint."""
    s = Gaffer.ScriptNode()
    s["n"] = Gaffer.Node()
    s["n"]["user"]["p1"] = Gaffer.IntPlug()
    s["n"]["user"]["p2"] = Gaffer.Color3fPlug()

    # With no/Plug constraint the nearest common ancestor is the user plug;
    # constraining to Node walks further up to the node itself.
    self.assertEqual( s["n"]["user"]["p1"].commonAncestor( s["n"]["user"]["p2"]["r"] ), s["n"]["user"] )
    self.assertEqual( s["n"]["user"]["p1"].commonAncestor( s["n"]["user"]["p2"]["r"], Gaffer.Plug ), s["n"]["user"] )
    self.assertEqual( s["n"]["user"]["p1"].commonAncestor( s["n"]["user"]["p2"]["r"], Gaffer.Node ), s["n"] )
def testRenameThenRemove( self ) :
    """Renaming a child and then removing it must not crash."""
    p = Gaffer.GraphComponent()
    c = Gaffer.GraphComponent()

    p.addChild( c )
    c.setName( "c" )
    p.removeChild( c )
def testDescendant( self ) :
    """descendant() resolves dot-separated paths."""
    p1 = Gaffer.GraphComponent()
    p2 = Gaffer.GraphComponent()
    p3 = Gaffer.GraphComponent()

    p1["p2"] = p2
    p2["p3"] = p3

    self.assertTrue( p1.descendant( "p2" ).isSame( p2 ) )
    self.assertTrue( p1.descendant( "p2.p3" ).isSame( p3 ) )
def testNameConstraints( self ) :
    """Names must be valid identifiers; invalid ones raise."""
    n = Gaffer.GraphComponent()

    # Leading digits, punctuation and separators are all rejected, both in
    # setName and in the constructor.
    for name in ( "0", "0a", "@A", "a.A", ".", "A:", "a|", "a(" ) :
        self.assertRaises( Exception, n.setName, name )
        self.assertRaises( Exception, Gaffer.GraphComponent, name )

    for name in ( "hello", "_1", "brdf_0_degree_refl" ) :
        n.setName( name )
def testContains( self ) :
    """__contains__ reflects child membership by name."""
    n = Gaffer.GraphComponent()
    self.assertNotIn( "c", n )
    n["c"] = Gaffer.GraphComponent()
    self.assertIn( "c", n )
def testIsAncestorOf( self ) :
    """isAncestorOf() is transitive, irreflexive and direction-sensitive."""
    n = Gaffer.GraphComponent()
    n["c"] = Gaffer.GraphComponent()
    n["c"]["c"] = Gaffer.GraphComponent()
    n2 = Gaffer.GraphComponent()

    self.assertTrue( n.isAncestorOf( n["c"]["c"] ) )
    self.assertTrue( n.isAncestorOf( n["c"] ) )
    # A component is not its own ancestor, and unrelated components are
    # not ancestors of each other.
    self.assertFalse( n.isAncestorOf( n ) )
    self.assertFalse( n2.isAncestorOf( n ) )
    self.assertFalse( n.isAncestorOf( n2 ) )
def testDerivingInPython( self ) :
    """Python subclasses can override acceptsChild/acceptsParent."""

    class TestGraphComponent( Gaffer.GraphComponent ) :

        def __init__( self, name = "TestGraphComponent" ) :
            Gaffer.GraphComponent.__init__( self, name )
            # Flags recording whether the virtual overrides were invoked.
            self.acceptsChildCalled = False
            self.acceptsParentCalled = False

        def acceptsChild( self, potentialChild ) :
            self.acceptsChildCalled = True
            return isinstance( potentialChild, TestGraphComponent )

        def acceptsParent( self, potentialParent ) :
            self.acceptsParentCalled = True
            return isinstance( potentialParent, TestGraphComponent )

    IECore.registerRunTimeTyped( TestGraphComponent )

    # check names in constructors

    g1 = TestGraphComponent()
    self.assertEqual( g1.getName(), "TestGraphComponent" )

    g2 = TestGraphComponent( "g" )
    self.assertEqual( g2.getName(), "g" )

    # check calling virtual overrides directly

    self.assertEqual( g1.acceptsChildCalled, False )
    self.assertEqual( g1.acceptsParentCalled, False )
    self.assertEqual( g2.acceptsChildCalled, False )
    self.assertEqual( g2.acceptsParentCalled, False )

    self.assertTrue( g1.acceptsChild( g2 ) )
    self.assertTrue( g1.acceptsParent( g2 ) )
    self.assertFalse( g1.acceptsChild( Gaffer.Node() ) )
    self.assertFalse( g1.acceptsParent( Gaffer.Node() ) )

    self.assertEqual( g1.acceptsChildCalled, True )
    self.assertEqual( g1.acceptsParentCalled, True )
    self.assertEqual( g2.acceptsChildCalled, False )
    self.assertEqual( g2.acceptsParentCalled, False )

    # check calling virtual overrides indirectly through C++

    g1 = TestGraphComponent()
    g2 = TestGraphComponent( "g" )
    self.assertEqual( g1.acceptsChildCalled, False )
    self.assertEqual( g1.acceptsParentCalled, False )

    # addChild must consult the Python overrides and respect a False return.
    self.assertRaises( RuntimeError, g1.addChild, Gaffer.Node() )
    self.assertEqual( g1.acceptsChildCalled, True )
    self.assertEqual( g1.acceptsParentCalled, False )

    self.assertRaises( RuntimeError, Gaffer.GraphComponent().addChild, g1 )
    self.assertEqual( g1.acceptsChildCalled, True )
    self.assertEqual( g1.acceptsParentCalled, True )
def testLen( self ) :
    """__len__ tracks the number of children."""
    g = Gaffer.GraphComponent()
    self.assertEqual( len( g ), 0 )

    g["a"] = Gaffer.GraphComponent()
    self.assertEqual( len( g ), 1 )

    g["b"] = Gaffer.GraphComponent()
    self.assertEqual( len( g ), 2 )

    del g["a"]
    self.assertEqual( len( g ), 1 )
def testSetChild( self ) :
    """setChild() names, parents, replaces and reparents children."""
    p1 = Gaffer.GraphComponent()
    p2 = Gaffer.GraphComponent()

    c1 = Gaffer.GraphComponent()
    c2 = Gaffer.GraphComponent()

    self.assertEqual( len( p1 ), 0 )
    self.assertEqual( len( p2 ), 0 )
    self.assertEqual( c1.parent(), None )
    self.assertEqual( c2.parent(), None )

    p1.setChild( "a", c1 )
    self.assertEqual( c1.getName(), "a" )
    self.assertEqual( c1.parent(), p1 )
    self.assertEqual( len( p1 ), 1 )

    # Setting a different child under the same name evicts the old one.
    p1.setChild( "a", c2 )
    self.assertEqual( c1.getName(), "a" )
    self.assertEqual( c2.getName(), "a" )
    self.assertEqual( c1.parent(), None )
    self.assertEqual( c2.parent(), p1 )
    self.assertEqual( len( p1 ), 1 )

    # Setting the child under another parent removes it from the first.
    p2.setChild( "b", c2 )
    self.assertEqual( c2.getName(), "b" )
    self.assertEqual( c2.parent(), p2 )
    self.assertEqual( len( p1 ), 0 )
    self.assertEqual( len( p2 ), 1 )
def testSetChildAgain( self ) :
    """setChild() with the current child must be a silent no-op."""
    # Setting a child to the same thing should
    # cause nothing to happen and no signals to
    # be triggered.

    parent = Gaffer.GraphComponent()
    child = Gaffer.GraphComponent()

    parent.setChild( "c", child )
    self.assertTrue( child.parent().isSame( parent ) )

    GraphComponentTest.numSignals = 0
    def f( *args ) :
        GraphComponentTest.numSignals += 1

    # Count every signal that could conceivably fire on a reparent/rename.
    child.parentChangedSignal().connect( f, scoped = False )
    parent.childAddedSignal().connect( f, scoped = False )
    parent.childRemovedSignal().connect( f, scoped = False )
    child.nameChangedSignal().connect( f, scoped = False )

    parent.setChild( "c", child )

    self.assertEqual( GraphComponentTest.numSignals, 0 )
def testEmptyName( self ) :
    """Setting an empty name raises."""
    g = Gaffer.GraphComponent()
    self.assertRaises( RuntimeError, g.setName, "" )
def testGetChildWithEmptyName( self ) :
    """getChild( "" ) returns None while [""] raises KeyError."""
    g = Gaffer.GraphComponent()
    self.assertEqual( g.getChild( "" ), None )
    self.assertRaises( KeyError, g.__getitem__, "" )
def testKeysAndValuesAndItems( self ) :
    """keys(), values() and items() reflect children in insertion order."""
    g = Gaffer.GraphComponent()
    self.assertEqual( g.keys(), [] )
    self.assertEqual( g.values(), [] )

    g["a"] = Gaffer.GraphComponent()
    g["b"] = Gaffer.GraphComponent()
    g["c"] = Gaffer.GraphComponent()

    self.assertEqual( g.keys(), [ "a", "b", "c" ] )

    self.assertEqual( len( g.values() ), 3 )
    self.assertEqual( g.values()[0].getName(), "a" )
    self.assertEqual( g.values()[1].getName(), "b" )
    self.assertEqual( g.values()[2].getName(), "c" )

    items = g.items()
    self.assertEqual( len( items ), 3 )
    self.assertEqual( items[0][0], "a" )
    self.assertEqual( items[1][0], "b" )
    self.assertEqual( items[2][0], "c" )
    self.assertEqual( items[0][1].getName(), "a" )
    self.assertEqual( items[1][1].getName(), "b" )
    self.assertEqual( items[2][1].getName(), "c" )

    # Keys must be plain strings, not some wrapped string type.
    for item in items :
        self.assertIsInstance( item[0], str )
def testIndexByIndex( self ) :
    """Children can be indexed numerically, including negative indices."""
    g = Gaffer.GraphComponent()

    g["a"] = Gaffer.GraphComponent()
    g["b"] = Gaffer.GraphComponent()
    g["c"] = Gaffer.GraphComponent()

    self.assertEqual( len( g ), 3 )

    # Out-of-range indices raise IndexError, in both directions.
    self.assertRaises( IndexError, g.__getitem__, 3 )
    self.assertRaises( IndexError, g.__getitem__, -4 )

    self.assertEqual( g[0].getName(), "a" )
    self.assertEqual( g[1].getName(), "b" )
    self.assertEqual( g[2].getName(), "c" )
    self.assertEqual( g[-1].getName(), "c" )
    self.assertEqual( g[-2].getName(), "b" )
    self.assertEqual( g[-3].getName(), "a" )
def testChildrenByType( self ) :
    """children() can filter by component type."""
    g = Gaffer.Node()
    g["a"] = Gaffer.IntPlug()
    g["b"] = Gaffer.Plug()
    g["c"] = Gaffer.Node()

    # A Node comes with a built-in child ("user" plug), hence 4 not 3.
    self.assertEqual( len( g.children() ), 4 )
    self.assertEqual( len( g.children( Gaffer.GraphComponent ) ), 4 )
    self.assertEqual( len( g.children( Gaffer.Plug ) ), 3 )
    self.assertEqual( len( g.children( Gaffer.Node ) ), 1 )
    self.assertEqual( len( g.children( Gaffer.IntPlug ) ), 1 )
def testRemoveMany( self ) :
    """Stress test: remove a large number of children one by one."""
    g = Gaffer.GraphComponent()
    l = []
    for i in range( 0, 10000 ) :
        c = Gaffer.GraphComponent()
        l.append( c )
        g["c%d"%i] = c

    for c in l :
        g.removeChild( c )
def testManyChildrenWithSameInitialName( self ) :
    """Unique naming must scale to many identically-named children."""
    g = Gaffer.GraphComponent()
    for i in range( 0, 2000 ) :
        g.addChild( Gaffer.GraphComponent() )

    # First child keeps the base name; subsequent ones get index suffixes.
    for index, child in enumerate( g ) :
        if index == 0 :
            self.assertEqual( child.getName(), "GraphComponent" )
        else :
            self.assertEqual( child.getName(), "GraphComponent%d" % index )
def testNamesWithStrangeSuffixes( self ) :
    """A non-purely-numeric suffix must not block the plain numeric one."""
    g = Gaffer.GraphComponent()
    g.addChild( Gaffer.GraphComponent( "a" ) )
    g.addChild( Gaffer.GraphComponent( "a1somethingElse" ) )
    self.assertEqual( g[0].getName(), "a" )
    self.assertEqual( g[1].getName(), "a1somethingElse" )

    # "a1somethingElse" doesn't clash with the generated "a1".
    g.addChild( Gaffer.GraphComponent( "a" ) )
    self.assertEqual( g[2].getName(), "a1" )
def testAddChildWithExistingNumericSuffix( self ) :
    """A clashing name that already ends in a number is incremented."""
    g = Gaffer.GraphComponent()
    g.addChild( Gaffer.GraphComponent( "a1" ) )
    g.addChild( Gaffer.GraphComponent( "a1" ) )

    self.assertEqual( g[0].getName(), "a1" )
    self.assertEqual( g[1].getName(), "a2" )
def testSetChildDoesntRemoveChildIfNewChildIsntAccepted( self ) :
    """A rejected setChild() must leave the existing child in place."""

    class AddNodeAcceptor( Gaffer.Node ) :

        def __init__( self, name = "AddNodeAcceptor" ) :
            Gaffer.Node.__init__( self, name )

        # Only AddNode children are accepted.
        def acceptsChild( self, potentialChild ) :
            return isinstance( potentialChild, GafferTest.AddNode )

    IECore.registerRunTimeTyped( AddNodeAcceptor )

    g = AddNodeAcceptor()
    a = GafferTest.AddNode()
    g["a"] = a

    # MultiplyNode is rejected; the original child must survive.
    self.assertRaises( RuntimeError, g.setChild, "a", GafferTest.MultiplyNode() )

    self.assertTrue( g["a"].isSame( a ) )
def testCircularParentingThrows( self ) :
    """Parenting cycles of any length must be rejected."""
    a = Gaffer.GraphComponent()
    b = Gaffer.GraphComponent()

    a["b"] = b
    self.assertRaises( RuntimeError, b.addChild, a )

    a = Gaffer.GraphComponent()
    b = Gaffer.GraphComponent()
    c = Gaffer.GraphComponent()

    a["b"] = b
    b["c"] = c
    self.assertRaises( RuntimeError, c.addChild, a )

    # Self-parenting is a cycle of length one.
    a = Gaffer.GraphComponent()
    self.assertRaises( RuntimeError, a.addChild, a )
def testTypeNamePrefixes( self ) :
    """All registered type names must carry their module prefix."""
    self.assertTypeNamesArePrefixed(

        Gaffer,

        # Ignore the names imported from GafferCortex and
        # GafferDispatch into the Gaffer namespace - they're
        # just for backwards compatibility.
        namesToIgnore = set( [

            "GafferCortex::ObjectReader",
            "GafferCortex::ObjectWriter",
            "GafferCortex::ExecutableOpHolder",
            "GafferCortex::OpHolder",
            "GafferCortex::ParameterisedHolderNode",
            "GafferCortex::ParameterisedHolderDependencyNode",
            "GafferCortex::ParameterisedHolderComputeNode",
            "GafferCortex::ParameterisedHolderTaskNode",
            "GafferCortex::AttributeCachePath",
            "GafferCortex::ClassLoaderPath",
            "GafferCortex::IndexedIOPath",
            "GafferCortex::ParameterPath",
            "GafferDispatch::Dispatcher",
            "GafferDispatch::LocalDispatcher",
            "GafferDispatch::TaskNode",
            "GafferDispatch::PythonCommand",
            "GafferDispatch::SystemCommand",
            "GafferDispatch::TaskContextProcessor",
            "GafferDispatch::TaskContextVariables",
            "GafferDispatch::TaskList",
            "GafferDispatch::TaskSwitch",
            "GafferDispatch::Wedge",
            "GafferDispatch::FrameMask",
            "IECorePreview::MessagesData"

        ] )

    )

    self.assertTypeNamesArePrefixed( GafferTest )
def testDefaultNames( self ) :
    """Default names of all node/plug types must match their type names."""
    self.assertDefaultNamesAreCorrect( Gaffer )
    self.assertDefaultNamesAreCorrect( GafferTest )
def testClearChildren( self ) :
    """clearChildren() removes all children at once."""
    p = Gaffer.GraphComponent()

    for i in range( 0, 10 ) :
        p.addChild( Gaffer.GraphComponent() )

    self.assertEqual( len( p ), 10 )

    p.clearChildren()

    self.assertEqual( len( p ), 0 )
def testParentChanging( self ) :
    """_parentChanging() is called with (currentParent, newParent)."""

    class Child( Gaffer.GraphComponent ) :

        def __init__( self, name = "Child" ) :
            Gaffer.GraphComponent.__init__( self, name )
            # Records ( parentBeforeChange, parentAfterChange ) pairs.
            self.parentChanges = []

        def _parentChanging( self, newParent ) :
            self.parentChanges.append( ( self.parent(), newParent ) )

    p1 = Gaffer.GraphComponent()
    p2 = Gaffer.GraphComponent()

    c = Child()
    self.assertEqual( len( c.parentChanges ), 0 )

    p1.addChild( c )
    self.assertEqual( len( c.parentChanges ), 1 )
    self.assertEqual( c.parentChanges[-1], ( None, p1 ) )

    p1.removeChild( c )
    self.assertEqual( len( c.parentChanges ), 2 )
    self.assertEqual( c.parentChanges[-1], ( p1, None ) )

    p1.addChild( c )
    self.assertEqual( len( c.parentChanges ), 3 )
    self.assertEqual( c.parentChanges[-1], ( None, p1 ) )

    p2.addChild( c )
    self.assertEqual( len( c.parentChanges ), 4 )
    self.assertEqual( c.parentChanges[-1], ( p1, p2 ) )

    # cause a parent change by destroying the parent.
    # we need to remove all references to the parent to do
    # this, including those stored in the parentChanges list.
    del p2
    del c.parentChanges[:]

    self.assertEqual( len( c.parentChanges ), 1 )
    self.assertEqual( c.parentChanges[-1], ( None, None ) )
def testDescriptiveKeyErrors( self ) :
    """Failed child lookups and deletions should name the missing child and the parent."""
    g = Gaffer.GraphComponent()
    for operation in ( g.__getitem__, g.__delitem__ ) :
        six.assertRaisesRegex( self, KeyError, "'a' is not a child of 'GraphComponent'", operation, "a" )
def testNoneIsNotAString( self ) :
    """Passing None where a name string is expected should raise TypeError, not crash."""
    g = Gaffer.GraphComponent()
    stringTakingMethods = (
        g.getChild,
        g.__getitem__,
        g.__delitem__,
        g.descendant,
        g.__contains__,
        g.setName,
    )
    for method in stringTakingMethods :
        self.assertRaises( TypeError, method, None )
def testDelItemByIndex( self ) :
    """`del parent[index]` should unparent only the child at that index."""
    parent = Gaffer.GraphComponent()
    first = Gaffer.GraphComponent( "a" )
    second = Gaffer.GraphComponent( "b" )
    parent["a"] = first
    parent["b"] = second
    self.assertEqual( first.parent(), parent )
    self.assertEqual( second.parent(), parent )

    del parent[0]

    self.assertEqual( first.parent(), None )
    self.assertEqual( second.parent(), parent )
def testRemoveChildUndoIndices( self ) :
    """Undoing a child removal should restore the child at its original index."""
    script = Gaffer.ScriptNode()
    script["n"] = Gaffer.Node()

    a, b, c = Gaffer.Plug( "a" ), Gaffer.Plug( "b" ), Gaffer.Plug( "c" )
    for plug in ( a, b, c ) :
        script["n"]["user"].addChild( plug )

    def assertPreconditions() :
        self.assertEqual( len( script["n"]["user"] ), 3 )
        self.assertEqual( script["n"]["user"][0], a )
        self.assertEqual( script["n"]["user"][1], b )
        self.assertEqual( script["n"]["user"][2], c )

    def assertPostConditions() :
        self.assertEqual( len( script["n"]["user"] ), 2 )
        self.assertEqual( script["n"]["user"][0], a )
        self.assertEqual( script["n"]["user"][1], c )

    assertPreconditions()

    with Gaffer.UndoScope( script ) :
        del script["n"]["user"]["b"]
    assertPostConditions()

    # Cycle undo/redo a couple of times to check the indices stay stable.
    script.undo()
    assertPreconditions()
    script.redo()
    assertPostConditions()
    script.undo()
    assertPreconditions()
def testMoveChildUndoIndices( self ) :
    """Undoing a move between parents should restore the child at its original index."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n2"] = Gaffer.Node()

    a, b, c = Gaffer.Plug( "a" ), Gaffer.Plug( "b" ), Gaffer.Plug( "c" )
    for plug in ( a, b, c ) :
        script["n1"]["user"].addChild( plug )

    def assertPreconditions() :
        self.assertEqual( len( script["n1"]["user"] ), 3 )
        self.assertEqual( script["n1"]["user"][0], a )
        self.assertEqual( script["n1"]["user"][1], b )
        self.assertEqual( script["n1"]["user"][2], c )
        self.assertEqual( len( script["n2"]["user"] ), 0 )

    def assertPostConditions() :
        self.assertEqual( len( script["n1"]["user"] ), 2 )
        self.assertEqual( script["n1"]["user"][0], a )
        self.assertEqual( script["n1"]["user"][1], c )
        self.assertEqual( len( script["n2"]["user"] ), 1 )
        self.assertEqual( script["n2"]["user"][0], b )

    assertPreconditions()

    # Adding to the new parent implicitly removes from the old one.
    with Gaffer.UndoScope( script ) :
        script["n2"]["user"].addChild( script["n1"]["user"]["b"] )
    assertPostConditions()

    script.undo()
    assertPreconditions()
    script.redo()
    assertPostConditions()
    script.undo()
    assertPreconditions()
def testParentChangedOverride( self ) :
    """Checks that `_parentChanged()` is called exactly once per reparenting,
    after the change has taken effect (mirror of testParentChanging)."""

    # Subclass that records each notification as an ( oldParent, newParent ) pair.
    class Child( Gaffer.GraphComponent ) :

        def __init__( self, name = "Child" ) :
            Gaffer.GraphComponent.__init__( self, name )
            self.parentChanges = []

        def _parentChanged( self, oldParent ) :
            # Called after the reparenting, so `self.parent()` already
            # returns the new parent here.
            self.parentChanges.append( ( oldParent, self.parent() ) )

    p1 = Gaffer.GraphComponent()
    p2 = Gaffer.GraphComponent()

    c = Child()
    self.assertEqual( len( c.parentChanges ), 0 )

    p1.addChild( c )
    self.assertEqual( len( c.parentChanges ), 1 )
    self.assertEqual( c.parentChanges[-1], ( None, p1 ) )

    p1.removeChild( c )
    self.assertEqual( len( c.parentChanges ), 2 )
    self.assertEqual( c.parentChanges[-1], ( p1, None ) )

    p1.addChild( c )
    self.assertEqual( len( c.parentChanges ), 3 )
    self.assertEqual( c.parentChanges[-1], ( None, p1 ) )

    # A direct move between parents is reported as one change.
    p2.addChild( c )
    self.assertEqual( len( c.parentChanges ), 4 )
    self.assertEqual( c.parentChanges[-1], ( p1, p2 ) )

    # Cause a parent change by destroying the parent.
    # We need to remove all references to the parent to do
    # this, including those stored in the parentChanges list.
    # NOTE: relies on refcounting destroying p2 immediately.
    del p2
    del c.parentChanges[:]

    self.assertEqual( len( c.parentChanges ), 1 )
    self.assertEqual( c.parentChanges[-1], ( None, None ) )
@GafferTest.TestRunner.PerformanceTestMethod()
def testMakeNamesUnique( self ) :
    """Measures the cost of automatic name uniquification when adding many children."""
    script = Gaffer.ScriptNode()
    for _ in range( 1000 ) :
        script.addChild( GafferTest.AddNode() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testGetChild( self ) :
    """Measures child lookup by name over a large number of children."""
    script = Gaffer.ScriptNode()
    # explicitly setting the name to something unique
    # avoids the overhead incurred by the example
    # in testMakeNamesUnique
    names = [ "AddNode" + str( i ) for i in range( 1000 ) ]
    for name in names :
        script.addChild( GafferTest.AddNode( name ) )

    for name in names :
        self.assertEqual( script[name].getName(), name )
def testNoneIsNotAGraphComponent( self ) :
    """Passing None where a GraphComponent is expected should be rejected by the bindings."""
    g = Gaffer.GraphComponent()
    badCalls = (
        ( g.addChild, ( None, ) ),
        ( g.setChild, ( "x", None ) ),
        ( g.removeChild, ( None, ) ),
    )
    for method, args in badCalls :
        with six.assertRaisesRegex( self, Exception, r"did not match C\+\+ signature" ) :
            method( *args )
def testRanges( self ) :
    """Range() iterates immediate children; RecursiveRange() iterates depth-first."""
    root = Gaffer.GraphComponent()
    for name in ( "c1", "c2", "c3" ) :
        root[name] = Gaffer.GraphComponent()
    root["c2"]["gc1"] = Gaffer.GraphComponent()
    root["c3"]["gc2"] = Gaffer.GraphComponent()
    root["c3"]["gc3"] = Gaffer.GraphComponent()

    self.assertEqual(
        list( Gaffer.GraphComponent.Range( root ) ),
        [ root["c1"], root["c2"], root["c3"] ],
    )

    self.assertEqual(
        list( Gaffer.GraphComponent.RecursiveRange( root ) ),
        [ root["c1"], root["c2"], root["c2"]["gc1"], root["c3"], root["c3"]["gc2"], root["c3"]["gc3"] ],
    )
def testReorderChildren( self ) :
    """Checks reorderChildren() : new child order, the childrenReorderedSignal
    payload (old indices), and full undo/redo round-tripping."""

    script = Gaffer.ScriptNode()

    p = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
    c1 = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
    c2 = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
    c3 = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
    c4 = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

    script["node"] = Gaffer.Node()
    script["node"]["p"] = p
    script["node"]["p"]["c1"] = c1
    script["node"]["p"]["c2"] = c2
    script["node"]["p"]["c3"] = c3
    script["node"]["p"]["c4"] = c4

    # `mirror` tracks the child names, updated only via the signal below.
    mirror = [ c.getName() for c in script["node"]["p"] ]
    def childrenReordered( parent, oldIndices ) :
        # Demonstrates how you could maintain a parallel data structure
        # to keep the same order. For example, a list of widgets in the UI.
        mirror[:] = [ mirror[i] for i in oldIndices ]

    script["node"]["p"].childrenReorderedSignal().connect( childrenReordered, scoped = False )

    # Capture every emission of the signal so we can assert on its arguments.
    cs = GafferTest.CapturingSlot( p.childrenReorderedSignal() )

    with Gaffer.UndoScope( script ) :
        p.reorderChildren( [ c4, c3, c1, c2 ] )

    # Signal reports, for each new position, the index the child came from.
    self.assertEqual( p.children(), ( c4, c3, c1, c2 ) )
    self.assertEqual( len( cs ), 1 )
    self.assertEqual( cs[-1], ( p, [ 3, 2, 0, 1 ] ) )
    self.assertEqual( mirror, [ c.getName() for c in script["node"]["p"] ] )

    # Undo emits the inverse permutation.
    script.undo()
    self.assertEqual( p.children(), ( c1, c2, c3, c4 ) )
    self.assertEqual( len( cs ), 2 )
    self.assertEqual( cs[-1], ( p, [ 2, 3, 1, 0 ] ) )
    self.assertEqual( mirror, [ c.getName() for c in script["node"]["p"] ] )

    script.redo()
    self.assertEqual( p.children(), ( c4, c3, c1, c2 ) )
    self.assertEqual( len( cs ), 3 )
    self.assertEqual( cs[-1], ( p, [ 3, 2, 0, 1 ] ) )
    self.assertEqual( mirror, [ c.getName() for c in script["node"]["p"] ] )

    script.undo()
    self.assertEqual( p.children(), ( c1, c2, c3, c4 ) )
    self.assertEqual( len( cs ), 4 )
    self.assertEqual( cs[-1], ( p, [ 2, 3, 1, 0 ] ) )
    self.assertEqual( mirror, [ c.getName() for c in script["node"]["p"] ] )
def testReorderChildrenArgumentChecks( self ) :
    """reorderChildren() should reject orders of the wrong length or with duplicates."""
    parent = Gaffer.Plug( "p" )
    parent["c1"] = c1 = Gaffer.Plug()
    parent["c2"] = c2 = Gaffer.Plug()
    parent["c3"] = c3 = Gaffer.Plug()

    badOrders = (
        ( [ c1, c2 ], r"Wrong number of children specified \(2 but should be 3\)" ),
        ( [ c1, c2, c3, c1 ], r"Wrong number of children specified \(4 but should be 3\)" ),
        ( [ c1, c2, c2 ], 'Child "c2" is in more than one position' ),
    )
    for order, message in badOrders :
        with six.assertRaisesRegex( self, Exception, message ) :
            parent.reorderChildren( order )
# Allow the test module to be executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
"""
IGWeight.py -
Compute IG Weights given a set of tokenized buckets and a feature set
Marco Lui, January 2013
Based on research by Marco Lui and Tim Baldwin.
Copyright 2013 Marco Lui <saffsd@gmail.com>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the copyright holder.
"""
import os, sys, argparse
import csv
import numpy
import multiprocessing as mp
from itertools import tee, imap, islice
from collections import defaultdict
from contextlib import closing
from common import unmarshal_iter, MapPool, Enumerator, write_weights, read_features
def entropy(v, axis=0):
    """
    Optimized implementation of entropy. This version is faster than that in
    scipy.stats.distributions, particularly over long vectors.

    Computes H = log(s) - sum(v*log(v))/s along `axis`, where s is the sum of
    the (unnormalized) counts in `v`. The result is in nats.
    """
    counts = numpy.array(v, dtype='float')
    totals = numpy.sum(counts, axis=axis)
    # log(0) and 0/0 are expected for empty events; silence the warnings.
    with numpy.errstate(divide='ignore', invalid='ignore'):
        weighted_log = numpy.nansum(counts * numpy.log(counts), axis=axis) / totals
        result = numpy.log(totals) - weighted_log
    # Where dealing with binarized events, it is possible that an event always
    # occurs and thus has 0 information. In this case, the negative class
    # will have frequency 0, resulting in log(0) being computed as nan.
    # We replace these nans with 0.
    undefined = numpy.isnan(weighted_log)
    if undefined.any():
        result[undefined] = 0
    return result
def setup_pass_IG(features, dist, binarize, suffix):
    """
    Worker-process initializer for pass_IG: stashes the shared parameters
    in module-level globals so the mapped function can reach them.

    @param features the list of features to compute IG for
    @param dist the background distribution
    @param binarize (boolean) compute IG binarized per-class if True
    @param suffix of files in bucketdir to process
    """
    global __features, __dist, __binarize, __suffix
    __features, __dist, __binarize, __suffix = features, dist, binarize, suffix
def pass_IG(buckets):
    """
    In this pass we compute the information gain for each feature, binarized
    with respect to each language as well as unified over the set of all
    classes.

    @global __features the list of features to compute IG for
    @global __dist the background distribution
    @global __binarize (boolean) compute IG binarized per-class if True
    @global __suffix of files in bucketdir to process
    @param buckets a list of buckets. Each bucket must be a directory that contains files
                   with the appropriate suffix. Each file must contain marshalled
                   (term, event_id, count) triplets.
    @returns (terms, ig) where terms is the feature list in term-id order and ig
             is the information-gain array (per-event rows when binarized).
    """
    global __features, __dist, __binarize, __suffix

    # We first tally the per-event frequency of each
    # term in our selected feature set.
    term_freq = defaultdict(lambda: defaultdict(int))
    # Enumerator() hands out a fresh sequential id per new key, so simply
    # accessing term_index[key] assigns a term id on first sight.
    term_index = defaultdict(Enumerator())

    for bucket in buckets:
        for path in os.listdir(bucket):
            if path.endswith(__suffix):
                for key, event_id, count in unmarshal_iter(os.path.join(bucket,path)):
                    # Select only our listed features
                    if key in __features:
                        term_index[key]
                        term_freq[key][event_id] += count

    num_term = len(term_index)
    num_event = len(__dist)

    # cm_pos[t, e]: count of term t co-occurring with event e.
    cm_pos = numpy.zeros((num_term, num_event), dtype='int')

    for term,term_id in term_index.iteritems():
        # update event matrix
        freq = term_freq[term]
        for event_id, count in freq.iteritems():
            cm_pos[term_id, event_id] = count

    # cm_neg[t, e]: remaining mass of event e where term t is absent.
    cm_neg = __dist - cm_pos
    # cm has shape (term, event, 2): absent/present counts stacked last.
    cm = numpy.dstack((cm_neg, cm_pos))

    if not __binarize:
        # non-binarized event space
        x = cm.sum(axis=1)
        term_w = x / x.sum(axis=1)[:, None].astype(float)

        # Entropy of the term-present/term-absent events
        e = entropy(cm, axis=1)

        # Information Gain with respect to the set of events
        ig = entropy(__dist) - (term_w * e).sum(axis=1)
    else:
        # binarized event space
        # Compute IG binarized with respect to each event
        ig = list()
        for event_id in xrange(num_event):
            num_doc = __dist.sum()
            # prior: P(not-event), P(event) over all documents.
            prior = numpy.array((num_doc - __dist[event_id], __dist[event_id]), dtype=float) / num_doc

            cm_bin = numpy.zeros((num_term, 2, 2), dtype=int) # (term, p(term), p(lang|term))
            cm_bin[:,0,:] = cm.sum(axis=1) - cm[:,event_id,:]
            cm_bin[:,1,:] = cm[:,event_id,:]

            e = entropy(cm_bin, axis=1)
            x = cm_bin.sum(axis=1)
            term_w = x / x.sum(axis=1)[:, None].astype(float)

            ig.append( entropy(prior) - (term_w * e).sum(axis=1) )
        ig = numpy.vstack(ig)

    # Order the term strings by their assigned term id so they line up with ig rows.
    terms = sorted(term_index, key=term_index.get)
    return terms, ig
def compute_IG(bucketlist, features, dist, binarize, suffix, job_count=None):
    """
    Compute information gain for `features` over all bucket chunks, fanning
    the per-chunk work out to a worker pool.

    @param bucketlist list of bucket groups (one chunk per pass_IG call)
    @param features the list of features to compute IG for
    @param dist the background distribution
    @param binarize (boolean) compute IG binarized per-class if True
    @param suffix of files in bucketdir to process
    @param job_count number of worker processes (None lets MapPool decide)
    @returns list of (term, weight) pairs
    """
    pass_IG_args = (features, dist, binarize, suffix)

    num_chunk = len(bucketlist)
    weights = []
    terms = []

    # setup_pass_IG initializes each worker's globals before pass_IG runs.
    with MapPool(job_count, setup_pass_IG, pass_IG_args) as f:
        pass_IG_out = f(pass_IG, bucketlist)

        for i, (t, w) in enumerate(pass_IG_out):
            weights.append(w)
            terms.extend(t)
            print "processed chunk (%d/%d) [%d terms]" % (i+1, num_chunk, len(t))

    if binarize:
        # Binarized passes return one row per event; stack to (term, event).
        weights = numpy.hstack(weights).transpose()
    else:
        weights = numpy.concatenate(weights)
    # Terms arrive as character tuples; rejoin them into strings.
    terms = ["".join(t) for t in terms]

    return zip(terms, weights)
def read_dist(path):
    """
    Read the distribution from a file containing item, count pairs.

    The file is CSV with one "<item>,<count>" row per line; counts are
    returned in file order.

    @param path path to read from
    @returns numpy integer array of counts
    """
    with open(path) as f:
        reader = csv.reader(f)
        # Collect the count column row-by-row rather than via zip(*reader)[1]:
        # indexing a zip object fails on Python 3, and the transpose crashed
        # with IndexError on an empty file (now returns an empty array).
        return numpy.array([row[1] for row in reader], dtype=int)
# Command-line driver: parse arguments, resolve input/output paths,
# then compute and write IG weights.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-j","--jobs", type=int, metavar='N', help="spawn N processes (set to 1 for no paralleization)")
    parser.add_argument("-f","--features", metavar='FEATURE_FILE', help="read features from FEATURE_FILE")
    parser.add_argument("-w","--weights", metavar='WEIGHTS', help="output weights to WEIGHTS")
    parser.add_argument("-d","--domain", action="store_true", default=False, help="compute IG with respect to domain")
    parser.add_argument("-b","--binarize", action="store_true", default=False, help="binarize the event space in the IG computation")
    parser.add_argument("-l","--lang", action="store_true", default=False, help="compute IG with respect to language")
    parser.add_argument("model", metavar='MODEL_DIR', help="read index and produce output in MODEL_DIR")
    parser.add_argument("buckets", nargs='*', help="read bucketlist from")
    args = parser.parse_args()

    # Exactly one event type (domain or language) must be selected.
    if not(args.domain or args.lang) or (args.domain and args.lang):
        parser.error("exactly one of domain(-d) or language (-l) must be specified")

    # Fall back to the model directory for any path not given explicitly.
    if args.features:
        feature_path = args.features
    else:
        feature_path = os.path.join(args.model, 'DFfeats')

    if args.buckets:
        bucketlist_paths = args.buckets
    else:
        bucketlist_paths = [os.path.join(args.model, 'bucketlist')]

    if not os.path.exists(feature_path):
        parser.error('{0} does not exist'.format(feature_path))

    features = read_features(feature_path)

    # The chosen event type selects both the background index and the
    # bucket-file suffix to process.
    if args.domain:
        index_path = os.path.join(args.model,'domain_index')
        suffix = '.domain'
    elif args.lang:
        index_path = os.path.join(args.model,'lang_index')
        suffix = '.lang'
    else:
        # unreachable: the mutual-exclusion check above guarantees one is set
        raise ValueError("no event specified")

    if args.weights:
        weights_path = args.weights
    else:
        weights_path = os.path.join(args.model, 'IGweights' + suffix + ('.bin' if args.binarize else ''))

    # display paths
    print "model path:", args.model
    print "buckets path:", bucketlist_paths
    print "features path:", feature_path
    print "weights path:", weights_path
    print "index path:", index_path
    print "suffix:", suffix

    print "computing information gain"

    # Compile buckets together: one tuple of parallel buckets per chunk.
    bucketlist = zip(*(map(str.strip, open(p)) for p in bucketlist_paths))
    # Check that each bucketlist has the same number of buckets
    assert len(set(map(len,bucketlist))) == 1, "incompatible bucketlists!"

    dist = read_dist(index_path)
    ig = compute_IG(bucketlist, features, dist, args.binarize, suffix, args.jobs)

    write_weights(ig, weights_path)
| |
"""PostgreSQL check
Collects database-wide metrics and optionally per-relation metrics, custom metrics.
"""
# project
from checks import AgentCheck, CheckException
# 3rd party
import pg8000 as pg
from pg8000 import InterfaceError, ProgrammingError
import socket
# Cap on the number of rows accepted from a single custom-metric query.
MAX_CUSTOM_RESULTS = 100

# Raised to signal that the cached connection is broken and the check
# should reconnect and retry once.
class ShouldRestartException(Exception): pass
class PostgreSql(AgentCheck):
    """Collects per-database, and optionally per-relation metrics, custom metrics
    """
    SOURCE_TYPE_NAME = 'postgresql'
    RATE = AgentCheck.rate
    GAUGE = AgentCheck.gauge
    MONOTONIC = AgentCheck.monotonic_count
    SERVICE_CHECK_NAME = 'postgres.can_connect'

    # Each *_METRICS structure below describes one query:
    #   'descriptors': leading result columns turned into tags as (pg_column, tag_name)
    #   'metrics':     column expression -> (datadog metric name, submit method)
    #   'query':       SQL with a %s placeholder for the metric column list
    #   'relation':    True when the query takes the relation list as a parameter

    # turning columns into tags
    DB_METRICS = {
        'descriptors': [
            ('datname', 'db')
        ],
        'metrics': {},
        'query': """
SELECT datname,
       %s
  FROM pg_stat_database
 WHERE datname not ilike 'template%%'
   AND datname not ilike 'postgres'
""",
        'relation': False,
    }

    COMMON_METRICS = {
        'numbackends'       : ('postgresql.connections', GAUGE),
        'xact_commit'       : ('postgresql.commits', RATE),
        'xact_rollback'     : ('postgresql.rollbacks', RATE),
        'blks_read'         : ('postgresql.disk_read', RATE),
        'blks_hit'          : ('postgresql.buffer_hit', RATE),
        'tup_returned'      : ('postgresql.rows_returned', RATE),
        'tup_fetched'       : ('postgresql.rows_fetched', RATE),
        'tup_inserted'      : ('postgresql.rows_inserted', RATE),
        'tup_updated'       : ('postgresql.rows_updated', RATE),
        'tup_deleted'       : ('postgresql.rows_deleted', RATE),
        'pg_database_size(datname) as pg_database_size' : ('postgresql.database_size', GAUGE),
    }

    # Metrics only present in pg_stat_database from 9.2 onwards.
    NEWER_92_METRICS = {
        'deadlocks'         : ('postgresql.deadlocks', RATE),
        'temp_bytes'        : ('postgresql.temp_bytes', RATE),
        'temp_files'        : ('postgresql.temp_files', RATE),
    }

    BGW_METRICS = {
        'descriptors': [],
        'metrics': {},
        'query': "select %s FROM pg_stat_bgwriter",
        'relation': False,
    }

    COMMON_BGW_METRICS = {
        'checkpoints_timed'    : ('postgresql.bgwriter.checkpoints_timed', MONOTONIC),
        'checkpoints_req'      : ('postgresql.bgwriter.checkpoints_requested', MONOTONIC),
        'buffers_checkpoint'   : ('postgresql.bgwriter.buffers_checkpoint', MONOTONIC),
        'buffers_clean'        : ('postgresql.bgwriter.buffers_clean', MONOTONIC),
        'maxwritten_clean'     : ('postgresql.bgwriter.maxwritten_clean', MONOTONIC),
        'buffers_backend'      : ('postgresql.bgwriter.buffers_backend', MONOTONIC),
        'buffers_alloc'        : ('postgresql.bgwriter.buffers_alloc', MONOTONIC),
    }

    NEWER_91_BGW_METRICS = {
        'buffers_backend_fsync': ('postgresql.bgwriter.buffers_backend_fsync', MONOTONIC),
    }

    NEWER_92_BGW_METRICS = {
        'checkpoint_write_time': ('postgresql.bgwriter.write_time', MONOTONIC),
        'checkpoint_sync_time' : ('postgresql.bgwriter.sync_time', MONOTONIC),
    }

    LOCK_METRICS = {
        'descriptors': [
            ('mode', 'lock_mode'),
            ('relname', 'table'),
        ],
        'metrics': {
            'lock_count'       : ('postgresql.locks', GAUGE),
        },
        'query': """
SELECT mode,
       pc.relname,
       count(*) AS %s
  FROM pg_locks l
  JOIN pg_class pc ON (l.relation = pc.oid)
 WHERE l.mode IS NOT NULL
   AND pc.relname NOT LIKE 'pg_%%'
 GROUP BY pc.relname, mode""",
        'relation': False,
    }

    REL_METRICS = {
        'descriptors': [
            ('relname', 'table')
        ],
        'metrics': {
            'seq_scan'          : ('postgresql.seq_scans', RATE),
            'seq_tup_read'      : ('postgresql.seq_rows_read', RATE),
            'idx_scan'          : ('postgresql.index_scans', RATE),
            'idx_tup_fetch'     : ('postgresql.index_rows_fetched', RATE),
            'n_tup_ins'         : ('postgresql.rows_inserted', RATE),
            'n_tup_upd'         : ('postgresql.rows_updated', RATE),
            'n_tup_del'         : ('postgresql.rows_deleted', RATE),
            'n_tup_hot_upd'     : ('postgresql.rows_hot_updated', RATE),
            'n_live_tup'        : ('postgresql.live_rows', GAUGE),
            'n_dead_tup'        : ('postgresql.dead_rows', GAUGE),
        },
        'query': """
SELECT relname,
       %s
  FROM pg_stat_user_tables
 WHERE relname = ANY(%s)""",
        'relation': True,
    }

    IDX_METRICS = {
        'descriptors': [
            ('relname', 'table'),
            ('indexrelname', 'index')
        ],
        'metrics': {
            'idx_scan'          : ('postgresql.index_scans', RATE),
            'idx_tup_read'      : ('postgresql.index_rows_read', RATE),
            'idx_tup_fetch'     : ('postgresql.index_rows_fetched', RATE),
        },
        'query': """
SELECT relname,
       indexrelname,
       %s
  FROM pg_stat_user_indexes
 WHERE relname = ANY(%s)""",
        'relation': True,
    }

    SIZE_METRICS = {
        'descriptors': [
            ('relname', 'table'),
        ],
        'metrics': {
            'pg_table_size(C.oid) as table_size'            : ('postgresql.table_size', GAUGE),
            'pg_indexes_size(C.oid) as index_size'          : ('postgresql.index_size', GAUGE),
            'pg_total_relation_size(C.oid) as total_size'   : ('postgresql.total_size', GAUGE),
        },
        'relation': True,
        'query': """
SELECT
  relname,
  %s
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema') AND
  nspname !~ '^pg_toast' AND
  relkind IN ('r') AND
  relname = ANY(%s)"""
    }

    COUNT_METRICS = {
        'descriptors': [
            ('schemaname', 'schema')
        ],
        'metrics': {
            'pg_stat_user_tables': ('postgresql.total_tables', GAUGE),
        },
        'relation': False,
        'query': """
SELECT schemaname, count(*)
  FROM %s
 GROUP BY schemaname
"""
    }

    REPLICATION_METRICS_9_1 = {
        'CASE WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0 ELSE GREATEST (0, EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())) END': ('postgresql.replication_delay', GAUGE),
    }

    REPLICATION_METRICS_9_2 = {
        'abs(pg_xlog_location_diff(pg_last_xlog_receive_location(), pg_last_xlog_replay_location())) AS replication_delay_bytes': ('postgres.replication_delay_bytes', GAUGE)
    }

    REPLICATION_METRICS = {
        'descriptors': [],
        'metrics': {},
        'relation': False,
        'query': """
SELECT %s
 WHERE (SELECT pg_is_in_recovery())"""
    }

    CONNECTION_METRICS = {
        'descriptors': [],
        'metrics': {
            'MAX(setting) AS max_connections': ('postgresql.max_connections', GAUGE),
            'SUM(numbackends)/MAX(setting) AS pct_connections': ('postgresql.percent_usage_connections', GAUGE),
        },
        'relation': False,
        'query': """
WITH max_con AS (SELECT setting::float FROM pg_settings WHERE name = 'max_connections')
SELECT %s
  FROM pg_stat_database, max_con
"""
    }

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        self.dbs = {}                  # key -> cached connection
        self.versions = {}             # key -> parsed server version
        self.instance_metrics = {}     # key -> version-appropriate DB metric map
        self.bgw_metrics = {}          # key -> version-appropriate bgwriter metric map
        self.db_instance_metrics = []  # (host, port) pairs already collecting DB metrics
        self.db_bgw_metrics = []       # (host, port) pairs already collecting bgw metrics
        self.replication_metrics = {}  # key -> version-appropriate replication metric map

    def _get_version(self, key, db):
        """Return (and memoize) the server version, as an int list when parseable."""
        if key not in self.versions:
            cursor = db.cursor()
            cursor.execute('SHOW SERVER_VERSION;')
            result = cursor.fetchone()
            try:
                version = map(int, result[0].split('.'))
            except Exception:
                # Non-numeric versions (e.g. beta builds) are kept as raw strings.
                version = result[0]
            self.versions[key] = version

        return self.versions[key]

    def _is_above(self, key, db, version_to_compare):
        """True if the server version is known, parseable and >= version_to_compare."""
        version = self._get_version(key, db)
        if type(version) == list:
            return version >= version_to_compare

        return False

    def _is_9_1_or_above(self, key, db):
        return self._is_above(key, db, [9,1,0])

    def _is_9_2_or_above(self, key, db):
        return self._is_above(key, db, [9,2,0])

    def _get_instance_metrics(self, key, db):
        """Use either COMMON_METRICS or COMMON_METRICS + NEWER_92_METRICS
        depending on the postgres version.
        Uses a dictionary to save the result for each instance
        """
        # Extended 9.2+ metrics if needed
        metrics = self.instance_metrics.get(key)

        if metrics is None:
            # Hack to make sure that if we have multiple instances that connect to
            # the same host, port, we don't collect metrics twice
            # as it will result in https://github.com/DataDog/dd-agent/issues/1211
            sub_key = key[:2]
            if sub_key in self.db_instance_metrics:
                self.instance_metrics[key] = {}
                self.log.debug("Not collecting instance metrics for key: {0} as"\
                    " they are already collected by another instance".format(key))
                return {}

            self.db_instance_metrics.append(sub_key)

            if self._is_9_2_or_above(key, db):
                self.instance_metrics[key] = dict(self.COMMON_METRICS, **self.NEWER_92_METRICS)
            else:
                self.instance_metrics[key] = dict(self.COMMON_METRICS)
            metrics = self.instance_metrics.get(key)
        return metrics

    def _get_bgw_metrics(self, key, db):
        """Use either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS
        depending on the postgres version.
        Uses a dictionary to save the result for each instance
        """
        # Extended 9.2+ metrics if needed
        metrics = self.bgw_metrics.get(key)

        if metrics is None:
            # Hack to make sure that if we have multiple instances that connect to
            # the same host, port, we don't collect metrics twice
            # as it will result in https://github.com/DataDog/dd-agent/issues/1211
            sub_key = key[:2]
            if sub_key in self.db_bgw_metrics:
                self.bgw_metrics[key] = {}
                self.log.debug("Not collecting bgw metrics for key: {0} as"\
                    " they are already collected by another instance".format(key))
                return {}

            self.db_bgw_metrics.append(sub_key)

            self.bgw_metrics[key] = dict(self.COMMON_BGW_METRICS)
            if self._is_9_1_or_above(key, db):
                self.bgw_metrics[key].update(self.NEWER_91_BGW_METRICS)
            if self._is_9_2_or_above(key, db):
                self.bgw_metrics[key].update(self.NEWER_92_BGW_METRICS)
            metrics = self.bgw_metrics.get(key)
        return metrics

    def _get_replication_metrics(self, key, db):
        """ Use either REPLICATION_METRICS_9_1 or REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2
        depending on the postgres version.
        Uses a dictionary to save the result for each instance

        Returns None on pre-9.1 servers (replication stats unavailable).
        """
        metrics = self.replication_metrics.get(key)
        if self._is_9_1_or_above(key, db) and metrics is None:
            self.replication_metrics[key] = dict(self.REPLICATION_METRICS_9_1)
            if self._is_9_2_or_above(key, db):
                self.replication_metrics[key].update(self.REPLICATION_METRICS_9_2)
            metrics = self.replication_metrics.get(key)
        return metrics

    def _collect_stats(self, key, db, instance_tags, relations, custom_metrics):
        """Query pg_stat_* for various metrics
        If relations is not an empty list, gather per-relation metrics
        on top of that.
        If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml

        Raises ShouldRestartException on connection-level failures so the
        caller can reconnect and retry.
        """
        self.DB_METRICS['metrics'] = self._get_instance_metrics(key, db)
        self.BGW_METRICS['metrics'] = self._get_bgw_metrics(key, db)
        metric_scope = [
            self.DB_METRICS,
            self.CONNECTION_METRICS,
            self.BGW_METRICS,
            self.LOCK_METRICS,
            self.COUNT_METRICS
        ]

        # Do we need relation-specific metrics?
        if relations:
            metric_scope += [
                self.REL_METRICS,
                self.IDX_METRICS,
                self.SIZE_METRICS
            ]

        replication_metrics = self._get_replication_metrics(key, db)
        if replication_metrics is not None:
            self.REPLICATION_METRICS['metrics'] = replication_metrics
            metric_scope.append(self.REPLICATION_METRICS)

        full_metric_scope = list(metric_scope) + custom_metrics
        try:
            cursor = db.cursor()

            for scope in full_metric_scope:
                # Replication queries (and anything on pre-9.0) are expected to
                # fail on many setups, so only log failures at debug level.
                if scope == self.REPLICATION_METRICS or not self._is_above(key, db, [9,0,0]):
                    log_func = self.log.debug
                    warning_func = self.log.debug
                else:
                    log_func = self.log.warning
                    warning_func = self.warning

                # build query
                cols = scope['metrics'].keys()  # list of metrics to query, in some order
                # we must remember that order to parse results

                try:
                    # if this is a relation-specific query, we need to list all relations last
                    if scope['relation'] and len(relations) > 0:
                        query = scope['query'] % (", ".join(cols), "%s")  # Keep the last %s intact
                        self.log.debug("Running query: %s with relations: %s" % (query, relations))
                        cursor.execute(query, (relations, ))
                    else:
                        query = scope['query'] % (", ".join(cols))
                        self.log.debug("Running query: %s" % query)
                        # No parameters are passed, so literal % signs must be
                        # escaped for the driver's format paramstyle.
                        cursor.execute(query.replace(r'%', r'%%'))

                    results = cursor.fetchall()
                except ProgrammingError as e:
                    log_func("Not all metrics may be available: %s" % str(e))
                    continue

                if not results:
                    continue

                if scope in custom_metrics and len(results) > MAX_CUSTOM_RESULTS:
                    # Bug fix: the message must be formatted *before* being passed
                    # to warning() - previously .format() was called on warning()'s
                    # return value (None), raising AttributeError.
                    self.warning(
                        "Query: {0} returned more than {1} results ({2}). Truncating".format(
                            query, MAX_CUSTOM_RESULTS, len(results)))
                    results = results[:MAX_CUSTOM_RESULTS]

                if scope == self.DB_METRICS:
                    self.gauge("postgresql.db.count", len(results),
                        tags=[t for t in instance_tags if not t.startswith("db:")])

                # parse & submit results
                # A row should look like this
                # (descriptor, descriptor, ..., value, value, value, value, ...)
                # with descriptor a PG relation or index name, which we use to create the tags
                for row in results:
                    # turn descriptors into tags
                    desc = scope['descriptors']
                    # Check that all columns will be processed
                    assert len(row) == len(cols) + len(desc)

                    # Build tags
                    # descriptors are: (pg_name, dd_tag_name): value
                    # Special-case the "db" tag, which overrides the one that is passed as instance_tag
                    # The reason is that pg_stat_database returns all databases regardless of the
                    # connection.
                    if not scope['relation']:
                        tags = [t for t in instance_tags if not t.startswith("db:")]
                    else:
                        tags = [t for t in instance_tags]

                    tags += ["%s:%s" % (d[0][1], d[1]) for d in zip(desc, row[:len(desc)])]

                    # [(metric-map, value), (metric-map, value), ...]
                    # metric-map is: (dd_name, "rate"|"gauge")
                    # shift the results since the first columns will be the "descriptors"
                    values = zip([scope['metrics'][c] for c in cols], row[len(desc):])

                    # Submit each value with its bound method:
                    # (metric_name, submit_function), actual_value
                    for (name, submit), value in values:
                        submit(self, name, value, tags=tags)

            cursor.close()
        except InterfaceError as e:
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException
        except socket.error as e:
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException

    def _get_service_check_tags(self, host, port, dbname):
        """Build the standard host/port/db tag list for service checks."""
        service_check_tags = [
            "host:%s" % host,
            "port:%s" % port,
            "db:%s" % dbname
        ]
        return service_check_tags

    def get_connection(self, key, host, port, user, password, dbname, use_cached=True):
        "Get and memoize connections to instances"
        if key in self.dbs and use_cached:
            return self.dbs[key]

        elif host != "" and user != "":
            try:
                if host == 'localhost' and password == '':
                    # Use ident method
                    connection = pg.connect("user=%s dbname=%s" % (user, dbname))
                elif port != '':
                    connection = pg.connect(host=host, port=port, user=user,
                        password=password, database=dbname)
                else:
                    connection = pg.connect(host=host, user=user, password=password,
                        database=dbname)
            except Exception as e:
                message = u'Error establishing postgres connection: %s' % (str(e))
                service_check_tags = self._get_service_check_tags(host, port, dbname)
                self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                    tags=service_check_tags, message=message)
                raise
        else:
            if not host:
                raise CheckException("Please specify a Postgres host to connect to.")
            elif not user:
                raise CheckException("Please specify a user to connect to Postgres as.")

        self.dbs[key] = connection
        return connection

    def _process_customer_metrics(self, custom_metrics):
        """Validate custom metric definitions and resolve their collector methods.

        NOTE(review): the name looks like a typo for `_process_custom_metrics`,
        but it is kept for backward compatibility.
        """
        required_parameters = ("descriptors", "metrics", "query", "relation")

        for m in custom_metrics:
            for param in required_parameters:
                if param not in m:
                    raise CheckException("Missing {0} parameter in custom metric"\
                        .format(param))
            self.log.debug("Metric: {0}".format(m))

            for k, v in m['metrics'].items():
                if v[1].upper() not in ['RATE', 'GAUGE', 'MONOTONIC']:
                    raise CheckException("Collector method {0} is not known."\
                        "Known methods are RATE,GAUGE,MONOTONIC".format(
                        v[1].upper()))

                # Replace the method name string with the bound collector
                # (assumes v is a mutable list as parsed from yaml).
                m['metrics'][k][1] = getattr(PostgreSql, v[1].upper())
                self.log.debug("Method: %s" % (str(v[1])))

    def check(self, instance):
        host = instance.get('host', '')
        port = instance.get('port', '')
        user = instance.get('username', '')
        password = instance.get('password', '')
        tags = instance.get('tags', [])
        dbname = instance.get('dbname', None)
        relations = instance.get('relations', [])
        custom_metrics = instance.get('custom_metrics') or []
        self._process_customer_metrics(custom_metrics)

        if relations and not dbname:
            self.warning('"dbname" parameter must be set when using the "relations" parameter.')

        if dbname is None:
            dbname = 'postgres'

        key = (host, port, dbname)

        # Clean up tags in case there was a None entry in the instance
        # e.g. if the yaml contains tags: but no actual tags
        if tags is None:
            tags = []
        else:
            tags = list(set(tags))

        # preset tags to the database name
        tags.extend(["db:%s" % dbname])

        self.log.debug("Custom metrics: %s" % custom_metrics)

        db = None

        # Collect metrics
        try:
            # Check version
            db = self.get_connection(key, host, port, user, password, dbname)
            version = self._get_version(key, db)
            self.log.debug("Running check against version %s" % version)
            self._collect_stats(key, db, tags, relations, custom_metrics)
        except ShouldRestartException:
            # The cached connection died; reconnect once and retry.
            self.log.info("Resetting the connection")
            db = self.get_connection(key, host, port, user, password, dbname, use_cached=False)
            self._collect_stats(key, db, tags, relations, custom_metrics)

        if db is not None:
            service_check_tags = self._get_service_check_tags(host, port, dbname)
            message = u'Established connection to postgres://%s:%s/%s' % (host, port, dbname)
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                tags=service_check_tags, message=message)
            try:
                # commit to close the current query transaction
                db.commit()
            except Exception as e:
                self.log.warning("Unable to commit: {0}".format(e))
| |
"""
This module contains tests for tofu.geom in its structured version
"""
# External modules
import os
import itertools as itt
import numpy as np
import matplotlib.pyplot as plt
import warnings as warn
# Importing package tofu.gem
import tofu as tf
from tofu import __version__
import tofu.defaults as tfd
import tofu.utils as tfu
import tofu.geom as tfg
_here = os.path.abspath(os.path.dirname(__file__))
_PATH_DATA = os.path.join(_here, 'test_03_core_data')
VerbHead = 'tofu.geom.test_04_core_optics'
keyVers = 'Vers'
_Exp = 'WEST'
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module(module):
    """Remove test files saved by previous (different) tofu versions."""
    print("")  # this is to get a newline after the dots
    candidates = [
        fname for fname in os.listdir(_here)
        if all(tag in fname for tag in ['TFG_', _Exp, '.npz'])
    ]
    stale = []
    for fname in candidates:
        parts = fname.split('_')
        vers = [part[len(keyVers):] for part in parts
                if part[:len(keyVers)] == keyVers]
        msg = fname + "\n    " + str(parts) + "\n    " + str(vers)
        assert len(vers) == 1, msg
        vers = vers[0]
        if '.npz' in vers:
            vers = vers[:vers.index('.npz')]
        # Keep only files written by another tofu version.
        if vers != __version__:
            stale.append(fname)
    if len(stale) > 0:
        print("Removing the following previous test files:")
        for fname in stale:
            os.remove(os.path.join(_here, fname))
def teardown_module(module):
    """Remove test files saved by the current tofu version during this run."""
    candidates = [
        fname for fname in os.listdir(_here)
        if all(tag in fname for tag in ['TFG_', _Exp, '.npz'])
    ]
    created = []
    for fname in candidates:
        parts = fname.split('_')
        vers = [part[len(keyVers):] for part in parts
                if part[:len(keyVers)] == keyVers]
        msg = fname + "\n    " + str(parts) + "\n    " + str(vers)
        assert len(vers) == 1, msg
        vers = vers[0]
        if '.npz' in vers:
            vers = vers[:vers.index('.npz')]
        # Keep only files written by this tofu version (i.e. this test run).
        if vers == __version__:
            created.append(fname)
    if len(created) > 0:
        print("Removing the following test files:")
        for fname in created:
            os.remove(os.path.join(_here, fname))
#######################################################
#
# Crystal class
#
#######################################################
class Test01_Crystal(object):
    """Test suite for tfg.CrystalBragg.

    Four crystals share the same spherical geometry and quartz material but
    differ in non-parallelism: cryst1 (alpha/beta unset), cryst2
    (alpha = beta = 0), cryst3 (alpha != 0), cryst4 (alpha and beta != 0).
    """

    @classmethod
    def setup_class(cls, verb=False):
        # Prepare input
        dgeom = {
            'Type': 'sph',
            'Typeoutline': 'rect',
            'summit': np.array([4.6497750e-01, -8.8277925e+00, 3.5125000e-03]),
            'center': np.array([1.560921, -6.31106476, 0.00729429]),
            'extenthalf': np.array([0.01457195, 0.01821494]),
            'rcurve': 2.745,
            'move': 'rotate_around_3daxis',
            'move_param': 0.022889993139905633,
            'move_kwdargs': {
                'axis': np.array([
                    [4.95e-01, -8.95e+00, -8.63e-02],
                    [-1.37e-04, -2.18e-03, 9.99e-01],
                ])
            }
        }
        dmat = {
            'formula': 'Quartz',
            'density': 2.6576,
            'symmetry': 'hexagonal',
            'lengths': np.array([4.9079e-10, 4.9079e-10, 5.3991e-10]),
            'angles': np.array([1.57079633, 1.57079633, 2.0943951]),
            'cut': np.array([1, 1, -2, 0]),
            'd': 2.4539499999999996e-10,
        }
        dbragg = {
            'lambref': 3.96e-10,
        }
        dmat1 = dict(dmat)
        dmat2 = dict(dmat)
        dmat3 = dict(dmat)

        # cryst1: alpha/beta keys left unset
        cryst1 = tfg.CrystalBragg(
            dgeom=dgeom,
            dmat=dmat1,
            dbragg=dbragg,
            Name='Cryst1',
            Diag='SpectrX2D',
            Exp='WEST',
        )

        # cryst2: explicitly parallel (alpha = beta = 0)
        dmat2['alpha'] = 0.
        dmat2['beta'] = 0.
        cryst2 = tfg.CrystalBragg(
            dgeom=dgeom,
            dmat=dmat2,
            dbragg=dbragg,
            Name='Cryst2',
            Diag='SpectrX2D',
            Exp='WEST',
        )

        # cryst3: small non-parallelism angle alpha (3 arcmin)
        dmat3['alpha'] = (3/60)*np.pi/180
        dmat3['beta'] = 0.
        cryst3 = tfg.CrystalBragg(
            dgeom=dgeom,
            dmat=dmat3,
            dbragg=dbragg,
            Name='Cryst3',
            Diag='SpectrX2D',
            Exp='WEST',
        )

        # cryst4: non-zero alpha and beta
        dmat['alpha'] = (3/60)*np.pi/180
        dmat['beta'] = np.pi/1000.
        cryst4 = tfg.CrystalBragg(
            dgeom=dgeom,
            dmat=dmat,
            dbragg=dbragg,
            Name='Cryst4',
            Diag='SpectrX2D',
            Exp='WEST',
        )

        cls.dobj = {
            'cryst1': cryst1,
            'cryst2': cryst2,
            'cryst3': cryst3,
            'cryst4': cryst4,
        }
        # Detector pixel coordinate samples shared by several tests.
        cls.xi = 0.05*np.linspace(-1, 1, 100)
        cls.xj = 0.10*np.linspace(-1, 1, 200)

    @classmethod
    def teardown_class(cls):
        pass

    def setup(self):
        pass

    def teardown(self):
        pass

    def test01_todict(self):
        """to_dict() must return a plain dict for every crystal."""
        for k0 in self.dobj.keys():
            dd = self.dobj[k0].to_dict()
            assert type(dd) is dict

    def test02_fromdict(self):
        """Rebuilding from to_dict() output must yield the same class."""
        for k0 in self.dobj.keys():
            dd = self.dobj[k0].to_dict()
            obj = tfg.CrystalBragg(fromdict=dd)
            assert isinstance(obj, self.dobj[k0].__class__)

    def test03_copy_equal(self):
        """A copy must compare equal (and not unequal) to the original."""
        for k0 in self.dobj.keys():
            obj = self.dobj[k0].copy()
            assert obj == self.dobj[k0]
            assert not obj != self.dobj[k0]

    def test04_get_nbytes(self):
        """get_nbytes() must run and return a (total, detail) pair."""
        for k0 in self.dobj.keys():
            nb, dnb = self.dobj[k0].get_nbytes()

    def test05_strip_nbytes(self, verb=False):
        """Stripping must monotonically reduce the memory footprint."""
        lok = tfg.CrystalBragg._dstrip['allowed']
        nb = np.full((len(lok),), np.nan)
        for k0, obj in self.dobj.items():
            for ii in lok:
                obj.strip(ii)
                nb[ii] = obj.get_nbytes()[0]
            assert np.all(np.diff(nb) <= 0.)
            # Restore the object for the subsequent tests.
            for ii in lok[::-1]:
                obj.strip(ii)

    def test06_set_move_None(self):
        pass

    def test07_rotate_copy(self):
        pass

    def test08_get_detector_approx(self):
        """Non-parallelism must change the detector only for cryst3/cryst4."""
        for k0, obj in self.dobj.items():
            det0 = obj.get_detector_approx(use_non_parallelism=False)
            det1 = obj.get_detector_approx(use_non_parallelism=True)
            # Check both results (the original asserted det0 twice).
            assert isinstance(det0, dict) and isinstance(det1, dict)
            lk = ['nout', 'ei']
            assert all([ss in det0.keys() for ss in lk])
            assert all([ss in det1.keys() for ss in lk])
            if k0 in ['cryst1', 'cryst2']:
                # Parallel crystals: the flag must be a no-op.
                assert all([
                    np.allclose(det0[kk], det1[kk])
                    for kk in lk
                ])
            elif k0 in ['cryst3', 'cryst4']:
                assert not any([
                    np.allclose(det0[kk], det1[kk])
                    for kk in lk
                ])
            # Even when different, the effect must stay small.
            for k1, v1 in det0.items():
                assert np.linalg.norm(v1 - det1[k1]) <= 0.01

    def test09_plot(self):
        """Plot rays/detector for each crystal, alternating the coloring."""
        ii = 0
        for k0, obj in self.dobj.items():
            det = obj.get_detector_approx()
            det['outline'] = np.array([
                0.1*np.r_[-1, 1, 1, -1, -1],
                0.1*np.r_[-1, -1, 1, 1, -1],
            ])
            pts, vect = obj.get_rays_from_cryst(
                phi=np.pi, returnas='(pts, vect)',
            )
            dist = obj.get_rowland_dist_from_lambbragg()
            pts = pts + dist*np.r_[0.5, 1., 2][None, :]*vect[:, 0:1, 0]
            # Wavelengths offset by -1, 0, 1, 2 pm around the reference
            # (fixed from "*1-12", which evaluated to "value minus 12"
            # instead of the intended 1e-12 m scaling).
            lamb = obj.dbragg['lambref'] + np.r_[-1, 0, 1, 2]*1e-12
            dax = obj.plot(
                pts=pts,
                lamb=lamb,
                det=det,
                rays_color='pts' if ii % 2 == 0 else 'lamb',
            )
            ii += 1
        plt.close('all')

    def test10_get_lamb_avail_from_pts(self):
        """Available wavelengths from points at several distances / samplings."""
        for k0, obj in self.dobj.items():
            det = obj.get_detector_approx()
            pts, vect = obj.get_rays_from_cryst(
                phi=-9*np.pi/10., returnas='(pts, vect)',
            )
            dist = obj.get_rowland_dist_from_lambbragg()
            pts = pts + dist*np.r_[0.5, 1., 2][None, :]*vect[:, :, 0]
            lamb, phi, dtheta, psi, xi, xj = obj.get_lamb_avail_from_pts(
                pts=pts, det=det,
            )
            pts = pts + np.r_[7.5][None, :]*vect[:, :, 0]
            lamb, phi, dtheta, psi, xi, xj = obj.get_lamb_avail_from_pts(
                pts=pts, det=det,
            )
            # Also exercise strict mode on a sampled WEST vessel volume.
            conf = tf.load_config('WEST-V0')
            pts, dv, ind, res_eff = conf.Ves.V1.get_sampleV(
                res=0.3,
                domain=[None, None, [-np.pi, -np.pi/2.]],
            )
            lamb, phi, dtheta, psi, xi, xj = obj.get_lamb_avail_from_pts(
                pts=pts, det=det, strict=True,
            )

    def test11_calc_johann_error(self):
        for k0, obj in self.dobj.items():
            det = obj.get_detector_approx()
            err_lamb, err_phi, _, _, _ = obj.calc_johannerror(
                xi=self.xi,
                xj=self.xj,
                det=det,
            )

    def test12_plot_line_on_det_tracing(self):
        for k0, obj in self.dobj.items():
            det = obj.get_detector_approx()
            det['outline'] = np.array([
                0.1*np.r_[-1, 1, 1, -1, -1],
                0.1*np.r_[-1, -1, 1, 1, -1],
            ])
            dax = obj.plot_line_on_det_tracing(det=det)

    def test13_calc_meridional_sagital_focus(self):
        """Focal distances must (dis)agree depending on non-parallelism."""
        derr = {}
        for k0, obj in self.dobj.items():
            # Without non-parallelism the two estimates must coincide.
            out = obj.calc_meridional_sagital_focus(
                use_non_parallelism=False,
                verb=False,
            )
            c0 = round(out[0], ndigits=12) == round(out[2], ndigits=12)
            c1 = round(out[1], ndigits=12) == round(out[3], ndigits=12)
            if not c0:
                derr[k0] = f'Meridional ({out[0]} vs {out[2]})'
                if not c1:
                    derr[k0] += f' + Sagital ({out[1]} vs {out[3]})'
                derr[k0] += ' focus wrong'
            elif not c1:
                derr[k0] = f'Sagital ({out[1]} vs {out[3]}) focus wrong'
            # With non-parallelism (alpha != 0) they must differ.
            if obj.dmat['alpha'] != 0.0:
                out = obj.calc_meridional_sagital_focus(
                    use_non_parallelism=True,
                    verb=False,
                )
                c0 = round(out[0], ndigits=12) != round(out[2], ndigits=12)
                c1 = round(out[1], ndigits=12) != round(out[3], ndigits=12)
                if not c0:
                    derr[k0] = f'Meridional ({out[0]} vs {out[2]})'
                    if not c1:
                        derr[k0] += f' + Sagital ({out[1]} vs {out[3]})'
                    derr[k0] += ' focus wrong'
                elif not c1:
                    derr[k0] = f'Sagital ({out[1]} vs {out[3]}) focus wrong'
        if len(derr) > 0:
            lstr = [f'\t- {k0}: {v0}' for k0, v0 in derr.items()]
            msg = (
                "The following crystals have wrong focus:\n"
                + "\n".join(lstr)
            )
            raise Exception(msg)

    def test14_plot_focal_error_summed(self):
        det = dict(np.load(
            os.path.join(_PATH_DATA, 'det37_CTVD_incC4_New.npz'),
            allow_pickle=True,
        ))
        for k0, obj in self.dobj.items():
            out = obj.plot_focal_error_summed(
                dist_min=-0.02, dist_max=0.02, ndist=5,
                di_min=-0.02, di_max=0.02, ndi=5,
                xi=self.xi[::20], xj=self.xj[::20],
                use_non_parallelism=False,
                det_ref=det,
                plot_dets=True,
            )
            plt.close('all')

    def test15_split(self):
        for ii, (k0, obj) in enumerate(self.dobj.items()):
            direction = None if ii == 0 else ('e1' if ii % 2 == 0 else 'e2')
            nb = None if ii == 0 else (2 if ii % 2 == 0 else 3)
            lcryst = obj.split(direction=direction, nb=nb)

    def test16_saveload(self, verb=False):
        """Round-trip save/load must preserve equality."""
        for k0, obj in self.dobj.items():
            obj.strip(-1)
            pfe = obj.save(verb=verb, return_pfe=True)
            obj2 = tf.load(pfe, verb=verb)
            msg = "Unequal saved / loaded objects !"
            assert obj2 == obj, msg
            # Just to check the loaded version works fine
            obj2.strip(0)
            os.remove(pfe)
| |
#!/usr/bin/env python2.7
from PyQt4 import QtCore, QtGui
class NewAddressWizardIntroPage(QtGui.QWizardPage):
    """First wizard page: choose an email-capable or a BM-only address."""

    def __init__(self):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs; naming the parent class
        # skips it in the MRO.
        super(NewAddressWizardIntroPage, self).__init__()
        self.setTitle("Creating a new address")

        label = QtGui.QLabel("This wizard will help you create as many addresses as you like. Indeed, creating and abandoning addresses is encouraged.\n\n"
                             "What type of address would you like? Would you like to send emails or not?\n"
                             "You can still change your mind later, and register/unregister with an email service provider.\n\n")
        label.setWordWrap(True)

        self.emailAsWell = QtGui.QRadioButton("Combined email and bitmessage address")
        self.onlyBM = QtGui.QRadioButton("Bitmessage-only address (no email)")
        self.emailAsWell.setChecked(True)
        # Expose the choices as wizard-wide fields for later pages.
        self.registerField("emailAsWell", self.emailAsWell)
        self.registerField("onlyBM", self.onlyBM)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(label)
        layout.addWidget(self.emailAsWell)
        layout.addWidget(self.onlyBM)
        self.setLayout(layout)

    def nextId(self):
        # 4 = email provider page, 1 = random/passphrase choice page.
        if self.emailAsWell.isChecked():
            return 4
        else:
            return 1
class NewAddressWizardRngPassphrasePage(QtGui.QWizardPage):
    """Page 1: choose between a random-number and a passphrase address."""

    def __init__(self):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs.
        super(NewAddressWizardRngPassphrasePage, self).__init__()
        self.setTitle("Random or Passphrase")

        # NOTE: the inner quotes around "deterministic" must be escaped —
        # unescaped they terminate the string literal (SyntaxError).
        label = QtGui.QLabel("<html><head/><body><p>You may generate addresses by using either random numbers or by using a passphrase. "
                             "If you use a passphrase, the address is called a \"deterministic\" address. "
                             "The \'Random Number\' option is selected by default but deterministic addresses have several pros and cons:</p>"
                             "<table border=0><tr><td><span style=\" font-weight:600;\">Pros:</span></td><td><span style=\" font-weight:600;\">Cons:</span></td></tr>"
                             "<tr><td>You can recreate your addresses on any computer from memory. "
                             "You need-not worry about backing up your keys.dat file as long as you can remember your passphrase.</td>"
                             "<td>You must remember (or write down) your passphrase if you expect to be able "
                             "to recreate your keys if they are lost. "
                             # "You must remember the address version number and the stream number along with your passphrase. "
                             "If you choose a weak passphrase and someone on the Internet can brute-force it, they can read your messages and send messages as you."
                             "</p></body></html>")
        label.setWordWrap(True)

        self.randomAddress = QtGui.QRadioButton("Use a random number generator to make an address")
        self.deterministicAddress = QtGui.QRadioButton("Use a passphrase to make an address")
        self.randomAddress.setChecked(True)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(label)
        layout.addWidget(self.randomAddress)
        layout.addWidget(self.deterministicAddress)
        self.setLayout(layout)

    def nextId(self):
        # 2 = random page, 3 = passphrase page.
        if self.randomAddress.isChecked():
            return 2
        else:
            return 3
class NewAddressWizardRandomPage(QtGui.QWizardPage):
    """Page 2: configure a randomly generated address (label, stream, vanity)."""

    def __init__(self, addresses):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs.
        super(NewAddressWizardRandomPage, self).__init__()
        self.setTitle("Random")

        label = QtGui.QLabel("Random address.")
        label.setWordWrap(True)

        labelLabel = QtGui.QLabel("Label (not shown to anyone except you):")
        self.labelLineEdit = QtGui.QLineEdit()

        self.radioButtonMostAvailable = QtGui.QRadioButton("Use the most available stream\n"
                                                           "(best if this is the first of many addresses you will create)")
        self.radioButtonExisting = QtGui.QRadioButton("Use the same stream as an existing address\n"
                                                      "(saves you some bandwidth and processing power)")
        self.radioButtonMostAvailable.setChecked(True)

        self.comboBoxExisting = QtGui.QComboBox()
        self.comboBoxExisting.setEnabled(False)
        self.comboBoxExisting.setEditable(True)
        for address in addresses:
            self.comboBoxExisting.addItem(address)

        self.checkBoxEighteenByteRipe = QtGui.QCheckBox("Spend several minutes of extra computing time to make the address(es) 1 or 2 characters shorter")

        layout = QtGui.QGridLayout()
        layout.addWidget(label, 0, 0)
        layout.addWidget(labelLabel, 1, 0)
        layout.addWidget(self.labelLineEdit, 2, 0)
        layout.addWidget(self.radioButtonMostAvailable, 3, 0)
        layout.addWidget(self.radioButtonExisting, 4, 0)
        layout.addWidget(self.comboBoxExisting, 5, 0)
        layout.addWidget(self.checkBoxEighteenByteRipe, 6, 0)
        self.setLayout(layout)

        # The combo box is only usable when "existing stream" is selected.
        QtCore.QObject.connect(self.radioButtonExisting, QtCore.SIGNAL("toggled(bool)"), self.comboBoxExisting.setEnabled)

        self.registerField("label", self.labelLineEdit)
        self.registerField("radioButtonMostAvailable", self.radioButtonMostAvailable)
        self.registerField("radioButtonExisting", self.radioButtonExisting)
        self.registerField("comboBoxExisting", self.comboBoxExisting)

    def nextId(self):
        # Go straight to the wait page.
        return 6
class NewAddressWizardPassphrasePage(QtGui.QWizardPage):
    """Page 3: configure a deterministic (passphrase-derived) address."""

    def __init__(self):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs.
        super(NewAddressWizardPassphrasePage, self).__init__()
        self.setTitle("Passphrase")

        # Fixed user-facing typo: was "Deterministric address."
        label = QtGui.QLabel("Deterministic address.")
        label.setWordWrap(True)

        passphraseLabel = QtGui.QLabel("Passphrase")
        self.lineEditPassphrase = QtGui.QLineEdit()
        self.lineEditPassphrase.setEchoMode(QtGui.QLineEdit.Password)
        # Hide the passphrase from input helpers as well.
        self.lineEditPassphrase.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText)

        retypePassphraseLabel = QtGui.QLabel("Retype passphrase")
        self.lineEditPassphraseAgain = QtGui.QLineEdit()
        self.lineEditPassphraseAgain.setEchoMode(QtGui.QLineEdit.Password)

        numberLabel = QtGui.QLabel("Number of addresses to make based on your passphrase:")
        self.spinBoxNumberOfAddressesToMake = QtGui.QSpinBox()
        self.spinBoxNumberOfAddressesToMake.setMinimum(1)
        self.spinBoxNumberOfAddressesToMake.setProperty("value", 8)

        label2 = QtGui.QLabel("In addition to your passphrase, you must remember these numbers:")
        label3 = QtGui.QLabel("Address version number: 4")
        label4 = QtGui.QLabel("Stream number: 1")

        layout = QtGui.QGridLayout()
        layout.addWidget(label, 0, 0, 1, 4)
        layout.addWidget(passphraseLabel, 1, 0, 1, 4)
        layout.addWidget(self.lineEditPassphrase, 2, 0, 1, 4)
        layout.addWidget(retypePassphraseLabel, 3, 0, 1, 4)
        layout.addWidget(self.lineEditPassphraseAgain, 4, 0, 1, 4)
        layout.addWidget(numberLabel, 5, 0, 1, 3)
        layout.addWidget(self.spinBoxNumberOfAddressesToMake, 5, 3)
        layout.setColumnMinimumWidth(3, 1)
        layout.addWidget(label2, 6, 0, 1, 4)
        layout.addWidget(label3, 7, 0, 1, 2)
        layout.addWidget(label4, 7, 2, 1, 2)
        self.setLayout(layout)

    def nextId(self):
        # Go straight to the wait page.
        return 6
class NewAddressWizardEmailProviderPage(QtGui.QWizardPage):
    """Page 4: pick an email gateway (only Mailchuck is available)."""

    def __init__(self):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs.
        super(NewAddressWizardEmailProviderPage, self).__init__()
        self.setTitle("Choose email provider")

        label = QtGui.QLabel("Currently only Mailchuck email gateway is available "
                             "(@mailchuck.com email address). In the future, maybe other gateways will be available. "
                             "Press Next.")
        label.setWordWrap(True)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(label)
        self.setLayout(layout)

    def nextId(self):
        # 5 = email address page.
        return 5
class NewAddressWizardEmailAddressPage(QtGui.QWizardPage):
    """Page 5: pick a specific @mailchuck.com address or a random one."""

    def __init__(self):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs.
        super(NewAddressWizardEmailAddressPage, self).__init__()
        self.setTitle("Email address")

        label = QtGui.QLabel("Choosing an email address. Address must end with @mailchuck.com")
        label.setWordWrap(True)

        self.specificEmail = QtGui.QRadioButton("Pick your own email address:")
        self.specificEmail.setChecked(True)
        self.emailLineEdit = QtGui.QLineEdit()
        self.randomEmail = QtGui.QRadioButton("Generate a random email address")
        # Only allow typing an address while "pick your own" is selected.
        QtCore.QObject.connect(self.specificEmail, QtCore.SIGNAL("toggled(bool)"), self.emailLineEdit.setEnabled)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(label)
        layout.addWidget(self.specificEmail)
        layout.addWidget(self.emailLineEdit)
        layout.addWidget(self.randomEmail)
        self.setLayout(layout)

    def nextId(self):
        # Go straight to the wait page.
        return 6
class NewAddressWizardWaitPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("Wait")
self.label = QtGui.QLabel("Wait!")
self.label.setWordWrap(True)
self.progressBar = QtGui.QProgressBar()
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(100)
self.progressBar.setValue(0)
# self.emailAsWell = QtGui.QRadioButton("Combined email and bitmessage account")
# self.onlyBM = QtGui.QRadioButton("Bitmessage-only account (no email)")
# self.emailAsWell.setChecked(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.progressBar)
# layout.addWidget(self.emailAsWell)
# layout.addWidget(self.onlyBM)
self.setLayout(layout)
def update(self, i):
if i == 101 and self.wizard().currentId() == 6:
self.wizard().button(QtGui.QWizard.NextButton).click()
return
elif i == 101:
print "haha"
return
self.progressBar.setValue(i)
if i == 50:
self.emit(QtCore.SIGNAL('completeChanged()'))
def isComplete(self):
# print "val = " + str(self.progressBar.value())
if self.progressBar.value() >= 50:
return True
else:
return False
def initializePage(self):
if self.field("emailAsWell").toBool():
val = "yes/"
else:
val = "no/"
if self.field("onlyBM").toBool():
val += "yes"
else:
val += "no"
self.label.setText("Wait! " + val)
# self.wizard().button(QtGui.QWizard.NextButton).setEnabled(False)
self.progressBar.setValue(0)
self.thread = NewAddressThread()
self.connect(self.thread, self.thread.signal, self.update)
self.thread.start()
def nextId(self):
return 10
class NewAddressWizardConclusionPage(QtGui.QWizardPage):
    """Final page: confirm the address was created."""

    def __init__(self):
        # Name this class (not its parent) in super() so that
        # QWizardPage.__init__ actually runs.
        super(NewAddressWizardConclusionPage, self).__init__()
        self.setTitle("All done!")

        label = QtGui.QLabel("You successfully created a new address.")
        label.setWordWrap(True)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(label)
        self.setLayout(layout)
class Ui_NewAddressWizard(QtGui.QWizard):
    """The new-address wizard: wires all pages together by id.

    Page ids: 0 intro, 1 rng/passphrase choice, 2 random, 3 passphrase,
    4 email provider, 5 email address, 6 wait, 10 conclusion.
    """

    def __init__(self, addresses):
        # Name this class (not its parent) in super() so that
        # QWizard.__init__ actually runs.
        super(Ui_NewAddressWizard, self).__init__()

        self.pages = {}  # kept for interface compatibility; never populated here

        page = NewAddressWizardIntroPage()
        self.setPage(0, page)
        self.setStartId(0)
        page = NewAddressWizardRngPassphrasePage()
        self.setPage(1, page)
        page = NewAddressWizardRandomPage(addresses)
        self.setPage(2, page)
        page = NewAddressWizardPassphrasePage()
        self.setPage(3, page)
        page = NewAddressWizardEmailProviderPage()
        self.setPage(4, page)
        page = NewAddressWizardEmailAddressPage()
        self.setPage(5, page)
        page = NewAddressWizardWaitPage()
        self.setPage(6, page)
        page = NewAddressWizardConclusionPage()
        self.setPage(10, page)

        self.setWindowTitle("New address wizard")
        self.adjustSize()
        self.show()
class NewAddressThread(QtCore.QThread):
    # Background worker driving the wait page's progress bar.
    # The create*/broadcast/register steps below are unimplemented stubs;
    # run() currently only emits progress ticks (1..100, then 101 = done).
    def __init__(self):
        QtCore.QThread.__init__(self)
        # Custom Qt signal used to report progress to the wait page.
        self.signal = QtCore.SIGNAL("signal")
    def __del__(self):
        # Block until the thread finishes before the QThread is destroyed.
        self.wait()
    def createDeterministic(self):
        pass
    def createPassphrase(self):
        pass
    def broadcastAddress(self):
        pass
    def registerMailchuck(self):
        pass
    def waitRegistration(self):
        pass
    def run(self):
        # Placeholder implementation: ~10 s of simulated work, one tick per
        # 0.1 s, followed by the 101 "done" sentinel.
        import time
        for i in range(1, 101):
            time.sleep(0.1) # artificial time delay
            self.emit(self.signal, i)
        self.emit(self.signal, 101)
        # self.terminate()
if __name__ == '__main__':
    # Standalone demo: run the wizard with a few dummy existing addresses and
    # print the selected options (Python 2 print statements).
    import sys
    app = QtGui.QApplication(sys.argv)
    wizard = Ui_NewAddressWizard(["a", "b", "c", "d"])
    if (wizard.exec_()):
        print "Email: " + ("yes" if wizard.field("emailAsWell").toBool() else "no")
        print "BM: " + ("yes" if wizard.field("onlyBM").toBool() else "no")
    else:
        print "Wizard cancelled"
    sys.exit()
| |
from __future__ import division, print_function
""" gotetra.py provides functions for interacting with gotetra output files.
When run as a script it will print the header information of the target
gotetra output file to stdout.
Supported functions:
read_header(filename) -> gotetra.Header
read_grid(filename) -> numpy.ndarray
Supported classes:
Header
TypeInfo
CosmoInfo
RenderInfo
LocationInfo
VelocityInfo
"""
import array
import struct
import sys
import numpy as np
# Grid-type flags stored in a gotetra file's TypeInfo.grid_type field.
# DENSITY and VELOCITY_DIVERGENCE are scalar fields; the others are vector
# fields (see TypeInfo.is_vector_grid).
DENSITY = 0
DENSITY_GRADIENT = 1
VELOCITY = 2
VELOCITY_DIVERGENCE = 3
VELOCITY_CURL = 4
class Sizes(object):
    """Byte sizes of the header sections for a given gotetra format version.

    Version 2 adds a velocity_info section (and a larger total header);
    unknown versions abort the process, matching the original behavior.
    """

    def __init__(self, ver):
        if ver not in (1, 2):
            print("Unrecognized gotetra output version, %d." % ver)
            exit(1)
        self.header = 232 if ver == 1 else 336
        self.type_info = 24
        self.cosmo_info = 64
        self.render_info = 40
        self.location_info = 104
        if ver == 2:
            # Only version-2 files carry a velocity block.
            self.velocity_info = 104
def _read_endianness_version(s):
flag = struct.unpack("q", s)
ver = flag[0] & 0xffffffff
end = -1 if ver >> 31 != 0 else 0
ver = 1 + (~ver & 0x00000000ffffffff) if end else ver
return end, ver
def _read_endianness_flag(s):
    """Return only the endianness flag decoded from the 8-byte file flag."""
    end, _ = _read_endianness_version(s)
    return end
def _read_version(s):
    """Return only the format version decoded from the 8-byte file flag."""
    _, ver = _read_endianness_version(s)
    return ver
def read_header(filename):
    """ read_header returns the header information at the top of a gotetra
    output file as a Header object.
    """
    # Open in binary mode: the header is packed binary data, and text mode
    # ("r") would decode/corrupt it and return str instead of bytes on
    # Python 3 (read_grid already uses "rb").
    with open(filename, "rb") as fp:
        flag_s = fp.read(8)
        ver = _read_version(flag_s)
        sizes = Sizes(ver)
        s = fp.read(sizes.header)
    return Header(flag_s + s, sizes)
def read_grid(filename):
    """ read_grid returns the grid data stored in a gotetra output file as a
    3D numpy array if the file represents a scalar field or as a 3-tuple of
    3D numpy arrays if the file represents a vector field.
    The numpy arrays use a C-like element order, meaning the last index
    corresponds to the x-coordinate.
    """
    hd = read_header(filename)
    def maybe_swap(xs):
        # Byte-swap xs in place only when the file's endianness differs
        # from the host's; otherwise leave the buffer untouched.
        endianness = sys.byteorder
        if endianness == "little" and hd.type.endianness_flag == -1:
            return
        elif endianness == "big" and hd.type.endianness_flag == 0:
            return
        xs.byteswap()
    # n = number of float32 values per stored component: the product of the
    # grid dimensions, excluding the projected axis (if any).
    n = 1
    for i in range(3):
        if i != hd.axis: n *= hd.dim[i]
    # j, k = the two in-plane axis indices of a projected (2D) grid; they are
    # only defined for hd.axis in {0, 1, 2} and only used in the 2D branches.
    if hd.axis == 0: j, k = 1, 2
    if hd.axis == 1: j, k = 0, 2
    if hd.axis == 2: j, k = 0, 1
    if hd.type.is_vector_grid:
        # Vector field: three consecutive component blocks (x, then y, then z).
        xs, ys, zs = array.array("f"), array.array("f"), array.array("f")
        with open(filename, "rb") as fp:
            # Skip the 8-byte flag plus the packed header.
            fp.read(hd.sizes.header + 8)
            xs.fromfile(fp, n)
            ys.fromfile(fp, n)
            zs.fromfile(fp, n)
        maybe_swap(xs)
        maybe_swap(ys)
        maybe_swap(zs)
        if hd.axis == -1:
            # Unprojected: full 3D grid, C order (z, y, x).
            xs = np.reshape(xs, (hd.dim[2], hd.dim[1], hd.dim[0]))
            ys = np.reshape(ys, (hd.dim[2], hd.dim[1], hd.dim[0]))
            zs = np.reshape(zs, (hd.dim[2], hd.dim[1], hd.dim[0]))
        else:
            # Projected: 2D grid over the two remaining axes.
            xs = np.reshape(xs, (hd.dim[k], hd.dim[j]))
            ys = np.reshape(ys, (hd.dim[k], hd.dim[j]))
            zs = np.reshape(zs, (hd.dim[k], hd.dim[j]))
        return np.array([xs, ys, zs])
    else:
        # Scalar field: a single component block.
        xs = array.array("f")
        with open(filename, "rb") as fp:
            fp.read(hd.sizes.header + 8)
            xs.fromfile(fp, n)
        maybe_swap(xs)
        if hd.axis == -1:
            xs = np.reshape(xs, (hd.dim[2], hd.dim[1], hd.dim[0]))
        else:
            xs = np.reshape(xs, (hd.dim[k], hd.dim[j]))
        return xs
class Header(object):
    """ Header contains header information from a gotetra header file. It
    contains the fields:
    type : TypeInfo
    cosmo : CosmoInfo
    render : RenderInfo
    loc : LocationInfo
    vel : VelocityInfo - Only included in phase-space grids.
    These contain many fields, but the three most useful are reproduced as
    top-level fields:
    dim : np.array - The dimensions of the grid in pixels. Equivalent to
    Header.loc.pixel_span.
    pw : float - The width of a single pixel. Equivalent to
    Header.loc.pixel_width.
    axis : int - The axis over which the image is projected over. If
    no projection was performed and the array is 3D,
    this will be set to -1.
    (These are the only fields which are truly necessary to form images. The
    others can be learned as needed.)
    """
    def __init__(self, s, sizes):
        # s = the 8-byte endianness/version flag followed by the packed
        # header bytes; sizes = per-section byte counts for this version.
        end = _read_endianness_flag(s[0:8])
        self.version = _read_version(s[0:8])
        ver = self.version
        self.sizes = sizes
        assert len(s) == self.sizes.header + 8
        # Byte offsets of each section within s (sections are contiguous).
        type_start = 8
        type_end = self.sizes.type_info + type_start
        cosmo_start = type_end
        cosmo_end = cosmo_start + self.sizes.cosmo_info
        render_start = cosmo_end
        render_end = render_start + self.sizes.render_info
        loc_start = render_end
        loc_end = render_end + self.sizes.location_info
        if ver >= 2:
            vel_start = loc_end
            vel_end = vel_start + self.sizes.velocity_info
        # Sanity check: the last section must end exactly at the header's end.
        if ver == 1:
            assert(loc_end == self.sizes.header + 8)
        elif ver >= 2:
            assert(vel_end == self.sizes.header + 8)
        self.type = TypeInfo(s[type_start: type_end], end)
        if self.type.header_size != 8 + sizes.header:
            # Size mismatch is reported but not fatal.
            print(
                "Header size %d is different from expected, %d." %
                (self.type.header_size, sizes.header + 8)
            )
        self.cosmo = CosmoInfo(s[cosmo_start: cosmo_end], end)
        self.render = RenderInfo(s[render_start: render_end], end)
        self.loc = LocationInfo(s[loc_start: loc_end], end)
        if ver >= 2:
            # NOTE(review): the class docstring advertises a `vel` field, but
            # the velocity section is never parsed here (vel_start/vel_end are
            # only used in the size assertion above). Confirm whether a
            # VelocityInfo parse was intended.
            pass
        # Convenience aliases for the most commonly used fields.
        self.dim = self.loc.pixel_span
        self.pw = self.loc.pixel_width
        self.axis = self.render.projection_axis
    def __str__(self):
        return "\n".join([
            "TypeInfo:", str(self.type), "",
            "CosmoInfo:", str(self.cosmo), "",
            "RenderInfo:", str(self.render), "",
            "LocationInfo:", str(self.loc),
        ])
def little_endian(end):
    """Return True when the endianness flag *end* marks little-endian (-1)."""
    return end == -1
def endian_unpack(fmt, s, end):
    """struct.unpack *s* with *fmt*, forcing the byte order given by *end*.

    end == -1 selects little-endian ("<"), anything else big-endian (">").
    """
    prefix = "<" if end == -1 else ">"
    return struct.unpack(prefix + fmt, s)
class TypeInfo(object):
    """ TypeInfo contains system information about the file. This is primarily
    useful for systems purposes. Its fields are:
    endianness_flag : int - Flag indicating the endianness of the input
    file's endianness.
    header_size : int - Number of bytes in the input file's header.
    grid_type : int - A flag indicating the type of the
    information stored in the file.
    is_vector_grid : bool - Flag indicating whether the file is a
    vector field or a scalar field.
    """
    def __init__(self, s, end):
        self.endianness_flag = end
        # Three signed 64-bit ints: header size, grid type, (unused third).
        fmt = "qqq"
        data = endian_unpack(fmt, s, self.endianness_flag)
        self.header_size = data[0]
        self.grid_type = data[1]
        # Only plain density and velocity-divergence grids are scalar fields.
        self.is_vector_grid = not (self.grid_type == DENSITY or
                                   self.grid_type == VELOCITY_DIVERGENCE)
    def __str__(self):
        return "\n".join([
            "    endianness_flag = %s" % self.endianness_str(),
            "    header_size = %d" % self.header_size,
            "    grid_type = %s" % self.grid_type_str(),
            "    is_vector_grid = %r" % self.is_vector_grid,
        ])
    def endianness_str(self):
        # Human-readable form of the endianness flag.
        if little_endian(self.endianness_flag):
            return "Little Endian"
        else:
            return "Big Endian"
    def grid_type_str(self):
        # NOTE(review): falls through (returns None) for unrecognized
        # grid_type values -- confirm whether that is intended.
        if self.grid_type == DENSITY:
            return "Density"
        elif self.grid_type == DENSITY_GRADIENT:
            return "Density Gradient"
        elif self.grid_type == VELOCITY:
            return "Velocity"
        elif self.grid_type == VELOCITY_DIVERGENCE:
            return "Velocity Divergence"
        elif self.grid_type == VELOCITY_CURL:
            return "Velocity Curl"
class CosmoInfo(object):
    """ CosmoInfo contains information about the data file's underlying
    cosmology, as well as other useful physical information. Its fields are:
    redshift : float
    scale_factor : float
    omega_m : float
    omega_l : float
    h0 : float - units are (km / s) / Mpc
    rho_mean : float - units are (M_sun / h) / (Mpc / h)^3
    rho_critical : float - units are (M_sun / h) / (Mpc / h)^3
    box_width : float - units are Mpc / h
    """
    def __init__(self, s, end):
        # Eight doubles, in exactly the order documented above.
        fmt = "d" * 8
        data = endian_unpack(fmt, s, end)
        self.redshift = data[0]
        self.scale_factor = data[1]
        self.omega_m = data[2]
        self.omega_l = data[3]
        self.h0 = data[4]
        self.rho_mean = data[5]
        self.rho_critical = data[6]
        self.box_width = data[7]
    def __str__(self):
        return "\n".join([
            "    redshift = %.4g" % self.redshift,
            "    scale_factor = %.4g" % self.scale_factor,
            "    omega_m = %.4g" % self.omega_m,
            "    omega_l = %.4g" % self.omega_l,
            "    h0 = %.4g" % self.h0,
            "    rho_mean = %.4g" % self.rho_mean,
            "    rho_critical = %.4g" % self.rho_critical,
            "    box_width = %.4g" % self.box_width,
        ])
class RenderInfo(object):
    """ RenderInfo contains information about the free parameters used to fine
    tune gotetra when rendering the density distribution. Its fields are
    particles : int - Number of particles per tetrahedron.
    total_pixels : int - Number of pixels required to render to a
    single side of the sim box. Equal to
    box_width / pixel_width.
    subsample_length : int - Level of subsampling used. 1 indicates no
    subsampling.
    min_projection_depth : int - Suggested minimum layers to use when
    forming a projected image. WARNING: This
    feature is totally broken.
    projection_axis : int - If the image is projected along an axis,
    the index of that axis. Otherwise this
    will be set to -1.
    """
    def __init__(self, s, end):
        # Five signed 64-bit ints, in the documented field order.
        fmt = "qqqqq"
        data = endian_unpack(fmt, s, end)
        self.particles = data[0]
        self.total_pixels = data[1]
        self.subsample_length = data[2]
        self.min_projection_depth = data[3]
        self.projection_axis = data[4]
    def __str__(self):
        return "\n".join([
            "    particles = %d" % self.particles,
            "    total_pixels = %d" % self.total_pixels,
            "    subsample_length = %d" % self.subsample_length,
            "    min_projection_depth = %d" % self.min_projection_depth,
            "    projection_axis = %s" % self._projection_axis_str(),
        ])
    def _projection_axis_str(self):
        # Maps -1/0/1/2 to None/X/Y/Z; any other value is a corrupt file.
        if self.projection_axis == -1:
            return "None"
        elif self.projection_axis == 0:
            return "X"
        elif self.projection_axis == 1:
            return "Y"
        elif self.projection_axis == 2:
            return "Z"
        else:
            assert(0)
class LocationInfo(object):
    """ LocationInfo contains information on the dimensions and location of the
    grid. Its fields are:
    origin : float numpy.array - Vector to the bottommost corner of
    the grid. Units are Mpc / h.
    span : float numpy.array - Physical dimensions of box. Units are
    Mpc / h.
    pixel_origin : int numpy.array - Vector to the bottommost corner of
    the grid. Units are pixels.
    pixel_span : int numpy.array - Dimensions of box. Units are pixels.
    pixel_width : float - The width of a single pixel. Units are
    Mpc / h.
    """
    def __init__(self, s, end):
        # Six doubles (origin, span), six int64s (pixel origin, pixel span),
        # then one double (pixel width).
        fmt = ("d" * 6) + ("q" * 6) + "d"
        data = endian_unpack(fmt, s, end)
        self.origin = np.array([data[0], data[1], data[2]])
        self.span = np.array([data[3], data[4], data[5]])
        self.pixel_origin = np.array([data[6], data[7], data[8]])
        self.pixel_span = np.array([data[9], data[10], data[11]])
        self.pixel_width = data[12]
    def __str__(self):
        return "\n".join([
            ("    origin = [%.4g, %.4g, %.4g]" %
             (self.origin[0], self.origin[1], self.origin[2])),
            ("    span = [%.4g, %.4g, %.4g]" %
             (self.span[0], self.span[1], self.span[2])),
            ("    pixel_origin = [%d, %d, %d]" %
             (self.pixel_origin[0],self.pixel_origin[1],self.pixel_origin[2])),
            ("    pixel_span = [%d, %d, %d]" %
             (self.pixel_span[0], self.pixel_span[1], self.pixel_span[2])),
            "    pixel_width = %.4g" % self.pixel_width,
        ])
if __name__ == "__main__":
    # Command-line usage: print the parsed header of a gotetra grid file.
    if len(sys.argv) < 2:
        print("%s requires a target file" % (sys.argv[0]))
        # sys.exit is always available; the exit() builtin is injected by the
        # site module and may be missing (e.g. under `python -S`).
        sys.exit(1)
    print(read_header(sys.argv[1]))
| |
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.db import reset_queries
from django.db.models.options import Options
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.translation import deactivate
# Jinja2 is an optional dependency; tests that need it are skipped when it
# isn't installed (see require_jinja2 below).
try:
    import jinja2
except ImportError:
    jinja2 = None
# Names re-exported as the public API of this module.
__all__ = (
    'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
    'modify_settings', 'override_settings',
    'requires_tz_support',
    'setup_test_environment', 'teardown_test_environment',
)
# time.tzset() is needed to change the process time zone; it is missing on
# Windows, so time-zone-dependent tests must be skipped there.
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
    """Wrapper that compares equal to any value within `places` decimal
    places of `val` (used for approximate float assertions in tests)."""
    def __init__(self, val, places=7):
        self.val = val
        self.places = places
    def __repr__(self):
        return repr(self.val)
    def __eq__(self, other):
        # Exact equality short-circuits; otherwise round the difference.
        if self.val == other:
            return True
        difference = abs(self.val - other)
        return round(difference, self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """
    def __getitem__(self, key):
        # String keys search every subcontext in order; integer keys fall
        # back to normal list indexing.
        if isinstance(key, six.string_types):
            for subcontext in self:
                if key in subcontext:
                    return subcontext[key]
            raise KeyError(key)
        else:
            return super(ContextList, self).__getitem__(key)
    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True
    def keys(self):
        """
        Flattened keys of subcontexts.
        """
        keys = set()
        for subcontext in self:
            # Each subcontext iterates over its stack of dicts. Renamed the
            # loop variable: the original shadowed the `dict` builtin.
            for context_dict in subcontext:
                keys |= set(context_dict.keys())
        return keys
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Fire template_rendered so the test client can record which template
    # rendered with which context, then defer to the normal nodelist render.
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup. This involves:
    - Installing the instrumented test renderer
    - Set the email backend to the locmem email backend.
    - Setting the active locale to match the LANGUAGE_CODE setting.
    """
    # Swap in the instrumented renderer; the original is stashed on the
    # Template class for teardown_test_environment() to restore.
    Template._original_render = Template._render
    Template._render = instrumented_test_render
    # Storing previous values in the settings module itself is problematic.
    # Store them in arbitrary (but related) modules instead. See #20636.
    mail._original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    request._original_allowed_hosts = settings.ALLOWED_HOSTS
    # Accept any host so the test client can use arbitrary Host headers.
    settings.ALLOWED_HOSTS = ['*']
    mail.outbox = []
    # Deactivate translations so tests run under the default language.
    deactivate()
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:
    - Restoring the original test renderer
    - Restoring the email sending functions
    """
    # Undo everything setup_test_environment() did and delete the stashed
    # originals so repeated setup/teardown cycles stay clean.
    Template._render = Template._original_render
    del Template._original_render
    settings.EMAIL_BACKEND = mail._original_email_backend
    del mail._original_email_backend
    settings.ALLOWED_HOSTS = request._original_allowed_hosts
    del request._original_allowed_hosts
    del mail.outbox
def get_runner(settings, test_runner_class=None):
    """Return the test-runner class named by the dotted path
    `test_runner_class`, defaulting to settings.TEST_RUNNER."""
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER
    test_path = test_runner_class.split('.')
    # Allow for Python 2.5 relative paths
    if len(test_path) > 1:
        test_module_name = '.'.join(test_path[:-1])
    else:
        test_module_name = '.'
    # A non-empty fromlist makes __import__ return the leaf module rather
    # than the top-level package.
    test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
    test_runner = getattr(test_module, test_path[-1])
    return test_runner
class TestContextDecorator(object):
    """
    A base class that can either be used as a context manager during tests
    or as a test function or unittest.TestCase subclass decorator to perform
    temporary alterations.
    `attr_name`: attribute assigned the return value of enable() if used as
    a class decorator.
    `kwarg_name`: keyword argument passing the return value of enable() if
    used as a function decorator.
    """
    def __init__(self, attr_name=None, kwarg_name=None):
        self.attr_name = attr_name
        self.kwarg_name = kwarg_name
    def enable(self):
        # Subclasses perform the temporary alteration here and may return a
        # context object handed to the decorated test.
        raise NotImplementedError
    def disable(self):
        # Subclasses revert the alteration made by enable() here.
        raise NotImplementedError
    def __enter__(self):
        return self.enable()
    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()
    def decorate_class(self, cls):
        if issubclass(cls, TestCase):
            decorated_setUp = cls.setUp
            decorated_tearDown = cls.tearDown
            # Wrap setUp/tearDown so the alteration brackets every test
            # method of the decorated TestCase.
            def setUp(inner_self):
                context = self.enable()
                if self.attr_name:
                    setattr(inner_self, self.attr_name, context)
                decorated_setUp(inner_self)
            def tearDown(inner_self):
                decorated_tearDown(inner_self)
                self.disable()
            cls.setUp = setUp
            cls.tearDown = tearDown
            return cls
        raise TypeError('Can only decorate subclasses of unittest.TestCase')
    def decorate_callable(self, func):
        @wraps(func, assigned=available_attrs(func))
        def inner(*args, **kwargs):
            # Run the wrapped test inside `with self`, optionally passing
            # the enable() result through as a keyword argument.
            with self as context:
                if self.kwarg_name:
                    kwargs[self.kwarg_name] = context
                return func(*args, **kwargs)
        return inner
    def __call__(self, decorated):
        # Dispatch on what is being decorated: a class vs. a plain callable.
        if isinstance(decorated, type):
            return self.decorate_class(decorated)
        elif callable(decorated):
            return self.decorate_callable(decorated)
        raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
    """
    Acts as either a decorator or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """
    def __init__(self, **kwargs):
        # Mapping of setting name -> overridden value.
        self.options = kwargs
        super(override_settings, self).__init__()
    def enable(self):
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if 'INSTALLED_APPS' in self.options:
            try:
                apps.set_installed_apps(self.options['INSTALLED_APPS'])
            except Exception:
                apps.unset_installed_apps()
                raise
        # Layer the overrides on top of the current settings holder and swap
        # it in; the original holder is kept for disable().
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for key, new_value in self.options.items():
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value, enter=True)
    def disable(self):
        if 'INSTALLED_APPS' in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        # Notify listeners with the now-restored values (enter=False).
        for key in self.options:
            new_value = getattr(settings, key, None)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value, enter=False)
    def save_options(self, test_func):
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = dict(
                test_func._overridden_settings, **self.options)
    def decorate_class(self, cls):
        from django.test import SimpleTestCase
        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings")
        # Store the options on the class; SimpleTestCase applies them during
        # setUpClass rather than via wrapped setUp/tearDown.
        self.save_options(cls)
        return cls
class modify_settings(override_settings):
    """
    Like override_settings, but makes it possible to append, prepend or remove
    items instead of redefining the entire list.
    """
    def __init__(self, *args, **kwargs):
        if args:
            # Hack used when instantiating from SimpleTestCase.setUpClass.
            assert not kwargs
            self.operations = args[0]
        else:
            assert not args
            self.operations = list(kwargs.items())
        # Deliberately skip override_settings.__init__ (which would treat the
        # kwargs as plain setting values) and call TestContextDecorator's.
        super(override_settings, self).__init__()
    def save_options(self, test_func):
        if test_func._modified_settings is None:
            test_func._modified_settings = self.operations
        else:
            # Duplicate list to prevent subclasses from altering their parent.
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations
    def enable(self):
        self.options = {}
        for name, operations in self.operations:
            try:
                # When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; cumulate changes.
                value = self.options[name]
            except KeyError:
                value = list(getattr(settings, name, []))
            for action, items in operations.items():
                # items may be a single value or an iterable.
                if isinstance(items, six.string_types):
                    items = [items]
                if action == 'append':
                    value = value + [item for item in items if item not in value]
                elif action == 'prepend':
                    value = [item for item in items if item not in value] + value
                elif action == 'remove':
                    value = [item for item in value if item not in items]
                else:
                    raise ValueError("Unsupported action: %s" % action)
            self.options[name] = value
        # Apply the computed lists through override_settings.enable().
        super(modify_settings, self).enable()
class override_system_checks(TestContextDecorator):
    """
    Acts as a decorator. Overrides list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
    """
    def __init__(self, new_checks, deployment_checks=None):
        from django.core.checks.registry import registry
        self.registry = registry
        self.new_checks = new_checks
        self.deployment_checks = deployment_checks
        super(override_system_checks, self).__init__()
    def enable(self):
        # Swap in the replacement checks, remembering the originals so
        # disable() can restore them.
        self.old_checks = self.registry.registered_checks
        self.registry.registered_checks = self.new_checks
        self.old_deployment_checks = self.registry.deployment_checks
        if self.deployment_checks is not None:
            self.registry.deployment_checks = self.deployment_checks
    def disable(self):
        self.registry.registered_checks = self.old_checks
        self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got. Plain string
    comparison doesn't always work because, for example, attribute
    ordering should not be important. Comment nodes are not considered in the
    comparison. Leading and trailing whitespace is ignored on both chunks.
    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
    def norm_whitespace(v):
        # Collapse any run of whitespace to a single space.
        return _norm_whitespace_re.sub(' ', v)
    def child_text(element):
        # Concatenated direct text children of the element.
        return ''.join(c.data for c in element.childNodes
                       if c.nodeType == Node.TEXT_NODE)
    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]
    def norm_child_text(element):
        return norm_whitespace(child_text(element))
    def attrs_dict(element):
        # dict comparison makes attribute order irrelevant.
        return dict(element.attributes.items())
    def check_element(want_element, got_element):
        # Recursive structural comparison: tag, text, attributes, children.
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True
    def first_node(document):
        # First non-comment node of the parsed document.
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node
    want, got = strip_quotes(want, got)
    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')
    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got
    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))
    return check_element(want_root, got_root)
def strip_quotes(want, got):
    """
    Strip quotes of doctests output values:
    >>> strip_quotes("'foo'")
    "foo"
    >>> strip_quotes('"foo"')
    "foo"
    """
    quote_chars = ('"', "'")
    def _plain_quoted(text):
        # "'foo'" or '"foo"' with matching quote characters.
        text = text.strip()
        return len(text) >= 2 and text[0] == text[-1] and text[0] in quote_chars
    def _unicode_quoted(text):
        # Python 2 style u'foo' / u"foo" reprs.
        text = text.strip()
        return (len(text) >= 3 and text[0] == 'u' and
                text[1] == text[-1] and text[1] in quote_chars)
    if _plain_quoted(want) and _plain_quoted(got):
        want, got = want.strip()[1:-1], got.strip()[1:-1]
    elif _unicode_quoted(want) and _unicode_quoted(got):
        want, got = want.strip()[2:-1], got.strip()[2:-1]
    return want, got
def str_prefix(s):
    # Interpolate 'u' on Python 2 (where text reprs look like u'...') and ''
    # on Python 3, so tests can write expected reprs portable to both.
    return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
    """
    Context manager that captures queries executed by the specified connection.
    """
    def __init__(self, connection):
        self.connection = connection
    def __iter__(self):
        return iter(self.captured_queries)
    def __getitem__(self, index):
        return self.captured_queries[index]
    def __len__(self):
        return len(self.captured_queries)
    @property
    def captured_queries(self):
        # Slice of the connection's query log recorded inside the context.
        return self.connection.queries[self.initial_queries:self.final_queries]
    def __enter__(self):
        # Force query logging regardless of DEBUG and disconnect the
        # request_started reset so captured queries aren't cleared mid-test.
        self.force_debug_cursor = self.connection.force_debug_cursor
        self.connection.force_debug_cursor = True
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        request_started.disconnect(reset_queries)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            # On error, final_queries stays None so captured_queries extends
            # to the end of the log.
            return
        self.final_queries = len(self.connection.queries_log)
class ignore_warnings(TestContextDecorator):
    """Decorator/context manager suppressing warnings matched by the given
    `warnings.filterwarnings` keyword arguments (all warnings if none)."""
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        # filterwarnings supports message/module matching; simplefilter is
        # the simpler call when no such filtering is requested.
        if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
            self.filter_func = warnings.filterwarnings
        else:
            self.filter_func = warnings.simplefilter
        super(ignore_warnings, self).__init__()
    def enable(self):
        # catch_warnings snapshots the filter state; __exit__ restores it.
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func('ignore', **self.ignore_kwargs)
    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
    """
    Context manager that takes a named logger and the logging level
    and provides a simple mock-like list of messages received
    """
    recorded = []
    def _capture(msg, *args, **kwargs):
        # Mirror the %-style lazy formatting done by real logging methods.
        formatted = msg % args
        if log_kwargs:
            recorded.append((formatted, kwargs))
        else:
            recorded.append(formatted)
    target_logger = logging.getLogger(logger_name)
    original_method = getattr(target_logger, log_level)
    setattr(target_logger, log_level, _capture)
    try:
        yield recorded
    finally:
        setattr(target_logger, log_level, original_method)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
# Apply as @requires_tz_support on tests that depend on tzset-based TZ
# switching (TZ_SUPPORT is computed at the top of this module).
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
    """Context manager to temporarily add paths to sys.path."""
    # Snapshot the import path, append the extras, and restore the snapshot
    # on exit no matter what the body does.
    saved_path = list(sys.path)
    sys.path += list(paths)
    try:
        yield
    finally:
        sys.path = saved_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Clear the cache of an LRU cache object on entering and exiting."""
    # Clearing on both sides ensures cached state neither leaks into nor
    # out of the isolated block.
    clear = lru_cache_object.cache_clear
    clear()
    try:
        yield
    finally:
        clear()
@contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO.
    Note: This function and the following ``captured_std*`` are copied
    from CPython's ``test.support`` module."""
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, six.StringIO())
    try:
        # Yield the replacement stream so callers can inspect getvalue().
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Capture the output of sys.stdout:
       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    # Convenience wrapper around captured_output().
    return captured_output("stdout")
def captured_stderr():
    """Capture the output of sys.stderr:
       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    # Convenience wrapper around captured_output().
    return captured_output("stderr")
def captured_stdin():
    """Capture the input to sys.stdin:
       with captured_stdin() as stdin:
           stdin.write('hello\n')
           stdin.seek(0)
           # call test code that consumes from sys.stdin
           captured = input()
       self.assertEqual(captured, "hello")
    """
    # Convenience wrapper around captured_output().
    return captured_output("stdin")
def reset_warning_registry():
    """
    Clear warning registry for all modules. This is required in some tests
    because of a bug in Python that prevents warnings.simplefilter("always")
    from always making warnings appear: http://bugs.python.org/issue4180
    The bug was fixed in Python 3.4.2.
    """
    # Copy the values so concurrent imports can't mutate the dict while we
    # iterate over it.
    for module in list(sys.modules.values()):
        if hasattr(module, "__warningregistry__"):
            module.__warningregistry__.clear()
@contextmanager
def freeze_time(t):
    """
    Context manager to temporarily freeze time.time(). This temporarily
    modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected
    This isn't meant as a public API, but helps reduce some repetitive code in
    Django's test suite.
    """
    def _frozen():
        return t
    real_time = time.time
    time.time = _frozen
    try:
        yield
    finally:
        time.time = real_time
def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
    """
    # Skip entirely when the optional jinja2 dependency is missing (see the
    # guarded import at the top of this module).
    test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
    # Configure both engines so templates resolve against either backend.
    test_func = override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
    }, {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'APP_DIRS': True,
        'OPTIONS': {'keep_trailing_newline': True},
    }])(test_func)
    return test_func
class override_script_prefix(TestContextDecorator):
    """
    Decorator or context manager to temporary override the script prefix.
    """
    def __init__(self, prefix):
        self.prefix = prefix
        super(override_script_prefix, self).__init__()
    def enable(self):
        # Remember the current prefix so disable() can restore it.
        self.old_prefix = get_script_prefix()
        set_script_prefix(self.prefix)
    def disable(self):
        set_script_prefix(self.old_prefix)
class LoggingCaptureMixin(object):
    """
    Capture the output from the 'django' logger and store it on the class's
    logger_output attribute.
    """
    def setUp(self):
        # Redirect the first handler's stream into an in-memory buffer.
        # NOTE(review): assumes the 'django' logger has at least one
        # stream-based handler configured — confirm against test settings.
        self.logger = logging.getLogger('django')
        self.old_stream = self.logger.handlers[0].stream
        self.logger_output = six.StringIO()
        self.logger.handlers[0].stream = self.logger_output
    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
    """
    Act as either a decorator or a context manager to register models defined
    in its wrapped context to an isolated registry.
    The list of installed apps the isolated registry should contain must be
    passed as arguments.
    Two optional keyword arguments can be specified:
    `attr_name`: attribute assigned the isolated registry if used as a class
    decorator.
    `kwarg_name`: keyword argument passing the isolated registry if used as a
    function decorator.
    """
    def __init__(self, *installed_apps, **kwargs):
        self.installed_apps = installed_apps
        super(isolate_apps, self).__init__(**kwargs)
    def enable(self):
        # Point Options.default_apps at a fresh registry so models declared
        # inside the context register there instead of the global registry.
        self.old_apps = Options.default_apps
        apps = Apps(self.installed_apps)
        setattr(Options, 'default_apps', apps)
        return apps
    def disable(self):
        setattr(Options, 'default_apps', self.old_apps)
def tag(*tags):
    """
    Decorator to add tags to a test class or method.
    """
    def _apply(decorated):
        # A fresh set per decorated object, so objects never share state.
        decorated.tags = set(tags)
        return decorated
    return _apply
| |
"""
Matrix Market I/O in Python.
See http://math.nist.gov/MatrixMarket/formats.html
for information about the Matrix Market format.
"""
#
# Author: Pearu Peterson <pearu@cens.ioc.ee>
# Created: October, 2004
#
# References:
# http://math.nist.gov/MatrixMarket/
#
from __future__ import division, print_function, absolute_import
import os
import sys
from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
ones, can_cast)
from numpy.compat import asbytes, asstr
from scipy.sparse import coo_matrix, isspmatrix
__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
# -----------------------------------------------------------------------------
def mminfo(source):
    """
    Return size and storage parameters from Matrix Market file-like 'source'.
    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object
    Returns
    -------
    rows : int
        Number of matrix rows.
    cols : int
        Number of matrix columns.
    entries : int
        Number of non-zero entries of a sparse matrix
        or rows*cols for a dense matrix.
    format : str
        Either 'coordinate' or 'array'.
    field : str
        Either 'real', 'complex', 'pattern', or 'integer'.
    symmetry : str
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
    """
    # Thin wrapper: header parsing lives in MMFile.info.
    return MMFile.info(source)
# -----------------------------------------------------------------------------
def mmread(source):
    """
    Reads the contents of a Matrix Market file-like 'source' into a matrix.
    Parameters
    ----------
    source : str or file-like
        Matrix Market filename (extensions .mtx, .mtz.gz)
        or open file-like object.
    Returns
    -------
    a : ndarray or coo_matrix
        Dense or sparse matrix depending on the matrix format in the
        Matrix Market file.
    """
    # Thin wrapper: a fresh MMFile parses the header then the body.
    return MMFile().read(source)
# -----------------------------------------------------------------------------
def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
    """
    Writes the sparse or dense array `a` to Matrix Market file-like `target`.
    Parameters
    ----------
    target : str or file-like
        Matrix Market filename (extension .mtx) or open file-like object.
    a : array like
        Sparse or dense 2-D array.
    comment : str, optional
        Comments to be prepended to the Matrix Market file.
    field : None or str, optional
        Either 'real', 'complex', 'pattern', or 'integer'.
    precision : None or int, optional
        Number of digits to display for real or complex values.
    symmetry : None or str, optional
        Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
        If symmetry is None the symmetry type of 'a' is determined by its
        values.
    """
    # Thin wrapper: all formatting logic lives in MMFile.write.
    MMFile().write(target, a, comment, field, precision, symmetry)
###############################################################################
class MMFile (object):
    """Reader/writer for Matrix Market (.mtx) files.
    Parsed header metadata is exposed through the read-only properties
    declared below (rows, cols, entries, format, field, symmetry).
    """
    # One private slot per public property; _init_attrs() fills them.
    __slots__ = ('_rows',
                 '_cols',
                 '_entries',
                 '_format',
                 '_field',
                 '_symmetry')
    @property
    def rows(self):
        # Number of matrix rows parsed from the header.
        return self._rows
    @property
    def cols(self):
        # Number of matrix columns parsed from the header.
        return self._cols
    @property
    def entries(self):
        # Stored entries: nnz for 'coordinate', rows*cols for 'array'.
        return self._entries
    @property
    def format(self):
        # 'coordinate' (sparse) or 'array' (dense).
        return self._format
    @property
    def field(self):
        # One of the FIELD_* constants below.
        return self._field
    @property
    def symmetry(self):
        # One of the SYMMETRY_* constants below.
        return self._symmetry
    @property
    def has_symmetry(self):
        # True when the file stores only one triangle of the matrix.
        return self._symmetry in (self.SYMMETRY_SYMMETRIC,
                                  self.SYMMETRY_SKEW_SYMMETRIC,
                                  self.SYMMETRY_HERMITIAN)
    # format values
    FORMAT_COORDINATE = 'coordinate'
    FORMAT_ARRAY = 'array'
    FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
    @classmethod
    def _validate_format(self, format):
        # Raise ValueError for anything other than 'coordinate' or 'array'.
        if format not in self.FORMAT_VALUES:
            raise ValueError('unknown format type %s, must be one of %s' %
                             (format, self.FORMAT_VALUES))
    # field values
    FIELD_INTEGER = 'integer'
    FIELD_UNSIGNED = 'unsigned-integer'
    FIELD_REAL = 'real'
    FIELD_COMPLEX = 'complex'
    FIELD_PATTERN = 'pattern'
    FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN)
    @classmethod
    def _validate_field(self, field):
        # Raise ValueError for field values outside FIELD_VALUES.
        if field not in self.FIELD_VALUES:
            raise ValueError('unknown field type %s, must be one of %s' %
                             (field, self.FIELD_VALUES))
    # symmetry values
    SYMMETRY_GENERAL = 'general'
    SYMMETRY_SYMMETRIC = 'symmetric'
    SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
    SYMMETRY_HERMITIAN = 'hermitian'
    SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
                       SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
    @classmethod
    def _validate_symmetry(self, symmetry):
        # Raise ValueError for symmetry values outside SYMMETRY_VALUES.
        if symmetry not in self.SYMMETRY_VALUES:
            raise ValueError('unknown symmetry type %s, must be one of %s' %
                             (symmetry, self.SYMMETRY_VALUES))
    # numpy dtype character used when allocating the result for each field.
    DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp',
                       FIELD_UNSIGNED: 'uint64',
                       FIELD_REAL: 'd',
                       FIELD_COMPLEX: 'D',
                       FIELD_PATTERN: 'd'}
    # -------------------------------------------------------------------------
    @staticmethod
    def reader():
        # Placeholder kept for API compatibility; reading happens in read().
        pass
    # -------------------------------------------------------------------------
    @staticmethod
    def writer():
        # Placeholder kept for API compatibility; writing happens in write().
        pass
# -------------------------------------------------------------------------
@classmethod
def info(self, source):
"""
Return size, storage parameters from Matrix Market file-like 'source'.
Parameters
----------
source : str or file-like
Matrix Market filename (extension .mtx) or open file-like object
Returns
-------
rows : int
Number of matrix rows.
cols : int
Number of matrix columns.
entries : int
Number of non-zero entries of a sparse matrix
or rows*cols for a dense matrix.
format : str
Either 'coordinate' or 'array'.
field : str
Either 'real', 'complex', 'pattern', or 'integer'.
symmetry : str
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
"""
stream, close_it = self._open(source)
try:
# read and validate header line
line = stream.readline()
mmid, matrix, format, field, symmetry = \
[asstr(part.strip()) for part in line.split()]
if not mmid.startswith('%%MatrixMarket'):
raise ValueError('source is not in Matrix Market format')
if not matrix.lower() == 'matrix':
raise ValueError("Problem reading file header: " + line)
# http://math.nist.gov/MatrixMarket/formats.html
if format.lower() == 'array':
format = self.FORMAT_ARRAY
elif format.lower() == 'coordinate':
format = self.FORMAT_COORDINATE
# skip comments
# line.startswith('%')
while line and line[0] in ['%', 37]:
line = stream.readline()
# skip empty lines
while not line.strip():
line = stream.readline()
line = line.split()
if format == self.FORMAT_ARRAY:
if not len(line) == 2:
raise ValueError("Header line not of length 2: " + line)
rows, cols = map(int, line)
entries = rows * cols
else:
if not len(line) == 3:
raise ValueError("Header line not of length 3: " + line)
rows, cols, entries = map(int, line)
return (rows, cols, entries, format, field.lower(),
symmetry.lower())
finally:
if close_it:
stream.close()
# -------------------------------------------------------------------------
@staticmethod
def _open(filespec, mode='rb'):
""" Return an open file stream for reading based on source.
If source is a file name, open it (after trying to find it with mtx and
gzipped mtx extensions). Otherwise, just return source.
Parameters
----------
filespec : str or file-like
String giving file name or file-like object
mode : str, optional
Mode with which to open file, if `filespec` is a file name.
Returns
-------
fobj : file-like
Open file-like object.
close_it : bool
True if the calling function should close this file when done,
false otherwise.
"""
close_it = False
if isinstance(filespec, str):
close_it = True
# open for reading
if mode[0] == 'r':
# determine filename plus extension
if not os.path.isfile(filespec):
if os.path.isfile(filespec+'.mtx'):
filespec = filespec + '.mtx'
elif os.path.isfile(filespec+'.mtx.gz'):
filespec = filespec + '.mtx.gz'
elif os.path.isfile(filespec+'.mtx.bz2'):
filespec = filespec + '.mtx.bz2'
# open filename
if filespec.endswith('.gz'):
import gzip
stream = gzip.open(filespec, mode)
elif filespec.endswith('.bz2'):
import bz2
stream = bz2.BZ2File(filespec, 'rb')
else:
stream = open(filespec, mode)
# open for writing
else:
if filespec[-4:] != '.mtx':
filespec = filespec + '.mtx'
stream = open(filespec, mode)
else:
stream = filespec
return stream, close_it
    # -------------------------------------------------------------------------
    @staticmethod
    def _get_symmetry(a):
        """Classify `a` as general/symmetric/skew-symmetric/hermitian by
        comparing each strictly-lower entry with its transposed partner."""
        m, n = a.shape
        if m != n:
            # Only square matrices can have any symmetry.
            return MMFile.SYMMETRY_GENERAL
        issymm = True
        isskew = True
        # Hermitian only applies to complex dtypes ('F'/'D').
        isherm = a.dtype.char in 'FD'
        # sparse input
        if isspmatrix(a):
            # check if number of nonzero entries of lower and upper triangle
            # matrix are equal
            a = a.tocoo()
            (row, col) = a.nonzero()
            if (row < col).sum() != (row > col).sum():
                return MMFile.SYMMETRY_GENERAL
            # define iterator over symmetric pair entries
            a = a.todok()
            def symm_iterator():
                for ((i, j), aij) in a.items():
                    if i > j:
                        aji = a[j, i]
                        yield (aij, aji)
        # non-sparse input
        else:
            # define iterator over symmetric pair entries
            def symm_iterator():
                for j in range(n):
                    for i in range(j+1, n):
                        aij, aji = a[i][j], a[j][i]
                        yield (aij, aji)
        # check for symmetry
        for (aij, aji) in symm_iterator():
            if issymm and aij != aji:
                issymm = False
            if isskew and aij != -aji:
                isskew = False
            if isherm and aij != conj(aji):
                isherm = False
            if not (issymm or isskew or isherm):
                # Every candidate symmetry has been ruled out; stop early.
                break
        # return symmetry value
        if issymm:
            return MMFile.SYMMETRY_SYMMETRIC
        if isskew:
            return MMFile.SYMMETRY_SKEW_SYMMETRIC
        if isherm:
            return MMFile.SYMMETRY_HERMITIAN
        return MMFile.SYMMETRY_GENERAL
# -------------------------------------------------------------------------
@staticmethod
def _field_template(field, precision):
return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
MMFile.FIELD_INTEGER: '%i\n',
MMFile.FIELD_UNSIGNED: '%u\n',
MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
(precision, precision)
}.get(field, None)
    # -------------------------------------------------------------------------
    def __init__(self, **kwargs):
        # All metadata attributes default to None until a header is parsed.
        self._init_attrs(**kwargs)
    # -------------------------------------------------------------------------
    def read(self, source):
        """
        Reads the contents of a Matrix Market file-like 'source' into a matrix.
        Parameters
        ----------
        source : str or file-like
            Matrix Market filename (extensions .mtx, .mtz.gz)
            or open file object.
        Returns
        -------
        a : ndarray or coo_matrix
            Dense or sparse matrix depending on the matrix format in the
            Matrix Market file.
        """
        stream, close_it = self._open(source)
        try:
            # Header populates self._rows etc.; the body builds the matrix.
            self._parse_header(stream)
            return self._parse_body(stream)
        finally:
            if close_it:
                stream.close()
    # -------------------------------------------------------------------------
    def write(self, target, a, comment='', field=None, precision=None,
              symmetry=None):
        """
        Writes sparse or dense array `a` to Matrix Market file-like `target`.
        Parameters
        ----------
        target : str or file-like
            Matrix Market filename (extension .mtx) or open file-like object.
        a : array like
            Sparse or dense 2-D array.
        comment : str, optional
            Comments to be prepended to the Matrix Market file.
        field : None or str, optional
            Either 'real', 'complex', 'pattern', or 'integer'.
        precision : None or int, optional
            Number of digits to display for real or complex values.
        symmetry : None or str, optional
            Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
            If symmetry is None the symmetry type of 'a' is determined by its
            values.
        """
        stream, close_it = self._open(target, 'wb')
        try:
            self._write(stream, a, comment, field, precision, symmetry)
        finally:
            if close_it:
                stream.close()
            else:
                # Caller owns the stream; make sure our output is pushed out.
                stream.flush()
# -------------------------------------------------------------------------
def _init_attrs(self, **kwargs):
"""
Initialize each attributes with the corresponding keyword arg value
or a default of None
"""
attrs = self.__class__.__slots__
public_attrs = [attr[1:] for attr in attrs]
invalid_keys = set(kwargs.keys()) - set(public_attrs)
if invalid_keys:
raise ValueError('''found %s invalid keyword arguments, please only
use %s''' % (tuple(invalid_keys),
public_attrs))
for attr in attrs:
setattr(self, attr, kwargs.get(attr[1:], None))
# -------------------------------------------------------------------------
def _parse_header(self, stream):
rows, cols, entries, format, field, symmetry = \
self.__class__.info(stream)
self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
field=field, symmetry=symmetry)
# -------------------------------------------------------------------------
def _parse_body(self, stream):
rows, cols, entries, format, field, symm = (self.rows, self.cols,
self.entries, self.format,
self.field, self.symmetry)
try:
from scipy.sparse import coo_matrix
except ImportError:
coo_matrix = None
dtype = self.DTYPES_BY_FIELD.get(field, None)
has_symmetry = self.has_symmetry
is_integer = field == self.FIELD_INTEGER
is_unsigned_integer = field == self.FIELD_UNSIGNED
is_complex = field == self.FIELD_COMPLEX
is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
is_herm = symm == self.SYMMETRY_HERMITIAN
is_pattern = field == self.FIELD_PATTERN
if format == self.FORMAT_ARRAY:
a = zeros((rows, cols), dtype=dtype)
line = 1
i, j = 0, 0
if is_skew:
a[i, j] = 0
if i < rows - 1:
i += 1
while line:
line = stream.readline()
# line.startswith('%')
if not line or line[0] in ['%', 37] or not line.strip():
continue
if is_integer:
aij = int(line)
elif is_unsigned_integer:
aij = int(line)
elif is_complex:
aij = complex(*map(float, line.split()))
else:
aij = float(line)
a[i, j] = aij
if has_symmetry and i != j:
if is_skew:
a[j, i] = -aij
elif is_herm:
a[j, i] = conj(aij)
else:
a[j, i] = aij
if i < rows-1:
i = i + 1
else:
j = j + 1
if not has_symmetry:
i = 0
else:
i = j
if is_skew:
a[i, j] = 0
if i < rows-1:
i += 1
if is_skew:
if not (i in [0, j] and j == cols - 1):
raise ValueError("Parse error, did not read all lines.")
else:
if not (i in [0, j] and j == cols):
raise ValueError("Parse error, did not read all lines.")
elif format == self.FORMAT_COORDINATE and coo_matrix is None:
# Read sparse matrix to dense when coo_matrix is not available.
a = zeros((rows, cols), dtype=dtype)
line = 1
k = 0
while line:
line = stream.readline()
# line.startswith('%')
if not line or line[0] in ['%', 37] or not line.strip():
continue
l = line.split()
i, j = map(int, l[:2])
i, j = i-1, j-1
if is_integer:
aij = int(l[2])
elif is_unsigned_integer:
aij = int(l[2])
elif is_complex:
aij = complex(*map(float, l[2:]))
else:
aij = float(l[2])
a[i, j] = aij
if has_symmetry and i != j:
if is_skew:
a[j, i] = -aij
elif is_herm:
a[j, i] = conj(aij)
else:
a[j, i] = aij
k = k + 1
if not k == entries:
ValueError("Did not read all entries")
elif format == self.FORMAT_COORDINATE:
# Read sparse COOrdinate format
if entries == 0:
# empty matrix
return coo_matrix((rows, cols), dtype=dtype)
I = zeros(entries, dtype='intc')
J = zeros(entries, dtype='intc')
if is_pattern:
V = ones(entries, dtype='int8')
elif is_integer:
V = zeros(entries, dtype='intp')
elif is_unsigned_integer:
V = zeros(entries, dtype='uint64')
elif is_complex:
V = zeros(entries, dtype='complex')
else:
V = zeros(entries, dtype='float')
entry_number = 0
for line in stream:
# line.startswith('%')
if not line or line[0] in ['%', 37] or not line.strip():
continue
if entry_number+1 > entries:
raise ValueError("'entries' in header is smaller than "
"number of entries")
l = line.split()
I[entry_number], J[entry_number] = map(int, l[:2])
if not is_pattern:
if is_integer:
V[entry_number] = int(l[2])
elif is_unsigned_integer:
V[entry_number] = int(l[2])
elif is_complex:
V[entry_number] = complex(*map(float, l[2:]))
else:
V[entry_number] = float(l[2])
entry_number += 1
if entry_number < entries:
raise ValueError("'entries' in header is larger than "
"number of entries")
I -= 1 # adjust indices (base 1 -> base 0)
J -= 1
if has_symmetry:
mask = (I != J) # off diagonal mask
od_I = I[mask]
od_J = J[mask]
od_V = V[mask]
I = concatenate((I, od_J))
J = concatenate((J, od_I))
if is_skew:
od_V *= -1
elif is_herm:
od_V = od_V.conjugate()
V = concatenate((V, od_V))
a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
else:
raise NotImplementedError(format)
return a
# ------------------------------------------------------------------------
def _write(self, stream, a, comment='', field=None, precision=None,
symmetry=None):
if isinstance(a, list) or isinstance(a, ndarray) or \
isinstance(a, tuple) or hasattr(a, '__array__'):
rep = self.FORMAT_ARRAY
a = asarray(a)
if len(a.shape) != 2:
raise ValueError('Expected 2 dimensional array')
rows, cols = a.shape
if field is not None:
if field == self.FIELD_INTEGER:
if not can_cast(a.dtype, 'intp'):
raise OverflowError("mmwrite does not support integer "
"dtypes larger than native 'intp'.")
a = a.astype('intp')
elif field == self.FIELD_REAL:
if a.dtype.char not in 'fd':
a = a.astype('d')
elif field == self.FIELD_COMPLEX:
if a.dtype.char not in 'FD':
a = a.astype('D')
else:
if not isspmatrix(a):
raise ValueError('unknown matrix type: %s' % type(a))
rep = 'coordinate'
rows, cols = a.shape
typecode = a.dtype.char
if precision is None:
if typecode in 'fF':
precision = 8
else:
precision = 16
if field is None:
kind = a.dtype.kind
if kind == 'i':
if not can_cast(a.dtype, 'intp'):
raise OverflowError("mmwrite does not support integer "
"dtypes larger than native 'intp'.")
field = 'integer'
elif kind == 'f':
field = 'real'
elif kind == 'c':
field = 'complex'
elif kind == 'u':
field = 'unsigned-integer'
else:
raise TypeError('unexpected dtype kind ' + kind)
if symmetry is None:
symmetry = self._get_symmetry(a)
# validate rep, field, and symmetry
self.__class__._validate_format(rep)
self.__class__._validate_field(field)
self.__class__._validate_symmetry(symmetry)
# write initial header line
stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep,
field, symmetry)))
# write comments
for line in comment.split('\n'):
stream.write(asbytes('%%%s\n' % (line)))
template = self._field_template(field, precision)
# write dense format
if rep == self.FORMAT_ARRAY:
# write shape spec
stream.write(asbytes('%i %i\n' % (rows, cols)))
if field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED):
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
stream.write(asbytes(template % a[i, j]))
elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC:
for j in range(cols):
for i in range(j + 1, rows):
stream.write(asbytes(template % a[i, j]))
else:
for j in range(cols):
for i in range(j, rows):
stream.write(asbytes(template % a[i, j]))
elif field == self.FIELD_COMPLEX:
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
aij = a[i, j]
stream.write(asbytes(template % (real(aij),
imag(aij))))
else:
for j in range(cols):
for i in range(j, rows):
aij = a[i, j]
stream.write(asbytes(template % (real(aij),
imag(aij))))
elif field == self.FIELD_PATTERN:
raise ValueError('pattern type inconsisted with dense format')
else:
raise TypeError('Unknown field type %s' % field)
# write sparse format
else:
coo = a.tocoo() # convert to COOrdinate format
# if symmetry format used, remove values above main diagonal
if symmetry != self.SYMMETRY_GENERAL:
lower_triangle_mask = coo.row >= coo.col
coo = coo_matrix((coo.data[lower_triangle_mask],
(coo.row[lower_triangle_mask],
coo.col[lower_triangle_mask])),
shape=coo.shape)
# write shape spec
stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))
template = self._field_template(field, precision-1)
if field == self.FIELD_PATTERN:
for r, c in zip(coo.row+1, coo.col+1):
stream.write(asbytes("%i %i\n" % (r, c)))
elif field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED):
for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
stream.write(asbytes(("%i %i " % (r, c)) +
(template % d)))
elif field == self.FIELD_COMPLEX:
for r, c, d in zip(coo.row+1, coo.col+1, coo.data):
stream.write(asbytes(("%i %i " % (r, c)) +
(template % (d.real, d.imag))))
else:
raise TypeError('Unknown field type %s' % field)
def _is_fromfile_compatible(stream):
"""
Check whether `stream` is compatible with numpy.fromfile.
Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with
Python 3.
"""
bad_cls = []
try:
import gzip
bad_cls.append(gzip.GzipFile)
except ImportError:
pass
try:
import bz2
bad_cls.append(bz2.BZ2File)
except ImportError:
pass
bad_cls = tuple(bad_cls)
return not isinstance(stream, bad_cls)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import time
    # Simple benchmark driver: time mmread() on each file named on the
    # command line.
    for filename in sys.argv[1:]:
        print('Reading', filename, '...', end=' ')
        sys.stdout.flush()
        t = time.time()
        mmread(filename)
        print('took %s seconds' % (time.time() - t))
| |
"""
A simple vtkTkRenderWidget for Tkinter.
Created by David Gobbi, April 1999
May ??, 1999 - Modifications performed by Heather Drury,
to rewrite _pan to match method in TkInteractor.tcl
May 11, 1999 - Major rewrite by David Gobbi to make the
interactor bindings identical to the TkInteractor.tcl
bindings.
July 14, 1999 - Added modification by Ken Martin for VTK 2.4, to
use vtk widgets instead of Togl.
Aug 29, 1999 - Renamed file to vtkRenderWidget.py
Nov 14, 1999 - Added support for keyword 'rw'
Mar 23, 2000 - Extensive but backwards compatible changes,
improved documentation
"""
"""
A few important notes:
This class is meant to be used as a base-class widget for
doing VTK rendering in Python.
In VTK (and C++) there is a very important distinction between
public ivars (attributes in pythonspeak), protected ivars, and
private ivars. When you write a python class that you want
to 'look and feel' like a VTK class, you should follow these rules.
1) Attributes should never be public. Attributes should always be
either protected (prefixed with a single underscore) or private
(prefixed with a double underscore). You can provide access to
attributes through public Set/Get methods (same as VTK).
2) Use a single underscore to denote a protected attribute, e.g.
self._RenderWindow is protected (can be accessed from this
class or a derived class).
3) Use a double underscore to denote a private attribute, e.g.
self.__InExpose cannot be accessed outside of this class.
All attributes should be 'declared' in the __init__() function
i.e. set to some initial value. Don't forget that 'None' means
'NULL' - the python/vtk wrappers guarantee their equivalence.
"""
import Tkinter
import math, os, sys
import vtk
from vtkLoadPythonTkWidgets import vtkLoadPythonTkWidgets
class vtkTkRenderWidget(Tkinter.Widget):
    """
    A vtkTkRenderWidget for Python.

    Use GetRenderWindow() to get the vtkRenderWindow.

    Create with the keyword stereo=1 in order to generate a
    stereo-capable window.

    Create with the keyword focus_on_enter=1 to enable
    focus-follows-mouse.  The default is for a click-to-focus mode.
    """
    def __init__(self, master, cnf={}, **kw):
        """
        Constructor.

        Keyword arguments:

          rw -- Use passed render window instead of creating a new one.

          stereo -- If True, generate a stereo-capable window.
          Defaults to False.

          focus_on_enter -- If True, use a focus-follows-mouse mode.
          Defaults to False where the widget will use a click-to-focus
          mode.
        """
        # load the necessary extensions into tk
        vtkLoadPythonTkWidgets(master.tk)

        try:  # check to see if a render window was specified
            renderWindow = kw['rw']
        except KeyError:
            renderWindow = vtk.vtkRenderWindow()

        try:  # was a stereo rendering context requested?
            if kw['stereo']:
                renderWindow.StereoCapableWindowOn()
                del kw['stereo']
        except KeyError:
            pass

        # check if focus should follow mouse
        if kw.get('focus_on_enter'):
            self._FocusOnEnter = 1
            del kw['focus_on_enter']
        else:
            self._FocusOnEnter = 0

        # pass the render window's address to the tk-side widget
        kw['rw'] = renderWindow.GetAddressAsString("vtkRenderWindow")
        Tkinter.Widget.__init__(self, master, 'vtkTkRenderWidget', cnf, kw)

        # interaction state: the renderer/camera/light under the mouse
        self._CurrentRenderer = None
        self._CurrentCamera = None
        self._CurrentZoom = 1.0
        self._CurrentLight = None

        self._ViewportCenterX = 0
        self._ViewportCenterY = 0

        # picking support (see PickActor)
        self._Picker = vtk.vtkCellPicker()
        self._PickedAssembly = None
        self._PickedProperty = vtk.vtkProperty()
        self._PickedProperty.SetColor(1,0,0)
        self._PrePickedProperty = None

        self._OldFocus = None

        # used by the LOD actors
        self._DesiredUpdateRate = 15
        self._StillUpdateRate = 0.0001

        # these record the previous mouse position
        self._LastX = 0
        self._LastY = 0

        # private attributes
        self.__InExpose = 0

        # create the Tk bindings
        self.BindTkRenderWidget()

    def __getattr__(self,attr):
        # because the tk part of vtkTkRenderWidget must have
        # the only remaining reference to the RenderWindow when
        # it is destroyed, we can't actually store the RenderWindow
        # as an attribute but instead have to get it from the tk-side
        if attr == '_RenderWindow':
            return self.GetRenderWindow()
        raise AttributeError, self.__class__.__name__ + \
              " has no attribute named " + attr

    def BindTkRenderWidget(self):
        """
        Bind some default actions.
        """
        self.bind("<ButtonPress>",
                  lambda e,s=self: s.StartMotion(e.x,e.y))
        self.bind("<ButtonRelease>",
                  lambda e,s=self: s.EndMotion(e.x,e.y))
        self.bind("<B1-Motion>",
                  lambda e,s=self: s.Rotate(e.x,e.y))
        self.bind("<B2-Motion>",
                  lambda e,s=self: s.Pan(e.x,e.y))
        self.bind("<B3-Motion>",
                  lambda e,s=self: s.Zoom(e.x,e.y))
        self.bind("<Shift-B1-Motion>",
                  lambda e,s=self: s.Pan(e.x,e.y))
        self.bind("<KeyPress-r>",
                  lambda e,s=self: s.Reset(e.x,e.y))
        self.bind("<KeyPress-u>",
                  lambda e,s=self: s.deiconify())
        self.bind("<KeyPress-w>",
                  lambda e,s=self: s.Wireframe())
        self.bind("<KeyPress-s>",
                  lambda e,s=self: s.Surface())
        self.bind("<KeyPress-p>",
                  lambda e,s=self: s.PickActor(e.x,e.y))
        if self._FocusOnEnter:
            self.bind("<Enter>",
                      lambda e,s=self: s.Enter(e.x,e.y))
            self.bind("<Leave>",
                      lambda e,s=self: s.Leave(e.x,e.y))
        else:
            # NOTE: this rebinds <ButtonPress>, replacing the StartMotion
            # binding above; Enter() itself calls StartMotion, so motion
            # still begins on click.
            self.bind("<ButtonPress>",
                      lambda e,s=self: s.Enter(e.x,e.y))
        self.bind("<Expose>",
                  lambda e,s=self: s.Expose())

    def GetZoomFactor(self):
        """Return the cumulative zoom factor applied by Zoom()."""
        return self._CurrentZoom

    def SetDesiredUpdateRate(self, rate):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor."""
        self._DesiredUpdateRate = rate

    def GetDesiredUpdateRate(self):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor."""
        return self._DesiredUpdateRate

    def SetStillUpdateRate(self, rate):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor."""
        self._StillUpdateRate = rate

    def GetStillUpdateRate(self):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor."""
        return self._StillUpdateRate

    def GetRenderWindow(self):
        """Return the vtkRenderWindow held by the tk-side widget.

        The first 5 characters of the string the tk widget returns are
        stripped to recover the raw address, which is then rebuilt into a
        typed address string for the vtk wrapper.
        """
        addr = self.tk.call(self._w, 'GetRenderWindow')[5:]
        return vtk.vtkRenderWindow('_%s_vtkRenderWindow_p' % addr)

    def GetPicker(self):
        """Return the vtkCellPicker used by PickActor()."""
        return self._Picker

    def Expose(self):
        """Handle an <Expose> event by re-rendering.

        The __InExpose flag guards against re-entrant expose handling
        triggered by the update() call.
        """
        if (not self.__InExpose):
            self.__InExpose = 1
            self.update()
            self._RenderWindow.Render()
            self.__InExpose = 0

    def Render(self):
        """Render the scene, first moving the current light to follow the
        current camera's position and focal point."""
        if (self._CurrentLight):
            light = self._CurrentLight
            light.SetPosition(self._CurrentCamera.GetPosition())
            light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
        self._RenderWindow.Render()

    def UpdateRenderer(self,x,y):
        """
        UpdateRenderer will identify the renderer under the mouse and set
        up _CurrentRenderer, _CurrentCamera, and _CurrentLight.
        """
        windowX = self.winfo_width()
        windowY = self.winfo_height()

        renderers = self._RenderWindow.GetRenderers()
        numRenderers = renderers.GetNumberOfItems()

        self._CurrentRenderer = None
        renderers.InitTraversal()
        for i in range(0,numRenderers):
            renderer = renderers.GetNextItem()
            # convert the mouse position to normalized viewport coordinates
            # (tk y grows downward, viewport y grows upward)
            vx,vy = (0,0)
            if (windowX > 1):
                vx = float(x)/(windowX-1)
            if (windowY > 1):
                vy = (windowY-float(y)-1)/(windowY-1)
            (vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()

            if (vx >= vpxmin and vx <= vpxmax and
                vy >= vpymin and vy <= vpymax):
                self._CurrentRenderer = renderer
                self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
                                        +vpxmin
                self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
                                        +vpymin
                self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
                lights = self._CurrentRenderer.GetLights()
                lights.InitTraversal()
                self._CurrentLight = lights.GetNextItem()
                break

        self._LastX = x
        self._LastY = y

    def GetCurrentRenderer(self):
        """Return the renderer most recently found under the mouse."""
        return self._CurrentRenderer

    def Enter(self,x,y):
        """Take keyboard focus (remembering the old owner) and begin a
        motion interaction at (x, y)."""
        self._OldFocus=self.focus_get()
        self.focus()
        self.StartMotion(x, y)

    def Leave(self,x,y):
        """Return keyboard focus to its previous owner, if any."""
        if (self._OldFocus != None):
            self._OldFocus.focus()

    def StartMotion(self,x,y):
        """Switch to the fast (interactive) update rate and pick the
        renderer under (x, y)."""
        self.GetRenderWindow().SetDesiredUpdateRate(self._DesiredUpdateRate)
        self.UpdateRenderer(x,y)

    def EndMotion(self,x,y):
        """Switch back to the high-quality (still) update rate and
        re-render."""
        self.GetRenderWindow().SetDesiredUpdateRate(self._StillUpdateRate)
        if self._CurrentRenderer:
            self.Render()

    def Rotate(self,x,y):
        """Rotate the camera: horizontal drag -> azimuth, vertical drag ->
        elevation."""
        if self._CurrentRenderer:
            self._CurrentCamera.Azimuth(self._LastX - x)
            self._CurrentCamera.Elevation(y - self._LastY)
            self._CurrentCamera.OrthogonalizeViewUp()

            self._LastX = x
            self._LastY = y

            self._CurrentRenderer.ResetCameraClippingRange()
            self.Render()

    def Pan(self,x,y):
        """Translate both the camera position and focal point so the scene
        follows the mouse drag."""
        if self._CurrentRenderer:
            renderer = self._CurrentRenderer
            camera = self._CurrentCamera
            (pPoint0,pPoint1,pPoint2) = camera.GetPosition()
            (fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()

            if (camera.GetParallelProjection()):
                # parallel projection: shift focal point and position by the
                # drag delta expressed in display coordinates
                renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
                renderer.WorldToDisplay()
                fx,fy,fz = renderer.GetDisplayPoint()
                renderer.SetDisplayPoint(fx-x+self._LastX,
                                         fy+y-self._LastY,
                                         fz)
                renderer.DisplayToWorld()
                fx,fy,fz,fw = renderer.GetWorldPoint()
                camera.SetFocalPoint(fx,fy,fz)

                renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
                renderer.WorldToDisplay()
                fx,fy,fz = renderer.GetDisplayPoint()
                renderer.SetDisplayPoint(fx-x+self._LastX,
                                         fy+y-self._LastY,
                                         fz)
                renderer.DisplayToWorld()
                fx,fy,fz,fw = renderer.GetWorldPoint()
                camera.SetPosition(fx,fy,fz)

            else:
                (fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
                # Specify a point location in world coordinates
                renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
                renderer.WorldToDisplay()
                # Convert world point coordinates to display coordinates
                dPoint = renderer.GetDisplayPoint()
                focalDepth = dPoint[2]

                aPoint0 = self._ViewportCenterX + (x - self._LastX)
                aPoint1 = self._ViewportCenterY - (y - self._LastY)

                renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
                renderer.DisplayToWorld()

                (rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
                if (rPoint3 != 0.0):
                    # homogeneous -> cartesian coordinates
                    rPoint0 = rPoint0/rPoint3
                    rPoint1 = rPoint1/rPoint3
                    rPoint2 = rPoint2/rPoint3

                camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
                                     (fPoint1 - rPoint1) + fPoint1,
                                     (fPoint2 - rPoint2) + fPoint2)

                camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
                                   (fPoint1 - rPoint1) + pPoint1,
                                   (fPoint2 - rPoint2) + pPoint2)

            self._LastX = x
            self._LastY = y

            self.Render()

    def Zoom(self,x,y):
        """Zoom: dolly (or change parallel scale) exponentially with the
        vertical drag distance."""
        if self._CurrentRenderer:
            renderer = self._CurrentRenderer
            camera = self._CurrentCamera

            zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
            self._CurrentZoom = self._CurrentZoom * zoomFactor

            if camera.GetParallelProjection():
                parallelScale = camera.GetParallelScale()/zoomFactor
                camera.SetParallelScale(parallelScale)
            else:
                camera.Dolly(zoomFactor)
                renderer.ResetCameraClippingRange()

            self._LastX = x
            self._LastY = y

            self.Render()

    def Reset(self,x,y):
        """Reset the current renderer's camera to frame all props."""
        if self._CurrentRenderer:
            self._CurrentRenderer.ResetCamera()

        self.Render()

    def Wireframe(self):
        """Switch every actor in the current renderer to wireframe
        representation."""
        actors = self._CurrentRenderer.GetActors()
        numActors = actors.GetNumberOfItems()
        actors.InitTraversal()
        for i in range(0,numActors):
            actor = actors.GetNextItem()
            actor.GetProperty().SetRepresentationToWireframe()

        self.Render()

    def Surface(self):
        """Switch every actor in the current renderer to surface
        representation."""
        actors = self._CurrentRenderer.GetActors()
        numActors = actors.GetNumberOfItems()
        actors.InitTraversal()
        for i in range(0,numActors):
            actor = actors.GetNextItem()
            actor.GetProperty().SetRepresentationToSurface()

        self.Render()

    def PickActor(self,x,y):
        """Pick the assembly under (x, y) and highlight it by swapping in
        the red 'picked' property; restore the previous pick first."""
        if self._CurrentRenderer:
            renderer = self._CurrentRenderer
            picker = self._Picker

            windowY = self.winfo_height()
            picker.Pick(x,(windowY - y - 1),0.0,renderer)
            assembly = picker.GetAssembly()

            if (self._PickedAssembly != None and
                self._PrePickedProperty != None):
                self._PickedAssembly.SetProperty(self._PrePickedProperty)
                # release hold of the property
                self._PrePickedProperty.UnRegister(self._PrePickedProperty)
                self._PrePickedProperty = None

            if (assembly != None):
                self._PickedAssembly = assembly
                self._PrePickedProperty = self._PickedAssembly.GetProperty()
                # hold onto the property
                self._PrePickedProperty.Register(self._PrePickedProperty)
                self._PickedAssembly.SetProperty(self._PickedProperty)

            self.Render()
#----------------------------------------------------------------------------
def vtkRenderWidgetConeExample():
    """Minimal demo: render an 8-sided cone inside a vtkTkRenderWidget."""
    # create root window
    tk_root = Tkinter.Tk()

    # create vtkTkRenderWidget
    widget = vtkTkRenderWidget(tk_root, width=300, height=300)

    renderer = vtk.vtkRenderer()
    widget.GetRenderWindow().AddRenderer(renderer)

    # build the cone pipeline: source -> mapper -> actor
    cone_source = vtk.vtkConeSource()
    cone_source.SetResolution(8)

    cone_mapper = vtk.vtkPolyDataMapper()
    cone_mapper.SetInput(cone_source.GetOutput())

    cone_actor = vtk.vtkActor()
    cone_actor.SetMapper(cone_mapper)

    renderer.AddActor(cone_actor)

    # pack the pane into the tk root
    widget.pack()

    # start the tk mainloop
    tk_root.mainloop()
if __name__ == "__main__":
    # Run the interactive demo when executed as a script.
    vtkRenderWidgetConeExample()
| |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-lines
"""Create / interact with Google Cloud Storage blobs."""
import base64
import copy
import hashlib
from io import BytesIO
from io import UnsupportedOperation
import json
import mimetypes
import os
import time
import httplib2
import six
from six.moves.urllib.parse import quote
from google.cloud._helpers import _rfc3339_to_datetime
from google.cloud._helpers import _to_bytes
from google.cloud._helpers import _bytes_to_unicode
from google.cloud.credentials import generate_signed_url
from google.cloud.exceptions import NotFound
from google.cloud.exceptions import make_exception
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage.acl import ObjectACL
from google.cloud.streaming.http_wrapper import Request
from google.cloud.streaming.http_wrapper import make_api_request
from google.cloud.streaming.transfer import Download
from google.cloud.streaming.transfer import RESUMABLE_UPLOAD
from google.cloud.streaming.transfer import Upload
_API_ACCESS_ENDPOINT = 'https://storage.googleapis.com'
class Blob(_PropertyMixin):
"""A wrapper around Cloud Storage's concept of an ``Object``.
:type name: str
:param name: The name of the blob. This corresponds to the
unique path of the object in the bucket.
:type bucket: :class:`google.cloud.storage.bucket.Bucket`
:param bucket: The bucket to which this blob belongs.
:type chunk_size: int
:param chunk_size: The size of a chunk of data whenever iterating (1 MB).
This must be a multiple of 256 KB per the API
specification.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
See https://cloud.google.com/storage/docs/encryption#customer-supplied
"""
_chunk_size = None # Default value for each instance.
_CHUNK_SIZE_MULTIPLE = 256 * 1024
"""Number (256 KB, in bytes) that must divide the chunk size."""
_STORAGE_CLASSES = (
'NEARLINE',
'MULTI_REGIONAL',
'REGIONAL',
'COLDLINE',
'STANDARD', # alias for MULTI_REGIONAL/REGIONAL, based on location
)
"""Allowed values for :attr:`storage_class`.
See:
https://cloud.google.com/storage/docs/json_api/v1/objects#storageClass
https://cloud.google.com/storage/docs/per-object-storage-class
.. note::
    This list does not include 'DURABLE_REDUCED_AVAILABILITY', which
    is only documented for buckets (and deprecated).
.. note::
The documentation does *not* mention 'STANDARD', but it is the value
assigned by the back-end for objects created in buckets with 'STANDARD'
set as their 'storage_class'.
"""
def __init__(self, name, bucket, chunk_size=None, encryption_key=None):
super(Blob, self).__init__(name=name)
self.chunk_size = chunk_size # Check that setter accepts value.
self.bucket = bucket
self._acl = ObjectACL(self)
self._encryption_key = encryption_key
    @property
    def chunk_size(self):
        """Get the blob's default chunk size.

        :rtype: int or ``NoneType``
        :returns: The current blob's chunk size, if it is set.
        """
        return self._chunk_size
@chunk_size.setter
def chunk_size(self, value):
"""Set the blob's default chunk size.
:type value: int
:param value: (Optional) The current blob's chunk size, if it is set.
:raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
multiple of 256 KB.
"""
if value is not None and value % self._CHUNK_SIZE_MULTIPLE != 0:
raise ValueError('Chunk size must be a multiple of %d.' % (
self._CHUNK_SIZE_MULTIPLE,))
self._chunk_size = value
@staticmethod
def path_helper(bucket_path, blob_name):
"""Relative URL path for a blob.
:type bucket_path: str
:param bucket_path: The URL path for a bucket.
:type blob_name: str
:param blob_name: The name of the blob.
:rtype: str
:returns: The relative URL path for ``blob_name``.
"""
return bucket_path + '/o/' + quote(blob_name, safe='')
    @property
    def acl(self):
        """Create our ACL on demand.

        Returns the :class:`ObjectACL` instance created for this blob in
        ``__init__``.
        """
        return self._acl
def __repr__(self):
if self.bucket:
bucket_name = self.bucket.name
else:
bucket_name = None
return '<Blob: %s, %s>' % (bucket_name, self.name)
@property
def path(self):
"""Getter property for the URL path to this Blob.
:rtype: str
:returns: The URL path to this Blob.
"""
if not self.name:
raise ValueError('Cannot determine path without a blob name.')
return self.path_helper(self.bucket.path, self.name)
    @property
    def client(self):
        """The client bound to this blob.

        Delegates to the owning bucket's ``client``.
        """
        return self.bucket.client
@property
def public_url(self):
"""The public URL for this blob's object.
:rtype: `string`
:returns: The public URL for this blob.
"""
return '{storage_base_url}/{bucket_name}/{quoted_name}'.format(
storage_base_url='https://storage.googleapis.com',
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
def generate_signed_url(self, expiration, method='GET',
content_type=None,
generation=None, response_disposition=None,
response_type=None, client=None, credentials=None):
"""Generates a signed URL for this blob.
.. note::
If you are on Google Compute Engine, you can't generate a signed
URL. Follow `Issue 922`_ for updates on this. If you'd like to
be able to generate a signed URL from GCE, you can use a standard
service account from a JSON file rather than a GCE service account.
.. _Issue 922: https://github.com/GoogleCloudPlatform/\
google-cloud-python/issues/922
If you have a blob that you want to allow access to for a set
amount of time, you can use this method to generate a URL that
is only valid within a certain time period.
This is particularly useful if you don't want publicly
accessible blobs, but don't want to require users to explicitly
log in.
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
:type content_type: str
:param content_type: (Optional) The content type of the object
referenced by ``resource``.
:type generation: str
:param generation: (Optional) A value that indicates which generation
of the resource to fetch.
:type response_disposition: str
:param response_disposition: (Optional) Content disposition of
responses to requests for the signed URL.
For example, to enable the signed URL
to initiate a file of ``blog.png``, use
the value
``'attachment; filename=blob.png'``.
:type response_type: str
:param response_type: (Optional) Content type of responses to requests
for the signed URL. Used to over-ride the content
type of the underlying blob/object.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: (Optional) The OAuth2 credentials to use to sign
the URL. Defaults to the credentials stored on the
client used.
:rtype: str
:returns: A signed URL you can use to access the resource
until expiration.
"""
resource = '/{bucket_name}/{quoted_name}'.format(
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
if credentials is None:
client = self._require_client(client)
credentials = client._base_connection.credentials
return generate_signed_url(
credentials, resource=resource,
api_access_endpoint=_API_ACCESS_ENDPOINT,
expiration=expiration, method=method,
content_type=content_type,
response_type=response_type,
response_disposition=response_disposition,
generation=generation)
def exists(self, client=None):
"""Determines whether or not this blob exists.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: bool
:returns: True if the blob exists in Cloud Storage.
"""
client = self._require_client(client)
try:
# We only need the status code (200 or not) so we seek to
# minimize the returned payload.
query_params = {'fields': 'name'}
# We intentionally pass `_target_object=None` since fields=name
# would limit the local properties.
client._connection.api_request(
method='GET', path=self.path,
query_params=query_params, _target_object=None)
# NOTE: This will not fail immediately in a batch. However, when
# Batch.finish() is called, the resulting `NotFound` will be
# raised.
return True
except NotFound:
return False
    def delete(self, client=None):
        """Deletes a blob from Cloud Storage.

        Thin wrapper: the actual request is issued by the owning bucket's
        ``delete_blob`` method.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the blob's bucket.

        :rtype: :class:`Blob`
        :returns: The blob that was just deleted.

        :raises: :class:`google.cloud.exceptions.NotFound`
                 (propagated from
                 :meth:`google.cloud.storage.bucket.Bucket.delete_blob`).
        """
        return self.bucket.delete_blob(self.name, client=client)
def download_to_file(self, file_obj, client=None):
"""Download the contents of this blob into a file-like object.
.. note::
If the server-set property, :attr:`media_link`, is not yet
initialized, makes an additional API request to load it.
Downloading a file that has been encrypted with a `customer-supplied`_
encryption key:
.. literalinclude:: storage_snippets.py
:start-after: [START download_to_file]
:end-before: [END download_to_file]
The ``encryption_key`` should be a str or bytes with a length of at
least 32.
.. _customer-supplied: https://cloud.google.com/storage/docs/\
encryption#customer-supplied
:type file_obj: file
:param file_obj: A file handle to which to write the blob's data.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`google.cloud.exceptions.NotFound`
"""
client = self._require_client(client)
if self.media_link is None: # not yet loaded
self.reload()
download_url = self.media_link
# Use apitools 'Download' facility.
download = Download.from_stream(file_obj)
if self.chunk_size is not None:
download.chunksize = self.chunk_size
headers = _get_encryption_headers(self._encryption_key)
request = Request(download_url, 'GET', headers)
# Use ``_base_connection`` rather ``_connection`` since the current
# connection may be a batch. A batch wraps a client's connection,
# but does not store the ``http`` object. The rest (API_BASE_URL and
# build_api_url) are also defined on the Batch class, but we just
# use the wrapped connection since it has all three (http,
# API_BASE_URL and build_api_url).
download.initialize_download(request, client._base_connection.http)
def download_to_filename(self, filename, client=None):
    """Download the contents of this blob into a named file.

    On success, mirrors the blob's server-side ``updated`` timestamp onto
    the local file's access/modification times (when that property is
    known locally).

    :type filename: str
    :param filename: A filename to be passed to ``open``.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.

    :raises: :class:`google.cloud.exceptions.NotFound`
    """
    with open(filename, 'wb') as file_obj:
        self.download_to_file(file_obj, client=client)

    # ``updated`` is a server-set property and may be ``None`` when the
    # blob's metadata has not been loaded; guard so that a successful
    # download does not crash while back-dating the local file.
    updated = self.updated
    if updated is not None:
        mtime = time.mktime(updated.timetuple())
        os.utime(filename, (mtime, mtime))
def download_as_string(self, client=None):
    """Fetch this blob's contents and return them as ``bytes``.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.

    :rtype: bytes
    :returns: The data stored in this blob.
    :raises: :class:`google.cloud.exceptions.NotFound`
    """
    # Stream the download into an in-memory buffer, then hand back the
    # accumulated bytes.
    stream = BytesIO()
    self.download_to_file(stream, client=client)
    return stream.getvalue()
def _create_upload(
        self, client, file_obj=None, size=None, content_type=None,
        chunk_size=None, strategy=None, extra_headers=None):
    """Helper for upload methods.

    Creates a :class:`google.cloud.core.streaming.Upload` object to handle
    the details of uploading a file to Cloud Storage.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.

    :type file_obj: file
    :param file_obj: A file handle open for reading.

    :type size: int
    :param size: The size of the upload, in bytes.

    :type content_type: str
    :param content_type: Optional type of content being uploaded.

    :type chunk_size: int
    :param chunk_size: The size of each chunk when doing resumable and
                       media uploads.

    :type strategy: str
    :param strategy: Either
                     :attr:`google.cloud.core.streaming.transfer.SIMPLE_UPLOAD`
                     or
                     :attr:`google.cloud.core.streaming.transfer.RESUMABLE_UPLOAD`.

    :type extra_headers: dict
    :param extra_headers: Additional headers to be sent with the upload
                          initiation request.

    :rtype: Tuple[google.cloud.core.streaming.Upload,
                  google.cloud.core.streaming.Request,
                  google.cloud.core.streaming.Response]
    :returns: The Upload object, the upload HTTP request, and the upload
              initiation response.
    """
    client = self._require_client(client)

    # Use ``_base_connection`` rather ``_connection`` since the current
    # connection may be a batch. A batch wraps a client's connection,
    # but does not store the ``http`` object. The rest (API_BASE_URL and
    # build_api_url) are also defined on the Batch class, but we just
    # use the wrapped connection since it has all three (http,
    # API_BASE_URL and build_api_url).
    connection = client._base_connection

    content_type = (content_type or self._properties.get('contentType') or
                    'application/octet-stream')

    headers = {
        'Accept': 'application/json',
        'Accept-Encoding': 'gzip, deflate',
        'User-Agent': connection.USER_AGENT,
    }

    if extra_headers:
        headers.update(extra_headers)

    headers.update(_get_encryption_headers(self._encryption_key))

    # Use apitools' Upload functionality.
    upload = Upload(
        file_obj, content_type, total_size=size, auto_transfer=False)

    if chunk_size is not None:
        upload.chunksize = chunk_size

    if strategy is not None:
        # BUGFIX: honor the strategy the caller asked for; previously this
        # unconditionally forced RESUMABLE_UPLOAD, making a requested
        # SIMPLE_UPLOAD impossible.
        upload.strategy = strategy

    url_builder = _UrlBuilder(
        bucket_name=self.bucket.name,
        object_name=self.name)
    upload_config = _UploadConfig()

    # Temporary URL until strategy is determined.
    base_url = connection.API_BASE_URL + '/upload'
    upload_url = connection.build_api_url(
        api_base_url=base_url,
        path=self.bucket.path + '/o')

    # Configure the upload request parameters.
    request = Request(upload_url, 'POST', headers)
    upload.configure_request(upload_config, request, url_builder)

    # Configure final URL.
    query_params = url_builder.query_params
    base_url = connection.API_BASE_URL + '/upload'
    request.url = connection.build_api_url(
        api_base_url=base_url,
        path=self.bucket.path + '/o',
        query_params=query_params)

    # Start the upload session.
    response = upload.initialize_upload(request, connection.http)

    return upload, request, response
@staticmethod
def _check_response_error(request, http_response):
    """Raise for non-2xx upload responses (helper for upload_from_file)."""
    status = int(http_response.info['status'])
    if 200 <= status < 300:
        return
    # Wrap the status in an httplib2-style response so make_exception can
    # map it to the proper google.cloud exception type.
    faux_response = httplib2.Response({'status': status})
    raise make_exception(faux_response, http_response.content,
                         error_info=request.url)
def upload_from_file(self, file_obj, rewind=False, size=None,
                     content_type=None, num_retries=6, client=None):
    """Upload the contents of this blob from a file-like object.

    The content type of the upload will either be
    - The value passed in to the function (if any)
    - The value stored on the current blob
    - The default value of 'application/octet-stream'

    .. note::
       The effect of uploading to an existing blob depends on the
       "versioning" and "lifecycle" policies defined on the blob's
       bucket.  In the absence of those policies, upload will
       overwrite any existing contents.

       See the `object versioning
       <https://cloud.google.com/storage/docs/object-versioning>`_ and
       `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
       API documents for details.

    Uploading a file with a `customer-supplied`_ encryption key:

    .. literalinclude:: storage_snippets.py
        :start-after: [START upload_from_file]
        :end-before: [END upload_from_file]

    The ``encryption_key`` should be a str or bytes with a length of at
    least 32.

    .. _customer-supplied: https://cloud.google.com/storage/docs/\
                           encryption#customer-supplied

    :type file_obj: file
    :param file_obj: A file handle open for reading.

    :type rewind: bool
    :param rewind: If True, seek to the beginning of the file handle before
                   writing the file to Cloud Storage.

    :type size: int
    :param size: The number of bytes to read from the file handle.
                 If not provided, we'll try to guess the size using
                 :func:`os.fstat`. (If the file handle is not from the
                 filesystem this won't be possible.)

    :type content_type: str
    :param content_type: Optional type of content being uploaded.

    :type num_retries: int
    :param num_retries: Number of upload retries. Defaults to 6.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.

    :raises: :class:`ValueError` if size is not passed in and can not be
             determined; :class:`google.cloud.exceptions.GoogleCloudError`
             if the upload response returns an error status.
    """
    client = self._require_client(client)
    # Use ``_base_connection`` rather ``_connection`` since the current
    # connection may be a batch. A batch wraps a client's connection,
    # but does not store the ``http`` object. The rest (API_BASE_URL and
    # build_api_url) are also defined on the Batch class, but we just
    # use the wrapped connection since it has all three (http,
    # API_BASE_URL and build_api_url).
    connection = client._base_connection

    # Rewind the file if desired.
    if rewind:
        file_obj.seek(0, os.SEEK_SET)

    # Get the basic stats about the file.
    total_bytes = size
    if total_bytes is None:
        if hasattr(file_obj, 'fileno'):
            try:
                total_bytes = os.fstat(file_obj.fileno()).st_size
            except (OSError, UnsupportedOperation):
                pass  # Assuming fd is not an actual file (maybe socket).

    # Pick the transfer strategy: a configured chunk size enables chunked
    # (and, for unknown sizes, resumable) uploads; without a chunk size
    # the total size MUST be known up front.
    chunk_size = None
    strategy = None
    if self.chunk_size is not None:
        chunk_size = self.chunk_size

        if total_bytes is None:
            strategy = RESUMABLE_UPLOAD
    elif total_bytes is None:
        raise ValueError('total bytes could not be determined. Please '
                         'pass an explicit size, or supply a chunk size '
                         'for a streaming transfer.')

    upload, request, _ = self._create_upload(
        client, file_obj=file_obj, size=total_bytes,
        content_type=content_type, chunk_size=chunk_size,
        strategy=strategy)

    if upload.strategy == RESUMABLE_UPLOAD:
        http_response = upload.stream_file(use_chunks=True)
    else:
        http_response = make_api_request(
            connection.http, request, retries=num_retries)

    self._check_response_error(request, http_response)
    response_content = http_response.content

    if not isinstance(response_content,
                      six.string_types):  # pragma: NO COVER  Python3
        response_content = response_content.decode('utf-8')

    # Refresh local metadata from the upload response.
    self._set_properties(json.loads(response_content))
def upload_from_filename(self, filename, content_type=None, client=None):
    """Upload this blob's contents from the content of a named file.

    The content type of the upload will either be
    - The value passed in to the function (if any)
    - The value stored on the current blob
    - The value given by mimetypes.guess_type

    .. note::
       The effect of uploading to an existing blob depends on the
       "versioning" and "lifecycle" policies defined on the blob's
       bucket.  In the absence of those policies, upload will
       overwrite any existing contents.

    :type filename: str
    :param filename: The path to the file.

    :type content_type: str
    :param content_type: Optional type of content being uploaded.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.
    """
    # Resolve the content type: explicit argument, then the value already
    # stored on the blob, then a guess based on the file extension.
    if not content_type:
        content_type = self._properties.get('contentType')
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)

    with open(filename, 'rb') as handle:
        self.upload_from_file(
            handle, content_type=content_type, client=client)
def upload_from_string(self, data, content_type='text/plain', client=None):
    """Upload contents of this blob from the provided string.

    .. note::
       The effect of uploading to an existing blob depends on the
       "versioning" and "lifecycle" policies defined on the blob's
       bucket.  In the absence of those policies, upload will
       overwrite any existing contents.

    :type data: bytes or str
    :param data: The data to store in this blob.  If the value is
                 text, it will be encoded as UTF-8.

    :type content_type: str
    :param content_type: Optional type of content being uploaded. Defaults
                         to ``'text/plain'``.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.
    """
    # Text is always encoded as UTF-8 before upload.
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')

    stream = BytesIO(data)
    self.upload_from_file(
        file_obj=stream, rewind=True, size=len(data),
        content_type=content_type, client=client)
def create_resumable_upload_session(
        self,
        content_type=None,
        size=None,
        origin=None,
        client=None):
    """Create a resumable upload session.

    Resumable upload sessions allow you to start an upload session from
    one client and complete the session in another.  This method is called
    by the initiator to set the metadata and limits; the initiator then
    passes the session URL to the client that will upload the binary data
    (a PUT request on the session URL completes the upload).  This allows
    untrusted clients to upload to an access-controlled bucket; see the
    `documentation on signed URLs`_.

    .. _documentation on signed URLs: https://cloud.google.com/storage\
            /docs/access-control/signed-urls#signing-resumable

    The content type of the upload will either be
    - The value passed in to the function (if any)
    - The value stored on the current blob
    - The default value of 'application/octet-stream'

    If :attr:`encryption_key` is set, the blob will be `encrypted`_.

    .. _encrypted: https://cloud.google.com/storage/docs/\
                   encryption#customer-supplied

    :type size: int
    :param size: Optional, the maximum number of bytes that can be
        uploaded using this session.  If not known, leave blank.

    :type content_type: str
    :param content_type: Optional type of content being uploaded.

    :type origin: str
    :param origin: Optional origin.  If set, the upload can only be
        completed by a user-agent uploading from the given origin.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
        to the ``client`` stored on the blob's bucket.

    :rtype: str
    :returns: The resumable upload session URL.
    :raises: :class:`google.cloud.exceptions.GoogleCloudError`
        if the session creation response returns an error status.
    """
    extra_headers = {}
    if origin is not None:
        # This header is specifically for client-side uploads; it
        # determines the origins allowed for CORS.
        extra_headers['Origin'] = origin

    _, _, start_response = self._create_upload(
        client,
        size=size,
        content_type=content_type,
        strategy=RESUMABLE_UPLOAD,
        extra_headers=extra_headers)

    # The 'location' header of the initiation response carries the session
    # URL used to continue/complete the upload.
    return start_response.info['location']
def make_public(self, client=None):
    """Grant read access on this blob to all users.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.
    """
    acl = self.acl
    acl.all().grant_read()
    acl.save(client=client)
def compose(self, sources, client=None):
    """Concatenate source blobs into this one.

    :type sources: list of :class:`Blob`
    :param sources: blobs whose contents will be composed into this blob.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.

    :raises: :exc:`ValueError` if this blob does not have its
             :attr:`content_type` set.
    """
    # The API requires the destination's content type to be set up front.
    if self.content_type is None:
        raise ValueError("Destination 'content_type' not set.")
    client = self._require_client(client)

    source_objects = [{'name': blob.name} for blob in sources]
    payload = {
        'sourceObjects': source_objects,
        'destination': self._properties.copy(),
    }
    api_response = client._connection.api_request(
        method='POST', path=self.path + '/compose', data=payload,
        _target_object=self)
    self._set_properties(api_response)
def rewrite(self, source, token=None, client=None):
    """Rewrite source blob into this one.

    :type source: :class:`Blob`
    :param source: blob whose contents will be rewritten into this blob.

    :type token: str
    :param token: Optional. Token returned from an earlier, not-completed
                  call to rewrite the same source blob.  If passed, the
                  result will include updated status and total bytes
                  written.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.

    :rtype: tuple
    :returns: ``(token, bytes_rewritten, total_bytes)``, where ``token``
              is a rewrite token (``None`` if the rewrite is complete),
              ``bytes_rewritten`` is the number of bytes rewritten so far,
              and ``total_bytes`` is the total number of bytes to be
              rewritten.
    """
    client = self._require_client(client)

    # Both the destination and the source may carry customer-supplied
    # encryption keys; send both header sets.
    headers = _get_encryption_headers(self._encryption_key)
    headers.update(_get_encryption_headers(
        source._encryption_key, source=True))

    query_params = {'rewriteToken': token} if token else {}

    api_response = client._connection.api_request(
        method='POST', path=source.path + '/rewriteTo' + self.path,
        query_params=query_params, data=self._properties, headers=headers,
        _target_object=self)
    bytes_rewritten = int(api_response['totalBytesRewritten'])
    total_bytes = int(api_response['objectSize'])

    # The 'resource' key is present if and only if the rewrite finished;
    # a finished rewrite carries no token.
    if api_response['done']:
        self._set_properties(api_response['resource'])
        return None, bytes_rewritten, total_bytes

    return api_response['rewriteToken'], bytes_rewritten, total_bytes
def update_storage_class(self, new_class, client=None):
    """Update blob's storage class via a rewrite-in-place.

    See:
    https://cloud.google.com/storage/docs/per-object-storage-class

    :type new_class: str
    :param new_class: new storage class for the object

    :type client: :class:`~google.cloud.storage.client.Client`
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the blob's bucket.
    """
    if new_class not in self._STORAGE_CLASSES:
        raise ValueError("Invalid storage class: %s" % (new_class,))
    client = self._require_client(client)

    # The blob is rewritten onto itself, so it acts as both source and
    # destination of the copy; send both sets of encryption headers.
    headers = _get_encryption_headers(self._encryption_key)
    headers.update(_get_encryption_headers(
        self._encryption_key, source=True))

    response = client._connection.api_request(
        method='POST', path=self.path + '/rewriteTo' + self.path,
        data={'storageClass': new_class}, headers=headers,
        _target_object=self)
    self._set_properties(response['resource'])
cache_control = _scalar_property('cacheControl')
"""HTTP 'Cache-Control' header for this object.
See: https://tools.ietf.org/html/rfc7234#section-5.2 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_disposition = _scalar_property('contentDisposition')
"""HTTP 'Content-Disposition' header for this object.
See: https://tools.ietf.org/html/rfc6266 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_encoding = _scalar_property('contentEncoding')
"""HTTP 'Content-Encoding' header for this object.
See: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_language = _scalar_property('contentLanguage')
"""HTTP 'Content-Language' header for this object.
See: http://tools.ietf.org/html/bcp47 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
content_type = _scalar_property('contentType')
"""HTTP 'Content-Type' header for this object.
See: https://tools.ietf.org/html/rfc2616#section-14.17 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
crc32c = _scalar_property('crc32c')
"""CRC32C checksum for this object.
See: http://tools.ietf.org/html/rfc4960#appendix-B and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
@property
def component_count(self):
    """Number of underlying components that make up this object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: int or ``NoneType``
    :returns: The component count (in case of a composed object) or
              ``None`` if the property is not set locally.  This property
              will not be set on objects not created via ``compose``.
    """
    raw = self._properties.get('componentCount')
    return int(raw) if raw is not None else None
@property
def etag(self):
    """Retrieve the ETag for the object.

    See: http://tools.ietf.org/html/rfc2616#section-3.11 and
    https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: str or ``NoneType``
    :returns: The blob etag or ``None`` if the property is not set locally.
    """
    return self._properties.get('etag')
@property
def generation(self):
    """Retrieve the generation for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: int or ``NoneType``
    :returns: The generation of the blob or ``None`` if the property
              is not set locally.
    """
    raw = self._properties.get('generation')
    return int(raw) if raw is not None else None
@property
def id(self):
    """Retrieve the ID for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: str or ``NoneType``
    :returns: The ID of the blob or ``None`` if the property is not
              set locally.
    """
    return self._properties.get('id')
md5_hash = _scalar_property('md5Hash')
"""MD5 hash for this object.
See: http://tools.ietf.org/html/rfc4960#appendix-B and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: str or ``NoneType``
"""
@property
def media_link(self):
    """Retrieve the media download URI for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: str or ``NoneType``
    :returns: The media link for the blob or ``None`` if the property is
              not set locally.
    """
    return self._properties.get('mediaLink')
@property
def metadata(self):
    """Retrieve arbitrary/application specific metadata for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: dict or ``NoneType``
    :returns: The metadata associated with the blob or ``None`` if the
              property is not set locally.
    """
    value = self._properties.get('metadata')
    # Deep-copy so callers cannot mutate our internal state in place.
    return copy.deepcopy(value)
@metadata.setter
def metadata(self, value):
    """Update arbitrary/application specific metadata for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :type value: dict
    :param value: (Optional) The blob metadata to set.
    """
    # Route the change through _patch_property so it is tracked for the
    # next PATCH request.
    self._patch_property('metadata', value)
@property
def metageneration(self):
    """Retrieve the metageneration for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: int or ``NoneType``
    :returns: The metageneration of the blob or ``None`` if the property
              is not set locally.
    """
    raw = self._properties.get('metageneration')
    return int(raw) if raw is not None else None
@property
def owner(self):
    """Retrieve info about the owner of the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: dict or ``NoneType``
    :returns: Mapping of owner's role/ID. If the property is not set
              locally, returns ``None``.
    """
    # Deep-copy so callers cannot mutate our internal state in place.
    return copy.deepcopy(self._properties.get('owner'))
@property
def self_link(self):
    """Retrieve the URI for the object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: str or ``NoneType``
    :returns: The self link for the blob or ``None`` if the property is
              not set locally.
    """
    return self._properties.get('selfLink')
@property
def size(self):
    """Size of the object, in bytes.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: int or ``NoneType``
    :returns: The size of the blob or ``None`` if the property
              is not set locally.
    """
    raw = self._properties.get('size')
    return int(raw) if raw is not None else None
@property
def storage_class(self):
    """Retrieve the storage class for the object.

    See: https://cloud.google.com/storage/docs/storage-classes

    :rtype: str or ``NoneType``
    :returns: If set, one of "MULTI_REGIONAL", "REGIONAL",
              "NEARLINE", "COLDLINE", "STANDARD", or
              "DURABLE_REDUCED_AVAILABILITY", else ``None``.
    """
    return self._properties.get('storageClass')
@property
def time_deleted(self):
    """Retrieve the timestamp at which the object was deleted.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: :class:`datetime.datetime` or ``NoneType``
    :returns: Datetime object parsed from the RFC3339 timestamp, or
              ``None`` if the property is not set locally.  If the blob
              has not been deleted, this will never be set.
    """
    stamp = self._properties.get('timeDeleted')
    if stamp is None:
        return None
    return _rfc3339_to_datetime(stamp)
@property
def time_created(self):
    """Retrieve the timestamp at which the object was created.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: :class:`datetime.datetime` or ``NoneType``
    :returns: Datetime object parsed from the RFC3339 timestamp, or
              ``None`` if the property is not set locally.
    """
    stamp = self._properties.get('timeCreated')
    if stamp is None:
        return None
    return _rfc3339_to_datetime(stamp)
@property
def updated(self):
    """Retrieve the timestamp at which the object was updated.

    See: https://cloud.google.com/storage/docs/json_api/v1/objects

    :rtype: :class:`datetime.datetime` or ``NoneType``
    :returns: Datetime object parsed from the RFC3339 timestamp, or
              ``None`` if the property is not set locally.
    """
    stamp = self._properties.get('updated')
    if stamp is None:
        return None
    return _rfc3339_to_datetime(stamp)
class _UploadConfig(object):
    """Faux message FBO apitools' 'configure_request'.

    Values extracted from apitools
    'samples/storage_sample/storage/storage_v1_client.py'
    """
    # Any MIME type is acceptable for upload.
    accept = ['*/*']
    # No client-side size ceiling on uploads.
    max_size = None
    resumable_multipart = True
    resumable_path = u'/resumable/upload/storage/v1/b/{bucket}/o'
    simple_multipart = True
    simple_path = u'/upload/storage/v1/b/{bucket}/o'
class _UrlBuilder(object):
    """Faux builder FBO apitools' 'configure_request'."""

    def __init__(self, bucket_name, object_name):
        # The 'name' query parameter identifies the object being uploaded.
        self.query_params = {'name': object_name}
        self._bucket_name = bucket_name
        self._relative_path = ''
def _get_encryption_headers(key, source=False):
    """Build customer-supplied encryption key headers.

    :type key: bytes
    :param key: 32 byte key to build request key and hash.

    :type source: bool
    :param source: If true, return headers for the "source" blob;
                   otherwise, return headers for the "destination" blob.

    :rtype: dict
    :returns: dict of HTTP headers being sent in request.
    """
    if key is None:
        return {}

    key = _to_bytes(key)
    sha256_digest = base64.b64encode(hashlib.sha256(key).digest()).rstrip()
    encoded_key = base64.b64encode(key).rstrip()

    # Copy operations distinguish the source blob's key headers from the
    # destination blob's via the header-name prefix.
    prefix = ('X-Goog-Copy-Source-Encryption-' if source
              else 'X-Goog-Encryption-')

    return {
        prefix + 'Algorithm': 'AES256',
        prefix + 'Key': _bytes_to_unicode(encoded_key),
        prefix + 'Key-Sha256': _bytes_to_unicode(sha256_digest),
    }
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import os
import re
import sys
import json
import time
import atexit
import base64
import shutil
import getpass
import hashlib
import binascii
import paramiko
import threading
import traceback
from rpaths import PosixPath
from tej import RemoteQueue, parse_ssh_destination, QueueDoesntExist, RemoteCommandFailure, JobNotFound
from tunnel import start_tunnel, check_tunnel, check_permission_denied
def simple_msg(message, *args, **kwargs):
    """Format *message* with the given arguments and print it to stdout."""
    formatted = message.format(*args, **kwargs)
    print(formatted, file=sys.stdout)
# Module-level message hook used throughout this file; ``set_msg`` swaps
# in a custom reporter (e.g. a GUI or logging callback).
msg = simple_msg

def set_msg(m):
    """Replace the global ``msg`` callable used for user-facing output."""
    global msg
    msg = m
# Guards lazily-built module-level caches (e.g. ALL_ENVS).
MAIN_LOCK = threading.RLock()

# Directory layout: shipped defaults live next to this module; user
# configs live in the working directory.
DEFAULT_BASE = os.path.dirname(__file__)
DIR_ENV_DEFAULT = os.path.join(DEFAULT_BASE, "default_envs")
DIR_ENV = "envs"
DIR_SERVER = "servers"
DIR_PROJECT = "projects"
DIR_TEMP = "temp_files"
EXT = ".json"  # every config file is JSON
DIR_REMOTE_TEJ = "~/.parcell"  # remote location used by the tej queue
LOCALHOST = "127.0.0.1"
DEFAULT_REGEX = "(.*)"  # default capture pattern for env commands
DEFAULT_LINE = 0  # default output line consumed by env commands

# Ordered upgrade functions per config kind; index == from-version.
UPGRADE_ENV = []
UPGRADE_SERVER = []
UPGRADE_PROJECT = []
def upgrade(array, version):
    """Decorator registering a config-upgrade function at *version*.

    Upgrade functions must be defined in version order; *array* acts as
    the ordered registry, so its current length must equal *version* when
    the decorated function is appended.
    """
    def register(func):
        expected = len(array)
        if expected != version:
            raise ValueError("upgrade definition in wrong order {0} != {1}".format(expected, version))
        array.append(func)
        return func

    return register
def _get_config_list(config, default=None, no_default=False):
    """Return the config names (file stems) available in *config*.

    Names found in *default* are merged in unless *no_default* is set.
    The *config* directory is created on demand.
    """
    if not os.path.exists(config):
        if no_default:
            return []
        os.makedirs(config)
    names = set(
        fname[:-len(EXT)] for fname in os.listdir(config)
        if fname.endswith(EXT))
    if default is not None and not no_default:
        names.update(
            fname[:-len(EXT)] for fname in os.listdir(default)
            if fname.endswith(EXT))
    return list(names)
def get_envs(no_default=False):
    """List env config names; shipped defaults included unless *no_default*."""
    return _get_config_list(DIR_ENV, DIR_ENV_DEFAULT, no_default=no_default)
def get_servers(no_default=False):
    """List server config names (there are no shipped server defaults)."""
    return _get_config_list(DIR_SERVER, no_default=no_default)
def get_projects(no_default=False):
    """List project config names (there are no shipped project defaults)."""
    return _get_config_list(DIR_PROJECT, no_default=no_default)
def _get_path(path, name):
    """Return the JSON config filename for *name* inside directory *path*."""
    filename = "{0}{1}".format(name, EXT)
    return os.path.join(path, filename)
def _write_json(path, obj):
    """Serialize *obj* as pretty-printed, key-sorted JSON at *path*.

    The file is opened in text mode: ``json.dump`` emits ``str`` output,
    which a binary-mode handle would reject on Python 3 (on Python 2 the
    two modes are equivalent on POSIX).
    """
    with open(path, 'w') as f:
        json.dump(obj, f, indent=2, sort_keys=True)
def _rm_json(config, path):
    """Delete the config file at *path*; drop *config* dir once empty."""
    if os.path.exists(path):
        os.remove(path)
    remaining = _get_config_list(config, no_default=True)
    if not remaining:
        os.rmdir(config)
# Serializes access to the open-config registry below.
CONFIG_LOG = threading.RLock()
# All currently open Config objects, keyed by their unique number.
ALL_CONFIG = {}
# Monotonic counter handing out unique Config identifiers.
CONFIG_NUM = 0
def _close_all_config():
    """Flush and close every still-open config at interpreter exit."""
    with CONFIG_LOG:
        # Iterate over a copy: Config.close() removes entries from
        # ALL_CONFIG while we loop.
        for c in list(ALL_CONFIG.values()):
            c.close()

# Ensure pending config changes are persisted on shutdown.
atexit.register(_close_all_config)
class Config(object):
    """Base class for JSON-backed, named configuration objects.

    A ``Config`` lazily loads ``<dir>/<name>.json`` on first access,
    tracks in-memory changes, and writes them back on :meth:`close`
    (all open configs are also closed via an ``atexit`` hook).
    Subclasses define the storage directory, an optional defaults
    directory, and a version-upgrade chain.
    """

    def __init__(self, name):
        global CONFIG_NUM
        # NOTE(review): _check_name/_get_bad_chars are defined elsewhere in
        # this module; presumably they validate allowed filename characters.
        if not _check_name(name):
            raise ValueError("bad character '{0}' in name '{1}'".format(_get_bad_chars(name)[0], name))
        self._name = name
        self._chg = False      # unsaved in-memory changes?
        self._closed = True    # backing file not loaded yet
        self._deleted = False  # backing file removed via delete_file()
        with CONFIG_LOG:
            # Hand out a unique id used as the key in ALL_CONFIG.
            self._config_num = CONFIG_NUM
            CONFIG_NUM += 1
        self._reopen()

    def is_deleted(self):
        """Whether the backing file was removed via :meth:`delete_file`."""
        return self._deleted

    def _reopen(self):
        """(Re-)load the backing file and register in ``ALL_CONFIG``."""
        if not self.is_closed():
            return
        with CONFIG_LOG:
            if self.is_deleted():
                return
            self._obj = self._read()
            self._closed = False
            ALL_CONFIG[self._config_num] = self

    def close(self):
        """Write pending changes (if any) and drop the registry entry."""
        with CONFIG_LOG:
            if self._config_num in ALL_CONFIG:
                del ALL_CONFIG[self._config_num]
            if self._chg and not self.is_closed() and not self.is_deleted():
                self._chg = False
                self._write(self.write_object(self._obj))
            self._closed = True

    def is_closed(self):
        """Whether the config currently holds no loaded content."""
        return self._closed

    def _get_config_path(self, config):
        """Full path of this config's JSON file inside directory *config*."""
        return _get_path(config, self._name)

    def _read(self):
        """Load the JSON file, falling back to the defaults directory.

        Applies pending version upgrades; when the on-disk copy had to be
        upgraded, the old file is kept as ``*.old`` and the upgraded
        version is written out immediately.
        """
        if self.is_deleted():
            raise ValueError("server description does not exist!")
        config = self.get_config_dir()
        if not os.path.exists(config):
            os.makedirs(config)
        path = self._get_config_path(config)
        is_new = False
        default = self.get_default_dir()
        if not os.path.exists(path) and default is not None:
            # Fall back to the shipped default config and report which
            # file is being used.
            path = self._get_config_path(default)
            msg("{0}", path)
            is_new = True
        with open(path, 'rb') as f:
            res = json.load(f)
        res, chg = self._check_version(res)
        if chg or is_new:
            if not is_new:
                # Keep the pre-upgrade file around as a backup.
                os.rename(path, path + ".old")
            self._write(res)
        return self.read_object(res)

    def _check_version(self, obj):
        """Run pending upgrade steps; return ``(obj, changed_flag)``."""
        upgrade = self.get_upgrade_list()
        v = int(obj.get("version", 0))
        chg = False
        while v < len(upgrade):
            obj = upgrade[v](obj)
            v += 1
            obj["version"] = v
            chg = True
        return obj, chg

    def _write(self, obj):
        """Persist *obj*, stamped with the current version, to disk."""
        if self.is_deleted():
            return
        config = self.get_config_dir()
        if not os.path.exists(config):
            os.makedirs(config)
        obj["version"] = len(self.get_upgrade_list())
        _write_json(self._get_config_path(config), obj)

    def delete_file(self):
        """Remove the backing file; the object becomes unusable."""
        with CONFIG_LOG:
            self._deleted = True
            self.close()
            config = self.get_config_dir()
            _rm_json(config, self._get_config_path(config))

    def get_config_dir(self):
        """Directory holding configs of this kind (subclass hook)."""
        raise NotImplementedError("get_config_dir")

    def get_default_dir(self):
        """Directory with shipped default configs, or None (subclass hook)."""
        return None

    def get_upgrade_list(self):
        """Ordered list of upgrade functions (subclass hook)."""
        raise NotImplementedError("get_upgrade_list")

    def set_change(self, chg):
        """Mark the config dirty (reloading it if it was closed)."""
        self._chg = chg
        if chg:
            self._reopen()

    def has_change(self):
        """Whether there are unsaved in-memory changes."""
        return self._chg

    def read_object(self, obj):
        """Convert the raw JSON dict after reading (subclass hook)."""
        return obj

    def write_object(self, obj):
        """Convert the in-memory object for writing (subclass hook)."""
        return obj

    def __getitem__(self, key):
        self._reopen()
        return self._obj[key]

    def __setitem__(self, key, value):
        self._reopen()
        # Only flag a change when the value actually differs.
        if key not in self._obj or self._obj[key] != value:
            self._obj[key] = value
            self.set_change(True)

    def __contains__(self, key):
        self._reopen()
        return key in self._obj

    def get(self, key, default=None):
        """dict-like ``get``: return *default* when *key* is absent."""
        if key not in self:
            return default
        return self[key]

    @property
    def name(self):
        """The config's name (also its filename stem)."""
        return self._name

    def get_obj(self, skip=None):
        """Return a shallow dict copy, omitting keys listed in *skip*."""
        self._reopen()
        return dict(
            it for it in self._obj.items() if skip is None or it[0] not in skip
        )
class EnvConfig(Config):
    """Environment configuration: commands probing a server's software.

    The JSON carries two command lists: ``"versions"`` (version probes,
    stored as 4-tuples) and ``"vital"`` (health values, stored as 5-tuples
    that additionally carry an ascending/descending sort flag).
    """

    def __init__(self, name):
        super(EnvConfig, self).__init__(name)

    def get_config_dir(self):
        return DIR_ENV

    def get_default_dir(self):
        # Built-in env definitions shipped with the package.
        return DIR_ENV_DEFAULT

    def get_upgrade_list(self):
        return UPGRADE_ENV

    def read_object(self, obj):
        """Parse command entries into tuples, compiling their regexes."""
        def get(field, version):
            res = []
            if field in obj:
                for e in obj[field]:
                    name = e["name"]
                    cmd = e["cmd"]
                    regex = re.compile(e.get("regex", DEFAULT_REGEX))
                    line = int(e.get("line", DEFAULT_LINE))
                    if not version:
                        # "vital" entries carry an ascending flag as well.
                        asc = e.get("asc", True)
                        res.append((name, cmd, regex, line, asc))
                    else:
                        res.append((name, cmd, regex, line))
            return res

        return {
            "versions": get("versions", True),
            "vital": get("vital", False),
        }

    def write_object(self, obj):
        """Serialize tuples back to JSON dicts, omitting default values."""
        def conv(e, version):
            if not version:
                name, cmd, regex, line, asc = e
                res = {
                    "name": name,
                    "cmd": cmd,
                    "asc": asc,
                }
            else:
                name, cmd, regex, line = e
                res = {
                    "name": name,
                    "cmd": cmd,
                }
            # Default regex/line values are left implicit on disk.
            if regex.pattern != DEFAULT_REGEX:
                res["regex"] = regex.pattern
            if line != DEFAULT_LINE:
                res["line"] = line
            return res

        return {
            "versions": [ conv(e, True) for e in obj["versions"] ],
            "vital": [ conv(e, False) for e in obj["vital"] ],
        }
@upgrade(UPGRADE_ENV, 0)
def up_e0(obj):
    """Upgrade step 0: rename the 'cpus' section to 'vital' and add the
    ascending-sort flag to each entry."""
    obj["vital"] = obj.pop("cpus")
    for entry in obj["vital"]:
        entry["asc"] = True
    return obj
# Cache of already-loaded environment configs, keyed by name.
ALL_ENVS = {}

def get_env(e):
    """Return the (cached) EnvConfig for environment name *e*."""
    with MAIN_LOCK:
        try:
            return ALL_ENVS[e]
        except KeyError:
            cfg = EnvConfig(e)
            ALL_ENVS[e] = cfg
            return cfg
# Keys never handed out as connection parameters (credentials, tunnel
# internals, and config bookkeeping).
SERVER_SKIP_KEYS = frozenset([
    "needs_pw",
    "tunnel",
    "tunnel_port",
    "needs_tunnel_pw",
    "key",
    "version",
])
class ServerConfig(Config):
    """Configuration of one remote server, including pinned SSH host keys.

    Passwords are deliberately never persisted to disk.
    """

    def __init__(self, name):
        super(ServerConfig, self).__init__(name)

    def get_config_dir(self):
        return DIR_SERVER

    def get_upgrade_list(self):
        return UPGRADE_SERVER

    def read_object(self, obj):
        # Refuse to load a config that leaked a password to disk.
        if "password" in obj:
            raise ValueError("password should not be stored in config! {0}".format(self._name))
        return obj

    def write_object(self, obj):
        # Strip any in-memory password before persisting.
        return dict((k, v) for (k, v) in obj.items() if k != "password")

    def get_destination_obj(self, front):
        """Return connection parameters; with *front* set and a tunnel
        configured, rewrite them to the local tunnel endpoint."""
        res = self.get_obj(SERVER_SKIP_KEYS)
        if front and "tunnel_port" in self:
            res["hostname"] = LOCALHOST
            res["port"] = self["tunnel_port"]
        return res

    def __setitem__(self, key, value):
        # Setting the password must not mark the config dirty -- the
        # dirty flag is restored after delegating to the base class.
        chg = self.has_change()
        super(ServerConfig, self).__setitem__(key, value)
        if key == "password":
            self.set_change(chg)

    def check_key(self, hostname, key_type, key_base64, key_fp):
        """Verify the server's SSH host key against the pinned one,
        interactively trusting it on first use."""
        if hostname != self["hostname"]:
            raise ValueError("mismatching hostname '{0}' != '{1}'".format(hostname, self["hostname"]))
        kobj = self.get("key", {})
        known_base64 = kobj.get("base64", None)
        if known_base64 is None:
            # First contact: recompute the fingerprint ourselves before
            # asking the user to confirm it.
            replay_fp = hashlib.md5(base64.decodestring(key_base64)).hexdigest()
            if replay_fp != key_fp:
                raise ValueError("Error encoding fingerprint of '{0}'! {1} != {2}\n{3}: {4}".format(hostname, replay_fp, key_fp, key_type, key_base64))
            msg("The authenticity of host '{0}' can't be established.", hostname)
            # Render the hex digest as colon-separated byte pairs.
            pretty_fp = ':'.join(a + b for (a, b) in zip(key_fp[::2], key_fp[1::2]))
            msg("{0} key fingerprint is {1}.", key_type, pretty_fp)
            if not _ask_yesno("Are you sure you want to continue connecting?"):
                sys.exit(1)
            self["key"] = {
                "type": key_type,
                "base64": key_base64,
            }
        # FIXME: there might be a better way
        if key_type != self["key"]["type"]:
            raise ValueError("mismatching key type for '{0}'. '{1}' != '{2}'".format(hostname, key_type, self["key"]["type"]))
        if key_base64 != self["key"]["base64"]:
            raise ValueError("mismatching {0} key for '{1}'. '{2}' != '{3}'".format(key_type, hostname, key_base64, self["key"]["base64"]))
@upgrade(UPGRADE_SERVER, 0)
def up_s0(obj):
    """Upgrade step 0: introduce an (empty) pinned host-key record."""
    obj["key"] = {"type": None, "base64": None}
    return obj
# Cache of already-loaded server configs, keyed by name.
ALL_SERVERS = {}

def get_server(s):
    """Return the (cached) ServerConfig for server name *s*."""
    with MAIN_LOCK:
        try:
            return ALL_SERVERS[s]
        except KeyError:
            cfg = ServerConfig(s)
            ALL_SERVERS[s] = cfg
            return cfg
class ProjectConfig(Config):
    """Configuration of one project: local path, command history, the
    environment to probe, and the servers to run on."""

    def __init__(self, name):
        super(ProjectConfig, self).__init__(name)
        # Make sure the project's local working directory exists.
        if not os.path.exists(self.path_local):
            os.makedirs(self.path_local)

    def get_config_dir(self):
        return DIR_PROJECT

    def get_upgrade_list(self):
        return UPGRADE_PROJECT

    def read_object(self, obj):
        # Resolve referenced env/server names into live config objects.
        return {
            "local": obj["local"],
            "cmds": obj["cmds"],
            "env": get_env(obj["env"]),
            "servers": [ get_server(s) for s in obj["servers"] ],
        }

    def write_object(self, obj):
        # Persist only the names of the referenced configs.
        return {
            "local": obj["local"],
            "cmds": obj["cmds"],
            "env": obj["env"].name,
            "servers": [ s.name for s in obj["servers"] ],
        }

    @property
    def path_local(self):
        # Local working directory of the project.
        return self["local"]

    @property
    def commands(self):
        # Previously issued commands, most recent first (see add_cmd).
        return self["cmds"]

    def remove_server(self, server):
        """Detach the server named *server* from this project."""
        self["servers"] = [ s for s in self["servers"] if s.name != server ]

    def add_cmd(self, cmd):
        """Put *cmd* at the front of the command history, deduplicated."""
        cmd = cmd.strip()
        if not cmd:
            return
        # Already the most recent entry -- nothing to do.
        if cmd in self["cmds"] and cmd == self["cmds"][0]:
            return
        self["cmds"] = [ cmd ] + [ c for c in self["cmds"] if c != cmd ]

    @property
    def servers(self):
        # Mapping of server name -> ServerConfig for this project.
        return dict( (s.name, s) for s in self["servers"] )
@upgrade(UPGRADE_PROJECT, 0)
def up_p0(obj):
    """Upgrade step 0: turn the single 'cmd' into the 'cmds' history list."""
    obj["cmds"] = [obj.pop("cmd")]
    return obj
# Cache of already-loaded project configs, keyed by name.
ALL_PROJECTS = {}

def get_project(p):
    """Return the (cached) ProjectConfig for project name *p*."""
    with MAIN_LOCK:
        try:
            return ALL_PROJECTS[p]
        except KeyError:
            cfg = ProjectConfig(p)
            ALL_PROJECTS[p] = cfg
            return cfg
def _get_tunnel_ports():
    """Return every tunnel port currently claimed by a known server."""
    return [
        int(cfg["tunnel_port"])
        for cfg in (get_server(name) for name in get_servers())
        if "tunnel_port" in cfg
    ]
# Whether a password typed once may be reused for other servers.
_REUSE_PW = False

def set_password_reuse(reuse_pw):
    """Enable or disable reusing one password for all servers."""
    global _REUSE_PW
    _REUSE_PW = reuse_pw
# Password shared across servers once reuse is enabled.
_GLOBAL_PASSWORD = None
# Cache of passwords already entered, keyed by (user, address).
_ALL_PWS = {}
# Whether we may still offer to reuse a typed password.
_ASK_REUSE = True
# Holds the first typed password until the user decides about reuse.
_ASK_REUSE_PRIMED = None
def ask_password(user, address):
    """Return the password for user@address, prompting at most once.

    Implements optional password reuse across servers: the first typed
    password is remembered and, at the next prompt, the user is asked
    whether it should be reused everywhere.
    """
    global _GLOBAL_PASSWORD
    global _ASK_REUSE
    global _ASK_REUSE_PRIMED
    pw_id = (user, address)
    if pw_id not in _ALL_PWS:
        if _ASK_REUSE_PRIMED is not None and _ask_yesno("Do you want to reuse this password for other servers"):
            # A password is pending and the user opted into reuse.
            set_password_reuse(True)
            res = _ASK_REUSE_PRIMED
            _ASK_REUSE_PRIMED = None
            _ASK_REUSE = False
            auto = True
        elif _REUSE_PW and _GLOBAL_PASSWORD is not None:
            # Reuse already enabled -- take the shared password.
            res = _GLOBAL_PASSWORD
            auto = True
        else:
            res = _getpass("password for {0}@{1}:".format(user, address))
            if _ASK_REUSE_PRIMED is not None:
                # A second password was typed; stop offering reuse.
                _ASK_REUSE_PRIMED = None
                _ASK_REUSE = False
            elif _ASK_REUSE:
                # Remember the first typed password as a reuse candidate.
                _ASK_REUSE_PRIMED = res
            auto = False
        if _REUSE_PW and _GLOBAL_PASSWORD is None:
            _GLOBAL_PASSWORD = res
        if auto:
            msg("Password for {0}@{1} is known", user, address)
        _ALL_PWS[pw_id] = res
    return _ALL_PWS[pw_id]
def _setup_tunnel(server):
    """Start the SSH tunnel described by the server's 'tunnel' field."""
    with MAIN_LOCK:
        s = server.name
        tunnel = parse_ssh_destination(server["tunnel"])
        # Tunnel passwords must be asked for, never stored.
        if "password" in tunnel:
            raise ValueError("tunnel password should not be stored in config! {0}@{1}:{2}".format(tunnel["username"], tunnel["hostname"], tunnel["port"]))
        if server.get("needs_tunnel_pw", False):
            tunnel["password"] = ask_password(tunnel["username"], tunnel["hostname"])
        start_tunnel(s, tunnel, server.get_destination_obj(False), server["tunnel_port"])
class LocalAddPolicy(paramiko.client.MissingHostKeyPolicy):
    """Host-key policy that verifies keys against the pinned server
    config instead of a known_hosts file."""

    def __init__(self, s_obj):
        # The ServerConfig used for host-key verification.
        self.s_obj = s_obj
        super(LocalAddPolicy, self).__init__()

    def missing_host_key(self, client, hostname, key):
        server = self.s_obj
        # When connecting through the local tunnel endpoint, validate
        # against the real remote hostname instead.
        if "tunnel_port" in server and hostname == "[{0}]:{1}".format(LOCALHOST, server["tunnel_port"]):
            hostname = server["hostname"]
        server.check_key(hostname, key.get_name(), key.get_base64(), binascii.hexlify(key.get_fingerprint()))
class TunnelableRemoteQueue(RemoteQueue):
    """RemoteQueue that validates host keys via the server config, which
    makes it usable through local SSH tunnels."""

    def __init__(self, *args, **kwargs):
        # needs to be before actual constructor because
        # _ssh_client is called from within
        self.s_obj = kwargs.pop("s_obj")
        super(TunnelableRemoteQueue, self).__init__(*args, **kwargs)

    def _ssh_client(self):
        # Replace the default missing-host-key handling with our
        # config-pinned verification.
        ssh = super(TunnelableRemoteQueue, self)._ssh_client()
        ssh.set_missing_host_key_policy(LocalAddPolicy(self.s_obj))
        return ssh
# Cache of connected remote queues, keyed by server name.
ALL_REMOTES = {}

def get_remote(server):
    """Return (creating if needed) the remote queue for *server*.

    Sets up the SSH tunnel first when one is configured, then retries
    connecting until the tunnel is usable or a fatal error is detected.
    """
    with MAIN_LOCK:
        s = server.name
        if "tunnel" in server and not check_tunnel(s):
            _setup_tunnel(server)
        if s not in ALL_REMOTES:
            if server.get("needs_pw", False) and "password" not in server:
                raise ValueError("no password found in {0}".format(s))
            remote_dir = "{0}_{1}".format(DIR_REMOTE_TEJ, s)
            dest = server.get_destination_obj(True)
            while s not in ALL_REMOTES:
                try:
                    ALL_REMOTES[s] = TunnelableRemoteQueue(dest, remote_dir, s_obj=server)
                except paramiko.ssh_exception.NoValidConnectionsError as e:
                    # errno is None while the tunnel may still be coming
                    # up: diagnose the tunnel, then retry after a pause.
                    if e.errno is None:
                        if "tunnel" in server:
                            if check_permission_denied(s):
                                msg("Incorrect password for {0}.", server["tunnel"])
                                sys.exit(1)
                            if not check_tunnel(s):
                                msg("Error starting tunnel! Re-run with -vv for more information.")
                                sys.exit(1)
                        time.sleep(1)
                    else:
                        raise e
        return ALL_REMOTES[s]
def test_connection(server, save):
    """Check SSH connectivity to *server*; optionally persist its config.

    Prompts for the password first when the server requires one.
    """
    s = server.name
    if server.get("needs_pw", False):
        server["password"] = ask_password(server["username"], server["hostname"])
    msg("Checking connectivity of {0}", s)
    conn = get_remote(server)
    # Cheap remote command to prove the connection works end to end.
    conn.check_call("hostname")
    if save:
        server.set_change(True)
        server.close()
def init_passwords():
    """Eagerly collect passwords by probing every known server."""
    with MAIN_LOCK:
        for s in get_servers():
            test_connection(get_server(s), False)
def _check_project(name):
    """Verify every server of project *name* is reachable and persist it."""
    p = get_project(name)
    for s in p["servers"]:
        test_connection(s, True)
    p.set_change(True)
    p.close()
def list_jobs(rq):
    """Return all job infos from the remote queue *rq*.

    A queue that does not exist yet is treated as empty.
    """
    try:
        # list() instead of a pass-through comprehension (same result).
        return list(rq.list())
    except QueueDoesntExist:
        return []
def kill_job(rq, s, j):
    """Best-effort kill and delete of job *j* on server *s*, including
    its locally cached files."""
    try:
        rq.kill(j)
    except (RemoteCommandFailure, JobNotFound):
        # Job may already be dead or gone -- fine either way.
        pass
    try:
        rq.delete(j)
    except JobNotFound:
        pass
    # Remove the local temp data of the job, if present.
    path = str(PosixPath(DIR_TEMP) / s / j)
    if os.path.exists(path):
        shutil.rmtree(path)
def remove_server(s):
    """Completely remove server *s*: detach it from all projects, kill
    its jobs, wipe its remote files, and delete its local description."""
    with MAIN_LOCK:
        msg("removing server '{0}' from projects", s)
        for p in get_projects(no_default=True):
            get_project(p).remove_server(s)
        msg("stopping all jobs on '{0}'", s)
        server = get_server(s)
        test_connection(server, False)
        rq = get_remote(server)
        for (j, _) in list_jobs(rq):
            kill_job(rq, s, j)
        rpath = str(rq.queue)
        msg("removing server side files '{0}'", rpath)
        # NOTE(review): rpath is interpolated into a shell command without
        # quoting -- assumes queue paths never contain spaces; confirm.
        rq.check_call("rm -rf -- {0}".format(rpath))
        msg("removing server description '{0}'", s)
        server.delete_file()
def remove_all():
    """Remove every server, project, and environment (local and remote)."""
    with MAIN_LOCK:
        msg("removing all servers")
        for s in get_servers(no_default=True):
            remove_server(s)
        msg("removing all projects")
        for p in get_projects(no_default=True):
            msg("removing project '{0}'", p)
            get_project(p).delete_file()
        msg("removing all environments")
        for e in get_envs(no_default=True):
            # BUG FIX: previously printed 'p' (the last project name)
            # instead of the environment actually being removed.
            msg("removing environment '{0}'", e)
            get_env(e).delete_file()
        msg("Successfully removed all local and remote data!")
# Whether interactive prompts are permitted (disabled in batch mode).
ALLOW_ASK = True

def allow_ask(allow):
    """Globally enable or disable interactive prompting."""
    global ALLOW_ASK
    ALLOW_ASK = allow
def _getline(line):
    """Prompt with *line* and return the user's input without the newline.

    Terminates the program when prompting is disallowed (batch mode).
    """
    if not ALLOW_ASK:
        msg("Not allowed to use prompt! Terminating!\n{0}--", line)
        sys.exit(1)
    # raw_input: this codebase targets Python 2.
    return raw_input(line).rstrip("\r\n")
def _getpass(line):
    """Prompt for a password without echoing it.

    Terminates the program when prompting is disallowed (batch mode).
    """
    if not ALLOW_ASK:
        msg("Not allowed to use prompt! Terminating!\n{0}--", line)
        sys.exit(1)
    return getpass.getpass(line)
def _ask(line, default=None, must=True):
    """Prompt until a non-empty answer (or an allowed fallback) is given.

    Empty input falls back to *default* when one is set; with *must*
    False an empty answer is accepted as-is.
    """
    prompt = "{0}{1}: ".format(
        line, '' if default is None else " ({0})".format(default))
    while True:
        answer = _getline(prompt)
        if answer != '':
            return answer
        if default is not None:
            return default
        if not must:
            return answer
def _ask_yesno(line):
    """Prompt until the user answers yes/y or no/n; return the boolean."""
    prompt = "{0} (yes|no): ".format(line)
    while True:
        answer = _getline(prompt)
        if answer in ('yes', 'y'):
            return True
        if answer in ('no', 'n'):
            return False
def confirm_critical(line, expect):
    """Show *line* and require the user to type *expect* verbatim.

    Returns True only on an exact match (used for destructive actions).
    """
    msg("{0}", line)
    res = _getline("Type '{0}' to confirm: ".format(expect))
    return res == expect
# Valid TCP port range.
PORT_LOWER = 1
PORT_UPPER = 65535
def _ask_port(line, default=None):
    """Prompt until a valid TCP port number is entered; return it as int."""
    while True:
        raw = _ask(line, default)
        try:
            port = int(raw)
        except ValueError:
            pass
        else:
            if PORT_LOWER <= port <= PORT_UPPER:
                return port
        msg("Must be integer in the range of {0}--{1}".format(PORT_LOWER, PORT_UPPER))
def _ask_choice(line, of=[], special={}):
num_of = len(of)
num_special = len(special.keys())
if not num_of and not num_special:
raise ValueError("no choices!")
if num_of + num_special == 1:
return (0, of[0], True) if num_of else tuple(list(special.items()[0]) + [ False ])
while True:
msg("{0}:", line)
for (ix, o) in enumerate(of):
msg(" ({0}): {1}", ix, o)
for (k, v) in special.items():
msg(" ({0}): {1}", k, v)
res = _getline("Please select: ")
if res in special:
res = (res, special[res], False)
break
try:
res = int(res)
if res >= 0 and res < len(of):
res = (res, of[res], True)
break
except ValueError:
pass
return res
def _ask_server_list():
    """Interactively assemble a list of server names, offering creation
    of new servers on the fly."""
    servers = []
    while True:
        # Offer only servers not yet selected.
        opt_list = [ s for s in get_servers() if s not in servers ]
        opt_cmds = {
            "a": "..add new server",
            "l": "..list selection",
        }
        if servers:
            # Finishing is only possible once something was selected.
            opt_cmds["d"] = "..done"
        cmd, el, is_name = _ask_choice("Add server", opt_list, opt_cmds)
        if is_name:
            servers.append(el)
        elif cmd == "a":
            msg("Adding new server..")
            name, okay = add_server()
            if okay:
                servers.append(name)
            else:
                msg("Creating server failed..")
        elif cmd == "d":
            break
        elif cmd == "l":
            msg("Currently selected servers:")
            for s in servers:
                msg(" {0}", s)
    return servers
VALID_NAME_CHARS = frozenset("ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz" \
"0123456789_-+=@%:.,")
def _check_name(name):
return all(c in VALID_NAME_CHARS for c in name)
def _get_bad_chars(name):
return [ c for c in name if c not in VALID_NAME_CHARS ]
def add_project(name):
    """Interactively create project *name*; return True on success."""
    if not _check_name(name):
        msg("Invalid character {0} in project name '{1}'", _get_bad_chars(name)[0], name)
        return False
    if name in get_projects():
        msg("Project '{0}' already exists!", name)
        return False
    msg("Create project '{0}'.", name)
    project = {
        "cmds": [],
        "version": 1,
    }
    project["local"] = _ask("Project root", default=os.path.join(DIR_PROJECT, name))
    _, env, _ = _ask_choice("Environment", of=get_envs())
    project["env"] = env
    project["servers"] = _ask_server_list()
    # Write the raw JSON directly; it is validated via the load below.
    _write_json(_get_path(DIR_PROJECT, name), project)
    msg("Checking project configuration")
    _check_project(name)
    msg("Successfully created project '{0}'!", name)
    return True
def _infer_server_name(hostname):
    """Suggest an unused server name derived from *hostname*.

    Tries progressively longer dotted prefixes of the hostname and
    returns the first one that is not taken; falls back to the full
    hostname, or None when nothing suitable is free (or the hostname
    contains invalid characters).
    """
    if not _check_name(hostname):
        return None
    labels = hostname.split('.')
    prefix = ''
    for label in labels[:-1]:
        prefix = label if prefix == '' else "{0}.{1}".format(prefix, label)
        if prefix not in get_servers():
            return prefix
    return None if hostname in get_servers() else hostname
def add_server():
    """Interactively create a new server config.

    Returns (name, okay): the chosen server name and whether creation
    (including a connectivity check) succeeded.
    """
    hostname = _ask("Hostname")
    name = _ask("Server name", default=_infer_server_name(hostname))
    if not _check_name(name):
        msg("Invalid character {0} in server name '{1}'", _get_bad_chars(name)[0], name)
        return None, False
    if name in get_servers():
        msg("Server '{0}' already exists!", name)
        return None, False
    try:
        server = {}
        server["hostname"] = hostname
        server["username"] = _ask("Username")
        server["port"] = _ask_port("Port", default=22)
        server["needs_pw"] = _ask_yesno("Is a password required?")
        if _ask_yesno("Is a tunnel needed?"):
            tunnel_host = _ask("Tunnel hostname")
            tunnel_user = _ask("Tunnel username")
            tport_final = None
            # Propose the first free local port; re-ask until the user
            # picks one that no other server claims.
            while tport_final is None:
                tport = 11111
                blocked = set(_get_tunnel_ports())
                while tport in blocked:
                    tport += 1
                    if tport > PORT_UPPER:
                        raise ValueError("All ports are blocked?")
                tport_final = _ask_port("Unique tunnel port", default=tport)
                if tport_final in blocked:
                    msg("Port {0} is not unique!", tport_final)
                    tport_final = None
            server["tunnel_port"] = tport_final
            tunnel_port = _ask_port("Standard tunnel port", default=22)
            # Encode the tunnel destination as user@host[:port].
            server["tunnel"] = "{0}@{1}{2}".format(
                tunnel_user,
                tunnel_host,
                ":{0}".format(tunnel_port) if tunnel_port != 22 else ""
            )
            server["needs_tunnel_pw"] = _ask_yesno("Is a tunnel password required?")
        _write_json(_get_path(DIR_SERVER, name), server)
        msg("Checking server configuration")
        test_connection(get_server(name), True)
        msg("Successfully created server '{0}'!", name)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        msg("Error creating server {0}:\n{1}", name, traceback.format_exc())
        return None, False
    return name, True
| |
#!/usr/bin/env python
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Runs a side-by-side comparison of two PerfKitBenchmarker revisions.
Given a pair of revisions (e.g., 'dev', 'master') and command-line arguments,
this tool runs 'pkb.py' once for each revision and creates a report showing the
differences in the results between the two runs.
"""
import argparse
import collections
import contextlib
import difflib
import itertools
import json
import logging
import os
import pprint
import shlex
import shutil
import subprocess
import tempfile
import jinja2
# Flags passed to pkb.py when the user supplies none.
DEFAULT_FLAGS = ('--cloud=GCP', '--machine_type=n1-standard-4',
                 '--benchmarks=netperf')
# Keys in the sample JSON we expect to vary between runs.
# These will be removed prior to diffing samples.
VARYING_KEYS = 'run_uri', 'sample_uri', 'timestamp', 'value'
# Template name, in same directory as this file.
TEMPLATE = 'side_by_side.html.j2'
# Thresholds for highlighting results (absolute percent difference).
SMALL_CHANGE_THRESHOLD = 5
MEDIUM_CHANGE_THRESHOLD = 10
LARGE_CHANGE_THRESHOLD = 25

# Outcome of one PerfKitBenchmarker run: revision metadata plus samples.
PerfKitBenchmarkerResult = collections.namedtuple(
    'PerfKitBenchmarkerResult',
    ['name', 'description', 'sha1', 'samples', 'flags'])
@contextlib.contextmanager
def TempDir(delete=True, **kwargs):
  """Directory equivalent of tempfile.NamedTemporaryFile.

  When used as a context manager, yields a temporary directory which by default
  is removed when the context manager goes out of scope.

  Example usage:

    >>> with TempDir(prefix='perfkit') as td:
    ...   shutil.copy('test.txt', td)

  Args:
    delete: Delete the directory on exit?
    **kwargs: Passed to tempfile.mkdtemp.

  Yields:
    String. Path to the temporary directory.
  """
  td = tempfile.mkdtemp(**kwargs)
  logging.info('Created %s', td)
  try:
    yield td
  finally:
    if delete:
      logging.info('Removing %s', td)
      shutil.rmtree(td)
def _GitCommandPrefix():
"""Prefix for all git commands.
Returns:
list of strings; 'git' with an appropriate '--git-dir' flag.
"""
git_dir = os.path.join(os.path.dirname(__file__), '..', '..', '.git')
return ['git', '--git-dir', git_dir]
def _GitRevParse(revision):
  """Returns the output of 'git rev-parse' for 'revision'."""
  cmd = _GitCommandPrefix() + ['rev-parse', revision]
  return subprocess.check_output(cmd).rstrip()
def _GitDescribe(revision):
  """Returns the output of 'git describe' for 'revision'."""
  cmd = _GitCommandPrefix() + ['describe', '--always', revision]
  return subprocess.check_output(cmd).rstrip()
@contextlib.contextmanager
def PerfKitBenchmarkerCheckout(revision):
  """Yields a directory with PerfKitBenchmarker checked out to 'revision'.

  Exports 'revision' via 'git archive' and unpacks it into a temporary
  directory, which is removed again when the context manager exits.

  Args:
    revision: string. git commit identifier to check out.

  Yields:
    String. Path to the temporary checkout directory.

  Raises:
    subprocess.CalledProcessError: if 'git archive' or 'tar' fails.
  """
  archive_cmd = _GitCommandPrefix() + ['archive', revision]
  logging.info('Running: %s', archive_cmd)
  p_archive = subprocess.Popen(archive_cmd, stdout=subprocess.PIPE)
  with TempDir(prefix='pkb-test-') as td:
    tar_cmd = ['tar', 'xf', '-']
    logging.info('Running %s in %s', tar_cmd, td)
    p_tar = subprocess.Popen(tar_cmd, stdin=p_archive.stdout, cwd=td)
    archive_status = p_archive.wait()
    tar_status = p_tar.wait()
    if archive_status:
      # BUG FIX: CalledProcessError takes (returncode, cmd); the arguments
      # were swapped here (the tar branch below already had them right).
      raise subprocess.CalledProcessError(archive_status, archive_cmd)
    if tar_status:
      raise subprocess.CalledProcessError(tar_status, tar_cmd)
    yield td
def RunPerfKitBenchmarker(revision, flags):
  """Runs perfkitbenchmarker, returning the results as parsed JSON.

  Args:
    revision: string. git commit identifier. Version of PerfKitBenchmarker to
      run.
    flags: list of strings. Default arguments to pass to `pkb.py.`

  Returns:
    PerfKitBenchmarkerResult. Deserialized JSON output of running
    PerfKitBenchmarker with `--json_path`, plus revision metadata.
  """
  sha1 = _GitRevParse(revision)
  description = _GitDescribe(revision)
  with PerfKitBenchmarkerCheckout(revision) as td:
    with tempfile.NamedTemporaryFile(suffix='.json') as tf:
      # Capture samples via a temporary JSON results file.
      flags = flags + ['--json_path=' + tf.name]
      cmd = ['./pkb.py'] + flags
      logging.info('Running %s in %s', cmd, td)
      subprocess.check_call(cmd, cwd=td)
      # One JSON-encoded sample per line.
      samples = [json.loads(line) for line in tf]
      return PerfKitBenchmarkerResult(name=revision, sha1=sha1, flags=flags,
                                      samples=samples, description=description)
def _SplitLabels(labels):
"""Parse the 'labels' key from a PerfKitBenchmarker record.
Labels are recorded in '|key:value|,|key:value|' form.
This function transforms them to a dict.
Args:
labels: string. labels to parse.
Returns:
dict. Parsed 'labels'.
"""
result = {}
for item in labels.strip('|').split('|,|'):
k, v = item.split(':', 1)
result[k] = v
return result
def _CompareSamples(a, b, context=True, numlines=1):
  """Generate an HTML table showing differences between 'a' and 'b'.

  Args:
    a: dict, as output by PerfKitBenchmarker.
    b: dict, as output by PerfKitBenchmarker.
    context: boolean. Show context in diff? If False, all lines are output,
      even those which are equal.
    numlines: int. Passed to difflib.Htmldiff.make_table.

  Returns:
    string or None. An HTML table, or None if there are no differences.
  """
  left = dict(a)
  right = dict(b)
  left['metadata'] = _SplitLabels(left.pop('labels', ''))
  right['metadata'] = _SplitLabels(right.pop('labels', ''))
  # Prune the keys in VARYING_KEYS prior to comparison to make the diff
  # more informative.
  for sample in (left, right):
    for key in VARYING_KEYS:
      sample.pop(key, None)
  left_lines = pprint.pformat(left).splitlines()
  right_lines = pprint.pformat(right).splitlines()
  if left_lines == right_lines and context:
    return None
  return difflib.HtmlDiff().make_table(
      left_lines, right_lines, context=context, numlines=numlines)
def _MatchSamples(base_samples, head_samples):
"""Match items from base_samples with items from head_samples.
Rows are matched using 'test', 'metric', and 'unit' fields.
Args:
base_samples: List of dicts.
head_samples: List of dicts.
Returns:
List of pairs, each item of the pair containing either a dict or None.
"""
def ExtractKeys(samples):
return [(i['test'], i['metric'], i['unit']) for i in samples]
base_keys = ExtractKeys(base_samples)
head_keys = ExtractKeys(head_samples)
sm = difflib.SequenceMatcher('', base_keys, head_keys)
result = []
for opcode, base_begin, base_end, head_begin, head_end in sm.get_opcodes():
if opcode == 'equal':
result.extend(zip(base_samples[base_begin:base_end],
head_samples[head_begin:head_end]))
elif opcode == 'replace':
result.extend(zip(base_samples[base_begin:base_end],
[None] * (base_end - base_begin)))
result.extend(zip([None] * (head_end - head_begin),
head_samples[head_begin:head_end]))
elif opcode == 'delete':
result.extend(zip(base_samples[base_begin:base_end],
[None] * (base_end - base_begin)))
elif opcode == 'insert':
result.extend(zip([None] * (head_end - head_begin),
head_samples[head_begin:head_end]))
else:
raise AssertionError('Unknown op: ' + opcode)
return result
def RenderResults(base_result, head_result, template_name=TEMPLATE,
                  **kwargs):
  """Render the results of a comparison as an HTML page.

  Args:
    base_result: PerfKitBenchmarkerResult. Result of running against base
      revision.
    head_result: PerfKitBenchmarkerResult. Result of running against head
      revision.
    template_name: string. The filename of the template.
    kwargs: Additional arguments to Template.render.

  Returns:
    String. The HTML template.
  """
  def _ClassForPercentDifference(percent_diff):
    """Crude highlighting of differences between runs.

    Samples varying by >25% are colored red.
    Samples varying by 5-25% are colored orange.
    Other samples are colored green.

    Args:
      percent_diff: float. percent difference between values.
    """
    if percent_diff < 0:
      direction = 'decrease'
    else:
      direction = 'increase'
    percent_diff = abs(percent_diff)
    if percent_diff > LARGE_CHANGE_THRESHOLD:
      size = 'large'
    elif percent_diff > MEDIUM_CHANGE_THRESHOLD:
      size = 'medium'
    elif percent_diff > SMALL_CHANGE_THRESHOLD:
      size = 'small'
    else:
      return ''
    return 'value-{0}-{1}'.format(direction, size)

  env = jinja2.Environment(
      loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
      undefined=jinja2.StrictUndefined)
  env.globals['class_for_percent_diff'] = _ClassForPercentDifference
  # itertools.izip_longest exists on Python 2 only; fall back to the
  # Python 3 spelling so the template helper works on both.
  env.globals['izip_longest'] = getattr(
      itertools, 'izip_longest', getattr(itertools, 'zip_longest', None))
  # BUG FIX: honor the 'template_name' parameter; the template file name
  # was previously hard-coded (masked by the identical default).
  template = env.get_template(template_name)

  matched = _MatchSamples(base_result.samples,
                          head_result.samples)

  # Generate sample diffs
  sample_context_diffs = []
  sample_diffs = []
  for base_sample, head_sample in matched:
    if not base_sample or not head_sample:
      # Sample inserted or deleted.
      continue
    sample_context_diffs.append(
        _CompareSamples(base_sample, head_sample))
    sample_diffs.append(
        _CompareSamples(base_sample, head_sample, context=False))

  # Generate flag diffs
  flag_diffs = difflib.HtmlDiff().make_table(
      base_result.flags, head_result.flags, context=False)

  # Used for generating a chart with differences. Escape HTML-special
  # characters so the JSON can be embedded in a <script> block.
  matched_json = json.dumps(matched)\
      .replace(u'<', u'\\u003c') \
      .replace(u'>', u'\\u003e') \
      .replace(u'&', u'\\u0026') \
      .replace(u"'", u'\\u0027')
  return template.render(base=base_result,
                         head=head_result,
                         matched_samples=matched,
                         matched_samples_json=matched_json,
                         sample_diffs=sample_diffs,
                         sample_context_diffs=sample_context_diffs,
                         flag_diffs=flag_diffs,
                         infinity=float('inf'),
                         **kwargs)
def main():
  """Parse arguments, run both revisions (unless --rerender), and write the
  JSON and HTML comparison reports."""
  p = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter,
      description=__doc__)
  p.add_argument('-t', '--title', default='PerfKitBenchmarker Comparison',
                 help="""HTML report title""")
  p.add_argument('--base', default='master', help="""Base revision.""")
  p.add_argument('--head', default='dev', help="""Head revision.""")
  p.add_argument('--base-flags', default=None, help="""Flags for run against
                 '--base' revision. Will be combined with --flags.""",
                 type=shlex.split)
  p.add_argument('--head-flags', default=None, help="""Flags for run against
                 '--head' revision. Will be combined with --flags.""",
                 type=shlex.split)
  p.add_argument('-f', '--flags', type=shlex.split,
                 help="""Command line flags (Default: {0})""".format(
                     ' '.join(DEFAULT_FLAGS)))
  p.add_argument('-p', '--parallel', default=False, action='store_true',
                 help="""Run concurrently""")
  p.add_argument('--rerender', help="""Re-render the HTML report from a JSON
                 file [for developers].""", action='store_true')
  p.add_argument('json_output', help="""JSON output path.""")
  p.add_argument('html_output', help="""HTML output path.""")
  a = p.parse_args()

  # Per-revision flags must be given for both revisions or neither;
  # --flags is appended to both.
  if (a.base_flags or a.head_flags):
    if not (a.base_flags and a.head_flags):
      p.error('--base-flags and --head-flags must be specified together.\n'
              '\tbase flags={0}\n\thead flags={1}'.format(
                  a.base_flags, a.head_flags))
    a.base_flags = a.base_flags + (a.flags or [])
    a.head_flags = a.head_flags + (a.flags or [])
  else:
    # Just --flags
    assert not a.base_flags, a.base_flags
    assert not a.head_flags, a.head_flags
    a.base_flags = a.flags or list(DEFAULT_FLAGS)
    a.head_flags = a.flags or list(DEFAULT_FLAGS)

  if not a.rerender:
    if a.parallel:
      # Run both revisions concurrently.
      from concurrent import futures
      with futures.ThreadPoolExecutor(max_workers=2) as executor:
        base_res_fut = executor.submit(RunPerfKitBenchmarker, a.base,
                                       a.base_flags)
        head_res_fut = executor.submit(RunPerfKitBenchmarker, a.head,
                                       a.head_flags)
        base_res = base_res_fut.result()
        head_res = head_res_fut.result()
    else:
      base_res = RunPerfKitBenchmarker(a.base, a.base_flags)
      head_res = RunPerfKitBenchmarker(a.head, a.head_flags)
    logging.info('Base result: %s', base_res)
    logging.info('Head result: %s', head_res)

    # Persist raw results so the report can be re-rendered later.
    with argparse.FileType('w')(a.json_output) as json_fp:
      logging.info('Writing JSON to %s', a.json_output)
      json.dump({'head': head_res._asdict(),
                 'base': base_res._asdict()},
                json_fp,
                indent=2)
      json_fp.write('\n')
  else:
    # --rerender: load a previous run instead of executing benchmarks.
    logging.info('Loading results from %s', a.json_output)
    with argparse.FileType('r')(a.json_output) as json_fp:
      d = json.load(json_fp)
      base_res = PerfKitBenchmarkerResult(**d['base'])
      head_res = PerfKitBenchmarkerResult(**d['head'])

  with argparse.FileType('w')(a.html_output) as html_fp:
    logging.info('Writing HTML to %s', a.html_output)
    html_fp.write(RenderResults(base_result=base_res,
                                head_result=head_res,
                                varying_keys=VARYING_KEYS,
                                title=a.title))


if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  main()
| |
import graphene
from saleor.product.models import DigitalContent, ProductVariant
from tests.api.utils import get_graphql_content
from tests.utils import create_image
from .utils import get_multipart_request_body
def test_fetch_all_digital_contents(
        staff_api_client, variant, digital_content, permission_manage_products):
    """Listing digital contents returns one edge per existing content."""
    digital_content_num = DigitalContent.objects.count()
    query = """
    query {
        digitalContents(first:1){
            edges{
                node{
                    id
                    contentFile
                }
            }
        }
    }
    """
    response = staff_api_client.post_graphql(
        query, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    edges = content['data']['digitalContents']['edges']
    # With the fixtures there is exactly one content, so first:1 covers all.
    assert len(edges) == digital_content_num
def test_fetch_single_digital_content(
        staff_api_client, variant, digital_content, permission_manage_products):
    """A digital content can be fetched by its global ID."""
    query = """
    query {
        digitalContent(id:"%s"){
            id
        }
    }
    """ % graphene.Node.to_global_id('DigitalContent', digital_content.id)
    response = staff_api_client.post_graphql(
        query, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert 'digitalContent' in content['data']
    assert 'id' in content['data']['digitalContent']
def test_digital_content_create_mutation_custom_settings(
        monkeypatch, staff_api_client, variant, permission_manage_products,
        media_root):
    """Creating digital content with explicit settings stores those values."""
    query = """
    mutation createDigitalContent($variant: ID!,
        $input: DigitalContentUploadInput!) {
        digitalContentCreate(variantId: $variant, input: $input) {
            variant {
                id
            }
        }
    }
    """
    image_file, image_name = create_image()
    url_valid_days = 3
    max_downloads = 5
    variables = {
        'variant': graphene.Node.to_global_id('ProductVariant', variant.id),
        'input': {
            'useDefaultSettings': False,
            'maxDownloads': max_downloads,
            'urlValidDays': url_valid_days,
            'automaticFulfillment': True,
            'contentFile': image_name
        }
    }
    # File uploads go through a multipart request.
    body = get_multipart_request_body(query, variables, image_file, image_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.digital_content.content_file
    assert variant.digital_content.max_downloads == max_downloads
    assert variant.digital_content.url_valid_days == url_valid_days
    assert variant.digital_content.automatic_fulfillment
    assert not variant.digital_content.use_default_settings
def test_digital_content_create_mutation_default_settings(
        monkeypatch, staff_api_client, variant, permission_manage_products,
        media_root):
    """Creating digital content with useDefaultSettings keeps the flag set."""
    query = """
    mutation digitalCreate($variant: ID!,
        $input: DigitalContentUploadInput!) {
        digitalContentCreate(variantId: $variant, input: $input) {
            variant {
                id
            }
        }
    }
    """
    image_file, image_name = create_image()
    variables = {
        'variant': graphene.Node.to_global_id('ProductVariant', variant.id),
        'input': {
            'useDefaultSettings': True,
            'contentFile': image_name
        }
    }
    body = get_multipart_request_body(query, variables, image_file, image_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.digital_content.content_file
    assert variant.digital_content.use_default_settings
def test_digital_content_create_mutation_removes_old_content(
        monkeypatch, staff_api_client, variant, permission_manage_products,
        media_root):
    """Creating new digital content replaces a previously attached one."""
    query = """
    mutation digitalCreate($variant: ID!,
        $input: DigitalContentUploadInput!) {
        digitalContentCreate(variantId: $variant, input: $input) {
            variant {
                id
            }
        }
    }
    """
    image_file, image_name = create_image()
    # Pre-existing content that the mutation should delete.
    d_content = DigitalContent.objects.create(
        content_file=image_file, product_variant=variant,
        use_default_settings=True)
    variables = {
        'variant': graphene.Node.to_global_id('ProductVariant', variant.id),
        'input': {
            'useDefaultSettings': True,
            'contentFile': image_name
        }
    }
    body = get_multipart_request_body(query, variables, image_file, image_name)
    response = staff_api_client.post_multipart(
        body, permissions=[permission_manage_products])
    get_graphql_content(response)
    variant.refresh_from_db()
    assert variant.digital_content.content_file
    assert variant.digital_content.use_default_settings
    # The old content row must be gone.
    assert not DigitalContent.objects.filter(id=d_content.id).exists()
def test_digital_content_delete_mutation(
        monkeypatch, staff_api_client, variant, digital_content,
        permission_manage_products):
    """Deleting digital content detaches it from the variant."""
    query = """
    mutation digitalDelete($variant: ID!){
        digitalContentDelete(variantId:$variant){
            variant{
                id
            }
        }
    }
    """
    variant.digital_content = digital_content
    variant.digital_content.save()
    assert hasattr(variant, 'digital_content')
    variables = {
        'variant': graphene.Node.to_global_id('ProductVariant', variant.id)
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    get_graphql_content(response)
    # Reload from the DB; the reverse relation must be gone.
    variant = ProductVariant.objects.get(id=variant.id)
    assert not hasattr(variant, 'digital_content')
def test_digital_content_update_mutation(
        monkeypatch, staff_api_client, variant, digital_content,
        permission_manage_products):
    """The update mutation must persist the new download/URL settings on
    the variant's digital content."""
    expected_url_valid_days = 3
    expected_max_downloads = 5
    mutation = """
    mutation digitalUpdate($variant: ID!, $input: DigitalContentInput!){
        digitalContentUpdate(variantId:$variant, input: $input){
            variant{
                id
            }
            content{
                contentFile
                maxDownloads
                urlValidDays
                automaticFulfillment
            }
        }
    }
    """
    # Start from automatic_fulfillment disabled so the update is observable.
    digital_content.automatic_fulfillment = False
    variant.digital_content = digital_content
    variant.digital_content.save()
    mutation_variables = {
        'variant': graphene.Node.to_global_id('ProductVariant', variant.id),
        'input': {
            'maxDownloads': expected_max_downloads,
            'urlValidDays': expected_url_valid_days,
            'automaticFulfillment': True,
            'useDefaultSettings': False,
        }
    }
    api_response = staff_api_client.post_graphql(
        mutation, mutation_variables,
        permissions=[permission_manage_products])
    get_graphql_content(api_response)
    # Reload and verify every field the mutation was asked to change.
    variant = ProductVariant.objects.get(id=variant.id)
    refreshed_content = variant.digital_content
    assert refreshed_content.max_downloads == expected_max_downloads
    assert refreshed_content.url_valid_days == expected_url_valid_days
    assert refreshed_content.automatic_fulfillment
def test_digital_content_update_mutation_missing_content(
        monkeypatch, staff_api_client, variant,
        permission_manage_products):
    """Updating digital content for a variant that has none must fail with
    exactly one error attributed to the ``variantId`` argument."""
    url_valid_days = 3
    max_downloads = 5
    query = """
    mutation digitalUpdate($variant: ID!, $input: DigitalContentInput!){
        digitalContentUpdate(variantId:$variant, input: $input){
            variant{
                id
            }
            content{
                contentFile
                maxDownloads
                urlValidDays
                automaticFulfillment
            }
            errors {
                field
                message
            }
        }
    }
    """
    # Note: unlike the success-path test, no DigitalContent is attached to
    # the variant here — that is the condition being exercised.
    variables = {
        'variant': graphene.Node.to_global_id('ProductVariant', variant.id),
        'input': {
            'maxDownloads': max_downloads,
            'urlValidDays': url_valid_days,
            'automaticFulfillment': True,
            'useDefaultSettings': False,
        }
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products])
    content = get_graphql_content(response)
    assert content['data']['digitalContentUpdate']['errors']
    errors = content['data']['digitalContentUpdate']['errors']
    assert len(errors) == 1
    assert errors[0]['field'] == 'variantId'
def test_digital_content_url_create(
        monkeypatch, staff_api_client, variant, permission_manage_products,
        digital_content):
    """digitalContentUrlCreate must add exactly one URL to content that
    previously had none."""
    mutation = """
    mutation digitalContentUrlCreate($input: DigitalContentUrlCreateInput!) {
        digitalContentUrlCreate(input: $input) {
            digitalContentUrl {
                id
                url
            }
            errors {
                field
                message
            }
        }
    }
    """
    mutation_variables = {
        'input': {
            'content': graphene.Node.to_global_id(
                'DigitalContent', digital_content.id),
        }
    }
    # Precondition: no URLs exist yet for this content.
    assert digital_content.urls.count() == 0
    api_response = staff_api_client.post_graphql(
        mutation, mutation_variables,
        permissions=[permission_manage_products])
    get_graphql_content(api_response)
    digital_content.refresh_from_db()
    assert digital_content.urls.count() == 1
| |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.executor import MainThreadExecutor
from c7n.resources.rdscluster import RDSCluster, _run_cluster_method
from .common import BaseTest
class RDSClusterTest(BaseTest):
    """Functional tests for the ``rds-cluster`` resource.

    Each test replays recorded AWS API flight data, so the sequence of API
    calls made by a policy must match the recording exactly.
    """

    def remove_augments(self):
        # This exists because we added tag augmentation after eight other tests
        # were created and I did not want to re-create the state to re-record
        # them with the extra API call. If those get re-recorded we can remove
        # this. -scotwk
        self.patch(RDSCluster, "augment", lambda x, y: y)

    def test_rdscluster_security_group(self):
        """security-group filter matches the cluster in the default SG."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_sg_filter")
        p = self.load_policy(
            {
                "name": "rdscluster-sg",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "security-group", "key": "GroupName", "value": "default"}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["DatabaseName"], "devtest")

    def test_rdscluster_subnet(self):
        """subnet filter resolves the cluster's subnet attributes."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_subnet")
        p = self.load_policy(
            {
                "name": "rdscluster-sub",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "subnet", "key": "MapPublicIpOnLaunch", "value": True}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]["DatabaseName"], "devtest")

    def test_rdscluster_simple(self):
        """An unfiltered policy reports every recorded cluster."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_simple")
        p = self.load_policy(
            {"name": "rdscluster-simple", "resource": "rds-cluster"},
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 2)

    def test_rdscluster_simple_filter(self):
        """Value filter on DBClusterIdentifier narrows to one cluster."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_simple")
        p = self.load_policy(
            {
                "name": "rdscluster-simple-filter",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_delete(self):
        """delete action without touching the cluster's member instances."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_delete")
        p = self.load_policy(
            {
                "name": "rdscluster-delete",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "delete", "delete-instances": False}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_delete_with_instances(self):
        """delete action that also removes the cluster's instances."""
        self.remove_augments()
        session_factory = self.replay_flight_data(
            "test_rdscluster_delete_with_instances"
        )
        p = self.load_policy(
            {
                "name": "rdscluster-delete",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "delete", "delete-instances": True}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_retention(self):
        """retention action sets the backup retention period (days=21)."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_retention")
        p = self.load_policy(
            {
                "name": "rdscluster-delete",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "retention", "days": 21}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_snapshot(self):
        """snapshot action runs against the matched cluster."""
        self.remove_augments()
        session_factory = self.replay_flight_data("test_rdscluster_snapshot")
        p = self.load_policy(
            {
                "name": "rdscluster-snapshot",
                "resource": "rds-cluster",
                "filters": [
                    {"type": "value", "key": "DBClusterIdentifier", "value": "bbb"}
                ],
                "actions": [{"type": "snapshot"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_modify_rds_cluster(self):
        """modify-db-cluster action turns DeletionProtection off."""
        session_factory = self.replay_flight_data("test_modify_rds_cluster")
        p = self.load_policy(
            {
                "name": "modify-db-cluster",
                "resource": "rds-cluster",
                "filters": [{"DeletionProtection": True}],
                "actions": [{
                    "type": "modify-db-cluster",
                    "attributes": {
                        "DeletionProtection": False}
                }]
            },
            session_factory=session_factory, config={'account_id': '644160558196'}
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Confirm against the (replayed) API that the flag was cleared.
        client = session_factory().client("rds")
        cluster = client.describe_db_clusters(
            DBClusterIdentifier='mytest')
        self.assertFalse(cluster['DBClusters'][0]['DeletionProtection'])

    def test_rdscluster_tag_augment(self):
        """Tag augmentation exposes cluster tags for tag: filters."""
        session_factory = self.replay_flight_data("test_rdscluster_tag_augment")
        p = self.load_policy(
            {
                "name": "rdscluster-tag-augment",
                "resource": "rds-cluster",
                "filters": [{"tag:cfoo": "cbar"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_rdscluster_tag_and_remove(self):
        """Round-trip: tag action adds a tag, remove-tag deletes it."""
        self.patch(RDSCluster, "executor_factory", MainThreadExecutor)
        session_factory = self.replay_flight_data("test_rdscluster_tag_and_remove")
        client = session_factory().client("rds")
        p = self.load_policy(
            {
                "name": "rds-cluster-tag",
                "resource": "rds-cluster",
                "filters": [{"DBClusterIdentifier": "c7ntest"}],
                "actions": [{"type": "tag", "key": "xyz", "value": "hello world"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Verify the tag landed on the cluster ARN.
        arn = p.resource_manager.generate_arn(resources[0]["DBClusterIdentifier"])
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertTrue("xyz" in tag_map)
        policy = self.load_policy(
            {
                "name": "rds-cluster-remove-tag",
                "resource": "rds-cluster",
                "filters": [{"tag:xyz": "not-null"}],
                "actions": [{"type": "remove-tag", "tags": ["xyz"]}],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertFalse("xyz" in tag_map)

    def test_rdscluster_mark_match_unmark(self):
        """Round-trip: mark-for-op, filter on marked-for-op, then unmark."""
        session_factory = self.replay_flight_data("test_rdscluster_mark_and_match")
        client = session_factory().client("rds")
        # mark
        p = self.load_policy(
            {
                "name": "rds-mark",
                "resource": "rds-cluster",
                "filters": [{"DBClusterIdentifier": "c7ntest"}],
                "actions": [
                    {
                        "type": "mark-for-op",
                        "tag": "custodian_next",
                        "days": 1,
                        "op": "delete",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # assert marked
        arn = p.resource_manager.generate_arn(resources[0]["DBClusterIdentifier"])
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertTrue("custodian_next" in tag_map)
        # match marked
        policy = self.load_policy(
            {
                "name": "rds-mark-filter",
                "resource": "rds-cluster",
                "filters": [
                    {
                        "type": "marked-for-op",
                        "tag": "custodian_next",
                        "op": "delete",
                        "skew": 1,
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        # unmark
        policy = self.load_policy(
            {
                "name": "rds-mark-filter",
                "resource": "rds-cluster",
                "filters": [
                    {
                        "type": "marked-for-op",
                        "tag": "custodian_next",
                        "op": "delete",
                        "skew": 1,
                    }
                ],
                "actions": [{"type": "unmark", "tags": ["custodian_next"]}],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        # assert unmarked
        tags = client.list_tags_for_resource(ResourceName=arn)
        tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
        self.assertFalse("custodian_next" in tag_map)

    def test_run_cluster_method(self):
        """_run_cluster_method raises, ignores, or warns per its arguments."""
        output = self.capture_logging('custodian.rds-cluster')

        def foobar(DBClusterIdentifier):
            raise ValueError("eek")

        # unspecified: the exception propagates to the caller
        self.assertRaises(
            ValueError, _run_cluster_method, foobar, {'DBClusterIdentifier': 'mytest'})
        # ignored: swallowed silently, nothing logged
        try:
            _run_cluster_method(foobar, {'DBClusterIdentifier': 'mytest'}, ValueError)
        except ValueError:
            self.fail("Shouldn't raise")
        finally:
            self.assertEqual(output.getvalue(), "")
        # warn: swallowed but logged
        try:
            _run_cluster_method(
                foobar, {'DBClusterIdentifier': 'mytest'}, warn=(ValueError, KeyError))
        except ValueError:
            self.fail("Shouldn't raise")
        finally:
            self.assertTrue("eek" in output.getvalue())

    def test_stop(self):
        """stop action moves an available cluster into 'stopping'."""
        factory = self.replay_flight_data("test_rdscluster_stop")
        p = self.load_policy(
            {"name": "rdscluster",
             "resource": "rds-cluster",
             "filters": [{'DBClusterIdentifier': 'mytest'}],
             'actions': ['stop']},
            session_factory=factory, config={'account_id': '644160558196'})
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['Status'], 'available')
        client = factory().client('rds')
        cluster = client.describe_db_clusters(
            DBClusterIdentifier='mytest').get('DBClusters')[0]
        self.assertEqual(cluster['Status'], 'stopping')

    def test_start(self):
        """start action moves a stopped cluster into 'starting'."""
        factory = self.replay_flight_data("test_rdscluster_start")
        p = self.load_policy(
            {"name": "rdscluster",
             "resource": "rds-cluster",
             "filters": [{'DBClusterIdentifier': 'mytest'}],
             'actions': ['start']},
            session_factory=factory, config={'account_id': '644160558196'})
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['Status'], 'stopped')
        client = factory().client('rds')
        cluster = client.describe_db_clusters(
            DBClusterIdentifier='mytest').get('DBClusters')[0]
        self.assertEqual(cluster['Status'], 'starting')
class RDSClusterSnapshotTest(BaseTest):
    """Smoke tests for the ``rds-cluster-snapshot`` resource."""

    def test_rdscluster_snapshot_simple(self):
        """All recorded snapshots are reported when no filters are given."""
        factory = self.replay_flight_data("test_rdscluster_snapshot_simple")
        policy = self.load_policy(
            {"name": "rdscluster-snapshot-simple", "resource": "rds-cluster-snapshot"},
            session_factory=factory,
        )
        self.assertEqual(len(policy.run()), 2)

    def test_rdscluster_snapshot_simple_filter(self):
        """A value filter on StorageEncrypted narrows the result set."""
        factory = self.replay_flight_data("test_rdscluster_snapshot_simple")
        policy = self.load_policy(
            {
                "name": "rdscluster-snapshot-simple-filter",
                "resource": "rds-cluster-snapshot",
                "filters": [
                    {"type": "value", "key": "StorageEncrypted", "value": False}
                ],
            },
            session_factory=factory,
        )
        self.assertEqual(len(policy.run()), 1)

    def test_rdscluster_snapshot_age_filter(self):
        """The age filter matches both recorded snapshots at 7 days."""
        factory = self.replay_flight_data("test_rdscluster_snapshot_simple")
        policy = self.load_policy(
            {
                "name": "rdscluster-snapshot-age-filter",
                "resource": "rds-cluster-snapshot",
                "filters": [{"type": "age", "days": 7}],
            },
            session_factory=factory,
        )
        self.assertEqual(len(policy.run()), 2)

    def test_rdscluster_snapshot_trim(self):
        """The delete action runs against every matched snapshot."""
        factory = self.replay_flight_data("test_rdscluster_snapshot_delete")
        policy = self.load_policy(
            {
                "name": "rdscluster-snapshot-trim",
                "resource": "rds-cluster-snapshot",
                "actions": ["delete"],
            },
            session_factory=factory,
        )
        self.assertEqual(len(policy.run()), 2)
| |
# -*- coding: utf-8 -*-
"""
sync_wikipedia.py
superlachaise_api
Created by Maxime Le Moine on 09/06/2015.
Copyright (c) 2015 Maxime Le Moine.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json, os, re, requests, sys, traceback
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone, translation
from django.utils.translation import ugettext as _
from HTMLParser import HTMLParser
from superlachaise_api.models import *
def print_unicode(text):
    """Print *text* to stdout encoded as UTF-8.

    Fixes: the parameter was previously named ``str``, shadowing the
    builtin. All call sites in this module pass it positionally, so the
    rename is backward-compatible. The parenthesized ``print(...)`` form
    behaves identically for a single argument on Python 2 and also parses
    on Python 3.
    """
    print(text.encode('utf-8'))
def none_to_blank(s):
    """Coerce *s* to unicode, mapping ``None`` to the empty string."""
    return u'' if s is None else unicode(s)
class WikipediaIntroHTMLParser(HTMLParser):
    """HTML parser that extracts the introductory text of a Wikipedia
    article.

    Inline markup (links, emphasis) is kept; navigation and metadata
    elements — tables, reference markers, infobox lists, hidden spans,
    etc. — are dropped. Feed it the rendered HTML of a page's section 0
    and read the result with :meth:`get_data`.
    """

    def __init__(self, language_code):
        # NOTE(review): HTMLParser.__init__ is not called here; on
        # Python 2, self.reset() performs the base-class initialisation.
        # Confirm before porting to Python 3.
        self.reset()
        # Language code used to absolutize relative /wiki/ links.
        self.language_code = language_code
        self.result = []
        # Stack of open elements; each entry records the tag name, its
        # attributes, whether it produced any text ('data'), and the list
        # accumulating its serialized content.
        self.opened_tags = [{'tag': 'root', 'attrs': [], 'data': False, 'content': self.result}]
        self.current_content = self.result
        self.data = False

    def can_read_data(self):
        """Return False while inside an element that must be excluded from
        the intro (tables, <ref>s, top-level divs, decorated lists/spans,
        error markers, metadata, hidden elements)."""
        # A div directly under the root wraps hatnotes/navigation, not prose.
        if len(self.opened_tags) > 1 and self.opened_tags[1]['tag'] == 'div':
            return False
        for opened_tag in self.opened_tags:
            if opened_tag['tag'] == 'table':
                return False
            if opened_tag['tag'] == 'ref':
                return False
            # Lists carrying an id/class are navigation or generated content.
            if opened_tag['tag'] == 'ol':
                for attr in opened_tag['attrs']:
                    if attr[0] in ['id', 'class']:
                        return False
            if opened_tag['tag'] == 'ul':
                for attr in opened_tag['attrs']:
                    if attr[0] in ['id', 'class']:
                        return False
            # MediaWiki renders template errors as <strong class="error">.
            if opened_tag['tag'] == 'strong':
                for attr in opened_tag['attrs']:
                    if attr[0] == 'class' and 'error' in attr[1]:
                        return False
            # Decorated <sup> elements are reference markers like [1].
            if opened_tag['tag'] == 'sup':
                for attr in opened_tag['attrs']:
                    if attr[0] in ['id', 'class']:
                        return False
            # Skip pronunciation/audio helpers and other special spans.
            if opened_tag['tag'] == 'span':
                for attr in opened_tag['attrs']:
                    if attr[0] == 'id' or (attr[0] == 'class' and attr[1] in ['noprint', 'unicode haudio']):
                        return False
            if opened_tag['tag'] == 'small':
                for attr in opened_tag['attrs']:
                    if attr[0] == 'id' or (attr[0] == 'class' and 'metadata' in attr[1]):
                        return False
            if opened_tag['tag'] == 'li':
                for attr in opened_tag['attrs']:
                    if attr[0] in ['id', 'class']:
                        return False
            # Anything styled display:none is invisible — skip it.
            for attr in opened_tag['attrs']:
                if attr[0] == 'style' and 'display:none' in attr[1]:
                    return False
        return True

    def handle_data(self, data):
        # Plain character data: append and flag the element as non-empty.
        if self.can_read_data():
            self.current_content.append(data)
            self.opened_tags[-1]['data'] = True

    def handle_entityref(self, name):
        # Re-serialize named entities (e.g. &amp;) untouched.
        if self.can_read_data():
            self.current_content.append('&'+name+';')
            self.opened_tags[-1]['data'] = True

    def handle_charref(self, name):
        # Re-serialize numeric character references (e.g. &#233;) untouched.
        if self.can_read_data():
            self.current_content.append('&#'+name+';')
            self.opened_tags[-1]['data'] = True

    def handle_starttag(self, tag, attrs):
        # Open a fresh buffer for this element and push it on the stack.
        self.current_content = []
        self.opened_tags.append({'tag': tag, 'attrs': attrs, 'data': False, 'content': self.current_content})
        if self.can_read_data():
            self.current_content.append('<%s' % tag)
            if tag == 'a':
                # Only the href attribute is preserved, rewritten to an
                # absolute URL; every other attribute is dropped.
                for attr in attrs:
                    if attr[0] == 'href':
                        if attr[1].startswith('/wiki/') or attr[1].startswith('/w/'):
                            self.current_content.append(' href="https://{language_code}.wikipedia.org{link}"'.format(language_code=self.language_code, link=attr[1]))
                        elif attr[1].startswith('//'):
                            self.current_content.append(' href="http:{link}"'.format(link=attr[1]))
            self.current_content.append('>')

    def handle_endtag(self, tag):
        if self.can_read_data():
            self.current_content.append('</%s>' % tag)
        # Keep the element only if it produced text (or is a link);
        # otherwise discard it and trim the whitespace that preceded it.
        if self.can_read_data() and (self.opened_tags[-1]['data'] or self.opened_tags[-1]['tag'] == 'a'):
            self.opened_tags[-2]['content'].append(''.join(self.current_content))
            self.opened_tags[-2]['data'] = True
        else:
            # Delete last whitespace if any
            content = self.opened_tags[-2]['content']
            while isinstance(content, list):
                if len(content) > 0:
                    # u'\xa0' (non-breaking space) and plain space both count.
                    if not isinstance(content[-1], list) and content[-1] in [u' ', u' ']:
                        del content[-1]
                        if len(content) < 2:
                            self.opened_tags[-2]['data'] = False
                        break
                    content = content[-1]
                else:
                    content = None
        # Pop the element and restore the parent's buffer.
        self.opened_tags = self.opened_tags[:-1]
        self.current_content = self.opened_tags[-1]['content']

    def get_data(self):
        """Return the accumulated intro HTML, stripped of edge whitespace."""
        return ''.join(self.result).strip()
class Command(BaseCommand):
    """Management command that synchronises local WikipediaPage objects
    with intros and DEFAULTSORT values fetched from the Wikipedia API.

    Fixes: the two ``raise 'no USER_AGENT ...'`` statements raised a plain
    string, which is itself a TypeError on Python 2.6+ — the intended
    message never surfaced. They now raise a real Exception with the same
    message.
    """

    def request_wikipedia_pages(self, language_code, wikipedia_titles):
        """Fetch the latest revision wikitext of each title, following the
        API's 'continue' pagination. Returns {title: page object}."""
        pages = {}
        last_continue = {
            'continue': '',
        }
        titles = '|'.join(wikipedia_titles).encode('utf8')
        while True:
            # Request properties
            params = {
                'action': 'query',
                'prop': 'revisions',
                'rvprop': 'content',
                'format': 'json',
                'titles': titles,
            }
            params.update(last_continue)
            if settings.MEDIAWIKI_USER_AGENT:
                headers = {"User-Agent" : settings.MEDIAWIKI_USER_AGENT}
            else:
                # Was `raise '...'` (a string) — a TypeError on Python 2.6+.
                raise Exception('no USER_AGENT defined in settings.py')
            json_result = requests.get('https://%s.wikipedia.org/w/api.php' % (language_code), params=params, headers=headers).json()
            if 'pages' in json_result['query']:
                for page in json_result['query']['pages'].values():
                    pages[page['title']] = page
            if 'continue' not in json_result: break
            last_continue = json_result['continue']
        return pages

    def request_wikipedia_pre_section(self, language_code, title):
        """Return the rendered HTML of section 0 (the intro) of *title*."""
        # Request properties
        params = {
            'action': 'parse',
            'prop': 'text',
            'section': '0',
            'format': 'json',
            'page': title.encode('utf8'),
        }
        if settings.MEDIAWIKI_USER_AGENT:
            headers = {"User-Agent" : settings.MEDIAWIKI_USER_AGENT}
        else:
            # Was `raise '...'` (a string) — a TypeError on Python 2.6+.
            raise Exception('no USER_AGENT defined in settings.py')
        json_result = requests.get('https://%s.wikipedia.org/w/api.php' % (language_code), params=params, headers=headers).json()
        return json_result['parse']['text']['*']

    def get_wikipedia_intro(self, language_code, title):
        """Download and clean the intro HTML for *title*."""
        # Get wikipedia pre-section (intro)
        pre_section = self.request_wikipedia_pre_section(language_code, title)
        # Process HTML
        parser = WikipediaIntroHTMLParser(language_code)
        parser.feed(pre_section)
        return none_to_blank(parser.get_data())

    def get_default_sort(self, page):
        """Extract the DEFAULTSORT (or French CLEDETRI) value from a page's
        wikitext; return u'' when absent or on any parsing problem."""
        try:
            if len(page['revisions']) != 1:
                # Abort into the except-branch; any exception type will do.
                raise BaseException
            wikitext = page['revisions'][0]['*']
            default_sort = u''
            for line in wikitext.split('\n'):
                match_obj = re.search(r'^{{DEFAULTSORT:(.*)}}$', line)
                if match_obj:
                    default_sort = match_obj.group(1).strip()
                    break
                match_obj = re.search(r'^{{CLEDETRI:(.*)}}$', line)
                if match_obj:
                    default_sort = match_obj.group(1).strip()
                    break
            return default_sort
        except:
            return u''

    # NOTE(review): method name has a typo ("hande") — kept so any external
    # callers keep working.
    def hande_wikidata_localized_entry(self, wikidata_localized_entry):
        """Create or update the WikipediaPage for one localized entry."""
        # Get values
        values_dict = {
            'title': wikidata_localized_entry.wikipedia,
            'intro': self.get_wikipedia_intro(wikidata_localized_entry.language.code, wikidata_localized_entry.wikipedia),
        }
        # Get or create object in database
        target_object_id_dict = {"wikidata_localized_entry": wikidata_localized_entry}
        wikipedia_page, created = WikipediaPage.objects.get_or_create(**target_object_id_dict)
        self.fetched_objects_pks.append(wikipedia_page.pk)
        modified = False
        # default_sort comes from the batched revisions request, if present.
        if wikidata_localized_entry.language.code in self.default_sort and wikidata_localized_entry.wikipedia in self.default_sort[wikidata_localized_entry.language.code]:
            values_dict['default_sort'] = self.default_sort[wikidata_localized_entry.language.code][wikidata_localized_entry.wikipedia]
        else:
            values_dict['default_sort'] = u''
        if created:
            self.created_objects = self.created_objects + 1
        else:
            # Search for modifications
            for field, value in values_dict.iteritems():
                if value != getattr(wikipedia_page, field):
                    modified = True
                    self.modified_objects = self.modified_objects + 1
                    break
        if created or modified:
            for field, value in values_dict.iteritems():
                setattr(wikipedia_page, field, value)
            wikipedia_page.save()

    def sync_wikipedia(self, wikidata_localized_entry_ids):
        """Synchronise pages for the given '|'-separated entry ids, or for
        every entry with a wikipedia title when ids is falsy. Deletion of
        orphaned pages only happens in the full (no-ids) run."""
        if wikidata_localized_entry_ids:
            wikidata_localized_entries = WikidataLocalizedEntry.objects.filter(id__in=wikidata_localized_entry_ids.split('|')).exclude(wikipedia__exact='')
        else:
            wikidata_localized_entries = WikidataLocalizedEntry.objects.exclude(wikipedia__exact='')
        print_unicode(_('Requesting Wikipedia revisions...'))
        self.default_sort = {}
        total = len(wikidata_localized_entries)
        count = 0
        max_count_per_request = 25
        for language in Language.objects.all():
            self.default_sort[language.code] = {}
            wikipedia_titles = wikidata_localized_entries.filter(language=language).values_list('wikipedia', flat=True)
            # Batch titles to stay within API limits.
            for chunk in [wikipedia_titles[i:i+max_count_per_request] for i in range(0,len(wikipedia_titles),max_count_per_request)]:
                print_unicode(str(count) + u'/' + str(total))
                count += len(chunk)
                pages_result = self.request_wikipedia_pages(language.code, chunk)
                for title, page in pages_result.iteritems():
                    self.default_sort[language.code][title] = self.get_default_sort(page)
        print_unicode(str(count) + u'/' + str(total))
        print_unicode(_('Requesting Wikipedia page content...'))
        total = len(wikidata_localized_entries)
        count = 0
        max_count_per_request = 25
        self.fetched_objects_pks = []
        for chunk in [wikidata_localized_entries[i:i+max_count_per_request] for i in range(0,len(wikidata_localized_entries),max_count_per_request)]:
            print_unicode(str(count) + u'/' + str(total))
            count += len(chunk)
            for wikidata_localized_entry in chunk:
                self.hande_wikidata_localized_entry(wikidata_localized_entry)
        print_unicode(str(count) + u'/' + str(total))
        if not wikidata_localized_entry_ids:
            # Look for deleted elements
            for wikipedia_page in WikipediaPage.objects.exclude(pk__in=self.fetched_objects_pks):
                self.deleted_objects = self.deleted_objects + 1
                wikipedia_page.delete()

    def add_arguments(self, parser):
        parser.add_argument('--wikidata_localized_entry_ids',
            action='store',
            dest='wikidata_localized_entry_ids')

    def handle(self, *args, **options):
        """Entry point: run the sync and record counts/errors on the
        Synchronization row named after this file."""
        try:
            self.synchronization = Synchronization.objects.get(name=os.path.basename(__file__).split('.')[0].split('sync_')[-1])
        except:
            raise CommandError(sys.exc_info()[1])
        error = None
        try:
            translation.activate(settings.LANGUAGE_CODE)
            self.created_objects = 0
            self.modified_objects = 0
            self.deleted_objects = 0
            self.errors = []
            print_unicode(_('== Start %s ==') % self.synchronization.name)
            self.sync_wikipedia(options['wikidata_localized_entry_ids'])
            print_unicode(_('== End %s ==') % self.synchronization.name)
            self.synchronization.created_objects = self.created_objects
            self.synchronization.modified_objects = self.modified_objects
            self.synchronization.deleted_objects = self.deleted_objects
            self.synchronization.errors = ', '.join(self.errors)
            translation.deactivate()
        except:
            # Record the traceback on the Synchronization row and re-raise
            # below so the command still fails visibly.
            print_unicode(traceback.format_exc())
            error = sys.exc_info()[1]
            self.synchronization.errors = traceback.format_exc()
        self.synchronization.last_executed = timezone.now()
        self.synchronization.save()
        if error:
            raise CommandError(error)
| |
"""This module encapsulates billing logic and db access.
There are three pieces of information for each participant related to billing:
balanced_account_uri NULL - This participant has never been billed.
'deadbeef' - This participant has had a Balanced
account created for them, either by adding a
credit card or a bank account.
last_bill_result NULL - This participant has not had their credit
card charged yet.
'' - This participant has a working card.
<message> - An error message.
last_ach_result NULL - This participant has not wired up a bank
account yet.
'' - This participant has a working bank account.
<message> - An error message.
"""
from __future__ import unicode_literals
import balanced
import stripe
from aspen.utils import typecheck
from gittip import db
def get_balanced_account(participant_id, balanced_account_uri):
    """Find or create a balanced.Account.

    When balanced_account_uri is None we look the account up by our
    synthetic email address (creating it if needed), persist its uri on
    the participant row, and stamp the participant_id into the account's
    metadata. Otherwise we simply fetch the existing account.
    """
    typecheck( participant_id, unicode
             , balanced_account_uri, (unicode, None)
              )

    # XXX Balanced requires an email address
    # https://github.com/balanced/balanced-api/issues/20
    email_address = '{}@gittip.com'.format(participant_id)

    if balanced_account_uri is None:
        try:
            account = \
               balanced.Account.query.filter(email_address=email_address).one()
        except balanced.exc.NoResultFound:
            account = balanced.Account(email_address=email_address).save()
        # Remember the account uri so future calls take the else-branch.
        BALANCED_ACCOUNT = """\
                UPDATE participants
                   SET balanced_account_uri=%s
                 WHERE id=%s
        """
        db.execute(BALANCED_ACCOUNT, (account.uri, participant_id))
        account.meta['participant_id'] = participant_id
        account.save()  # HTTP call under here
    else:
        account = balanced.Account.find(balanced_account_uri)
    return account
def associate(thing, participant_id, balanced_account_uri, balanced_thing_uri):
    """Given four unicodes, return a unicode.

    This function attempts to associate the credit card or bank account
    details referenced by balanced_thing_uri with a Balanced Account. If it
    fails we log and return a unicode describing the failure. Even for
    failure we keep balanced_account_uri; we don't reset it to None/NULL.
    It's useful for loading the previous (bad) info from Balanced in order
    to prepopulate the form.
    """
    typecheck( participant_id, unicode
             , balanced_account_uri, (unicode, None, balanced.Account)
             , balanced_thing_uri, unicode
             , thing, unicode
              )

    # Accept either an already-loaded Account or a uri to look one up with.
    if isinstance(balanced_account_uri, balanced.Account):
        account = balanced_account_uri
    else:
        account = get_balanced_account(participant_id, balanced_account_uri)

    if thing == "credit card":
        invalidate_on = "bill"
        attach = account.add_card
    else:
        assert thing == "bank account", thing  # sanity check
        invalidate_on = "ach"
        attach = account.add_bank_account
    SQL = "UPDATE participants SET last_%s_result=%%s WHERE id=%%s" \
          % invalidate_on

    try:
        attach(balanced_thing_uri)
    except balanced.exc.HTTPError as err:
        error = err.message.decode('UTF-8')  # XXX UTF-8?
    else:
        error = ''
    typecheck(error, unicode)

    db.execute(SQL, (error, participant_id))
    return error
def clear(thing, participant_id, balanced_account_uri):
    """Invalidate the participant's stored card or bank account and NULL
    out the matching last-result column.
    """
    typecheck( thing, unicode
             , participant_id, unicode
             , balanced_account_uri, unicode
              )
    assert thing in ("credit card", "bank account"), thing

    # XXX Things in balanced cannot be deleted at the moment.
    # =======================================================
    # Instead we mark all valid cards as invalid which will restrict against
    # anyone being able to issue charges against them in the future.
    #
    # See: https://github.com/balanced/balanced-api/issues/22

    account = balanced.Account.find(balanced_account_uri)
    if thing == "credit card":
        instruments = account.cards
    else:
        instruments = account.bank_accounts
    for instrument in instruments:
        if instrument.is_valid:
            instrument.is_valid = False
            instrument.save()

    CLEAR = """\
        UPDATE participants
           SET last_%s_result=NULL
         WHERE id=%%s
    """ % ("bill" if thing == "credit card" else "ach")
    db.execute(CLEAR, (participant_id,))
def store_error(thing, participant_id, msg):
    """Persist *msg* as the participant's last bill/ach result."""
    typecheck(thing, unicode, participant_id, unicode, msg, unicode)
    assert thing in ("credit card", "bank account"), thing
    column = "bill" if thing == "credit card" else "ach"
    ERROR = """\
        UPDATE participants
           SET last_%s_result=%%s
         WHERE id=%%s
    """ % column
    db.execute(ERROR, (msg, participant_id))
# Card
# ====
# While we're migrating data we need to support loading data from both Stripe
# and Balanced.
class StripeCard(object):
    """This is a dict-like wrapper around a Stripe PaymentMethod.
    """

    _customer = None  # underlying stripe.Customer object

    def __init__(self, stripe_customer_id):
        """Given a Stripe customer id, load data from Stripe.
        """
        if stripe_customer_id is not None:
            self._customer = stripe.Customer.retrieve(stripe_customer_id)

    def _get(self, name, default=""):
        """Given a name, return a string.

        Reads *name* off the customer's active_card, falling back to
        *default* when there is no customer or the value is None.
        """
        out = ""
        if self._customer is not None:
            out = self._customer.get('active_card', {}).get(name, "")
        if out is None:
            out = default
        return out

    def __getitem__(self, name):
        """Given a name, return a string.
        """
        if name == 'id':
            return self._customer.id if self._customer is not None else None
        if name == 'last4':
            last4 = self._get('last4')
            # Mask all but the last four digits.
            return "************" + last4 if last4 else last4
        if name == 'expiry':
            month = self._get('expiry_month')
            year = self._get('expiry_year')
            # NOTE(review): assumes month/year are ints when present —
            # confirm against the Stripe payload.
            return "%d/%d" % (month, year) if month and year else ""
        # Translate our keys onto Stripe's naming where they differ.
        stripe_name = { 'address_1': 'address_line1'
                      , 'address_2': 'address_line2'
                      , 'state': 'address_state'
                      , 'zip': 'address_zip'
                       }.get(name, name)
        return self._get(stripe_name)
class BalancedCard(object):
    """This is a dict-like wrapper around a Balanced Account.

    Exposes the most recent card on the account through dict-style lookup,
    translating our key names onto Balanced's attribute names.
    """

    _account = None  # underlying balanced.Account object

    def __init__(self, balanced_account_uri):
        """Given a Balanced account_uri, load data from Balanced.
        """
        if balanced_account_uri is not None:
            self._account = balanced.Account.find(balanced_account_uri)

    def _get_card(self):
        """Return the most recent card on file for this account.
        """
        # XXX Indexing is borken. See:
        # https://github.com/balanced/balanced-python/issues/10
        return self._account.cards.all()[-1]

    def _get(self, name, default=""):
        """Given a name, return a unicode.

        Returns *default* when there is no account, no card, or the card
        has no such attribute.
        """
        out = None
        if self._account is not None:
            try:
                card = self._get_card()
                out = getattr(card, name, None)
            except IndexError:  # no cards associated
                pass
        if out is None:
            out = default
        return out

    def __getitem__(self, name):
        """Given a name, return a string.
        """
        if name == 'id':
            out = self._account.uri if self._account is not None else None
        elif name == 'last4':
            out = self._get('last_four')
            if out:
                # Mask all but the last four digits.
                out = "************" + unicode(out)
        elif name == 'expiry':
            month = self._get('expiration_month')
            year = self._get('expiration_year')
            # NOTE(review): assumes month/year are ints when present —
            # confirm against the Balanced payload.
            if month and year:
                out = "%d/%d" % (month, year)
            else:
                out = ""
        elif name == 'address_2':
            # address_2 is only stored in the card's free-form meta dict.
            out = self._get('meta', {}).get('address_2', '')
        elif name == 'state':
            out = self._get('region')
            if not out:
                # There's a bug in balanced where the region does get persisted
                # but doesn't make it back out. This is a workaround until such
                # time as that's fixed.
                out = self._get('meta', {}).get('region', '')
        else:
            # Translate our keys onto Balanced's naming where they differ.
            name = { 'address_1': 'street_address'
                   , 'zip': 'postal_code'
                    }.get(name, name)
            out = self._get(name)
        return out
class BalancedBankAccount(object):
    """This is a dict-like wrapper around a Balanced Account.

    It exposes the account's single valid bank account, if any, through
    dict-style lookup.
    """

    _account = None       # underlying balanced.Account object
    _bank_account = None  # the one valid bank account, or None

    def __init__(self, balanced_account_uri):
        """Given a Balanced account_uri, load data from Balanced.
        """
        if not balanced_account_uri:
            return
        self._account = balanced.Account.find(balanced_account_uri)

        valid = [ba for ba in self._account.bank_accounts.all() if ba.is_valid]
        if not valid:
            self._bank_account = None
        elif len(valid) == 1:
            self._bank_account = valid[0]
        else:
            # More than one valid bank account is a data-integrity problem.
            raise RuntimeError("%s has %d valid accounts"
                               % (balanced_account_uri, len(valid)))

    def __getitem__(self, item):
        # Map our dict-style keys onto (possibly dotted) attribute paths of
        # the underlying bank account object.
        paths = {
            'id': 'uri',
            'account_uri': 'account.uri',
            'bank_name': 'bank_name',
            'last_four': 'last_four',
        }
        if item not in paths:
            raise IndexError()
        if not self._bank_account:
            return None
        # Walk the dotted path, e.g. 'account.uri' ->
        #   getattr(getattr(self._bank_account, 'account'), 'uri')
        target = self._bank_account
        for attr in paths[item].split('.'):
            target = getattr(target, attr)
        return target

    @property
    def is_setup(self):
        return self._bank_account is not None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.