import array
import copy
import hashlib
import logging
import os
import pickle
import zlib
from google.appengine.api import users
from google.appengine.ext import db
def DerivedProperty(func=None, *args, **kwargs):
"""Implements a 'derived' datastore property.
Derived properties are not set directly, but are instead generated by a
function when required. They are useful to provide fields in the datastore
that can be used for filtering or sorting in ways that are not otherwise
possible with unmodified data - for example, filtering by the length of a
BlobProperty, or case insensitive matching by querying the lower cased version
of a string.
DerivedProperty can be declared as a regular property, passing a function as
the first argument, or it can be used as a decorator for the function that
does the calculation, either with or without arguments.
Example:
>>> class DatastoreFile(db.Model):
... name = db.StringProperty(required=True)
... name_lower = DerivedProperty(lambda self: self.name.lower())
...
... data = db.BlobProperty(required=True)
... @DerivedProperty
... def size(self):
... return len(self.data)
...
... @DerivedProperty(name='sha1')
... def hash(self):
... return hashlib.sha1(self.data).hexdigest()
You can read derived properties the same way you would regular ones:
>>> file = DatastoreFile(name='Test.txt', data='Hello, world!')
>>> file.name_lower
'test.txt'
>>> file.hash
'943a702d06f34599aee1f8da8ef9f7296031d699'
Attempting to set a derived property will throw an error:
>>> file.name_lower = 'foobar'
Traceback (most recent call last):
...
DerivedPropertyError: Cannot assign to a DerivedProperty
When persisted, derived properties are stored to the datastore, and can be
filtered on and sorted by:
>>> file.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'DatastoreFile', ...)
>>> DatastoreFile.all().filter('size =', 13).get().name
u'Test.txt'
"""
if func:
# Regular invocation, or used as a decorator without arguments
return _DerivedProperty(func, *args, **kwargs)
else:
# We're being called as a decorator with arguments
def decorate(decorated_func):
return _DerivedProperty(decorated_func, *args, **kwargs)
return decorate
class _DerivedProperty(db.Property):
def __init__(self, derive_func, *args, **kwargs):
"""Constructor.
Args:
      derive_func: A function that takes one argument, the model instance, and
returns a calculated value.
"""
super(_DerivedProperty, self).__init__(*args, **kwargs)
self.derive_func = derive_func
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
return self.derive_func(model_instance)
def __set__(self, model_instance, value):
raise db.DerivedPropertyError("Cannot assign to a DerivedProperty")
class LowerCaseProperty(_DerivedProperty):
"""A convenience class for generating lower-cased fields for filtering.
Example usage:
>>> class Pet(db.Model):
... name = db.StringProperty(required=True)
... name_lower = LowerCaseProperty(name)
>>> pet = Pet(name='Fido')
>>> pet.name_lower
'fido'
"""
def __init__(self, property, *args, **kwargs):
"""Constructor.
Args:
property: The property to lower-case.
"""
super(LowerCaseProperty, self).__init__(
lambda self: property.__get__(self, type(self)).lower(),
*args, **kwargs)
class LengthProperty(_DerivedProperty):
"""A convenience class for recording the length of another field
Example usage:
>>> class TagList(db.Model):
... tags = db.ListProperty(unicode, required=True)
... num_tags = LengthProperty(tags)
>>> tags = TagList(tags=[u'cool', u'zany'])
>>> tags.num_tags
2
"""
def __init__(self, property, *args, **kwargs):
"""Constructor.
Args:
      property: The property to measure the length of.
"""
super(LengthProperty, self).__init__(
lambda self: len(property.__get__(self, type(self))),
*args, **kwargs)
def TransformProperty(source, transform_func=None, *args, **kwargs):
"""Implements a 'transform' datastore property.
TransformProperties are similar to DerivedProperties, but with two main
differences:
- Instead of acting on the whole model, the transform function is passed the
current value of a single property which was specified in the constructor.
- Property values are calculated when the property being derived from is set,
not when the TransformProperty is fetched. This is more efficient for
properties that have significant expense to calculate.
TransformProperty can be declared as a regular property, passing the property
to operate on and a function as the first arguments, or it can be used as a
decorator for the function that does the calculation, with the property to
operate on passed as an argument.
Example:
>>> class DatastoreFile(db.Model):
... name = db.StringProperty(required=True)
...
... data = db.BlobProperty(required=True)
... size = TransformProperty(data, len)
...
... @TransformProperty(data)
... def hash(val):
... return hashlib.sha1(val).hexdigest()
You can read transform properties the same way you would regular ones:
>>> file = DatastoreFile(name='Test.txt', data='Hello, world!')
>>> file.size
13
>>> file.data
'Hello, world!'
>>> file.hash
'943a702d06f34599aee1f8da8ef9f7296031d699'
Updating the property being transformed automatically updates any
TransformProperties depending on it:
>>> file.data = 'Fubar'
>>> file.data
'Fubar'
>>> file.size
5
>>> file.hash
'df5fc9389a7567ddae2dd29267421c05049a6d31'
Attempting to set a transform property directly will throw an error:
>>> file.size = 123
Traceback (most recent call last):
...
DerivedPropertyError: Cannot assign to a TransformProperty
When persisted, transform properties are stored to the datastore, and can be
filtered on and sorted by:
>>> file.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'DatastoreFile', ...)
>>> DatastoreFile.all().filter('size =', 13).get().hash
'943a702d06f34599aee1f8da8ef9f7296031d699'
"""
if transform_func:
# Regular invocation
return _TransformProperty(source, transform_func, *args, **kwargs)
else:
# We're being called as a decorator with arguments
def decorate(decorated_func):
return _TransformProperty(source, decorated_func, *args, **kwargs)
return decorate
class _TransformProperty(db.Property):
def __init__(self, source, transform_func, *args, **kwargs):
"""Constructor.
Args:
source: The property the transformation acts on.
transform_func: A function that takes the value of source and transforms
it in some way.
"""
super(_TransformProperty, self).__init__(*args, **kwargs)
self.source = source
self.transform_func = transform_func
def __orig_attr_name(self):
return '_ORIGINAL' + self._attr_name()
def __transformed_attr_name(self):
return self._attr_name()
  def __get__(self, model_instance, model_class):
    if model_instance is None:
      return self
    # Only recompute the transform when the source property has changed since
    # the last computation; otherwise return the cached transformed value.
    last_val = getattr(model_instance, self.__orig_attr_name(), None)
    current_val = self.source.__get__(model_instance, model_class)
    if last_val == current_val:
      return getattr(model_instance, self.__transformed_attr_name())
    transformed_val = self.transform_func(current_val)
    # Cache both the source value and its transform for the next access.
    setattr(model_instance, self.__orig_attr_name(), current_val)
    setattr(model_instance, self.__transformed_attr_name(), transformed_val)
    return transformed_val
def __set__(self, model_instance, value):
raise db.DerivedPropertyError("Cannot assign to a TransformProperty")
class KeyProperty(db.Property):
"""A property that stores a key, without automatically dereferencing it.
Example usage:
>>> class SampleModel(db.Model):
... sample_key = KeyProperty()
>>> model = SampleModel()
>>> model.sample_key = db.Key.from_path("Foo", "bar")
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'SampleModel', ...)
>>> model.sample_key # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'Foo', u'bar', ...)
"""
def validate(self, value):
"""Validate the value.
Args:
value: The value to validate.
Returns:
A valid key.
"""
if isinstance(value, basestring):
value = db.Key(value)
if value is not None:
if not isinstance(value, db.Key):
raise TypeError("Property %s must be an instance of db.Key"
% (self.name,))
return super(KeyProperty, self).validate(value)
class PickleProperty(db.Property):
"""A property for storing complex objects in the datastore in pickled form.
Example usage:
>>> class PickleModel(db.Model):
... data = PickleProperty()
>>> model = PickleModel()
>>> model.data = {"foo": "bar"}
>>> model.data
{'foo': 'bar'}
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'PickleModel', ...)
>>> model2 = PickleModel.all().get()
>>> model2.data
{'foo': 'bar'}
"""
data_type = db.Blob
def get_value_for_datastore(self, model_instance):
value = self.__get__(model_instance, model_instance.__class__)
if value is not None:
return db.Blob(pickle.dumps(value))
def make_value_from_datastore(self, value):
if value is not None:
return pickle.loads(str(value))
def default_value(self):
"""If possible, copy the value passed in the default= keyword argument.
This prevents mutable objects such as dictionaries from being shared across
instances."""
return copy.copy(self.default)
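# A hypothetical illustration (not from the original module) of why
# default_value() copies: with a shared mutable default such as a dict, all
# entities would otherwise mutate the same object.
#
#   class PrefsModel(db.Model):
#     data = PickleProperty(default={})
#
#   a, b = PrefsModel(), PrefsModel()
#   a.data['theme'] = 'dark'
#   assert b.data == {}  # holds because each instance got its own copy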
class SetProperty(db.ListProperty):
"""A property that stores a set of things.
This is a parameterized property; the parameter must be a valid
non-list data type, and all items must conform to this type.
Example usage:
>>> class SetModel(db.Model):
... a_set = SetProperty(int)
>>> model = SetModel()
>>> model.a_set = set([1, 2, 3])
>>> model.a_set
set([1, 2, 3])
>>> model.a_set.add(4)
>>> model.a_set
set([1, 2, 3, 4])
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'SetModel', ...)
>>> model2 = SetModel.all().get()
>>> model2.a_set
set([1L, 2L, 3L, 4L])
"""
def validate(self, value):
value = db.Property.validate(self, value)
if value is not None:
if not isinstance(value, (set, frozenset)):
raise db.BadValueError('Property %s must be a set' % self.name)
value = self.validate_list_contents(value)
return value
def default_value(self):
return set(db.Property.default_value(self))
def get_value_for_datastore(self, model_instance):
return list(super(SetProperty, self).get_value_for_datastore(model_instance))
def make_value_from_datastore(self, value):
if value is not None:
return set(super(SetProperty, self).make_value_from_datastore(value))
def get_form_field(self, **kwargs):
from django import newforms as forms
defaults = {'widget': forms.Textarea,
'initial': ''}
defaults.update(kwargs)
return super(SetProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
value = super(SetProperty, self).get_value_for_form(instance)
if not value:
return None
if isinstance(value, set):
value = '\n'.join(value)
return value
def make_value_from_form(self, value):
if not value:
return []
if isinstance(value, basestring):
value = value.splitlines()
return set(value)
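# A hedged sketch of the form round-trip above (names assumed, not from the
# original module): values are rendered one per line in a Textarea and split
# back into a set on the way in.
#
#   prop = SetProperty(unicode)
#   prop.make_value_from_form(u'red\nblue')   # -> set([u'red', u'blue'])
#   prop.make_value_from_form(u'')            # -> []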
class InvalidDomainError(Exception):
"""Raised when something attempts to access data belonging to another domain."""
class CurrentDomainProperty(db.Property):
"""A property that restricts access to the current domain.
Example usage:
>>> class DomainModel(db.Model):
... domain = CurrentDomainProperty()
>>> os.environ['HTTP_HOST'] = 'domain1'
>>> model = DomainModel()
The domain is set automatically:
>>> model.domain
u'domain1'
You cannot change the domain:
>>> model.domain = 'domain2' # doctest: +ELLIPSIS
Traceback (most recent call last):
...
InvalidDomainError: Domain 'domain1' attempting to illegally access data for domain 'domain2'
>>> key = model.put()
>>> model = DomainModel.get(key)
>>> model.domain
u'domain1'
You cannot write the data from another domain:
>>> os.environ['HTTP_HOST'] = 'domain2'
>>> model.put() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
  InvalidDomainError: Domain 'domain2' attempting to illegally modify data for domain 'domain1'
Nor can you read it:
>>> DomainModel.get(key) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
InvalidDomainError: Domain 'domain2' attempting to illegally access data for domain 'domain1'
Admin users can read and write data for other domains:
>>> os.environ['USER_IS_ADMIN'] = '1'
>>> model = DomainModel.get(key)
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'DomainModel', ...)
You can also define models that should permit read or write access from
other domains:
>>> os.environ['USER_IS_ADMIN'] = '0'
>>> class DomainModel2(db.Model):
... domain = CurrentDomainProperty(allow_read=True, allow_write=True)
>>> model = DomainModel2()
>>> model.domain
u'domain2'
>>> key = model.put()
>>> os.environ['HTTP_HOST'] = 'domain3'
>>> model = DomainModel2.get(key)
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'DomainModel2', ...)
"""
def __init__(self, allow_read=False, allow_write=False, *args, **kwargs):
"""Constructor.
Args:
allow_read: If True, allow entities with this property to be read, but not
written, from other domains.
allow_write: If True, allow entities with this property to be modified
from other domains.
"""
self.allow_read = allow_read
self.allow_write = allow_write
super(CurrentDomainProperty, self).__init__(*args, **kwargs)
def __set__(self, model_instance, value):
if not value:
value = unicode(os.environ['HTTP_HOST'])
elif (value != os.environ['HTTP_HOST'] and not self.allow_read
and not users.is_current_user_admin()):
raise InvalidDomainError(
"Domain '%s' attempting to illegally access data for domain '%s'"
% (os.environ['HTTP_HOST'], value))
super(CurrentDomainProperty, self).__set__(model_instance, value)
def get_value_for_datastore(self, model_instance):
value = super(CurrentDomainProperty, self).get_value_for_datastore(
model_instance)
if (value != os.environ['HTTP_HOST'] and not users.is_current_user_admin()
and not self.allow_write):
raise InvalidDomainError(
"Domain '%s' attempting to allegally modify data for domain '%s'"
% (os.environ['HTTP_HOST'], value))
return value
class ChoiceProperty(db.IntegerProperty):
"""A property for efficiently storing choices made from a finite set.
This works by mapping each choice to an integer. The choices must be hashable
(so that they can be efficiently mapped back to their corresponding index).
Example usage:
>>> class ChoiceModel(db.Model):
... a_choice = ChoiceProperty(enumerate(['red', 'green', 'blue']))
... b_choice = ChoiceProperty([(0,None), (1,'alpha'), (4,'beta')])
You interact with choice properties using the choice values:
>>> model = ChoiceModel(a_choice='green')
>>> model.a_choice
'green'
>>> model.b_choice == None
True
>>> model.b_choice = 'beta'
>>> model.b_choice
'beta'
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'ChoiceModel', ...)
>>> model2 = ChoiceModel.all().get()
>>> model2.a_choice
'green'
  >>> model2.b_choice
'beta'
  To get the int representation of a choice, you may either access the
  choice's corresponding attribute or use the c2i method:
>>> green = ChoiceModel.a_choice.GREEN
>>> none = ChoiceModel.b_choice.c2i(None)
>>> (green == 1) and (none == 0)
True
The int representation of a choice is needed to filter on a choice property:
>>> ChoiceModel.gql("WHERE a_choice = :1", green).count()
1
"""
def __init__(self, choices, make_choice_attrs=True, *args, **kwargs):
"""Constructor.
Args:
choices: A non-empty list of 2-tuples of the form (id, choice). id must be
the int to store in the database. choice may be any hashable value.
make_choice_attrs: If True, the uppercase version of each string choice is
set as an attribute whose value is the choice's int representation.
"""
super(ChoiceProperty, self).__init__(*args, **kwargs)
self.index_to_choice = dict(choices)
self.choice_to_index = dict((c,i) for i,c in self.index_to_choice.iteritems())
if make_choice_attrs:
for i,c in self.index_to_choice.iteritems():
if isinstance(c, basestring):
setattr(self, c.upper(), i)
def get_choices(self):
"""Gets a list of values which may be assigned to this property."""
return self.choice_to_index.keys()
def c2i(self, choice):
"""Converts a choice to its datastore representation."""
return self.choice_to_index[choice]
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
index = super(ChoiceProperty, self).__get__(model_instance, model_class)
return self.index_to_choice[index]
def __set__(self, model_instance, value):
try:
index = self.c2i(value)
except KeyError:
raise db.BadValueError('Property %s must be one of the allowed choices: %s' %
(self.name, self.get_choices()))
super(ChoiceProperty, self).__set__(model_instance, index)
def get_value_for_datastore(self, model_instance):
# just use the underlying value from the parent
return super(ChoiceProperty, self).__get__(model_instance, model_instance.__class__)
def make_value_from_datastore(self, value):
if value is None:
return None
return self.index_to_choice[value]
class CompressedProperty(db.UnindexedProperty):
"""A unindexed property that is stored in a compressed form.
CompressedTextProperty and CompressedBlobProperty derive from this class.
"""
def __init__(self, level, *args, **kwargs):
"""Constructor.
Args:
level: Controls the level of zlib's compression (between 1 and 9).
"""
super(CompressedProperty, self).__init__(*args, **kwargs)
self.level = level
def get_value_for_datastore(self, model_instance):
value = self.value_to_str(model_instance)
if value is not None:
return db.Blob(zlib.compress(value, self.level))
def make_value_from_datastore(self, value):
if value is not None:
ds_value = zlib.decompress(value)
return self.str_to_value(ds_value)
# override value_to_str and str_to_value to implement a new CompressedProperty
def value_to_str(self, model_instance):
"""Returns the value stored by this property encoded as a (byte) string,
or None if value is None. This string will be stored in the datastore.
By default, returns the value unchanged."""
return self.__get__(model_instance, model_instance.__class__)
@staticmethod
def str_to_value(s):
"""Reverse of value_to_str. By default, returns s unchanged."""
return s
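# Per the note above, a new compressed type only needs value_to_str and
# str_to_value. A hypothetical JSON-backed variant (a sketch, not part of the
# original module) might look like:
#
#   import json
#
#   class CompressedJsonProperty(CompressedProperty):
#     data_type = db.Blob
#
#     def __init__(self, level=6, *args, **kwargs):
#       super(CompressedJsonProperty, self).__init__(level, *args, **kwargs)
#
#     def value_to_str(self, model_instance):
#       return json.dumps(
#           self.__get__(model_instance, model_instance.__class__))
#
#     @staticmethod
#     def str_to_value(s):
#       return json.loads(s)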
class CompressedBlobProperty(CompressedProperty):
"""A byte string that will be stored in a compressed form.
Example usage:
>>> class CompressedBlobModel(db.Model):
... v = CompressedBlobProperty()
You can create a CompressedBlobProperty and set its value with your raw byte
string (anything of type str). You can also retrieve the (decompressed) value
by accessing the field.
>>> model = CompressedBlobModel(v='\x041\x9f\x11')
>>> model.v = 'green'
>>> model.v
'green'
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'CompressedBlobModel', ...)
>>> model2 = CompressedBlobModel.all().get()
>>> model2.v
'green'
Compressed blobs are not indexed and therefore cannot be filtered on:
>>> CompressedBlobModel.gql("WHERE v = :1", 'green').count()
0
"""
data_type = db.Blob
def __init__(self, level=6, *args, **kwargs):
super(CompressedBlobProperty, self).__init__(level, *args, **kwargs)
class CompressedTextProperty(CompressedProperty):
"""A string that will be stored in a compressed form (encoded as UTF-8).
Example usage:
>>> class CompressedTextModel(db.Model):
... v = CompressedTextProperty()
You can create a CompressedTextProperty and set its value with your string.
You can also retrieve the (decompressed) value by accessing the field.
>>> ustr = u'\u043f\u0440\u043e\u0440\u0438\u0446\u0430\u0442\u0435\u043b\u044c'
>>> model = CompressedTextModel(v=ustr)
>>> model.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'CompressedTextModel', ...)
>>> model2 = CompressedTextModel.all().get()
>>> model2.v == ustr
True
Compressed text is not indexed and therefore cannot be filtered on:
>>> CompressedTextModel.gql("WHERE v = :1", ustr).count()
0
"""
data_type = db.Text
def __init__(self, level=6, *args, **kwargs):
super(CompressedTextProperty, self).__init__(level, *args, **kwargs)
def value_to_str(self, model_instance):
return self.__get__(model_instance, model_instance.__class__).encode('utf-8')
@staticmethod
def str_to_value(s):
return s.decode('utf-8')
class ArrayProperty(db.UnindexedProperty):
"""An array property that is stored as a string.
Example usage:
>>> class ArrayModel(db.Model):
... v = ArrayProperty('i')
>>> m = ArrayModel()
If you do not supply a default the array will be empty.
>>> m.v
array('i')
>>> m.v.extend(range(5))
>>> m.v
array('i', [0, 1, 2, 3, 4])
>>> m.put() # doctest: +ELLIPSIS
datastore_types.Key.from_path(u'ArrayModel', ...)
>>> m2 = ArrayModel.all().get()
>>> m2.v
array('i', [0, 1, 2, 3, 4])
"""
data_type = array.array
def __init__(self, typecode, *args, **kwargs):
self._typecode = typecode
kwargs.setdefault('default', array.array(typecode))
    # The typecode is stored on self above; passing it on to db.Property
    # would incorrectly set it as the verbose_name.
    super(ArrayProperty, self).__init__(*args, **kwargs)
def get_value_for_datastore(self, model_instance):
value = super(ArrayProperty, self).get_value_for_datastore(model_instance)
return db.Blob(value.tostring())
def make_value_from_datastore(self, value):
if value is not None:
return array.array(self._typecode, value)
def empty(self, value):
return value is None
def validate(self, value):
if not isinstance(value, array.array) or value.typecode != self._typecode:
raise db.BadValueError(
"Property %s must be an array instance with typecode '%s'" % (
self.name, self._typecode))
return super(ArrayProperty, self).validate(value)
def default_value(self):
return array.array(self._typecode,
super(ArrayProperty, self).default_value())
# ======== zhaiduo/wancp :: aetycoon/__init__.py (Python, gpl-2.0) ========
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recsim.agents.tabular_q_agent."""
from gym import spaces
import numpy as np
from recsim.agents import tabular_q_agent
from recsim.testing import test_environment as te
import tensorflow.compat.v1 as tf
class TabularQAgentTest(tf.test.TestCase):
def init_agent_and_env(self,
slate_size=1,
num_candidates=10,
learning_rate=0.8,
gamma=0.0,
policy='epsilon_greedy',
ordinal_slates=False,
starting_probs=(1.0, 0.0, 0.0, 0.0, 0.0, 0.0)):
env_config = {
'num_candidates': num_candidates,
'slate_size': slate_size,
'resample_documents': False,
'seed': 42,
'starting_probs': starting_probs
}
te_sim = te.create_environment(env_config)
agent = tabular_q_agent.TabularQAgent(
te_sim.observation_space,
te_sim.action_space,
gamma=gamma,
exploration_policy=policy,
learning_rate=learning_rate,
ordinal_slates=ordinal_slates)
return te_sim, agent
def test_step(self):
te_sim, agent = self.init_agent_and_env()
observation0 = te_sim.reset()
slate1 = agent.step(0, observation0)
selected_doc0 = list(observation0['doc'].values())[slate1[0]]
# Environment always starts at state 0.
self.assertEqual(agent._previous_state_action_index, (selected_doc0, 0))
observation1, reward1, _, _ = te_sim.step(slate1)
slate2 = agent.step(reward1, observation1)
selected_doc1 = list(observation1['doc'].values())[slate2[0]]
observed_state = observation1['user']
self.assertEqual(agent._previous_state_action_index,
(selected_doc1, observed_state))
self.assertEqual(agent._q_value_table,
{(selected_doc0, 0): agent._learning_rate * -10.0})
self.assertEqual(agent._state_action_counts, {(selected_doc0, 0): 1})
def test_myopic_value_estimation(self):
te_sim, agent = self.init_agent_and_env()
observation0 = te_sim.reset()
slate = agent.step(0, observation0)
for _ in range(1000):
observation, reward, _, _ = te_sim.step(slate)
slate = agent.step(reward, observation)
for state in range(6):
for action in range(4):
self.assertAlmostEqual(agent._q_value_table[(action, state)],
te.QVALUES0[state][action])
def test_gamma05_value_estimation(self):
te_sim, agent = self.init_agent_and_env(gamma=0.5)
observation = te_sim.reset()
reward = 0
for i in range(100, 50100):
slate = agent.step(reward, observation)
observation, reward, _, _ = te_sim.step(slate)
agent._learning_rate = 100.0 / float(i)
for state in range(6):
for action in range(4):
self.assertAlmostEqual(
agent._q_value_table[(action, state)],
te.QVALUES05[state][action],
delta=0.25)
  def test_discretize_gym_leaf(self):
_, agent = self.init_agent_and_env()
self.assertEqual(
agent._discretize_gym_leaf(spaces.Discrete(5), [
4,
]), [
4,
])
box = spaces.Box(
low=agent._discretization_bins[0],
high=agent._discretization_bins[-1],
shape=(1, 1),
dtype=np.float32)
# Some corner cases in 1d and 2x2d.
self.assertEqual(
agent._discretize_gym_leaf(box, [
agent._discretization_bins[0] - 10E-5,
]), [
0,
])
self.assertEqual(
agent._discretize_gym_leaf(box, [
agent._discretization_bins[0],
]), [
1,
])
self.assertEqual(
agent._discretize_gym_leaf(box, [
agent._discretization_bins[-1] - 10E-5,
]), [
len(agent._discretization_bins) - 1,
])
self.assertEqual(
agent._discretize_gym_leaf(box, [
agent._discretization_bins[-1] + 1.0,
]), [
len(agent._discretization_bins),
])
box2x2 = spaces.Box(
low=agent._discretization_bins[0],
high=agent._discretization_bins[-1],
shape=(2, 2),
dtype=np.float32)
self.assertEqual(
agent._discretize_gym_leaf(box2x2, [
np.array([[
agent._discretization_bins[0] - 10E-6,
agent._discretization_bins[-1] - 10E-6
],
[
agent._discretization_bins[-1] + 1.0,
agent._discretization_bins[0]
]]),
]), [0, 99, 100, 1])
def test_slate_enumeration(self):
te_sim, agent = self.init_agent_and_env(slate_size=2, num_candidates=4)
observation0 = te_sim.reset()
non_ordinal_slates = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
enumerated_slates = [
slate for slate, _ in agent._enumerate_slates(observation0['doc'])
]
self.assertCountEqual(non_ordinal_slates, enumerated_slates)
te_sim, agent = self.init_agent_and_env(slate_size=2, num_candidates=4,
ordinal_slates=True)
ordinal_slates = non_ordinal_slates + [
(1, 0), (2, 0), (3, 0), (2, 1), (3, 1), (3, 2)
]
enumerated_slates = [
slate for slate, _ in agent._enumerate_slates(observation0['doc'])
]
self.assertCountEqual(ordinal_slates, enumerated_slates)
def test_bundle_and_unbundle(self):
te_sim, agent = self.init_agent_and_env(
slate_size=1, num_candidates=4, policy='min_count')
# Make a few steps to populate counts and Q-table.
observation0 = te_sim.reset()
slate1 = agent.step(0, observation0)
observation1, reward1, _, _ = te_sim.step(slate1)
agent.step(reward1, observation1)
bundle_dict = {
'q_value_table': agent._q_value_table,
'sa_count': agent._state_action_counts
}
self.assertEqual(bundle_dict, agent.bundle_and_checkpoint('', 0))
_, new_agent = self.init_agent_and_env(slate_size=1, num_candidates=4)
self.assertTrue(new_agent.unbundle('', 0, bundle_dict))
self.assertEqual(bundle_dict['q_value_table'], new_agent._q_value_table)
self.assertEqual(bundle_dict['sa_count'], new_agent._state_action_counts)
if __name__ == '__main__':
tf.test.main()
# ======== google-research/recsim :: recsim/agents/tabular_q_agent_test.py (Python, apache-2.0) ========
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for google-app-engine-django project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'dummy' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'hvhxfm5u=^*v&doo#oq8x*eg8+1&9sxbye@=umutgn^t_sg_nx'
# Ensure that email is not sent via SMTP by default to match the standard App
# Engine SDK behaviour. If you want to send email via SMTP then add the name of
# your mailserver here.
EMAIL_HOST = ''
TEMPLATE_DIRS = ("mysite.templates")
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'google.appengine.ext.ndb.django_middleware.NdbDjangoMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.doc.XViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
# 'django.core.context_processors.media', # 0.97 only.
# 'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
    os.path.join(ROOT_PATH, 'templates'),
)
INSTALLED_APPS = (
'astro',
'astro.location',
'astro.chart',
# 'appengine_django',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
)
# ======== sandeva/appspot :: settings.py (Python, apache-2.0) ========
#!/usr/bin/python
#-*- coding: utf-8 -*-
import os,sys
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
# ======== maxlagerz/Tadam_bot :: acrcloud/__init__.py (Python, mit) ========
"""This module implements the computation of the correlation matrix between
clusters."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from collections import Counter
import numpy as np
from statstools import matrix_of_pairs
# -----------------------------------------------------------------------------
# Correlation matrix
# -----------------------------------------------------------------------------
def compute_statistics(Fet1, Fet2, spikes_in_clusters, masks):
"""Return Gaussian statistics about each cluster."""
nPoints = Fet1.shape[0]
nDims = Fet1.shape[1]
nClusters = len(spikes_in_clusters)
# precompute the mean and variances of the masked points for each feature
# contains 1 when the corresponding point is masked
masked = np.zeros_like(masks)
masked[masks == 0] = 1
nmasked = np.sum(masked, axis=0)
nmasked[nmasked == 0] = 1
nu = np.sum(Fet2 * masked, axis=0) / nmasked
nu = nu.reshape((1, -1))
sigma2 = np.sum(((Fet2 - nu) * masked) ** 2, axis=0) / nmasked
sigma2 = sigma2.reshape((1, -1))
# expected features
y = Fet1 * masks + (1 - masks) * nu
z = masks * Fet1**2 + (1 - masks) * (nu ** 2 + sigma2)
eta = z - y ** 2
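    # Added commentary: unmasked values pass through unchanged in y, while
    # masked values are imputed with the noise mean nu; eta captures the extra
    # per-point variance that is later folded into the covariance diagonal d.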
# print nu
# print nmasked
LogP = np.zeros((nPoints, nClusters))
stats = {}
for c in xrange(nClusters):
MyPoints = spikes_in_clusters[c]
# print MyPoints
# now, take the modified features here
MyFet2 = np.take(y, MyPoints, axis=0)
# log of the proportion in cluster c
LogProp = np.log(len(MyPoints) / float(nPoints))
Mean = np.mean(MyFet2, axis=0).reshape((1, -1))
# stats for cluster c
CovMat = np.cov(MyFet2, rowvar=0)
# HACK: avoid instability issues, kind of works
CovMat += np.diag(1e-3 * np.ones(nDims))
# now, add the diagonal modification to the covariance matrix
# the eta just for the current cluster
etac = np.take(eta, MyPoints, axis=0)
d = np.sum(etac, axis=0) / nmasked
# add diagonal
CovMat += np.diag(d)
CovMatinv = np.linalg.inv(CovMat)
LogDet = np.log(np.linalg.det(CovMat))
stats[c] = (Mean, CovMat, CovMatinv, LogDet, len(MyPoints))
return stats
def compute_correlations(features, clusters, masks,
clusters_to_update=None):
"""Compute the correlation matrix between every pair of clusters.
Use an approximation of the original Klusters grouping assistant, with
an integral instead of a sum (integral of the product of the Gaussian
densities).
A dictionary pairs => value is returned.
"""
nPoints = features.shape[0]
nDims = features.shape[1]
c = Counter(clusters)
spikes_in_clusters = [np.nonzero(clusters == clu)[0] for clu in sorted(c)]
stats = compute_statistics(features, features, spikes_in_clusters, masks)
# print stats
if clusters_to_update is None:
clusters_to_update = sorted(stats.keys())
else:
clusters_to_update = np.intersect1d(clusters_to_update, stats.keys())
nClusters = len(clusters_to_update)
matrix = {}
coeff = np.log(2 * np.pi) * (-nDims / 2.)
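    # The pairwise value computed below is the Gaussian overlap integral
    #   int N(x; mu_i, C_i) N(x; mu_j, C_j) dx = N(mu_i - mu_j; 0, C_i + C_j),
    # evaluated in log space: coeff is the 2*pi normalization, followed by the
    # log-determinant and the quadratic form in dmu.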
for i, ci in enumerate(clusters_to_update):
mui, Ci, Ciinv, logdeti, npointsi = stats[ci]
for k, cj in enumerate(clusters_to_update[i:]):
j = i + k
muj, Cj, Cjinv, logdetj, npointsj = stats[cj]
dmu = (muj - mui).reshape((-1, 1))
Csum = Ci + Cj
Csuminv = np.linalg.inv(Csum)
p = (coeff +
(-.5 * np.log(np.linalg.det(Csum))) +
(-.5) * np.dot(np.dot(dmu.T, Csuminv), dmu))
alpha = float(npointsi) / nPoints
            # Alternative weighting (unused): matrix entry p + np.log(alpha).
expp = np.exp(p)[0,0]
matrix[(ci, cj)] = expp
matrix[(cj, ci)] = expp
# # Symmetrize the matrix.
# matrix = matrix + matrix.T
# # Remove the diagonal and replace it with the minimum value.
# matrix[range(nClusters), range(nClusters)] = 0
# nonzero = matrix[matrix != 0]
# if nonzero.size > 0:
# matrix[matrix == 0] = nonzero.min()
return matrix
# if __name__ == '__main__':
# filename = r"D:\Git\spiky\_test\data\test.clu.1"
# from spiky.io.loader import KlustersLoader
# l = KlustersLoader(filename)
# features = l.get_features()
# clusters = l.get_clusters()
# masks = l.get_masks(full=True)
# C = compute_correlations(features, clusters, masks)
# C = matrix_of_pairs(C)
# from pylab import imshow, show
# imshow(C, interpolation='none')
# show()
# ======== rossant/spiky :: experimental/_correlation/correlations.py (Python, bsd-3-clause) ========
# """
# A simple Python module to obtain energy levels of superconducting qubits by sparse Hamiltonian diagonalization.
# """
#
import numpy as np
import sympy
from scipy.sparse.linalg import *
from abc import ABCMeta
from abc import abstractmethod
#
# import scqubits.core.constants as constants
# import scqubits.core.descriptors as descriptors
# import scqubits.core.discretization as discretization
# # import scqubits.core.qubit_base as base
# import scqubits.core.storage as storage
# import scqubits.io_utils.fileio_serializers as serializers
# import scqubits.utils.plot_defaults as defaults
# import scqubits.utils.plotting as plot
#
class CircuitNode:
"""
super class
"""
def __init__(self, name):
self.name = name
#
#
class Variable:
"""
    Represents a variable of the circuit wavefunction or a constant external bias flux or voltage.
"""
def __init__(self, name):
self.variable_type = 'parameter'
self.phase_grid = np.asarray([0])
self.charge_grid = np.asarray([0])
self.phase_step = np.inf
self.charge_step = np.inf
self.nodeNo = 1
self.name = name
def create_grid(self, nodeNo, phase_periods, centre=0):
"""
Creates a discrete grid for wavefunction variables.
:param nodeNo: number of discrete points on the grid.
:type nodeNo: int
:param phase_periods: number of :math:`2\pi` intervals in the grid.
:type phase_periods: float
:param centre: additional flux offset.
:type centre: float
"""
self.variable_type = 'variable'
min_node = np.round(-nodeNo/2)
max_node = np.round(nodeNo/2)
self.phase_grid = np.linspace(-np.pi*phase_periods+centre, np.pi*phase_periods+centre, nodeNo, endpoint=False)
self.charge_grid = np.linspace(min_node/phase_periods, max_node/phase_periods, nodeNo, endpoint=False)
self.phase_step = 2*np.pi*phase_periods/nodeNo
self.charge_step = 1.0/phase_periods
self.nodeNo = nodeNo
def set_parameter(self, phase_value, charge_value):
"""
Sets an external flux and/or charge bias.
:param phase_value: external flux bias in :math:`\Phi_{0}/2\pi`.
:type phase_value: float.
:param charge_value: external charge bias in cooper pairs
:type charge_value: float.
"""
self.variable_type = 'parameter'
self.phase_grid = np.asarray([phase_value])
self.charge_grid = np.asarray([charge_value])
self.phase_step = np.inf
self.charge_step = np.inf
self.nodeNo = 1
def get_phase_grid(self):
return self.phase_grid
def get_charge_grid(self):
return self.charge_grid
def get_phase_step(self):
return self.phase_step
def get_charge_step(self):
return self.charge_step
def get_nodeNo(self):
return self.nodeNo
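# Minimal usage sketch (values assumed, not from the original module):
#
#   phi = Variable('phi')
#   phi.create_grid(nodeNo=128, phase_periods=2)           # wavefunction variable
#   flux = Variable('flux')
#   flux.set_parameter(phase_value=np.pi, charge_value=0)  # external bias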
#
#
# class CircuitElement:
# """
# Abstract class for circuit elements. All circuit elements defined in the QCircuit library derive from this base class.
# """
#
# __metaclass__ = ABCMeta
#
# def __init__(self, name):
# self.name = name
#
# @abstractmethod
# def is_phase(self):
# pass
#
# @abstractmethod
# def is_charge(self):
# pass
#
# @abstractmethod
# def energy_term(self, node_phases, node_charges):
# return None
#
# @abstractmethod
# def symbolic_energy_term(self, node_phases, node_charges):
# return None
#
#
# class Capacitance(CircuitElement):
# """
# Circuit element representing a capacitor.
# """
#
# def __init__(self, name, capacitance=0):
# super().__init__(name)
# self.capacitance = capacitance
#
# def set_capacitance(self, capacitance):
# self.capacitance = capacitance
#
# def get_capacitance(self):
# return self.capacitance
#
# def is_phase(self):
# return False
#
# def is_charge(self):
# return True
#
# def energy_term(self, node_phases, node_charges):
# return None
#
# def symbolic_energy_term(self, node_phases, node_charges):
# return None
#
# class JosephsonJunction(CircuitElement):
# """
# Circuit element representing a Josephson junction.
# """
# def __init__(self, name, critical_current=0):
# super().__init__(name)
# self.critical_current = critical_current
#
# def set_critical_current(self, critical_current):
# self.critical_current = critical_current
#
# def get_critical_current(self):
# return self.critical_current
#
# def energy_term(self, node_phases, node_charges):
# if len(node_phases) != 2:
# raise Exception('ConnectionError',
# 'Josephson junction {0} has {1} nodes connected instead of 2.'.format(self.name, len(node_phases)))
# return self.critical_current*(1-np.cos(node_phases[0]-node_phases[1]))
#
# def symbolic_energy_term(self, node_phases, node_charges):
# if len(node_phases) != 2:
# raise Exception('ConnectionError',
# 'Josephson junction {0} has {1} nodes connected instead of 2.'.format(self.name, len(node_phases)))
# return self.critical_current*(1-sympy.cos(node_phases[0]-node_phases[1]))
#
# def is_phase(self):
# return True
#
# def is_charge(self):
# return False
#
#
# class Inductance(CircuitElement):
# """
# Circuit element representing a linear inductor.
# """
# def __init__(self, name, inductance=0):
# super().__init__(name)
# self.inductance = inductance
#
# def set_inductance(self, inductance):
# self.inductance = inductance
#
# def get_inductance(self):
# return self.inductance
#
# def energy_term(self, node_phases, node_charges):
# if len(node_phases) != 2:
# raise Exception('ConnectionError',
# 'Inductance {0} has {1} nodes connected instead of 2.'.format(self.name, len(node_phases)))
# return (node_phases[0]-node_phases[1])**2/(2*self.inductance)
#
# def symbolic_energy_term(self, node_phases, node_charges):
# if len(node_phases) != 2:
# raise Exception('ConnectionError',
# 'Inductance {0} has {1} nodes connected instead of 2.'.format(self.name, len(node_phases)))
# return (node_phases[0]-node_phases[1])**2/(2*self.inductance)
#
# def is_phase(self):
# return True
#
# def is_charge(self):
# return False
#
#
# class LagrangianCurrentSource(CircuitElement):
# """
# Circuit element representing a Josephson junction.
# """
# def __init__(self, name, current=0):
# super().__init__(name)
# self.current = current
#
# def set_current(self, current):
# self.current = current
#
# def get_current(self):
# return self.current
#
# def energy_term(self, node_phases, node_charges):
# if len(node_phases) != 2:
# raise Exception('ConnectionError',
# 'Lagrangian current source {0} has {1} nodes connected instead of 2.'.format(self.name, len(node_phases)))
# return self.current*(node_phases[0]-node_phases[1])
#
# def symbolic_energy_term(self, node_phases, node_charges):
# return self.energy_term(node_phases, node_charges)
#
# def is_phase(self):
# return True
#
# def is_charge(self):
# return False
#
#
# class Circuit():
# """
# The containing references to nodes, elements, variables, \\
# variable-to-node mappings, in addition it contains all methods
# used for calculations.
# """
# def __init__(self, tolerance=1e-18):
# """
# Default constructor.
# :param tolerance: capacitances below this value are considered to be computational errors when determining the inverse capacitance matrix.
# :type tolerance: float
# """
# self.nodes = [CircuitNode('GND')]
# self.elements = []
# self.wires = []
# self.variables = []
# self.linear_coordinate_transform = np.asarray(0)
# self.invalidation_flag = True
# self.tolerance = tolerance
# self.best_permutation_cache = {}
# self.phase_potential = None
# self.charge_potential = None
# self.nodes_graph=[]
#
# # TODO: add something
# @staticmethod
# def default_params():
# return {
# }
#
# # TODO: add something
# @staticmethod
# def nonfit_params():
# return []
#
# def hilbertdim(self):
# """Returns Hilbert space dimension"""
# return np.prod(self.grid_shape())
#
# def potential(self, *args):
# """Circuit phase-basis potential evaluated at `*args`, in order of `variables`. Variables of parameter type
# are skipped.
#
# Parameters
# ----------
# *args: floats
# phase variables value
#
# Returns
# -------
# float
# """
# phase_values = []
# variable_values = args.__iter__()
# for variable in self.variables:
# if variable.variable_type == 'variable':
# phase_values.append(variable_values.__next__())
# else:
# phase_values.append(variable.phase_grid[0])
#
# energy = 0
# for element in self.elements:
# if element.is_phase():
# element_node_ids = []
# for wire in self.wires:
# if wire[0] == element.name:
# for node_id, node in enumerate(self.nodes):
# if wire[1] == node.name:
# element_node_ids.append(node_id)
# energy += element.energy_term(np.asarray(self.linear_coordinate_transform)[
# element_node_ids, :]@phase_values, None)
# return energy
#
# def hamiltonian(self):
# """Returns Hamiltonian in charge basis"""
# dim = len(self.variables)
# phase_grid = np.reshape(self.create_phase_grid(), (dim, 1, -1))
# charge_grid = np.reshape(self.create_charge_grid(), (dim, -1, 1))
# unitary = np.exp(1j*np.sum(phase_grid*charge_grid, axis=0))/np.sqrt(self.hilbertdim())
# hamiltonian_mat = unitary@np.diag(self.calculate_phase_potential().ravel())@np.conj(unitary.T)
# hamiltonian_mat += np.diag(self.calculate_charge_potential().ravel())
# return hamiltonian_mat
#
# def find_element(self, element_name):
# """
# Find an element inside the circuit with the specified name.
# :returns: the element, if found, else None
# """
# for element in self.elements:
# if element.name == element_name:
# return element
#
# def find_variable(self, variable_name):
# """
# Find a variable of the circuit with the specified name.
# :returns: the variable, if found
# """
# for variable in self.variables:
# if variable.name == variable_name:
# return variable
#
# def add_element(self, element, node_names):
# """
# Connect an element to the circuit.
# :param element: circuit element to insert into the circuit
# :type element: CircuitElement
# :param node_names: list of names of the nodes to which the element should be connected
# :type node_names: list of str
# """
# self.elements.append(element)
# self.nodes_graph.append(tuple(node_names))
# for node_name in node_names:
# nodes_found = 0
# for node in self.nodes:
# if node.name == node_name:
# self.wires.append((element.name, node.name))
# nodes_found += 1
# if nodes_found == 0:
# self.nodes.append(CircuitNode(node_name))
# self.wires.append((element.name, node_name))
# self.invalidation_flag = True
#
# def add_variable(self, variable):
# """
# Add variable to circuit.
# :param variable:
# :type variable: Variable
# """
# self.variables.append(variable)
# self.invalidation_flag = True
#
# def map_nodes_linear(self, node_names, variable_names, coefficients):
# """
# Sets the value of node phases (and, respectively, their conjugate charges) as a linear combination of the circuit variables.
# :param node_names: the names of the nodes to be expressed through the variables, in the order of the coefficient matrix rows.
# :param variable_names: the variables to express the node phases through, in the order of the coefficient matrix columns.
# :param coefficients: the transfrmation matrix
# """
# node_ids = []
# variable_ids = []
# for node_name in node_names:
# for node_id, node in enumerate(self.nodes):
# if node.name == node_name:
# node_ids.append(node_id)
# for variable_name in variable_names:
# for variable_id, variable in enumerate(self.variables):
# if variable.name == variable_name:
# variable_ids.append(variable_id)
# if len(variable_ids) != len(self.variables):
# raise Exception('VariableError',
# 'Wrong number of variables in variable list. Got {0}, expected {1}'.format(
# len(variable_ids), len(self.variables)))
# if len(node_ids) != len(self.nodes):
# raise Exception('VariableError',
# 'Wrong number of nodes in node list. Got {0}, expected {1}'.format(
# len(node_ids), len(self.nodes)))
# variable_idx, node_idx = np.meshgrid(variable_ids, node_ids)
# self.linear_coordinate_transform = np.zeros(coefficients.shape, coefficients.dtype)
# self.linear_coordinate_transform[node_idx, variable_idx] = coefficients
# self.invalidation_flag = True
#
# def grid_shape(self):
# return tuple([v.get_nodeNo() for v in self.variables])
#
# def create_phase_grid(self):
# """
# Creates a n-d grid of the phase variables, where n is the number of variables in the circuit, on which the circuit wavefunction depends.
# :returns: tuple of numpy ndarray
# """
# self.invalidation_flag = True
# axes = []
# for variable in self.variables:
# axes.append(variable.get_phase_grid())
# return np.meshgrid(*tuple(axes), indexing='ij')
#
# def create_charge_grid(self):
# """
# Creates a n-d grid of the charge variables, where n is the number of variables in the circuit, on which the circuit wavefunction, when transformed into charge representation, depends.
# :returns: tuple of numpy ndarray
# """
# self.invalidation_flag = True
# axes = []
# for variable in self.variables:
# axes.append(variable.get_charge_grid())
# return np.meshgrid(*tuple(axes), indexing='ij')
#
# def hamiltonian_phase_action(self, state_vector):
# """
# Implements the action of the hamiltonian on the state vector describing the system in phase representation.
# :param state_vector: wavefunction to act upon
# :type state_vector: ndarray
# :returns: wavefunction after action of the hamiltonian
# """
# psi = np.reshape(state_vector, self.charge_potential.shape)
# phi = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(psi)))
# Up = self.phase_potential.ravel()*state_vector
# Tp = np.fft.ifftshift(np.fft.ifftn(np.fft.ifftshift(self.charge_potential*phi))).ravel()
# return Up+Tp
#
# def capacitance_matrix(self, symbolic=False):
# """
# Calculates the linear capacitance matrix of the circuit with respect
# to the circuit nodes from the capacitances between them.
# :returns: the capacitance matrix with respect to the nodes, where the rows and columns are sorted accoring to the order in which the nodes are in the nodes attribute.
# """
# if symbolic:
# capacitance_matrix = sympy.Matrix(np.zeros((len(self.nodes), len(self.nodes))))
# else:
# capacitance_matrix = np.zeros((len(self.nodes), len(self.nodes)))
# for element in self.elements:
# if element.is_charge():
# element_node_ids = []
# for wire in self.wires:
# if wire[0] == element.name:
# for node_id, node in enumerate(self.nodes):
# if wire[1] == node.name:
# element_node_ids.append(node_id)
# if len(element_node_ids) != 2:
# raise Exception('VariableError',
# 'Wrong number of ports on capacitance, expected 2, got {0}'.format(len(element_node_ids)))
# capacitance_matrix[element_node_ids[0], element_node_ids[0]] += element.get_capacitance()
# capacitance_matrix[element_node_ids[0], element_node_ids[1]] += -element.get_capacitance()
# capacitance_matrix[element_node_ids[1], element_node_ids[0]] += -element.get_capacitance()
# capacitance_matrix[element_node_ids[1], element_node_ids[1]] += element.get_capacitance()
# return capacitance_matrix
#
# def capacitance_matrix_variables(self, symbolic=False):
# """
# Calculates the capacitance matrix for the energy term of the qubit Lagrangian in the variable respresentation.
# """
#
# if symbolic:
# C = self.linear_coordinate_transform.T*self.capacitance_matrix(symbolic)*self.linear_coordinate_transform
# C = sympy.Matrix([sympy.nsimplify(sympy.ratsimp(x)) for x in C]).reshape(*(C.shape))
# else:
# C = np.einsum('ji,jk,kl->il', self.linear_coordinate_transform,self.capacitance_matrix(symbolic),self.linear_coordinate_transform)
# return C
#
# def capacitance_matrix_legendre_transform(self, symbolic=False):
# """
# Calculates the principle pivot transform of the capacitance matrix in variable representation with respect to "variables" as opposed to "parameters" for the Legendre transform
# """
# inverted_indices = [variable_id for variable_id, variable in enumerate(self.variables) if variable.variable_type=='variable' ]
# noninverted_indices = [variable_id for variable_id, variable in enumerate(self.variables) if variable.variable_type=='parameter' ]
# if symbolic:
# aii = self.capacitance_matrix_variables(symbolic)[inverted_indices, inverted_indices]
# ain = self.capacitance_matrix_variables(symbolic)[inverted_indices, noninverted_indices]
# ani = self.capacitance_matrix_variables(symbolic)[noninverted_indices, inverted_indices]
# # Ann = self.capacitance_matrix_variables(symbolic)[noninverted_indices, noninverted_indices]
# bii = aii.inv()
# bin = sympy.Matrix(-aii.inv()*ain)
# bni = sympy.Matrix(-ani*aii.inv())
# bnn = ani*aii.inv()*ain#-Ann
# B = sympy.Matrix(np.zeros(self.capacitance_matrix_variables(symbolic).shape))
# else:
# aii = self.capacitance_matrix_variables(symbolic)[tuple(np.meshgrid(inverted_indices, inverted_indices))].T
# ain = self.capacitance_matrix_variables(symbolic)[tuple(np.meshgrid(inverted_indices, noninverted_indices))].T
# ani = self.capacitance_matrix_variables(symbolic)[tuple(np.meshgrid(noninverted_indices, inverted_indices))].T
# # Ann = self.capacitance_matrix_variables(symbolic)[np.meshgrid(noninverted_indices, noninverted_indices)].T
# bii = np.linalg.inv(aii)
# bin = -np.dot(np.linalg.inv(aii), ain)
# bni = -np.dot(ani,np.linalg.inv(aii))
# bnn = np.einsum('ij,jk,kl->il', ani, np.linalg.inv(aii), ain)#-Ann
# B = np.empty(self.capacitance_matrix_variables(symbolic).shape)
# # if sympy could do indexing properly, we would have 3 time less code!!
# for i1, i2 in enumerate(inverted_indices):
# for j1, j2 in enumerate(inverted_indices):
# B[j2, i2] = bii[j1, i1]
# for i1, i2 in enumerate(noninverted_indices):
# for j1, j2 in enumerate(inverted_indices):
# B[j2, i2] = bin[j1, i1]
# for i1, i2 in enumerate(inverted_indices):
# for j1, j2 in enumerate(noninverted_indices):
# B[j2, i2] = bni[j1, i1]
# for i1, i2 in enumerate(noninverted_indices):
# for j1, j2 in enumerate(noninverted_indices):
# B[j2, i2] = bnn[j1, i1]
# return B
#
# def calculate_ndiagonal_hamiltonian(self, d1scheme, d2scheme):
# """
# Calculates the hamiltonian in phase representation in n-diagonal form
# :param d1scheme: finite difference scheme for first order derivatives
# :param d2scheme: finite difference scheme for second order derivatives
# :returns: the m-ndiagonal kinetic operator
# """
# n = len(d1scheme)
# if len(d1scheme)!=len(d2scheme):
# raise Exception('ValueError', 'd1scheme and d2scheme lengths are not equal')
# if n<3:
# raise Exception('ValueError', 'dscheme length is less than 3')
# if (n-1)%2>0:
# raise Exception('ValueError', 'dscheme length is even')
#
# self.ndiagonal_operator = np.zeros(tuple(n*np.ones((len(self.variables),), dtype=int))+self.grid_shape())
# slice_diagonal = [(n-1)/2 for v in self.variables]+[slice(0, v.get_nodeNo(), 1) for v in self.variables]
#
# ECmat = -0.5*self.capacitance_matrix_legendre_transform()
# # d^2/dxi^2 type elements (C*_ii)
# for i in range(len(self.variables)):
# EC = ECmat[i,i]
# for column_id in range(n):
# slice_column = list(slice_diagonal)
# slice_column[i] = column_id
# self.ndiagonal_operator[slice_column] += EC/(self.variables[i].get_phase_step()**2)*d2scheme[column_id]
# # d^2/dxidxj type elements (C*_ij)
# for i in range(len(self.variables)):
# nondiagonal = (x for x in range(len(self.variables)) if x!=i)
# for j in nondiagonal:
# EC = ECmat[i,j]
# for column_id_i in range(n):
# for column_id_j in range(n):
# slice_column = list(slice_diagonal)
# slice_column[i] = column_id_i
# slice_column[j] = column_id_j
# self.ndiagonal_operator[slice_column] += EC/(self.variables[i].get_phase_step()*self.variables[j].get_phase_step())*(d1scheme[column_id_i]*d1scheme[column_id_j])
#
# self.ndiagonal_operator[slice_diagonal] += self.phase_potential
#
# self.hamiltonian_ndiagonal = LinearOperator((np.prod(self.grid_shape()), np.prod(self.grid_shape())), matvec=self.ndiagonal_operator_action)
# return self.ndiagonal_operator
#
# def ndiagonal_operator_action(self, psi):
# diagonal_shape = tuple([1]*len(self.variables))+self.grid_shape()
# psi = np.reshape(psi, diagonal_shape)
# action = self.ndiagonal_operator*psi
# ndiagonal_columns = np.meshgrid(*tuple([range(self.ndiagonal_operator.shape[v_id]) for v_id in range(len(self.variables))]), indexing='ij')
# ndiagonal_columns = np.reshape(ndiagonal_columns, (len(self.variables), np.prod(self.ndiagonal_operator.shape[0:len(self.variables)])))
# ndiagonal_shifts = np.meshgrid(*tuple([np.linspace(
# -(self.ndiagonal_operator.shape[v_id]-1)/2,
# (self.ndiagonal_operator.shape[v_id]-1)/2,
# self.ndiagonal_operator.shape[v_id], dtype=int) for v_id in range(len(self.variables))]), indexing='ij')
# ndiagonal_shifts = np.reshape(ndiagonal_shifts, ndiagonal_columns.shape)
#
# result = np.zeros(self.grid_shape(), dtype=np.complex)
# for i in range(np.prod(self.ndiagonal_operator.shape[0:len(self.variables)])):
# psii = action[tuple(ndiagonal_columns[:, i])+tuple([slice(None, None, None)]*len(self.variables))]
# for v_id in range(len(self.variables)):
# psii = np.roll(psii, ndiagonal_shifts[v_id, i], axis=v_id)
# result += psii
# return result
#
# def calculate_phase_potential(self):
# """
# Calculates the potential landspace of the circuit phase-dependent energy in phase representation.
# :returns: the phase potential landscape on the wavefunction grid.
# """
# grid_shape = self.grid_shape()
# grid_size = np.prod(grid_shape)
# phase_grid = self.create_phase_grid()
# self.phase_potential = np.zeros(grid_shape)
# for element in self.elements:
# element_node_ids = []
# for wire in self.wires:
# if wire[0] == element.name:
# for node_id, node in enumerate(self.nodes):
# if wire[1] == node.name:
# element_node_ids.append(node_id)
# phase_grid = np.reshape(np.asarray(phase_grid), (len(self.variables), grid_size))
# node_phases = np.einsum('ij,jk->ik', self.linear_coordinate_transform, phase_grid)[element_node_ids, :]
# # break
# # return node_phases
# node_phases = np.reshape(node_phases, (len(element_node_ids),)+grid_shape)
# if element.is_phase():
# self.phase_potential += element.energy_term(node_phases=node_phases, node_charges=np.zeros(node_phases.shape))
# return self.phase_potential
#
# def calculate_charge_potential(self):
# """
#         Calculates the potential landscape of the circuit's charge-dependent energy in the charge representation.
# :returns: the charge potential landscape on the wavefunction grid.
# """
# grid_shape = self.grid_shape()
# grid_size = np.prod(grid_shape)
# charge_grid = np.reshape(np.asarray(self.create_charge_grid()), (len(self.variables), grid_size))
# ECmat = 0.5*self.capacitance_matrix_legendre_transform()
# self.charge_potential = np.einsum('ij,ik,kj->j', charge_grid, ECmat, charge_grid)
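#         # Note: the einsum contracts q_i * EC_ik * q_k over i and k for every
#         # grid column j, i.e. the quadratic charging energy at each grid point.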
# self.charge_potential = np.reshape(self.charge_potential, grid_shape)
# return self.charge_potential
#
# def calculate_potentials(self):
# """
# Calculate potentials for Fourier-based hamiltonian action.
# """
#
# phase_potential = self.calculate_phase_potential()
# charge_potential = self.calculate_charge_potential()
# self.hamiltonian_Fourier = LinearOperator((np.prod(self.grid_shape()), np.prod(self.grid_shape())),
# matvec=self.hamiltonian_phase_action)
# return self.charge_potential, self.phase_potential
#
# def diagonalize_phase(self, num_states=2, use_sparse=True, hamiltonian_type='Fourier', maxiter=1000):
# """Performs sparse diagonalization of the circuit hamiltonian.
# :param num_states: number of states, starting from the ground state, to be obtained.
# :returns: energies and wavefunctions of the first num_states states.
# """
# if hamiltonian_type == 'Fourier':
# energies, wavefunctions = eigs(self.hamiltonian_Fourier, k=num_states, which='SR', maxiter=maxiter)
# elif hamiltonian_type == 'ndiagonal':
# energies, wavefunctions = eigs(self.hamiltonian_ndiagonal, k=num_states, which='SR', maxiter=maxiter)
# energy_order = np.argsort(np.real(energies))
# energies = energies[energy_order]
# wavefunctions = wavefunctions[:, energy_order]
# wavefunctions = np.reshape(wavefunctions, self.charge_potential.shape+(num_states,))
# return energies, wavefunctions
#
# def symbolic_lagrangian(self):
# variable_phase_symbols = []
# variable_voltage_symbols = []
# for variable_id, variable in enumerate(self.variables):
# variable.phase_symbol = sympy.Symbol(variable.name)
# variable.voltage_symbol = sympy.Symbol('\\partial_t'+variable.name)
# variable_phase_symbols.append(variable.phase_symbol)
# variable_voltage_symbols.append(variable.voltage_symbol)
# variable_phase_symbols = sympy.Matrix(variable_phase_symbols)
# variable_voltage_symbols = sympy.Matrix(variable_voltage_symbols)
# node_phase_symbols = self.linear_coordinate_transform*variable_phase_symbols
# node_voltage_symbols = self.linear_coordinate_transform*variable_voltage_symbols
# for node_id, node in enumerate(self.nodes):
# node.phase_symbol = node_phase_symbols[node_id]
# node.voltage_symbol = node_voltage_symbols[node_id]
# kinetic_energy = sympy.nsimplify((0.5*node_voltage_symbols.T*self.capacitance_matrix(symbolic=True)*node_voltage_symbols)[0, 0])
# potential_energy = 0
# for element in self.elements:
# if element.is_phase():
# element_node_phases = []
# element_node_voltages = []
# for wire in self.wires:
# if wire[0] == element.name:
# for node_id, node in enumerate(self.nodes):
# if wire[1] == node.name:
# element_node_phases.append(sympy.nsimplify(node.phase_symbol))
# element_node_voltages.append(sympy.nsimplify(node.voltage_symbol))
# potential_energy += element.symbolic_energy_term(element_node_phases, 0)
# return kinetic_energy - potential_energy
#
# def symbolic_hamiltonian(self):
# variable_phase_symbols = []
# variable_charge_symbols = []
# for variable_id, variable in enumerate(self.variables):
# variable.phase_symbol = sympy.Symbol(variable.name)
# if variable.variable_type=='variable':
# variable.charge_symbol = -sympy.I*sympy.Symbol('\\partial_{'+variable.name+'}')
# else:
# variable.charge_symbol = sympy.Symbol('\\partial_t'+variable.name)
# variable_phase_symbols.append(variable.phase_symbol)
# variable_charge_symbols.append(variable.charge_symbol)
# variable_phase_symbols = sympy.Matrix(variable_phase_symbols)
# variable_charge_symbols = sympy.Matrix(variable_charge_symbols)
#
# node_phase_symbols = self.linear_coordinate_transform*variable_phase_symbols
# for node_id, node in enumerate(self.nodes):
# node.phase_symbol = node_phase_symbols[node_id]
# kinetic_energy = 0.5*sympy.nsimplify((variable_charge_symbols.T * self.capacitance_matrix_legendre_transform(symbolic=True) * variable_charge_symbols)[0, 0])
# potential_energy = 0
# for element in self.elements:
# if element.is_phase():
# element_node_phases = []
# element_node_voltages = []
# for wire in self.wires:
# if wire[0] == element.name:
# for node_id, node in enumerate(self.nodes):
# if wire[1] == node.name:
# element_node_phases.append(sympy.nsimplify(node.phase_symbol))
# potential_energy += element.symbolic_energy_term(element_node_phases, 0)
# return kinetic_energy + potential_energy
#
# def phase_operator(self, index=0):
# """
# Returns
# -------
# ndarray
#             Returns the selected phase (phi) operator in the phase basis
#
#         :param index: phase variable index (default 0)
# """
# return self.create_phase_grid()[index]
#
# def phase_operator_action(self, state_vector, index=0):
# """
# Returns
# -------
# ndarray
#
# Implements the action of the phase operator on the state vector describing the system in phase representation.
# :param state_vector: wavefunction to act upon
#         :param index: phase variable index (default 0)
#         :returns: wavefunction after action of the phase operator
# """
# shape = state_vector.shape
# return np.reshape(self.phase_operator(index=index).ravel() * state_vector.ravel(), shape)
#
# def charge_operator(self, index=0):
# """
# Returns
# -------
# ndarray
#             Returns the selected charge operator in the charge basis
#
#         :param index: charge variable index (default 0)
# """
# return self.create_charge_grid()[index]
#
# def charge_operator_action(self, state_vector, index=0):
# """
# Returns
# -------
# ndarray
#
# Implements the action of the charge operator on the state vector describing the system in phase representation.
# :param state_vector: wavefunction to act upon
#         :param index: charge variable index (default 0)
# :returns: wavefunction in phase representation after action of the charge operator
# """
# shape = state_vector.shape
# charge_wave = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(state_vector)))
# w = np.fft.ifftshift(np.fft.ifftn(np.fft.ifftshift(self.charge_operator(index=index) * charge_wave)))
# # It can also be done in charge basis as
# # w_charge = self.charge_operator(index)*self.wave_function_charge(state_vector, index)
# return np.reshape(w, shape)
#
# def wave_function_charge(self, state_vector, index):
# """
# Returns
# -------
# ndarray
#
# Returns wave_function in charge basis
# :param state_vector: wavefunction in phase representation
#         :param index: charge variable index
# :returns: wavefunction in charge representation
# """
# charge_wave = np.fft.fftshift(np.fft.fft(np.fft.fftshift(state_vector, axes=index), norm='ortho', axis=index),
# axes=index)
# return charge_wave
#
# def exp_i_phi_operator(self, index=0):
# """
# Returns
# -------
# ndarray
# Returns the :math:`e^{i\\phi}` operator in phase basis
# """
# exponent = 1j * self.phase_operator(index=index)
# # shape=exponent.shape
# return np.exp(exponent) # np.reshape(sp.linalg.expm(exponent.ravel()), shape)
#
# def cos_phi_operator(self, index=0):
# """
# Returns
# -------
# ndarray
# Returns the :math:`\\cos \\phi` operator in phase basis
# """
# # cos_phi_op = 0.5 * exp_i_phi_operator(index = index)
# # cos_phi_op += cos_phi_op.conjugate()
# cos_phi_op = np.cos(self.phase_operator(index=index))
# return cos_phi_op
#
# def sin_phi_operator(self, index=0):
# """
# Returns
# -------
# ndarray
# Returns the :math:`\\sin \\phi` operator in phase basis
# """
# # sin_phi_op = -1j * 0.5 * exp_i_phi_operator(index = index)
# # sin_phi_op += sin_phi_op.conjugate()
# sin_phi_op = np.sin(self.phase_operator(index=index))
# return sin_phi_op
#
# def operator_action_phase(self, operator, state_vector):
# """
# Returns
# -------
#         ndarray
#
# Implements the action of the selected operator on the state vector describing the system in phase representation.
#
# :param state_vector: wavefunction to act upon
# :param operator: selected operator to act on state vector in phase representation
# :returns: wavefunction after action of the operator
# """
# return operator * state_vector
#
# def operator_matrix_elements(self, operator, state_vector1, state_vector2):
# """
# Returns
# -------
# number
#
#         Calculates matrix elements of the selected operator in the phase representation.
#
# :param state_vector1: wavefunction to act upon (ket)
# :param operator: selected operator to act on state vector1 in phase representation
# :param state_vector2: wavefunction (bra)
# :returns: matrix element <state_vector2|operator|state_vector1>
# """
#
# return np.sum(np.conj(state_vector2) * operator * state_vector1) | ooovector/qtlab_replacement | circuit.py | Python | gpl-3.0 | 37,345 |
import numpy as np
import copy
from mpi4py import MPI
from pymatgen import Lattice, Structure, Element, PeriodicSite
from pymatgen.io.vasp import Poscar, VaspInput
from pymatgen.analysis.structure_matcher import StructureMatcher, FrameworkComparator
from py_mc.mc import CanonicalMonteCarlo, grid_1D, observer_base
from py_mc.mc_mpi import RX_MPI_init, TemperatureRX_MPI
from py_mc.applications.latgas_abinitio_interface.model_setup \
import group, defect_sublattice, config, dft_latgas, g_r
from py_mc.applications.latgas_abinitio_interface.run_vasp_mpi \
import test_runner, vasp_runner
kB = 8.6173e-5
comm, nreplicas, nprocs_per_replica = RX_MPI_init()
################## RXMC parameters ################################
# specify temperatures for each replica, number of steps, etc.
kTstart = 500.0
kTend = 1500.0
kTs = kB*np.linspace(kTstart,kTend,nreplicas)
#kTstep = 1.1
#kTs = kB*np.array([kTstart*kTstep**i for i in range(nreplicas)])
#eqsteps = 2000 # Number of steps for equilibration.
# Set Lreload to True when restarting
Lreload = False
#Lreload = True
nsteps = 1000 # Number of steps for sampling
RXtrial_frequency = 2
sample_frequency = 1
print_frequency = 1
# specify grid for calculating g(r)
#dr = 0.01
#maxr = 5
#grid = grid_1D(dr, dr, maxr)
################### model setup ###############################
# we first choose a "model" defining how to perform energy calculations and trial steps
# on the "configuration" defined below
baseinput = VaspInput.from_directory("baseinput")
energy_calculator = vasp_runner(base_input_dir="./baseinput",
path_to_vasp="/home/i0009/i000900/src/vasp.5.3/vasp.spawnready.gamma",
nprocs_per_vasp=nprocs_per_replica,
comm=MPI.COMM_SELF, perturb=0.1)
#energy_calculator = test_runner()
model = dft_latgas(energy_calculator,save_history=False)
##############################################################
############## defect sublattice setup #######################
# The POSCAR file contains the primitive cell of the MgAl2O4 spinel
# structure with 8 O, 4 Al, and 2 Mg atoms
spinel_str = Structure.from_file("POSCAR")
spinel_str.make_supercell([2,2,2])
# The "base structure" contains ion sites with no disorder
# In our case, we consider perfectly ordered O sublattice
base_str = spinel_str.copy()
base_str.remove_species(["Al", "Mg"])
# We consider disorder in the cation sublattice
cation_str = spinel_str.copy()
cation_str.remove_species(["O"])
cation_sites = cation_str.frac_coords
numMg = int(cation_str.composition["Mg"])
numAl = int(cation_str.composition["Al"])
# Define groups that we want to sample;
# we only consider single-atom groups here
#V = group("V", [], []) # for vacancy
Al = group("Al", ["Al"])
Mg = group("Mg", ["Mg"])
cations = [Al, Mg]
# Define defect sublattices using fractional coordinates of sites and atom groups that
# occupy those sites
defect_sublattices = [defect_sublattice(cation_sites, cations)]
num_defects = [{"Mg":numMg,"Al":numAl}]
# Finally, prepare configuration definition
spinel_config = config(base_str, defect_sublattices, num_defects, [1,1,1])
spinel_config.shuffle()
configs = []
for i in range(nreplicas):
configs.append(copy.deepcopy(spinel_config))
################### Observer definition #####################
class observer_spinel(observer_base):
def __init__(self, Asite_struct, Bspecie):
super(observer_spinel, self).__init__()
self.Asite_struct = Asite_struct
self.Bspecie = Bspecie
self.site_matcher = StructureMatcher(ltol=0.1, primitive_cell=False,
allow_subset=True,
comparator=FrameworkComparator(), ignored_species=["O"])
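    # Degree of inversion (DOI): the fraction of A (tetrahedral) sites
    # occupied by the B-site species (Al here); 0 for a normal spinel,
    # 2/3 for a random cation distribution, 1 for a fully inverse spinel.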
def DOI(self, calc_state):
asites = self.site_matcher.get_mapping(calc_state.config.structure,
self.Asite_struct)
#print asites
#print spinel_config.structure
#print spinel_config.Asite_struct
x = 0
for i in asites:
if calc_state.config.structure.species[i] == Element(self.Bspecie):
x += 1
x /= float(len(asites))
return x
def logfunc(self, calc_state):
return calc_state.energy, self.DOI(calc_state)
Asite_struct = cation_str.copy()
Asite_struct.remove_species(["Al"])
myobserver = observer_spinel(Asite_struct, "Al")
################### RXMC calculation #########################
RXcalc = TemperatureRX_MPI(comm, CanonicalMonteCarlo, model, configs, kTs)
#obs = RXcalc.run(eqsteps, RXtrial_frequency, sample_frequency, observer=myobserver, subdirs=True)
if Lreload:
RXcalc.reload()
obs = RXcalc.run(nsteps, RXtrial_frequency, sample_frequency, print_frequency, observer=myobserver, subdirs=True)
if comm.Get_rank() == 0:
print(obs)
| skasamatsu/py_mc | examples/dft_latgas_spinel/spinel_catmix.py | Python | gpl-3.0 | 4,903 |
from models import Connection
from django import forms
class ConnectionForm(forms.ModelForm):
class Meta:
model = Connection
exclude = ('d_object_id',)
| CIGNo-project/CIGNo | cigno/mdtools/forms.py | Python | gpl-3.0 | 173 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Mani
# @Date: 2017-08-28 19:20:58
# @Last Modified time: 2017-09-27 13:23:38
#
##############################################
import os
import configparser
def read_config():
    config_file = os.path.join(os.path.dirname(__file__), "..", "config", "config.ini")
    Config = configparser.ConfigParser()
    Config.read(config_file)
    return Config
CONFIG = read_config()
# Get your timezone
DEFAULT_TZ = CONFIG.get('Global', 'timezone')
# Get the base location where all configs will be stored
BASE_INSTALL_DIR = CONFIG.get('Global', 'base_dir')
# Get the location where all media will be written
BASE_MEDIA_DIR = CONFIG.get('Global', 'media_dir')
# Get the location where user libraries are created
LIBRARIES_DIR = CONFIG.get('Global', 'libraries')
# Get the location where downloads are actioned
DOWNLOADS_DIR = CONFIG.get('Global', 'downloads_dir')
DOCKER = {
'embyserver': {
'name': "Emby Server",
'description': "Emby Server",
'type': "docker",
'install_command': "docker ps -a",
'setup_command': "docker ps",
'source': "emby/embyserver",
'docker_create': {
'container_config' : {
'name' : 'embyserver',
'image' : 'emby/embyserver:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/libraries',
'/media',
'/config'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
LIBRARIES_DIR: {
'bind': '/libraries',
'mode': 'ro',
},
BASE_MEDIA_DIR: {
'bind': '/media',
'mode': 'rw',
},
BASE_INSTALL_DIR+'/config/emby/config': {
'bind': '/config',
'mode': 'rw',
}
}
}
}
},
'ombi': {
'name': "Ombi",
'description': "Ombi",
'type': "docker",
'source': "linuxserver/ombi",
'docker_create': {
'container_config' : {
'name' : 'ombi',
'image' : 'linuxserver/ombi:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/etc/localtime',
'/config'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
'/etc/localtime': {
'bind': '/etc/localtime',
'mode': 'ro',
},
BASE_INSTALL_DIR+'/config/ombi/config': {
'bind': '/config',
'mode': 'rw',
}
}
}
}
},
'sickrage': {
'name': "SickRage",
'description': "SickRage",
'type': "docker",
'source': "linuxserver/sickrage",
'docker_create': {
'container_config' : {
'name' : 'sickrage',
'image' : 'linuxserver/sickrage:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/downloads',
'/config',
'/media'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
DOWNLOADS_DIR: {
'bind': '/downloads',
'mode': 'rw',
},
BASE_INSTALL_DIR+'/config/sickrage/config': {
'bind': '/config',
'mode': 'rw',
},
BASE_MEDIA_DIR: {
'bind': '/media',
'mode': 'rw',
},
}
}
}
},
'couchpotato': {
'name': "CouchPotato",
'description': "CouchPotato",
'type': "docker",
'source': "linuxserver/couchpotato",
'docker_create': {
'container_config' : {
'name' : 'couchpotato',
'image' : 'linuxserver/couchpotato:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/downloads',
'/config',
'/media'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
DOWNLOADS_DIR: {
'bind': '/downloads',
'mode': 'rw',
},
BASE_INSTALL_DIR+'/config/couchpotato/config': {
'bind': '/config',
'mode': 'rw',
},
BASE_MEDIA_DIR: {
'bind': '/media',
'mode': 'rw',
},
}
}
}
},
'transmission': {
'name': "Transmission",
'description': "Transmission",
'type': "docker",
'source': "linuxserver/transmission",
'docker_create': {
'container_config' : {
'name' : 'transmission',
'image' : 'linuxserver/transmission:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/downloads',
'/config',
'/watch'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
DOWNLOADS_DIR: {
'bind': '/downloads',
'mode': 'rw',
},
BASE_INSTALL_DIR+'/config/transmission/config': {
'bind': '/config',
'mode': 'rw',
},
DOWNLOADS_DIR+'/watch': {
'bind': '/watch',
'mode': 'rw',
},
}
}
}
},
'sabnzbd': {
'name': "SABnzbd",
'description': "SABnzbd",
'type': "docker",
'source': "linuxserver/sabnzbd",
'docker_create': {
'container_config' : {
'name' : 'sabnzbd',
'image' : 'linuxserver/sabnzbd:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/downloads',
'/config',
'/incomplete-downloads'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
DOWNLOADS_DIR: {
'bind': '/downloads',
'mode': 'rw',
},
BASE_INSTALL_DIR+'/config/sabnzbd/config': {
'bind': '/config',
'mode': 'rw',
},
DOWNLOADS_DIR+'/nzbincomplete': {
'bind': '/incomplete-downloads',
'mode': 'rw',
}
}
}
}
},
'radarr': {
'name': "Radarr",
'description': "Radarr",
'type': "docker",
'source': "linuxserver/radarr",
'docker_create': {
'container_config' : {
'name' : 'radarr',
'image' : 'linuxserver/radarr:latest',
'environment':{
'TZ': DEFAULT_TZ
},
'volumes' : [
'/downloads',
'/config',
'/media'
]
},
'host_config': {
'network_mode':'host',
'binds' : {
DOWNLOADS_DIR: {
'bind': '/downloads',
'mode': 'rw',
},
BASE_INSTALL_DIR+'/config/radarr/config': {
'bind': '/config',
'mode': 'rw',
},
BASE_MEDIA_DIR: {
'bind': '/media',
'mode': 'rw',
}
}
}
}
}
}
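# Illustrative helper (not part of the original project): a minimal sketch of
# how one DOCKER entry above could map onto docker-py's low-level APIClient
# calls, assuming the docker-py package is available.
def launch_service_example(service_key):
    import docker  # assumption: docker-py (>= 2.0) is installed
    meta = DOCKER[service_key]['docker_create']
    client = docker.APIClient()
    # Build the HostConfig (network mode, bind mounts) from the entry above
    host_config = client.create_host_config(**meta['host_config'])
    container = client.create_container(
        image=meta['container_config']['image'],
        name=meta['container_config']['name'],
        environment=meta['container_config']['environment'],
        volumes=meta['container_config']['volumes'],
        host_config=host_config)
    client.start(container)  # e.g. launch_service_example('sabnzbd')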
class Services(object):
def __init__(self):
self.AllServices = self.getEvents()
def getEvents(self):
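        # Merge every service dictionary (currently only DOCKER) into a
        # single name -> metadata lookup table.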
SERVICES_LIST = [DOCKER]
allservices = {}
for s in SERVICES_LIST:
allservices.update(s)
return allservices
def find_service_meta(self, service):
meta = []
        try:
            items = self.AllServices.items()
        except AttributeError:
            # fall back for mappings that only provide iteritems() (Python 2)
            items = self.AllServices.iteritems()
for key, value in items:
if key == service:
meta.append(key)
meta.append(value)
return meta
def test_config_read():
    config = read_config()
main_dict = {}
for section in config.sections():
main_dict[section] = {}
for key in config[section]:
main_dict[section][key] = config.get(section, key)
import json
print(json.dumps(main_dict, indent=4))
if __name__ == '__main__':
# Test config file
    test_config_read()
| maninator/manimediaserver | setup/lib/mani_config.py | Python | gpl-3.0 | 9,976 |
import sys
from distutils.core import setup
if (sys.version_info.major, sys.version_info.minor) < (3, 4):
sys.exit("Python < 3.4 not supported.")
setup(
name='octopus-tools',
version='0.1',
license='LGPLv3',
url='https://github.com/octopus-platform/octopus-tools',
packages=['octopus', 'octopus.server', 'octopus.plugins', 'octopus.shell',
'octopus.shell.completer', 'octopus.shell.onlinehelp', 'octopus.shell.config', 'octopus.importer',
'octopus.shelltool'],
package_dir={
'octopus.shell': 'octopus/shell',
'octopus.shell.config': 'octopus/shell/config'
},
package_data={
'octopus.shell': ['data/banner.txt'],
'octopus.shell.config': ['data/octopus_shell.ini']
},
scripts=['scripts/octopus-project', 'scripts/octopus-plugin', 'scripts/octopus-shell', 'scripts/octopus-csvimport']
)
| octopus-platform/octopus-tools | setup.py | Python | lgpl-3.0 | 890 |
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Set up the cylc environment."""
import os
import sys
from parsec import LOG
def environ_init():
"""Initialise cylc environment."""
cylc_dir_lib = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
environ_path_add([cylc_dir_lib], 'PYTHONPATH')
# Ensure cylc library is at the front of "sys.path".
if sys.path[0:1] != [cylc_dir_lib]:
if cylc_dir_lib in sys.path:
sys.path.remove(cylc_dir_lib)
sys.path.insert(0, cylc_dir_lib)
os.environ['CYLC_DIR'] = os.path.dirname(cylc_dir_lib)
if os.getenv('CYLC_SUITE_DEF_PATH', ''):
environ_path_add([os.getenv('CYLC_SUITE_DEF_PATH')])
# Python output buffering delays appearance of stdout and stderr
# when output is not directed to a terminal (this occurred when
# running pre-5.0 cylc via the posix nohup command; is it still the
# case in post-5.0 daemon-mode cylc?)
os.environ['PYTHONUNBUFFERED'] = 'true'
def environ_path_add(dirs, key='PATH'):
"""For each dir_ in dirs, prepend dir_ to the PATH environment variable.
If key is specified, prepend dir_ to the named environment variable instead
of PATH.
"""
paths_str = os.getenv(key, '')
# ''.split(os.pathsep) gives ['']
if paths_str.strip():
paths = paths_str.split(os.pathsep)
else:
paths = []
for dir_ in dirs:
while dir_ in paths:
paths.remove(dir_)
paths.insert(0, dir_)
os.environ[key] = os.pathsep.join(paths)
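# Example (illustrative): if PATH is "/usr/bin", environ_path_add(["/opt/bin"])
# leaves PATH as "/opt/bin:/usr/bin" (os.pathsep is ":" on POSIX, ";" on
# Windows); a directory already on the path is moved to the front, not duplicated.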
environ_init()
__version__ = "8.0a0"
| matthewrmshin/cylc | lib/cylc/__init__.py | Python | gpl-3.0 | 2,324 |
from datetime import datetime, timedelta
from pprint import pprint
from django import forms
from utils.functions import shift_years
from .models import Account
class AccountForm(forms.ModelForm):
email = forms.CharField(
widget=forms.TextInput(attrs={"size": 40, "autofocus": "autofocus"}))
nome = forms.CharField(
widget=forms.TextInput(attrs={"size": 40}))
ddd_1 = forms.CharField(
widget=forms.NumberInput(attrs={"size": 2, "min": 1}))
ddd_2 = forms.CharField(
widget=forms.NumberInput(attrs={"size": 2, "min": 1}))
subdiretorio = forms.CharField(
widget=forms.TextInput(attrs={"size": 40}))
class Meta:
model = Account
fields = [
"tipo", "email", "nome", "setor",
"ddd_1", "num_1", "ddd_2", "num_2",
"diretorio", "subdiretorio"
]
| anselmobd/fo2 | src/email_signature/forms.py | Python | mit | 869 |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest.client import (
versions,
)
from synapse.rest.client.v1 import (
room,
events,
profile,
presence,
initial_sync,
directory,
voip,
admin,
pusher,
push_rule,
register as v1_register,
login as v1_login,
logout,
)
from synapse.rest.client.v2_alpha import (
sync,
filter,
account,
register,
auth,
receipts,
keys,
tokenrefresh,
tags,
account_data,
report_event,
openid,
notifications,
devices,
thirdparty,
sendtodevice,
)
from synapse.http.server import JsonResource
class ClientRestResource(JsonResource):
"""A resource for version 1 of the matrix client API."""
def __init__(self, hs):
JsonResource.__init__(self, hs, canonical_json=False)
self.register_servlets(self, hs)
@staticmethod
def register_servlets(client_resource, hs):
versions.register_servlets(client_resource)
# "v1"
room.register_servlets(hs, client_resource)
events.register_servlets(hs, client_resource)
v1_register.register_servlets(hs, client_resource)
v1_login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource)
presence.register_servlets(hs, client_resource)
initial_sync.register_servlets(hs, client_resource)
directory.register_servlets(hs, client_resource)
voip.register_servlets(hs, client_resource)
admin.register_servlets(hs, client_resource)
pusher.register_servlets(hs, client_resource)
push_rule.register_servlets(hs, client_resource)
logout.register_servlets(hs, client_resource)
# "v2"
sync.register_servlets(hs, client_resource)
filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource)
register.register_servlets(hs, client_resource)
auth.register_servlets(hs, client_resource)
receipts.register_servlets(hs, client_resource)
keys.register_servlets(hs, client_resource)
tokenrefresh.register_servlets(hs, client_resource)
tags.register_servlets(hs, client_resource)
account_data.register_servlets(hs, client_resource)
report_event.register_servlets(hs, client_resource)
openid.register_servlets(hs, client_resource)
notifications.register_servlets(hs, client_resource)
devices.register_servlets(hs, client_resource)
thirdparty.register_servlets(hs, client_resource)
sendtodevice.register_servlets(hs, client_resource)
| TribeMedia/synapse | synapse/rest/__init__.py | Python | apache-2.0 | 3,211 |
import os
from os import environ as env
from voxel_globe.common_tasks import shared_task, VipTask
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task(base=VipTask, bind=True)
def create_height_map(self, voxel_world_id, render_height):
import shutil
import urllib
import numpy as np
import brl_init
from boxm2_scene_adaptor import boxm2_scene_adaptor
from boxm2_adaptor import ortho_geo_cam_from_scene, scene_lvcs, scene_bbox
from vpgl_adaptor_boxm2_batch import convert_local_to_global_coordinates, geo2generic, save_geocam_to_tfw
from vil_adaptor_boxm2_batch import save_image, scale_and_offset_values, stretch_image, image_range
import vsi.io.image
import voxel_globe.tools
import voxel_globe.tools.hash
import voxel_globe.tools.camera
import voxel_globe.meta.models as models
import voxel_globe.ingest.payload.tools
with voxel_globe.tools.task_dir('height_map', cd=True) as processing_dir:
voxel_world = models.VoxelWorld.objects.get(id=voxel_world_id)
scene = boxm2_scene_adaptor(os.path.join(voxel_world.directory, 'scene.xml'), env['VIP_OPENCL_DEVICE'])
ortho_camera, cols, rows = ortho_geo_cam_from_scene(scene.scene)
tfw_camera = os.path.join(processing_dir, 'cam.tfw')
save_geocam_to_tfw(ortho_camera, tfw_camera)
with open(tfw_camera, 'r') as fid:
geo_transform = [float(x) for x in fid.readlines()]
(x0,y0,z0),(x1,y1,z1) = scene_bbox(scene.scene)
lvcs = scene_lvcs(scene.scene)
#lvcs = vpgl_adaptor.create_lvcs(lat=origin[1], lon=origin[0], el=origin[2],
# csname="wgs84")
_,_,min_height = convert_local_to_global_coordinates(lvcs, x0, y0, z0)
if render_height is None:
render_height = z1+(z1-z0)/1000
      #z1+(z1-z0)/1000 means "just a little above the top of the scene".
      #Multiplying by 2 would be overkill and fails when z1 is negative, and
      #adding +1 is unreliable in an arbitrarily scaled system; adding 0.1%
      #of the scene height is more than good enough.
else:
render_height = render_height - voxel_world.origin[2]
logger.critical("Render Height is %f (%s)", render_height, type(render_height))
generic_camera = geo2generic(ortho_camera, cols, rows, render_height, 0)
z_exp_img, z_var_img = scene.render_z_image(generic_camera, cols, rows)
#This is TECHNICALLY wrong, it assumes the earth is flat.
scale_and_offset_values(z_exp_img, 1, min_height)
height_filename = os.path.join(processing_dir, 'height.tif')
save_image(z_exp_img, height_filename)
checksum = voxel_globe.tools.hash.sha256_file(height_filename)
with voxel_globe.tools.image_sha_dir(checksum) as image_dir:
original_filename = os.path.join(image_dir, 'height_map.tif')
#If the exact file exist already, don't ingest it again. Unlikely
if not os.path.exists(original_filename):
img = vsi.io.image.imread(height_filename)
vsi.io.image.imwrite_geotiff(img.raster(), original_filename,
[geo_transform[x] for x in [4,0,1,5,2,3]])
zoomify_filename = os.path.join(image_dir, 'zoomify.tif')
img_min, img_max = image_range(z_exp_img)
if img_min == img_max:
zoomify_image = z_exp_img #At least it won't crash
else:
zoomify_image = stretch_image(z_exp_img, img_min, img_max, 'byte')
save_image(zoomify_image, zoomify_filename)
zoomify_name = os.path.join(image_dir, 'zoomify')
voxel_globe.ingest.payload.tools.zoomify_image(zoomify_filename, zoomify_name)
img = voxel_globe.meta.models.Image(
name="Height Map %s (%s)" % (voxel_world.name,
voxel_world.id),
image_width=cols, image_height=rows,
number_bands=1, pixel_format='f', file_format='zoom',
service_id=self.request.id)
img.filename_path=original_filename
img.save()
image_set = models.ImageSet.objects.get_or_create(name="Height Maps",
defaults={"_attributes":'{"autogen":true}'})[0]
image_set.images.add(img)
gsd = scene.description['voxelLength']
camera_center = ((x0+x1)/2, (y0+y1)/2, z1+10000)
d = z1-z0+10000
k=np.eye(3)
k[0,2] = cols/2
k[1,2] = rows/2
k[0,0] = k[1,1] = d/gsd
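    # k is a pinhole intrinsic matrix: principal point at the image center,
    # focal length d/gsd (camera standoff above the scene floor divided by
    # the ground sample distance).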
r=np.eye(3)
r[0,0]=-1
t = -r.T.dot(camera_center)
camera=voxel_globe.tools.camera.save_krt(self.request.id, img, k, r, t,
voxel_world.origin)
camera_set=voxel_globe.meta.models.CameraSet(\
name="Height Map %s (%s)" % (voxel_world.name, voxel_world.id), \
images=image_set, service_id=self.request.id)
camera_set.save()
camera_set.cameras.add(camera)
@shared_task(base=VipTask, bind=True)
def height_map_error(self, image_id):
import numpy as np
import vpgl_adaptor_boxm2_batch as vpgl_adaptor
from vsi.io.image import imread, GdalReader
from voxel_globe.meta import models
import voxel_globe.tools
from voxel_globe.tools.celery import Popen
from vsi.tools.file_util import lncp
tie_points_yxz = []
control_points_yxz = []
image = models.Image.objects.get(id=image_id)
height_reader = GdalReader(image.filename_path, autoload=True)
transform = height_reader.object.GetGeoTransform()
height = height_reader.raster()
del height_reader
tie_points = image.tiepoint_set.all()
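  # GDAL GeoTransform convention: x = gt[0] + col*gt[1] + row*gt[2] and
  # y = gt[3] + col*gt[4] + row*gt[5]; the +0.5 offsets below sample pixel
  # centers rather than corners.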
for tie_point in tie_points:
lla_xyz = tie_point.control_point.point.coords
control_points_yxz.append([lla_xyz[x] for x in [1,0,2]])
tie_points_yxz.append([transform[4]*(tie_point.point.coords[0]+0.5) + transform[5]*(tie_point.point.coords[1]+0.5) + transform[3],
transform[1]*(tie_point.point.coords[0]+0.5) + transform[2]*(tie_point.point.coords[1]+0.5) + transform[0],
height[tie_point.point.coords[1], tie_point.point.coords[0]]])
origin_yxz = np.mean(np.array(control_points_yxz), axis=0)
tie_points_local = []
control_points_local = []
lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1], origin_yxz[2], 'wgs84')
for tie_point in tie_points_yxz:
tie_points_local.append(vpgl_adaptor.convert_to_local_coordinates2(lvcs, *tie_point))
for control_point in control_points_yxz:
control_points_local.append(vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_point))
error = np.linalg.norm(np.array(tie_points_local)-np.array(control_points_local), axis=0)/(len(tie_points_local)**0.5)
result={}
result['error'] = list(error)
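  # 95%-confidence accuracy factors (NSSDA-style): horizontal accuracy is
  # 2.4477 times the mean of the x and y RMSEs; vertical accuracy is the
  # one-dimensional 95% normal quantile, 1.96, times the z RMSE.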
result['horizontal_accuracy'] = 2.4477*0.5*(error[0]+error[1])
result['vertical_accuracy'] = 1.96*error[2]
return result | ngageoint/voxel-globe | voxel_globe/height_map/tasks.py | Python | mit | 6,796 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: .py
| zeroonegit/python | runoob/basic_tutorial/template.py | Python | mit | 64 |
#!/usr/bin/python
import unittest
import db
import lambda_functions
import logging
import os
class LambdaCommon(lambda_functions.LambdaCommon):
def createDb(self):
return db.DBMemory()
class TestLambdaFunctions(unittest.TestCase):
def testCommon(self):
obj = LambdaCommon()
def testPageBucket(self):
obj = LambdaCommon()
self.assertEqual("pagebucket",obj.getPageBucket())
os.environ["PAGE_BUCKET"] = "OVERRIDE_BUCKET"
self.assertEqual("OVERRIDE_BUCKET",obj.getPageBucket())
def testaddCorsHeaders(self):
resp = lambda_functions.addCorsHeaders({"statusCode":200,"body":"{}"})
self.assertEqual(200,resp["statusCode"])
self.assertEqual("{}",resp["body"])
self.assertTrue("Authorization" in resp["headers"]["Access-Control-Allow-Headers"].split(','))
self.assertTrue("Content-Type" in resp["headers"]["Access-Control-Allow-Headers"].split(','))
self.assertTrue("GET" in resp["headers"]["Access-Control-Allow-Methods"].split(','))
self.assertEqual("*",resp["headers"]["Access-Control-Allow-Origin"])
if __name__ == '__main__':
#FORMAT = "%(asctime)-15s %(message)s"
#logging.basicConfig(format=FORMAT)
unittest.main()
| intirix/serverless-wiki | lambda_functions_test.py | Python | apache-2.0 | 1,157 |
# Module: UnitTests.tBioNanoAssembly.py
# Version: 0.1
# Author: Aaron Sharp
# Date: 06/29/2015
#
# The purpose of this module is to provide unit tests for
# all modules in Operations.Assemble.BioNano
import unittest
import os
from collections import OrderedDict
from copy import copy
from UnitTests.Helper import Mock
from Operations.BioNano.files import BnxFile
from Operations.BioNano.Assemble.RefineB0 import RefineB0
from Operations.BioNano.Assemble.Assembly import RefineA
from Operations.BioNano.Assemble.Assembly import Assembly
from Operations.BioNano.Assemble.Assembly import GenericAssembly
from Operations.BioNano.Assemble.Summarize import Summarize
from Operations.BioNano.Assemble.Merge import Merge
from Operations.BioNano.Assemble.GroupManifest import GroupManifest
from Operations.BioNano.Assemble.Sort import Sort
from Operations.BioNano.Assemble.Split import Split
from Operations.BioNano.Assemble.PairwiseAlignment import PairwiseAlignment
from Operations.BioNano.Assemble.MoleculeStats import MoleculeStats
from Operations.BioNano.Assemble.Input import Input
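# Note: these tests monkey-patch class methods with dummy implementations via
# `im_func`, which in Python 2 extracts the plain function from an unbound
# method; each native method is saved first and restored after the test.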
class tAssembly(unittest.TestCase):
workspace=Mock(input_file="input_file", work_dir="work_dir")
vital_parameters=Mock(pval=1e-5, fp=1.5, fn=.150, min_molecule_len=100, min_molecule_sites=6)
native_autoGeneratePrereqs=Assembly.autoGeneratePrereqs
def dummy_autoGeneratePrereqs(self):
self.autoGeneratePrereqsCalled=True
def dummy_getResources(self):
return -1
def dummy_getOutputFile(self):
return "output_file"
def setUp(self):
Assembly.autoGeneratePrereqs=tAssembly.dummy_autoGeneratePrereqs.im_func
self.obj=Assembly(self.workspace, self.vital_parameters)
def tearDown(self):
Assembly.autoGeneratePrereqs=self.native_autoGeneratePrereqs
def test_default_init(self):
expectedDefault=Assembly(None, None)
expectedDefault.workspace=self.workspace
expectedDefault.vital_parameters=self.vital_parameters
expectedDefault.quality=None
expectedDefault.sd=0.2
expectedDefault.sf=0.2
expectedDefault.sr=0.03
expectedDefault.res=3.3
expectedDefault.color=1
expectedDefault.alignment_score_threshold=1
expectedDefault.max_rel_coverage_multiple=100
expectedDefault.max_rel_coverage_absolute=200
expectedDefault.max_rel_coverage_absolute_2=30
expectedDefault.bulge_coverage=20
expectedDefault.max_coverage=10
expectedDefault.min_coverage=10
expectedDefault.min_average_coverage=5
expectedDefault.min_maps=5
expectedDefault.min_contig_len=0.0
expectedDefault.end_trim=1
expectedDefault.chimera_pval=0.001
expectedDefault.chimera_num=3
expectedDefault.fast_bulge=1000
expectedDefault.fragile_preserve=False
expectedDefault.draftsize=1
expectedDefault.min_duplicate_len=1
expectedDefault.binary_output=True
expectedDefault.min_snr=2
expectedDefault.output_prefix="unrefined"
expectedDefault.add_alignment_filter=True
expectedDefault.alignment_filter_threshold=100
expectedDefault.alignment_filter_minlen_change=2.0
expectedDefault.alignment_filter_pval_change=0.5
expectedDefault.overwrite_output=True
expectedDefault.hide_branches=True
expectedDefault.send_output_to_file=True
expectedDefault.send_errors_to_file=True
expectedDefault.total_job_count=1
expectedDefault.autoGeneratePrereqsCalled=True
self.assertEqual(expectedDefault, self.obj)
def test_default_write_code(self):
self.workspace.binaries={"bng_assembler": "Assembler"}
native_getStepDir=Assembly.getStepDir
Assembly.getStepDir=self.dummy_getStepDir.im_func
native_getMem=Assembly.getMem
Assembly.getMem=self.dummy_getResources.im_func
native_getThreads=Assembly.getThreads
Assembly.getThreads=self.dummy_getResources.im_func
Mock.getOutputFile=self.dummy_getOutputFile.im_func
self.obj.split_summary=Mock()
self.obj.pairwise_summary=Mock()
self.obj.molecule_stats=Mock()
expected=["cd " + self.workspace.work_dir + "\n" +
"mkdir " + self.dummy_getStepDir() + "\n" +
"cd " + self.dummy_getStepDir() + "\n" +
"pwd\n" +
"Assembler -if ../" + self.dummy_getOutputFile() + " -af ../" + self.dummy_getOutputFile() + " -XmapStatRead ../" + self.dummy_getOutputFile() + " -usecolor 1 -FP " + str(self.vital_parameters.fp) + " -FN " + str(self.vital_parameters.fn) + " -sd 0.2 -sf 0.2 -sr 0.03 -res 3.3 -T " + str(self.vital_parameters.pval) + " -S 1 -MaxRelCoverage 100 200 30 -BulgeCoverage 20 -MaxCoverage 10 -MinCov 10 -MinAvCov 5 -MinMaps 5 -MinContigLen 0.0 -EndTrim 1 -refine 0 -PVchim 0.001 3 -FastBulge 1000 -FragilePreserve 0 -draftsize 1 -SideBranch 1 -contigs_format 1 -maxthreads " + str(self.dummy_getResources()) + " -maxmem 1 -minlen " + str(self.vital_parameters.min_molecule_len) + " -minsites " + str(self.vital_parameters.min_molecule_sites) + " -minSNR 2 -o unrefined -AlignmentFilter 100 2.0 0.5 -force -SideChain -stdout -stderr \n"]
actual=self.obj.writeCode()
del self.workspace.binaries
Assembly.getStepDir=native_getStepDir
Assembly.getMem=native_getMem
Assembly.getThreads=native_getThreads
del Mock.getOutputFile
self.assertEqual(expected, actual)
def dummy_inputDotGetStepDir(self):
return "input_file"
def test_default_get_step_dir(self):
Mock.getStepDir=tAssembly.dummy_inputDotGetStepDir.im_func
self.obj.inpt=Mock()
self.assertEqual("assembly_input_file_fp" + str(self.vital_parameters.fp) + "_fn" + str(self.vital_parameters.fn) + "_pval" + str(self.vital_parameters.pval) + "_minlen" + str(self.vital_parameters.min_molecule_len) + "_minsites" + str(self.vital_parameters.min_molecule_sites),self.obj.getStepDir())
del Mock.getStepDir
def dummy_getStepDir(self):
return "dummy"
def test_get_output_file(self):
native_getStepDir=Assembly.getStepDir
Assembly.getStepDir=tAssembly.dummy_getStepDir.im_func
self.obj.output_prefix="dummy"
self.assertEqual("dummy/dummy.contigs",self.obj.getOutputFile())
Assembly.getStepDir=native_getStepDir
def test_get_output_file_extension(self):
self.assertEqual("contigs",self.obj.getOutputFileExtension())
def test_auto_generate_prereqs(self):
expected=Assembly(self.workspace, self.vital_parameters)
Assembly.autoGeneratePrereqs=self.native_autoGeneratePrereqs
native_getTime_split=Split.getTime
Split.getTime=self.dummy_getLargeMemory.im_func
native_getTime_pairwise=PairwiseAlignment.getTime
PairwiseAlignment.getTime=self.dummy_getLargeMemory.im_func
self.obj.vital_parameters.blocks=1
self.vital_parameters.blocks=1
expected.inpt=Input(self.workspace)
expected.sort=Sort(self.workspace, copy(self.vital_parameters))
expected.molecule_stats=expected.sort.getMoleculeStats()
expected.split=Split(self.workspace, copy(self.vital_parameters))
expected.split_summary=Summarize(self.workspace, expected.split)
expected.pairwise_alignment=PairwiseAlignment(self.workspace, copy(self.vital_parameters))
expected.pairwise_summary=Summarize(self.workspace, expected.pairwise_alignment)
self.obj.autoGeneratePrereqs()
Split.getTime=native_getTime_split
PairwiseAlignment.getTime=native_getTime_pairwise
del self.obj.vital_parameters.blocks
self.assertEqual(expected, self.obj)
def test_get_prereq(self):
pairwise_summary=Mock()
self.obj.pairwise_summary=pairwise_summary
self.assertEqual(pairwise_summary,self.obj.getPrereq())
def test_isComplete_is(self):
native_getOutputFile=Assembly.getOutputFile
Assembly.getOutputFile=self.dummy_getOutputFile.im_func
with open(self.dummy_getOutputFile(), "w"):
actual = self.obj.isComplete()
os.remove(self.dummy_getOutputFile())
Assembly.getOutputFile=native_getOutputFile
self.assertTrue(actual)
def test_isComplete_isNot(self):
native_getOutputFile=Assembly.getOutputFile
Assembly.getOutputFile=self.dummy_getOutputFile.im_func
actual = self.obj.isComplete()
Assembly.getOutputFile=native_getOutputFile
self.assertFalse(actual)
def dummy_returnFalse(self):
return False
def test_createQualityObject_notComplete(self):
native_isComplete=Assembly.isComplete
Assembly.isComplete=self.dummy_returnFalse.im_func
expected="The step is not complete yet"
actual=""
try:
self.obj.createQualityObject()
except Exception as e:
actual=str(e)
Assembly.isComplete=native_isComplete
self.assertEqual(expected, actual)
@unittest.skip('Incomplete test')
def test_createQualityObject_complete(self):
self.assertEqual(1,2)
def dummy_loadQualityObjectFromFile(self):
self.loadQualityObjectFromFileCalled=True
mock_value=-1
self.quality=Mock(length=mock_value, count=mock_value)
def test_getQualityCount_noQuality(self):
self.obj.quality=None
native_loadQualityObjectFromFile=Assembly.loadQualityObjectFromFile
Assembly.loadQualityObjectFromFile=self.dummy_loadQualityObjectFromFile.im_func
expecteds=[True, -1] #Must be the same value as dummy_loadQualityObjectFromFile
actual_value=self.obj.getQuality_count()
actuals=[self.obj.loadQualityObjectFromFileCalled, actual_value]
Assembly.loadQualityObjectFromFile=native_loadQualityObjectFromFile
self.assertEqual(expecteds, actuals)
def dummy_throwException(self):
raise Exception("This function shouldn't have been called")
def test_getQualityCount_quality(self):
native_loadQualityObjectFromFile=Assembly.loadQualityObjectFromFile
Assembly.loadQualityObjectFromFile=self.dummy_throwException.im_func
expected=-1
self.obj.quality=Mock(count=expected)
actual=self.obj.getQuality_count()
Assembly.loadQualityObjectFromFile=native_loadQualityObjectFromFile
self.assertEqual(expected, actual)
def test_getQualityLength_noQuality(self):
self.obj.quality=None
native_loadQualityObjectFromFile=Assembly.loadQualityObjectFromFile
Assembly.loadQualityObjectFromFile=self.dummy_loadQualityObjectFromFile.im_func
expecteds=[True, -1] # Must be the same as dummy_loadQualityObjectFromFile
actual_value=self.obj.getQuality_length()
actuals=[self.obj.loadQualityObjectFromFileCalled, actual_value]
Assembly.loadQualityObjectFromFile=native_loadQualityObjectFromFile
self.assertEqual(expecteds, actuals)
def test_getQualityLength_quality(self):
expected=self.dummy_getResources()
self.obj.quality=Mock(length=expected)
native_loadQualityObjectFromFile=Assembly.loadQualityObjectFromFile
Assembly.loadQualityObjectFromFile=self.dummy_throwException.im_func
actual=self.obj.getQuality_length()
Assembly.loadQualityObjectFromFile=native_loadQualityObjectFromFile
self.assertEqual(expected, actual)
def dummy_getLargeMemory(self):
return -1
def test_get_mem(self):
Mock.getLargeMemory=tAssembly.dummy_getLargeMemory.im_func
self.workspace.resources=Mock()
self.assertEqual(self.dummy_getLargeMemory(), self.obj.getMem())
del Mock.getLargeMemory
del self.workspace.resources
def dummy_getMediumTime(self):
return -1
def test_get_time(self):
Mock.getMediumTime=tAssembly.dummy_getMediumTime.im_func
self.workspace.resources=Mock()
self.assertEqual(self.dummy_getMediumTime(), self.obj.getTime())
del Mock.getMediumTime
del self.workspace.resources
def test_get_threads(self):
self.assertEqual(1, self.obj.getThreads())
class tInput(unittest.TestCase):
workspace=Mock(work_dir="work_dir", input_file="input_file")
def setUp(self):
self.obj=Input(self.workspace)
def test_init_default(self):
expected=[self.workspace, None]
self.assertEqual(expected, [self.obj.workspace, self.obj.quality])
def test_hash(self):
expected=hash((self.workspace.input_file, self.workspace.work_dir, "Input"))
self.assertEqual(expected, self.obj.__hash__())
def test_str(self):
self.assertEqual(self.workspace.input_file, str(self.obj))
def test_writeCode(self):
self.assertEqual([], self.obj.writeCode())
def test_getStepDir(self):
self.assertEqual(self.workspace.input_file, self.obj.getStepDir())
def test_getOutputFile(self):
self.assertEqual(self.workspace.input_file, self.obj.getOutputFile())
def test_getOutputFileExtension(self):
self.assertEqual("bnx", self.obj.getOutputFileExtension())
def test_autoGeneratePrereqs(self):
expected=self.obj.__dict__
self.obj.autoGeneratePrereqs()
self.assertEqual(expected, self.obj.__dict__)
def test_getPrereq(self):
self.assertEqual(None, self.obj.getPrereq())
def test_isComplete_isComplete(self):
expected=True
with open(self.workspace.input_file, "w"):
actual=self.obj.isComplete()
os.remove(self.workspace.input_file)
self.assertEqual(expected, actual)
def test_isComplete_isNotComplete(self):
expected=False
actual=self.obj.isComplete()
self.assertEqual(expected, actual)
def test_getBnxFile_doesExist(self):
expected="expected"
actual="actual"
with open(self.workspace.input_file, "w"):
expected=BnxFile(self.workspace.input_file)
actual=self.obj.getBnxFile()
os.remove(self.workspace.input_file)
self.assertEqual(expected, actual)
def test_getBnxFile_doesNotExist(self):
with self.assertRaises(IOError):
self.obj.getBnxFile()
def dummy_getBnxFile(self):
return dummy_BnxFile()
def dummy_saveQualityObjectToFile(self):
self.savedQualityObjectToFile=True
def test_createQualityObject(self):
native_getBnxFile=Input.getBnxFile
Input.getBnxFile=self.dummy_getBnxFile.im_func
native_saveQualityObjectToFile=Input.saveQualityObjectToFile
Input.saveQualityObjectToFile=self.dummy_saveQualityObjectToFile.im_func
expected_count=0
expected_quantity=0.0
expected_labels=0
for item in dummy_BnxFile().parse():
expected_count+=1
expected_quantity+=item.length
expected_labels+=item.num_labels
expected=[expected_count, expected_quantity, expected_labels, True]
self.obj.createQualityObject()
actual=[self.obj.quality.count, self.obj.quality.quantity, self.obj.quality.labels, self.obj.savedQualityObjectToFile]
Input.getBnxFile=native_getBnxFile
Input.saveQualityObjectToFile=native_saveQualityObjectToFile
self.assertEqual(expected, actual)
def dummy_loadQuality_count(self):
return 1
def dummy_loadQuality_quantity(self):
return 1
def dummy_loadQuality_labels(self):
return 1
def dummy_loadQuality_density(self):
return 1
def dummy_loadQuality_averageLength(self):
return 1
def test_loadQualityReportItems(self):
native_loadQuality_count=Input.loadQuality_count
Input.loadQuality_count=self.dummy_loadQuality_count.im_func
native_loadQuality_quantity=Input.loadQuality_quantity
Input.loadQuality_quantity=self.dummy_loadQuality_quantity.im_func
native_loadQuality_labels=Input.loadQuality_labels
Input.loadQuality_labels=self.dummy_loadQuality_labels.im_func
native_loadQuality_density=Input.loadQuality_density
Input.loadQuality_density=self.dummy_loadQuality_density.im_func
native_loadQuality_averageLength=Input.loadQuality_averageLength
Input.loadQuality_averageLength=self.dummy_loadQuality_averageLength.im_func
expected=OrderedDict()
expected["File: " + self.obj.getOutputFile()]=3
expected["Molecule count: " + str(self.dummy_loadQuality_count())]=1
expected["Total quantity: " + str(self.dummy_loadQuality_quantity())]=1
expected["Total labels: " + str(self.dummy_loadQuality_labels())]=1
expected["Average label density: " + str(self.dummy_loadQuality_density())]=2
expected["Average length: " + str(self.dummy_loadQuality_averageLength())]=2
actual=self.obj.loadQualityReportItems()
Input.loadQuality_count=native_loadQuality_count
Input.loadQuality_quantity=native_loadQuality_quantity
Input.loadQuality_labels=native_loadQuality_labels
Input.loadQuality_density=native_loadQuality_density
Input.loadQuality_averageLength=native_loadQuality_averageLength
self.assertEqual(expected, actual)
def dummy_createQualityObject(self):
self.quality=Mock(createCalled=True, count=1, quantity=2, labels=3, density=4, average=5)
def test_loadQuality_count_noneQuantity(self):
native_createQualityObject=Input.createQualityObject
Input.createQualityObject=self.dummy_createQualityObject.im_func
self.dummy_createQualityObject()
expected=[self.quality, self.quality.count]
del self.quality
count=self.obj.loadQuality_count()
Input.createQualityObject=native_createQualityObject
actual=[self.obj.quality,count]
self.assertEqual(expected, actual)
def test_loadQuality_count_someQuantity(self):
quality=Mock(count=1)
self.obj.quality=quality
expected=[quality, quality.count]
count=self.obj.loadQuality_count()
self.assertEqual(expected, [self.obj.quality, count])
def test_loadQuality_quantity_noneQuality(self):
native_createQualityObject=Input.createQualityObject
Input.createQualityObject=self.dummy_createQualityObject.im_func
self.dummy_createQualityObject()
expected=[self.quality, self.quality.quantity]
del self.quality
quantity=self.obj.loadQuality_quantity()
Input.createQualityObject=native_createQualityObject
actual=[self.obj.quality,quantity]
self.assertEqual(expected, actual)
def test_loadQuality_quantity_someQuality(self):
quality=Mock(quantity=1)
self.obj.quality=quality
expected=[quality, quality.quantity]
quantity=self.obj.loadQuality_quantity()
self.assertEqual(expected, [self.obj.quality, quantity])
def test_loadQuality_labels_noneQuality(self):
native_createQualityObject=Input.createQualityObject
Input.createQualityObject=self.dummy_createQualityObject.im_func
self.dummy_createQualityObject()
expected=[self.quality, self.quality.labels]
del self.quality
labels=self.obj.loadQuality_labels()
Input.createQualityObject=native_createQualityObject
actual=[self.obj.quality,labels]
self.assertEqual(expected, actual)
def test_loadQuality_labels_someQuality(self):
quality=Mock(labels=1)
self.obj.quality=quality
expected=[quality, quality.labels]
labels=self.obj.loadQuality_labels()
self.assertEqual(expected, [self.obj.quality, labels])
def test_loadQuality_density_noneQuality(self):
native_createQualityObject=Input.createQualityObject
Input.createQualityObject=self.dummy_createQualityObject.im_func
self.dummy_createQualityObject()
expected=[self.quality, self.quality.density]
del self.quality
density=self.obj.loadQuality_density()
Input.createQualityObject=native_createQualityObject
actual=[self.obj.quality,density]
self.assertEqual(expected, actual)
def test_loadQuality_density_qualityWithoutDensity(self):
self.obj.quality=Mock(labels=2, quantity=1.0)
expected = 2 / 1.0
self.assertEqual(expected, self.obj.loadQuality_density())
def test_loadQuality_density_qualityWithDensity(self):
self.obj.quality=Mock(labels=2, quantity=1.0, density=10)
expected=10
self.assertEqual(expected, self.obj.loadQuality_density())
def test_loadQuality_averageLength_noneQuality(self):
native_createQualityObject=Input.createQualityObject
Input.createQualityObject=self.dummy_createQualityObject.im_func
self.dummy_createQualityObject()
expected=[self.quality, self.quality.average]
del self.quality
average=self.obj.loadQuality_averageLength()
Input.createQualityObject=native_createQualityObject
actual=[self.obj.quality,average]
self.assertEqual(expected, actual)
def test_loadQuality_averageLength_qualityWithoutDensity(self):
self.obj.quality=Mock(count=2, quantity=4.0)
expected = 4.0 / 2
self.assertEqual(expected, self.obj.loadQuality_averageLength())
def test_loadQuality_averageLength_qualityWithDensity(self):
self.obj.quality=Mock(count=2, quantity=4.0, average=10)
expected=10
self.assertEqual(expected, self.obj.loadQuality_averageLength())
def test_getMem(self):
self.assertEqual(-1, self.obj.getMem())
def test_getTime(self):
self.assertEqual(-1, self.obj.getTime())
def test_getThreads(self):
self.assertEqual(-1, self.obj.getThreads())
class dummy_BnxFile(object):
def parse(self):
return [ Mock(length=100.0, num_labels=13),
Mock(length=200.0, num_labels=26),
Mock(length=300.0, num_labels=39) ]
class tRefineA(unittest.TestCase):
workspace=Mock(work_dir="work_dir", input_file="input_file")
vital_parameters=Mock(fp=1.5, fn=.150, pval=1e-5, min_molecule_len=100, min_molecule_sites=6)
def setUp(self):
native_autoGeneratePrereqs=RefineA.autoGeneratePrereqs
RefineA.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
self.obj=RefineA(self.workspace, copy(self.vital_parameters))
RefineA.autoGeneratePrereqs=native_autoGeneratePrereqs
def dummy_getResources(self):
return -1
def dummy_autoGeneratePrereqs(self):
self.autoGeneratePrereqsCalled=True
def dummy_getStepDir(self):
return "step_dir"
def dummy_getOutputFileExtension(self):
return "ext"
def dummy_getOutputFile(self):
return "output_file.ext"
def test_constructor_default(self):
expected=Mock(
workspace=self.workspace,
vital_parameters=self.vital_parameters,
quality=None,
sd=0.2,
sf=0.2,
sr=0.03,
res=3.3,
usecolor=1,
use_multi_mode=True,
consensus_end_coverage=0.99,
bias_for_low_likelihood_ratio=1e2,
refinement_length_accuracy="",
largest_query_map_interval=4,
largest_reference_map_interval=6,
outlier_pval=1e-5,
end_outlier_prior_probability=0.00001,
contigs_format=1,
overwrite_output=True,
output_prefix="refineA",
send_output_to_file=True,
send_errors_to_file=True,
total_job_count=1,
autoGeneratePrereqsCalled=True)
self.assertEqual(expected, self.obj)
def test_writeCode(self):
self.workspace.binaries={"bng_assembler": "Assembler"}
native_getStepDir=RefineA.getStepDir
RefineA.getStepDir=self.dummy_getStepDir.im_func
native_getThreads=RefineA.getThreads
RefineA.getThreads=self.dummy_getResources.im_func
Mock.getOutputFile=self.dummy_getOutputFile.im_func
self.obj.sort=Mock()
self.obj.assembly=Mock()
self.obj.molecule_stats=Mock()
self.obj.group_manifest=Mock()
expected=["cd " + self.workspace.work_dir + "\n"
+
"mkdir " + self.dummy_getStepDir() + "\n" +
"cd " + self.dummy_getStepDir() + "\n" +
"pwd\n" +
"let contig_num=0\n" +
"while read line\n" +
"do\n" +
" if [[ $line == \"#\"* ]]; then continue; fi\n" +
" let contig_num+=1\n" +
" group_start=`echo $line | awk '{print $1}'`\n" +
" group_end=`echo $line | awk '{print $NF}'`\n" +
" Assembler -i ../" + self.dummy_getOutputFile() + " -contigs ../" + self.dummy_getOutputFile() + " $group_start $group_end -maxthreads -1 -T " + str(self.vital_parameters.pval) + " -usecolor 1 -extend 1 -refine 2 -MultiMode -EndTrim 0.99 -LRbias 100.0 -Mprobeval -deltaX 4 -deltaY 6 -outlier 1e-05 -endoutlier 1e-05 -contigs_format 1 -force -FP " + str(self.vital_parameters.fp) + " -FN " + str(self.vital_parameters.fn) + " -sd 0.2 -sf 0.2 -sr 0.03 -res 3.3 -o refineA -stdout -stderr -XmapStatRead ../" + self.dummy_getOutputFile() + "\n" +
"done < ../" + self.dummy_getOutputFile()]
actual=self.obj.writeCode()
del self.workspace.binaries
RefineA.getStepDir=native_getStepDir
RefineA.getThreads=native_getThreads
del Mock.getOutputFile
self.assertEqual(expected, actual)
def test_getStepDir(self):
Mock.getStepDir=self.dummy_getStepDir.im_func
self.obj.inpt=Mock()
expected="refineA_" + self.dummy_getStepDir() + "_fp" + str(self.vital_parameters.fp) + "_fn" + str(self.vital_parameters.fn) + "_pval" + str(self.vital_parameters.pval) + "_minlen" + str(self.vital_parameters.min_molecule_len) + "_minsites" + str(self.vital_parameters.min_molecule_sites)
actual=self.obj.getStepDir()
del Mock.getStepDir
self.assertEqual(expected, actual)
def test_getOutputFile(self):
native_getStepDir=RefineA.getStepDir
RefineA.getStepDir=self.dummy_getStepDir.im_func
native_getOutputFileExtension=RefineA.getOutputFileExtension
RefineA.getOutputFileExtension=self.dummy_getOutputFileExtension.im_func
self.obj.output_prefix="output_prefix"
expected=self.dummy_getStepDir() + "/" + self.obj.output_prefix + "." + self.dummy_getOutputFileExtension()
actual=self.obj.getOutputFile()
RefineA.getStepDir=native_getStepDir
RefineA.getOutputFileExtension=native_getOutputFileExtension
self.assertEqual(expected, actual)
def test_getOutputFileExtension(self):
self.assertEqual("contigs", self.obj.getOutputFileExtension())
def test_autoGeneratePrereqs(self):
native_getTime_split=Split.getTime
Split.getTime=self.dummy_getResources.im_func
native_getTime_pairwise=PairwiseAlignment.getTime
PairwiseAlignment.getTime=self.dummy_getResources.im_func
self.vital_parameters.blocks=1
self.obj.vital_parameters.blocks=1
self.obj.inpt=Input(self.workspace)
sort=Sort(self.workspace, copy(self.vital_parameters))
self.obj.sort=sort
self.obj.molecule_stats=sort.getMoleculeStats()
split=Split(self.workspace, copy(self.vital_parameters))
self.obj.split=split
self.obj.split_summary=Summarize(self.workspace, split)
pairwise_alignment=PairwiseAlignment(self.workspace, copy(self.vital_parameters))
self.obj.pairwise_alignment=pairwise_alignment
self.obj.pairwise_summary=Summarize(self.workspace, pairwise_alignment)
assembly=Assembly(self.workspace, copy(self.vital_parameters))
self.obj.assembly=assembly
self.obj.assembly_summary=Summarize(self.workspace, assembly)
self.obj.merge_assembly=Merge(self.workspace, assembly)
self.obj.group_manifest=GroupManifest(self.workspace, assembly)
actual=RefineA(self.workspace, copy(self.vital_parameters))
actual.autoGeneratePrereqs()
actual.autoGeneratePrereqsCalled=True
Split.getTime=native_getTime_split
PairwiseAlignment.getTime=native_getTime_pairwise
del self.vital_parameters.blocks
self.assertEqual(self.obj.__dict__, actual.__dict__)
def test_getPrereq(self):
expected=Mock()
self.obj.group_manifest=expected
actual=self.obj.getPrereq()
self.assertEqual(expected, actual)
def test_getMem(self):
Mock.getMediumMemory=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getMem()
del Mock.getMediumMemory
del self.obj.workspace.resources
self.assertEqual(expected, actual)
def test_getTime(self):
Mock.getLargeTime=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getTime()
del Mock.getLargeTime
del self.obj.workspace.resources
self.assertEqual(expected, actual)
def test_getThreads(self):
Mock.getMediumThreads=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getThreads()
del Mock.getMediumThreads
del self.obj.workspace.resources
self.assertEqual(expected, actual)
class tGroupManifest(unittest.TestCase):
workspace=Mock(work_dir="work_dir", input_file="input_file")
assembly=Mock(output_prefix="output_prefix", vital_parameters=Mock(fp=1.5, fn=.150, pval=1e-5, min_molecule_len=100, min_molecule_sites=6), quality=None)
native_autoGen=GroupManifest.autoGeneratePrereqs
def setUp(self):
GroupManifest.autoGeneratePrereqs=self.dummy_autoGen.im_func
self.obj=GroupManifest(self.workspace, self.assembly)
GroupManifest.autoGeneratePrereqs=self.native_autoGen
def dummy_autoGen(self):
self.autoGenCalled=True
def dummy_getResources(self):
return -1
def dummy_getStepDir(self):
return "step_dir"
def dummy_getOutputFile(self):
return "output_file.ext"
def dummy_makeWeightStatsFile(self):
self.makeWeightStatsFileCalled=True
with open("weight_stats.txt", "w"):
pass
def dummy__makeGroupManifestFile(self, dummy1, dummy2):
self._makeGroupManifestFileCalled=True
def test_constructor_default(self):
expected=Mock(
workspace=self.workspace,
assembly=self.assembly,
quality=None,
autoGenCalled=True)
self.assertEqual(expected, self.obj)
def test_writeCode(self):
native_getStepDir=GroupManifest.getStepDir
GroupManifest.getStepDir=self.dummy_getStepDir.im_func
expected="cd " + self.workspace.work_dir + "\n"
expected+="mkdir " + self.dummy_getStepDir() + "\n"
expected+="cd " + self.dummy_getStepDir() + "\n"
expected+="pwd\n"
expected+="python -c 'from Utils.Workspace import Workspace;"
expected+="from Operations.BioNano.Assemble.VitalParameters import VitalParameters;"
expected+="from Operations.BioNano.Assemble.Assembly import GenericAssembly;"
expected+="from Operations.BioNano.Assemble.GroupManifest import GroupManifest;"
expected+="ws=Workspace(\"" + self.workspace.work_dir + "\", \"" + self.workspace.input_file + "\");"
expected+="vp=VitalParameters(" + str(self.assembly.vital_parameters.fp) + ", " + str(self.assembly.vital_parameters.fn) + ", " + str(self.assembly.vital_parameters.pval) + ", " + str(self.assembly.vital_parameters.min_molecule_len) + ", " + str(self.assembly.vital_parameters.min_molecule_sites) + ");"
expected+="gm=GroupManifest(ws, GenericAssembly.createAssembly(ws, vp, \"\"));"
expected+="gm.makeGroupManifestFile()'"
actual=self.obj.writeCode()
GroupManifest.getStepDir=native_getStepDir
self.assertEqual([expected], actual)
def test_makeGroupManifestFile_weightStatsDoesNotExist(self):
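# With no weight_stats.txt on disk, makeGroupManifestFile must first call
# makeWeightStatsFile. The dummies record each call, and the os.remove
# calls below double as existence checks for the files the method is
# expected to leave behind.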
self.obj.makeWeightStatsFileCalled=False
native_getOutputFile=GroupManifest.getOutputFile
GroupManifest.getOutputFile=self.dummy_getOutputFile.im_func
native_makeWeightStatsFile=GroupManifest.makeWeightStatsFile
GroupManifest.makeWeightStatsFile=self.dummy_makeWeightStatsFile.im_func
native__makeGroupManifestFile=GroupManifest._makeGroupManifestFile
GroupManifest._makeGroupManifestFile=self.dummy__makeGroupManifestFile.im_func
expected_makeWeightStatsFileCalled=True
expected__makeGroupManifestFileCalled=True
expected_weightStatsFileExists=True
expected_outputFileExists=True
expected_completeStatusExists=True
expected=[expected_makeWeightStatsFileCalled, expected__makeGroupManifestFileCalled, expected_weightStatsFileExists, expected_outputFileExists, expected_completeStatusExists]
self.obj.makeGroupManifestFile()
GroupManifest.getOutputFile=native_getOutputFile
GroupManifest.makeWeightStatsFile=native_makeWeightStatsFile
GroupManifest._makeGroupManifestFile=native__makeGroupManifestFile
actual_weightStatsFileExists=True
actual_outputFileExists=True
actual_completeStatusExists=True
try:
os.remove("weight_stats.txt")
except OSError:
actual_weightStatsFileExists=False
try:
os.remove("../" + self.dummy_getOutputFile())
except OSError:
actual_outputFileExists=False
try:
os.remove("Complete.status")
except OSError:
actual_completeStatusExists=False
actual=[self.obj.makeWeightStatsFileCalled, self.obj._makeGroupManifestFileCalled, actual_weightStatsFileExists, actual_outputFileExists, actual_completeStatusExists]
self.assertEqual(expected, actual)
def test_makeGroupManifestFile_weightStatsDoesExist(self):
self.obj.makeWeightStatsFileCalled=False
native_getOutputFile=GroupManifest.getOutputFile
GroupManifest.getOutputFile=self.dummy_getOutputFile.im_func
native_makeWeightStatsFile=GroupManifest.makeWeightStatsFile
GroupManifest.makeWeightStatsFile=self.dummy_makeWeightStatsFile.im_func
native__makeGroupManifestFile=GroupManifest._makeGroupManifestFile
GroupManifest._makeGroupManifestFile=self.dummy__makeGroupManifestFile.im_func
expected_makeWeightStatsFileCalled=False
expected__makeGroupManifestFileCalled=True
expected_outputFileExists=True
expected_completeStatusExists=True
expected=[expected_makeWeightStatsFileCalled, expected__makeGroupManifestFileCalled, expected_outputFileExists, expected_completeStatusExists]
with open("weight_stats.txt", "w"):
self.obj.makeGroupManifestFile()
GroupManifest.getOutputFile=native_getOutputFile
GroupManifest.makeWeightStatsFile=native_makeWeightStatsFile
GroupManifest._makeGroupManifestFile=native__makeGroupManifestFile
actual_outputFileExists=True
actual_completeStatusExists=True
os.remove("weight_stats.txt")
try:
os.remove("../" + self.dummy_getOutputFile())
except OSError:
actual_outputFileExists=False
try:
os.remove("Complete.status")
except OSError:
actual_completeStatusExists=False
actual=[self.obj.makeWeightStatsFileCalled, self.obj._makeGroupManifestFileCalled, actual_outputFileExists, actual_completeStatusExists]
self.assertEqual(expected, actual)
@unittest.skip('Incomplete test')
def test__makeGroupManifestFile(self):
self.assertEqual(1,2)
@unittest.skip('Incomplete test')
def test_makeWeightStatsFile(self):
self.assertEqual(1,2)
def test_getStepDir(self):
Mock.getStepDir=self.dummy_getStepDir.im_func
self.obj.merge_assembly=Mock()
expected=self.dummy_getStepDir()
actual=self.obj.getStepDir()
del Mock.getStepDir
self.assertEqual(expected, actual)
def test_getOutputFile(self):
native_getStepDir=GroupManifest.getStepDir
GroupManifest.getStepDir=self.dummy_getStepDir.im_func
Mock.getStepDir=self.dummy_getStepDir.im_func
expected=self.dummy_getStepDir() + "/" + self.assembly.output_prefix + ".group_manifest"
actual=self.obj.getOutputFile()
GroupManifest.getStepDir=native_getStepDir
del Mock.getStepDir
self.assertEqual(expected, actual)
def test_getOutputFileExtension(self):
self.assertEqual("group_manifest", self.obj.getOutputFileExtension())
def test_autoGeneratePrereqs(self):
native_Summarize_autoGen=Summarize.autoGeneratePrereqs
Summarize.autoGeneratePrereqs=self.dummy_autoGen.im_func
native_Merge_autoGen=Merge.autoGeneratePrereqs
Merge.autoGeneratePrereqs=self.dummy_autoGen.im_func
self.obj.assembly_summary=Summarize(self.workspace, self.assembly)
self.obj.merge_assembly=Merge(self.workspace, self.assembly)
actual=GroupManifest(self.workspace, self.assembly)
actual.autoGenCalled=True
Summarize.autoGeneratePrereqs=native_Summarize_autoGen
Merge.autoGeneratePrereqs=native_Merge_autoGen
self.assertEqual(self.obj.__dict__, actual.__dict__)
def test_getPrereq(self):
expected=Mock()
self.obj.merge_assembly=expected
self.assertEqual(expected, self.obj.getPrereq())
def test_getMem(self):
Mock.getSmallMemory=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getMem()
del self.obj.workspace.resources
del Mock.getSmallMemory
self.assertEqual(expected, actual)
def test_getTime(self):
Mock.getSmallTime=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getTime()
del self.obj.workspace.resources
del Mock.getSmallTime
self.assertEqual(expected, actual)
def test_getThreads(self):
self.assertEqual(1, self.obj.getThreads())
class tRefineB0(unittest.TestCase):
workspace=Mock(work_dir="work_dir", input_file="input_file")
vital_parameters=Mock(fp=1.5, fn=.150, pval=1e-5, min_molecule_len=100, min_molecule_sites=6)
def setUp(self):
native_autoGeneratePrereqs=RefineB0.autoGeneratePrereqs
RefineB0.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
self.obj=RefineB0(self.workspace, self.vital_parameters)
RefineB0.autoGeneratePrereqs=native_autoGeneratePrereqs
def dummy_autoGeneratePrereqs(self):
self.autoGeneratePrereqsCalled=True
def dummy_getResources(self):
return -1
def dummy_getString(self, block=None):
return "string"
def test_init_default(self):
expected={"autoGeneratePrereqsCalled": True,
"workspace": self.workspace,
"vital_parameters": self.vital_parameters,
"quality": None,
"output_prefix": "refineB0",
"color": 1,
"aligned_site_threshold": 5,
"max_coverage": 100,
"enable_multi_mode": True,
"internal_split_ratio": 0.20,
"internal_trimmed_coverage_ratio": 0.35,
"cnt_file": "refineB0_max_id",
"min_contig_len": 100.0,
"allow_no_splits": True,
"allow_infinite_splits": False,
"min_end_coverage": 6.99,
"scale_bias_wt": 0,
"min_likelihood_ratio": 1e2,
"max_query_alignment": 4,
"max_reference_alignment": 6,
"max_repeat_shift": 2,
"repeat_pval_ratio": 0.01,
"repeat_log_pval_ratio": 0.7,
"repeat_min_shift_ratio": 0.6,
"min_gap_flanking_sites": 2,
"output_trimmed_coverage": True,
"normalize_trimmed_coverage": True,
"min_gap_flanking_len": 55,
"last_non_chimeric_site_after_gap": 2,
"split_molecules_with_outliers": True,
"outlier_pvals_per_true_positive": 1e-5,
"end_outlier_prior_probability": 1e-4,
"pval_after_refinement": 1,
"faster_refinement_resolution": "",
"count_splits_with_largest_ids": True,
"contig_split_version": "",
"reduced_contig_resolution_divided_by_two": 2.0,
"overwrite_output": True,
"hash_window": 5,
"hash_min_sites": 3,
"hash_sd_max": 2.4,
"hash_sd_rms": 1.5,
"hash_relative_error": 0.05,
"hash_offset_kb": 5.0,
"hash_max_insert_errors": 1,
"hash_max_probe_errors": 1,
"hash_max_unresolved_sites": 1,
"hash_file": "",
"hash_threshold": "",
"hashdelta": 10,
"reduced_molecule_resolution": 1.2,
"insert_threads": 4,
"skip_alignment_statistic_computation": True,
"sd": 0.2,
"sf": 0.2,
"sr": 0.03,
"res": 3.3,
"regex_acceptible_output_file": ".*.bnx",
"write_output_to_file": True,
"write_errors_to_file": True,
"max_job_count": 2
}
self.maxDiff=None
self.assertEqual(expected, self.obj.__dict__)
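# Expected RefAligner command line for a single block; the block id feeds
# the -mapped prefix and the -id argument.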
def getCode(self, block):
return "bng_ref_aligner -i string -o refineB0 -maxthreads -1 -ref string -T 1e-05 -usecolor 1 -A 5 -extend 1 -MaxCov 100 -MultiMode -contigsplit 0.2 0.35 refineB0_max_id -MinSplitLen 100.0 -nosplit 2 -EndTrim 6.99 -biaswt 0 -LRbias 100.0 -deltaX 4 -deltaY 6 -RepeatMask 2 0.01 -RepeatRec 0.7 0.6 -CovTrim 2 -ReplaceCov -TrimNorm -CovTrimLen 55 -TrimNormChim 2 -TrimOutlier -outlier 1e-05 -endoutlier 0.0001 -endoutlierFinal 1 -Mprobeval -splitcnt -splitrev -rres 2.0 -f -refine 0 -hashgen 5 3 2.4 1.5 0.05 5.0 1 1 1 -hash -hashdelta 10 -mres 1.2 -insertThreasds 4 -nostat -maxmem -1 -FP 1.5 -FN 0.15 -sd 0.2 -sf 0.2 -sr 0.03 -res 3.3 -grouped ../string -mapped refineB0_id" + str(block) + "_mapped -output-filter .*.bnx -id " + str(block) + " -stdout -stderr -XmapStatRead ../string -minlen 100 -minsites 6\n"
def test_writeCode_lessThan2BlocksOdd(self):
self.obj.split=Mock(vital_parameters=Mock(blocks=1))
native_getStepDir=RefineB0.getStepDir
RefineB0.getStepDir=self.dummy_getString.im_func
native_getThreads=RefineB0.getThreads
RefineB0.getThreads=self.dummy_getResources.im_func
self.obj.merge_refineA=Mock()
Mock.getOutputFile=self.dummy_getString.im_func
native_getMem=RefineB0.getMem
RefineB0.getMem=self.dummy_getResources.im_func
self.obj.group_manifest=Mock()
self.obj.molecule_stats=Mock()
self.workspace.binaries={"bng_ref_aligner": "bng_ref_aligner"}
header="\n".join(["cd " + self.workspace.work_dir,
"mkdir -p " + self.dummy_getString(),
"cd " + self.dummy_getString(),
"pwd\n"])
code=self.getCode(1)
expected=[header+code]
actual=self.obj.writeCode()
RefineB0.getStepDir=native_getStepDir
RefineB0.getThreads=native_getThreads
del Mock.getOutputFile
RefineB0.getMem=native_getMem
del self.workspace.binaries
self.assertEqual(expected,actual)
def test_writeCode_moreThan2BlocksEven(self):
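# Four blocks should be packed two per job script: blocks 1 and 2 in the
# first script, blocks 3 and 4 in the second.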
self.obj.split=Mock(vital_parameters=Mock(blocks=4))
native_getStepDir=RefineB0.getStepDir
RefineB0.getStepDir=self.dummy_getString.im_func
native_getThreads=RefineB0.getThreads
RefineB0.getThreads=self.dummy_getResources.im_func
self.obj.merge_refineA=Mock()
Mock.getOutputFile=self.dummy_getString.im_func
native_getMem=RefineB0.getMem
RefineB0.getMem=self.dummy_getResources.im_func
self.obj.group_manifest=Mock()
self.obj.molecule_stats=Mock()
self.workspace.binaries={"bng_ref_aligner": "bng_ref_aligner"}
header="\n".join(["cd " + self.workspace.work_dir,
"mkdir -p " + self.dummy_getString(),
"cd " + self.dummy_getString(),
"pwd\n"])
code=self.getCode(1)
expected=["".join([header,self.getCode(1), "", self.getCode(2)]),
"".join([header,self.getCode(3), "", self.getCode(4)])]
actual=self.obj.writeCode()
RefineB0.getStepDir=native_getStepDir
RefineB0.getThreads=native_getThreads
del Mock.getOutputFile
RefineB0.getMem=native_getMem
del self.workspace.binaries
self.assertEqual(expected,actual)
def test_getStepDir(self):
expected="_".join(["refineB0", "fp"+str(self.vital_parameters.fp), "fn"+str(self.vital_parameters.fn), "pval"+str(self.vital_parameters.pval), "minlen"+str(self.vital_parameters.min_molecule_len), "minsites"+str(self.vital_parameters.min_molecule_sites)])
actual=self.obj.getStepDir()
self.assertEqual(expected, actual)
def test_getOutputFileExtension(self):
self.assertEqual("bnx", self.obj.getOutputFileExtension())
def test_autoGeneratePrereqs(self):
Mock.getSmallTime=self.dummy_getResources.im_func
Mock.getLargeTime=self.dummy_getResources.im_func
self.workspace.resources=Mock()
self.obj.workspace.resources=Mock()
self.obj.vital_parameters.blocks=1
self.obj.inpt=Input(self.workspace)
sort=Sort(self.workspace, copy(self.vital_parameters))
self.obj.sort=sort
self.obj.molecule_stats=sort.getMoleculeStats()
split=Split(self.workspace, copy(self.vital_parameters))
self.obj.split=split
self.obj.split_summary=Summarize(self.workspace, split)
pairwise_alignment=PairwiseAlignment(self.workspace, copy(self.vital_parameters))
self.obj.pairwise_alignment=pairwise_alignment
self.obj.pairwise_summary=Summarize(self.workspace, pairwise_alignment)
assembly=Assembly(self.workspace, copy(self.vital_parameters))
self.obj.assembly=assembly
self.obj.assembly_summary=Summarize(self.workspace, assembly)
self.obj.merge_assembly=Merge(self.workspace, assembly)
refineA=RefineA(self.workspace, copy(self.vital_parameters))
self.obj.refineA=refineA
self.obj.refineA_summary=Summarize(self.workspace, refineA)
self.obj.merge_refineA=Merge(self.workspace, refineA)
self.obj.autoGeneratePrereqs()
actual=RefineB0(self.workspace, self.vital_parameters)
actual.autoGeneratePrereqsCalled=True
del self.workspace.resources
del self.vital_parameters.blocks
del Mock.getSmallTime
del Mock.getLargeTime
self.assertEqual(self.obj, actual)
def test_getPrereq(self):
expected=Mock(name="group_manifest")
self.obj.group_manifest=expected
actual=self.obj.getPrereq()
self.assertEqual(expected, actual)
def test_getMem(self):
Mock.getMediumMemory=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getMem()
del Mock.getMediumMemory
self.assertEqual(expected, actual)
def test_getTime(self):
Mock.getLargeTime=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getTime()
del Mock.getLargeTime
self.assertEqual(expected, actual)
def test_getThreads(self):
Mock.getMediumThreads=self.dummy_getResources.im_func
self.obj.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getThreads()
del Mock.getMediumThreads
self.assertEqual(expected, actual)
class tMerge(unittest.TestCase):
workspace=Mock(work_dir="work_dir", input_file="input_file")
assembly=Mock(quality=None, output_prefix="unrefined")
native_autoGeneratePrereqs=Merge.autoGeneratePrereqs
def dummy_autoGen(self):
self.autoGenCalled=True
def dummy_getStepDir(self):
return "step_dir"
def dummy_getOutputFile(self):
return "output_file"
def dummy_getOutputFileExtension(self):
return "cmap"
def setUp(self):
Merge.autoGeneratePrereqs=self.dummy_autoGen.im_func
self.obj=Merge(self.workspace, self.assembly)
Merge.autoGeneratePrereqs=self.native_autoGeneratePrereqs
def test_init(self):
expected=[self.workspace, self.assembly, self.assembly.quality, True, "merge_of_unrefined", True, True, True]
actual=[self.obj.workspace, self.obj.assembly, self.obj.assembly.quality, self.obj.autoGenCalled, self.obj.output_prefix, self.obj.overwrite_output, self.obj.write_output_to_file, self.obj.write_error_to_file]
self.assertEqual(expected, actual)
def test_writeCode(self):
native_getStepDir=Merge.getStepDir
Merge.getStepDir=self.dummy_getStepDir.im_func
self.workspace.binaries={"bng_ref_aligner": "binary"}
Mock.getOutputFile=self.dummy_getOutputFile.im_func
self.obj.assembly_summary=Mock()
expected=["cd " + self.workspace.work_dir + "\n" + "mkdir -p " + self.dummy_getStepDir() + "\n" + "cd " + self.dummy_getStepDir() + "\n" + "pwd\n" + "binary -if ../" + self.dummy_getOutputFile() + " -merge -o " + self.obj.output_prefix + " -f -stdout -stderr \nresult=`tail -n 1 ../" + self.dummy_getStepDir() + "/" + self.obj.output_prefix + ".stdout`\nif [[ \"$result\" != \"END of output\" ]]; then exit 1; else touch Complete.status; fi\n"]
actual=self.obj.writeCode()
Merge.getStepDir=native_getStepDir
del self.workspace.binaries
del Mock.getOutputFile
self.assertEqual(expected, actual)
def test_getStepDir(self):
Mock.getStepDir=self.dummy_getStepDir.im_func
expected="merged_"+self.dummy_getStepDir()
actual=self.obj.getStepDir()
del Mock.getStepDir
self.assertEqual(expected, actual)
def test_getOutputFile(self):
native_getStepDir=Merge.getStepDir
Merge.getStepDir=self.dummy_getStepDir.im_func
native_getOutputFileExtension=Merge.getOutputFileExtension
Merge.getOutputFileExtension=self.dummy_getOutputFileExtension.im_func
self.obj.output_prefix="output_prefix"
expected=self.dummy_getStepDir() + "/output_prefix." + self.dummy_getOutputFileExtension()
actual=self.obj.getOutputFile()
Merge.getStepDir=native_getStepDir
Merge.getOutputFileExtension=native_getOutputFileExtension
self.assertEqual(expected, actual)
def test_getOutputFileExtension(self):
self.assertEqual("cmap", self.obj.getOutputFileExtension())
def test_autoGeneratePrereqs(self):
self.obj.autoGeneratePrereqs()
expected=Summarize(self.workspace, self.assembly)
self.assertEqual(expected, self.obj.assembly_summary)
def test_getPrereq(self):
expected=Mock()
self.obj.assembly_summary=expected
self.assertEqual(expected, self.obj.getPrereq())
def dummy_getResources(self):
return -1
def test_getMem(self):
Mock.getSmallMemory=self.dummy_getResources.im_func
self.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getMem()
del Mock.getSmallMemory
del self.workspace.resources
self.assertEqual(expected, actual)
def test_getTime(self):
Mock.getSmallTime=self.dummy_getResources.im_func
self.workspace.resources=Mock()
expected=self.dummy_getResources()
actual=self.obj.getTime()
del Mock.getSmallTime
del self.workspace.resources
self.assertEqual(expected, actual)
def test_getThreads(self):
self.assertEqual(1, self.obj.getThreads())
class tSummarize(unittest.TestCase):
step=Mock()
workspace=Mock(work_dir="work_dir", input_file="input_file")
vital_parameters=Mock(pval=1e-5, fp=1.5, fn=.015, min_molecule_len=100, min_molecule_sites=6, blocks=1)
def dummy_autoGeneratePrereqs(self):
self.autoGeneratePrereqsCalled=True
def dummy_getString(self):
return "string"
def dummy_getNumber(self):
return -1
def setUp(self):
native_autoGeneratePrereqs=Summarize.autoGeneratePrereqs
Summarize.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
self.obj=Summarize(self.workspace, self.step)
Summarize.autoGeneratePrereqs=native_autoGeneratePrereqs
def test_init(self):
expected=Mock(workspace=self.workspace, step=self.step, autoGeneratePrereqsCalled=True)
self.assertEqual(expected, self.obj)
def test_hash(self):
self.obj.step.vital_parameters=self.vital_parameters
expected=hash((self.workspace.input_file, self.workspace.work_dir, self.vital_parameters.pval, self.vital_parameters.fp, self.vital_parameters.fn, self.vital_parameters.min_molecule_len, self.vital_parameters.min_molecule_sites, "Summarize"))
self.assertEqual(expected, self.obj.__hash__())
def test_eq_none(self):
other=None
self.assertFalse(self.obj==other)
def test_eq_diffClass(self):
other=Mock(workspace=self.workspace, step=self.step)
self.assertFalse(self.obj==other)
def test_eq_areEqual(self):
other=Summarize(self.workspace, self.step)
self.assertTrue(self.obj==other)
def test_writeCode_default(self):
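# The generated script tallies *.stdout files in the step directory,
# counts any whose last line is not "END of output" as an error, and
# appends each successful output path to the summary list file.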
native_getOutputFile=Summarize.getOutputFile
Summarize.getOutputFile=self.dummy_getString.im_func
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
Mock.getOutputFileExtension=self.dummy_getString.im_func
total_job_count=-1
self.obj.step.total_job_count=total_job_count
expected="\n".join(["wd=`pwd`",
"rm -f " + self.dummy_getString() + "",
"let errors=0",
"let total=0",
"for stdout_file in " + self.dummy_getString() + "/*.stdout",
"do",
" let total+=1",
" result=`tail -n 1 $stdout_file`",
" if [[ $result != \"END of output\" ]]; then let errors+=1",
" else",
" file=`echo $stdout_file | sed 's/\.stdout/\." + self.dummy_getString() + "/'`",
" echo $wd/$file >> " + self.dummy_getString() + ";",
" fi",
"done",
"if [ $total -lt " + str(total_job_count) + " ]; then let errors+=1; fi",
"if [ $errors -ne 0 ]; then exit 1; else touch " + self.dummy_getString() + "/Complete.status; fi\n"])
actual=self.obj.writeCode()
Summarize.getOutputFile=native_getOutputFile
Summarize.getStepDir=native_getStepDir
del Mock.getOutputFileExtension
self.maxDiff=None
self.assertEqual([expected], actual)
def test_writeCode_assembly(self):
native_autoGen=Assembly.autoGeneratePrereqs
Assembly.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
self.obj.step=Assembly(self.workspace, self.vital_parameters)
native_getOutputFile=Summarize.getOutputFile
Summarize.getOutputFile=self.dummy_getString.im_func
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
Mock.getOutputFileExtension=self.dummy_getString.im_func
total_job_count=-1
self.obj.step.total_job_count=total_job_count
expected="\n".join(["wd=`pwd`",
"rm -f " + self.dummy_getString() + "",
"let errors=0",
"let total=0",
"for stdout_file in " + self.dummy_getString() + "/*.stdout",
"do",
" let total+=1",
" result=`tail -n 1 $stdout_file`",
" if [[ $result != \"END of output\" ]]; then let errors+=1",
" fi",
"done",
"if [ $total -lt " + str(total_job_count) + " ]; then let errors+=1; fi",
"if [ $errors -ne 0 ]; then exit 1; else touch " + self.dummy_getString() + "/Complete.status; fi",
"ls " + self.dummy_getString() + "/*.cmap | while read file; do echo $wd/$file >> " + self.dummy_getString() + "; done;\n"])
actual=self.obj.writeCode()
Summarize.getOutputFile=native_getOutputFile
Summarize.getStepDir=native_getStepDir
del Mock.getOutputFileExtension
Assembly.autoGeneratePrereqs=native_autoGen
self.assertEqual([expected], actual)
def test_writeCode_refineA(self):
native_autoGen=RefineA.autoGeneratePrereqs
RefineA.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
self.obj.step=RefineA(self.workspace, self.vital_parameters)
native_getOutputFile=Summarize.getOutputFile
Summarize.getOutputFile=self.dummy_getString.im_func
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
Mock.getOutputFileExtension=self.dummy_getString.im_func
total_job_count=-1
self.obj.step.total_job_count=total_job_count
expected="\n".join(["wd=`pwd`",
"rm -f " + self.dummy_getString() + "",
"let errors=0",
"let total=0",
"for stdout_file in " + self.dummy_getString() + "/*.stdout",
"do",
" let total+=1",
" result=`tail -n 1 $stdout_file`",
" if [[ $result != \"END of output\" ]]; then let errors+=1",
" fi",
"done",
"if [ $total -lt " + str(total_job_count) + " ]; then let errors+=1; fi",
"if [ $errors -ne 0 ]; then exit 1; else touch " + self.dummy_getString() + "/Complete.status; fi",
"ls " + self.dummy_getString() + "/*.cmap | while read file; do echo $wd/$file >> " + self.dummy_getString() + "; done;\n"])
actual=self.obj.writeCode()
Summarize.getOutputFile=native_getOutputFile
Summarize.getStepDir=native_getStepDir
del Mock.getOutputFileExtension
RefineA.autoGeneratePrereqs=native_autoGen
self.assertEqual([expected], actual)
def test_getStepDir(self):
Mock.getStepDir=self.dummy_getString.im_func
expected=self.dummy_getString()
actual=self.obj.getStepDir()
del Mock.getStepDir
self.assertEqual(expected, actual)
def test_autoGeneratePrereqs(self):
self.obj.autoGeneratePrereqs()
def test_getPrereq(self):
self.assertEqual(self.step, self.obj.getPrereq())
def test_getOutputFile_split(self):
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
native_getOutputFileExtension=Summarize.getOutputFileExtension
Summarize.getOutputFileExtension=self.dummy_getString.im_func
native_autoGeneratePrereqs=Split.autoGeneratePrereqs
Split.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
native_getTime=Split.getTime
Split.getTime=self.dummy_getNumber.im_func
self.obj.step=Split(self.workspace, self.vital_parameters)
expected=self.dummy_getString() + "/split." + self.dummy_getString()
actual=self.obj.getOutputFile()
Summarize.getStepDir=native_getStepDir
Summarize.getOutputFileExtension=native_getOutputFileExtension
Split.autoGeneratePrereqs=native_autoGeneratePrereqs
Split.getTime=native_getTime
self.assertEqual(expected, actual)
def test_getOutputFile_pairwise(self):
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
native_getOutputFileExtension=Summarize.getOutputFileExtension
Summarize.getOutputFileExtension=self.dummy_getString.im_func
native_split_autoGeneratePrereqs=Split.autoGeneratePrereqs
Split.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
native_split_getTime=Split.getTime
Split.getTime=self.dummy_getNumber.im_func
native_pairwise_autoGeneratePrereqs=PairwiseAlignment.autoGeneratePrereqs
PairwiseAlignment.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
native_pairwise_getTime=PairwiseAlignment.getTime
PairwiseAlignment.getTime=self.dummy_getNumber.im_func
self.obj.step=PairwiseAlignment(self.workspace, self.vital_parameters)
expected=self.dummy_getString() + "/align." + self.dummy_getString()
actual=self.obj.getOutputFile()
Summarize.getStepDir=native_getStepDir
Summarize.getOutputFileExtension=native_getOutputFileExtension
Split.autoGeneratePrereqs=native_split_autoGeneratePrereqs
Split.getTime=native_split_getTime
PairwiseAlignment.autoGeneratePrereqs=native_pairwise_autoGeneratePrereqs
PairwiseAlignment.getTime=native_pairwise_getTime
self.assertEqual(expected, actual)
def test_getOutputFile_assembly(self):
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
native_getOutputFileExtension=Summarize.getOutputFileExtension
Summarize.getOutputFileExtension=self.dummy_getString.im_func
native_autoGeneratePrereqs=Assembly.autoGeneratePrereqs
Assembly.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
native_getTime=Assembly.getTime
Assembly.getTime=self.dummy_getNumber.im_func
self.obj.step=Assembly(self.workspace, self.vital_parameters)
expected=self.dummy_getString() + "/contigs." + self.dummy_getString()
actual=self.obj.getOutputFile()
Summarize.getStepDir=native_getStepDir
Summarize.getOutputFileExtension=native_getOutputFileExtension
Assembly.autoGeneratePrereqs=native_autoGeneratePrereqs
Assembly.getTime=native_getTime
self.assertEqual(expected, actual)
def test_getOutputFile_refineA(self):
native_getStepDir=Summarize.getStepDir
Summarize.getStepDir=self.dummy_getString.im_func
native_getOutputFileExtension=Summarize.getOutputFileExtension
Summarize.getOutputFileExtension=self.dummy_getString.im_func
native_autoGeneratePrereqs=Assembly.autoGeneratePrereqs
Assembly.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
native_getTime=Assembly.getTime
Assembly.getTime=self.dummy_getNumber.im_func
native_autoGeneratePrereqs_refineA=RefineA.autoGeneratePrereqs
RefineA.autoGeneratePrereqs=self.dummy_autoGeneratePrereqs.im_func
self.obj.step=RefineA(self.workspace, self.vital_parameters)
expected=self.dummy_getString() + "/contigs." + self.dummy_getString()
actual=self.obj.getOutputFile()
Summarize.getStepDir=native_getStepDir
Summarize.getOutputFileExtension=native_getOutputFileExtension
Assembly.autoGeneratePrereqs=native_autoGeneratePrereqs
Assembly.getTime=native_getTime
RefineA.autoGeneratePrereqs=native_autoGeneratePrereqs_refineA
self.assertEqual(expected, actual)
def test_getOutputFileExtension(self):
self.assertEqual("list", self.obj.getOutputFileExtension())
def test_getMem(self):
Mock.getSmallMemory=self.dummy_getNumber.im_func
self.workspace.resources=Mock()
actual=self.obj.getMem()
del self.workspace.resources
del Mock.getSmallMemory
self.assertEqual(self.dummy_getNumber(), actual)
def test_getTime(self):
Mock.getSmallTime=self.dummy_getNumber.im_func
self.workspace.resources=Mock()
actual=self.obj.getTime()
del self.workspace.resources
del Mock.getSmallTime
self.assertEqual(self.dummy_getNumber(), actual)
def test_getThread(self):
self.assertEqual(1, self.obj.getThreads())
class tGenericAssembly(unittest.TestCase):
workspace=Mock(work_dir="work_dir", input_file="input_file")
vital_parameters=Mock(fp=1.5, fn=.150, pval=1e-5, min_molecule_len=100, min_molecule_sites=6)
def dummy_autoGen(self):
self.autoGenCalled=True
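# createAssembly acts as a factory: the third argument selects the
# concrete class ("assembly" -> Assembly, "refineA" -> RefineA).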
def test_createAssembly_assembly(self):
native_autoGen=Assembly.autoGeneratePrereqs
Assembly.autoGeneratePrereqs=self.dummy_autoGen.im_func
expected=Assembly(self.workspace, self.vital_parameters)
actual=GenericAssembly.createAssembly(self.workspace, self.vital_parameters, "assembly")
Assembly.autoGeneratePrereqs=native_autoGen
self.assertEqual(expected, actual)
def test_createAssembly_refineA(self):
native_autoGen=RefineA.autoGeneratePrereqs
RefineA.autoGeneratePrereqs=self.dummy_autoGen.im_func
expected=RefineA(self.workspace, self.vital_parameters)
actual=GenericAssembly.createAssembly(self.workspace, self.vital_parameters, "refineA")
RefineA.autoGeneratePrereqs=native_autoGen
self.assertEqual(expected, actual)
| sharpa/OMWare | UnitTests/tBioNanoAssembly.py | Python | gpl-2.0 | 58,499 |
import pytest
from conda_smithy.ci_skeleton import generate
CONDA_FORGE_YML = """recipe_dir: myrecipe
skip_render:
- README.md
- LICENSE.txt
- .gitattributes
- .gitignore
- build-locally.py
- LICENSE
- .github/CONTRIBUTING.md
- .github/ISSUE_TEMPLATE.md
- .github/PULL_REQUEST_TEMPLATE.md
- .github/workflows"""
META_YAML = """{% set name = "my-package" %}
{% set version = environ.get('GIT_DESCRIBE_TAG', 'untagged')|string|replace('-','_') %}
{% set build_number = environ.get('GIT_DESCRIBE_NUMBER', '0') %}
package:
name: {{ name|lower }}
version: {{ version }}
source:
git_url: {{ environ.get('FEEDSTOCK_ROOT', '..') }}
build:
# Uncomment the following line if the package is pure Python and the recipe
# is exactly the same for all platforms. It is okay if the dependencies are
# not built for all platforms/versions, although selectors are still not allowed.
# See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-python
# for more details.
# noarch: python
number: {{ build_number }}
string: {{ [build_number, ('h' + PKG_HASH), environ.get('GIT_DESCRIBE_HASH', '')]|join('_') }}
# If the installation is complex, or different between Unix and Windows,
# use separate bld.bat and build.sh files instead of this key. By default,
# the package will be built for the Python versions supported by conda-forge
# and for all major OSs. Add the line "skip: True # [py<35]" (for example)
# to limit to Python 3.5 and newer, or "skip: True # [not win]" to limit
# to Windows.
script: "{{ PYTHON }} -m pip install . -vv"
requirements:
build:
# If your project compiles code (such as a C extension) then add the required
# compilers as separate entries here. Compilers are named 'c', 'cxx' and 'fortran'.
- {{ compiler('c') }}
host:
- python
- pip
run:
- python
test:
# Some packages might need a `test/commands` key to check CLI.
# List all the packages/modules that `run_test.py` imports.
imports:
- my_package
# Run your test commands here
commands:
- my-package --help
- pytest
# declare any test-only requirements here
requires:
- pytest
# copy over any needed test files here
source_files:
- tests/
# Uncomment and fill in my-package metadata
#about:
# home: https://github.com/conda-forge/conda-smithy
# license: BSD-3-Clause
# license_family: BSD
# license_file: LICENSE
# Uncomment the following if this will be on a forge
# Remove these lines if this is only be used for CI
#extra:
# recipe-maintainers:
# - BobaFett
# - LisaSimpson"""
GITIGNORE = """# conda smithy ci-skeleton start
*.pyc
build_artifacts
# conda smithy ci-skeleton end
"""
def test_generate(tmpdir):
generate(
package_name="my-package",
feedstock_directory=str(tmpdir),
recipe_directory="myrecipe",
)
with open(tmpdir / "conda-forge.yml") as f:
conda_forge_yml = f.read()
assert conda_forge_yml == CONDA_FORGE_YML
with open(tmpdir / "myrecipe" / "meta.yaml") as f:
meta_yaml = f.read()
assert meta_yaml == META_YAML
with open(tmpdir / ".gitignore") as f:
gitignore = f.read()
assert gitignore == GITIGNORE
| ocefpaf/conda-smithy | tests/test_ci_skeleton.py | Python | bsd-3-clause | 3,231 |
from django_webtest import WebTest
from .settings import SettingsMixin
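# Submits the language-switcher form via django-webtest and checks that
# the page is re-rendered in Welsh ('Amdanom Ni' = 'About Us').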
class TestLanguageSwitcher(SettingsMixin, WebTest):
def test_switch_language(self):
response = self.app.get('/')
response.mustcontain('Open data API')
form = response.forms['language_switcher']
form['language'] = 'cy-gb'
response = form.submit().follow()
response.mustcontain('Amdanom Ni')
| mysociety/yournextmp-popit | candidates/tests/test_language_switcher.py | Python | agpl-3.0 | 420 |
x is 1
y is None
| ratnania/pyccel | tests/errors/semantic/ex4.py | Python | mit | 17 |
# Copyright 2013 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from nova import context
from nova import exception
from nova.i18n import _, _LI, _LE
from nova import objects
from nova.objects import base as objects_base
from nova.objects import service as service_obj
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('compute_topic',
default='compute',
help='The topic compute nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('compute',
help='Set a version cap for messages sent to compute services. If you '
'plan to do a live upgrade from an old version to a newer '
'version, you should set this option to the old version before '
'beginning the live upgrade procedure. Only upgrading to the '
'next version is supported, so you cannot skip a release for '
'the live upgrade procedure.')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
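# Example (nova.conf) of pinning computes during a rolling upgrade:
#   [upgrade_levels]
#   compute = kilo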
LOG = logging.getLogger(__name__)
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance.host:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance.uuid)
return instance.host
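# Example: _compute_host(None, instance) resolves to instance.host, while
# an explicit host (say, 'node1') is returned unchanged.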
class ComputeAPI(object):
'''Client side of the compute rpc API.
API version history:
* 1.0 - Initial version.
* 1.1 - Adds get_host_uptime()
* 1.2 - Adds check_can_live_migrate_[destination|source]
* 1.3 - Adds change_instance_metadata()
* 1.4 - Remove instance_uuid, add instance argument to
reboot_instance()
* 1.5 - Remove instance_uuid, add instance argument to
pause_instance(), unpause_instance()
* 1.6 - Remove instance_uuid, add instance argument to
suspend_instance()
* 1.7 - Remove instance_uuid, add instance argument to
get_console_output()
* 1.8 - Remove instance_uuid, add instance argument to
add_fixed_ip_to_instance()
* 1.9 - Remove instance_uuid, add instance argument to attach_volume()
* 1.10 - Remove instance_id, add instance argument to
check_can_live_migrate_destination()
* 1.11 - Remove instance_id, add instance argument to
check_can_live_migrate_source()
* 1.12 - Remove instance_uuid, add instance argument to
confirm_resize()
* 1.13 - Remove instance_uuid, add instance argument to detach_volume()
* 1.14 - Remove instance_uuid, add instance argument to finish_resize()
* 1.15 - Remove instance_uuid, add instance argument to
finish_revert_resize()
* 1.16 - Remove instance_uuid, add instance argument to
get_diagnostics()
* 1.17 - Remove instance_uuid, add instance argument to
get_vnc_console()
* 1.18 - Remove instance_uuid, add instance argument to inject_file()
* 1.19 - Remove instance_uuid, add instance argument to
inject_network_info()
* 1.20 - Remove instance_id, add instance argument to
post_live_migration_at_destination()
* 1.21 - Remove instance_uuid, add instance argument to
power_off_instance() and stop_instance()
* 1.22 - Remove instance_uuid, add instance argument to
power_on_instance() and start_instance()
* 1.23 - Remove instance_id, add instance argument to
pre_live_migration()
* 1.24 - Remove instance_uuid, add instance argument to
rebuild_instance()
* 1.25 - Remove instance_uuid, add instance argument to
remove_fixed_ip_from_instance()
* 1.26 - Remove instance_id, add instance argument to
remove_volume_connection()
* 1.27 - Remove instance_uuid, add instance argument to
rescue_instance()
* 1.28 - Remove instance_uuid, add instance argument to reset_network()
* 1.29 - Remove instance_uuid, add instance argument to
resize_instance()
* 1.30 - Remove instance_uuid, add instance argument to
resume_instance()
* 1.31 - Remove instance_uuid, add instance argument to revert_resize()
* 1.32 - Remove instance_id, add instance argument to
rollback_live_migration_at_destination()
* 1.33 - Remove instance_uuid, add instance argument to
set_admin_password()
* 1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
* 1.35 - Remove instance_uuid, add instance argument to
unrescue_instance()
* 1.36 - Remove instance_uuid, add instance argument to
change_instance_metadata()
* 1.37 - Remove instance_uuid, add instance argument to
terminate_instance()
* 1.38 - Changes to prep_resize():
* remove instance_uuid, add instance
* remove instance_type_id, add instance_type
* remove topic, it was unused
* 1.39 - Remove instance_uuid, add instance argument to run_instance()
* 1.40 - Remove instance_id, add instance argument to live_migration()
* 1.41 - Adds refresh_instance_security_rules()
* 1.42 - Add reservations arg to prep_resize(), resize_instance(),
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
* 1.43 - Add migrate_data to live_migration()
* 1.44 - Adds reserve_block_device_name()
* 2.0 - Remove 1.x backwards compat
* 2.1 - Adds orig_sys_metadata to rebuild_instance()
* 2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
* 2.3 - Adds volume_id to reserve_block_device_name()
* 2.4 - Add bdms to terminate_instance
* 2.5 - Add block device and network info to reboot_instance
* 2.6 - Remove migration_id, add migration to resize_instance
* 2.7 - Remove migration_id, add migration to confirm_resize
* 2.8 - Remove migration_id, add migration to finish_resize
* 2.9 - Add publish_service_capabilities()
* 2.10 - Adds filter_properties and request_spec to prep_resize()
* 2.11 - Adds soft_delete_instance() and restore_instance()
* 2.12 - Remove migration_id, add migration to revert_resize
* 2.13 - Remove migration_id, add migration to finish_revert_resize
* 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
* 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
* 2.16 - Add instance_type to resize_instance
* 2.17 - Add get_backdoor_port()
* 2.18 - Add bdms to rebuild_instance
* 2.19 - Add node to run_instance
* 2.20 - Add node to prep_resize
* 2.21 - Add migrate_data dict param to pre_live_migration()
* 2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
* 2.23 - Remove network_info from reboot_instance
* 2.24 - Added get_spice_console method
* 2.25 - Add attach_interface() and detach_interface()
* 2.26 - Add validate_console_port to ensure the service connects to
vnc on the correct port
* 2.27 - Adds 'reservations' to terminate_instance() and
soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
* 2.28 - Adds check_instance_shared_storage()
* 2.29 - Made start_instance() and stop_instance() take new-world
instance objects
* 2.30 - Adds live_snapshot_instance()
* 2.31 - Adds shelve_instance(), shelve_offload_instance, and
unshelve_instance()
* 2.32 - Make reboot_instance take a new world instance object
* 2.33 - Made suspend_instance() and resume_instance() take new-world
instance objects
* 2.34 - Added swap_volume()
* 2.35 - Made terminate_instance() and soft_delete_instance() take
new-world instance objects
* 2.36 - Made pause_instance() and unpause_instance() take new-world
instance objects
* 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
* 2.38 - Made check_can_live_migrate_[destination|source] take
new-world instance objects
* 2.39 - Made revert_resize() and confirm_resize() take new-world
instance objects
* 2.40 - Made reset_network() take new-world instance object
* 2.41 - Make inject_network_info take new-world instance object
* 2.42 - Splits snapshot_instance() into snapshot_instance() and
backup_instance() and makes them take new-world instance
objects.
* 2.43 - Made prep_resize() take new-world instance object
* 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
* 2.45 - Made resize_instance() take new-world objects
* 2.46 - Made finish_resize() take new-world objects
* 2.47 - Made finish_revert_resize() take new-world objects
... Havana supports message version 2.47. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.47.
* 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
new-world objects
* ... - Remove live_snapshot() that was never actually used
* 3.0 - Remove 2.x compatibility
* 3.1 - Update get_spice_console() to take an instance object
* 3.2 - Update get_vnc_console() to take an instance object
* 3.3 - Update validate_console_port() to take an instance object
* 3.4 - Update rebuild_instance() to take an instance object
* 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
* 3.6 - Make volume_snapshot_{create,delete} use new-world objects
* 3.7 - Update change_instance_metadata() to take an instance object
* 3.8 - Update set_admin_password() to take an instance object
* 3.9 - Update rescue_instance() to take an instance object
* 3.10 - Added get_rdp_console method
* 3.11 - Update unrescue_instance() to take an object
* 3.12 - Update add_fixed_ip_to_instance() to take an object
* 3.13 - Update remove_fixed_ip_from_instance() to take an object
* 3.14 - Update post_live_migration_at_destination() to take an object
* 3.15 - Adds filter_properties and node to unshelve_instance()
* 3.16 - Make reserve_block_device_name and attach_volume use new-world
objects, and add disk_bus and device_type params to
reserve_block_device_name, and bdm param to attach_volume
* 3.17 - Update attach_interface and detach_interface to take an object
* 3.18 - Update get_diagnostics() to take an instance object
* Removed inject_file(), as it was unused.
* 3.19 - Update pre_live_migration to take instance object
* 3.20 - Make restore_instance take an instance object
* 3.21 - Made rebuild take new-world BDM objects
* 3.22 - Made terminate_instance take new-world BDM objects
* 3.23 - Added external_instance_event()
* build_and_run_instance was added in Havana and not used or
documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.23.
* 3.24 - Update rescue_instance() to take optional rescue_image_ref
* 3.25 - Make detach_volume take an object
* 3.26 - Make live_migration() and
rollback_live_migration_at_destination() take an object
* ... Removed run_instance()
* 3.27 - Make run_instance() accept a new-world object
* 3.28 - Update get_console_output() to accept a new-world object
* 3.29 - Make check_instance_shared_storage accept a new-world object
* 3.30 - Make remove_volume_connection() accept a new-world object
* 3.31 - Add get_instance_diagnostics
* 3.32 - Add destroy_disks and migrate_data optional parameters to
rollback_live_migration_at_destination()
* 3.33 - Make build_and_run_instance() take a NetworkRequestList object
* 3.34 - Add get_serial_console method
* 3.35 - Make reserve_block_device_name return a BDM object
... Juno supports message version 3.35. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.35.
* 3.36 - Make build_and_run_instance() send a Flavor object
* 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and
shelve_offload
* 3.38 - Add clean_shutdown to prep_resize
* 3.39 - Add quiesce_instance and unquiesce_instance methods
* 3.40 - Make build_and_run_instance() take a new-world topology
limits object
... Kilo supports messaging version 3.40. So, any changes to
existing methods in 3.x after that point should be done so that they
can handle the version_cap being set to 3.40
... Version 4.0 is equivalent to 3.40. Kilo sends version 4.0 by
default, can accept 3.x calls from Juno nodes, and can be pinned to
3.x for Juno compatibility. All new changes should go against 4.x.
* 4.0 - Remove 3.x compatibility
* 4.1 - Make prep_resize() and resize_instance() send Flavor object
* 4.2 - Add migration argument to live_migration()
* 4.3 - Added get_mks_console method
* 4.4 - Make refresh_instance_security_rules send an instance object
* 4.5 - Add migration, scheduler_node and limits arguments to
rebuild_instance()
... Liberty supports messaging version 4.5. So, any changes to
existing methods in 4.x after that point should be done so that they
can handle the version_cap being set to 4.5
* ... - Remove refresh_security_group_members()
* ... - Remove refresh_security_group_rules()
'''
VERSION_ALIASES = {
'icehouse': '3.23',
'juno': '3.35',
'kilo': '4.0',
'liberty': '4.5',
}
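# Each alias maps a release name to the newest RPC version that release
# speaks, so operators can set [upgrade_levels] compute = <release>
# instead of remembering version numbers.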
def __init__(self):
super(ComputeAPI, self).__init__()
target = messaging.Target(topic=CONF.compute_topic, version='4.0')
upgrade_level = CONF.upgrade_levels.compute
if upgrade_level == 'auto':
version_cap = self._determine_version_cap(target)
else:
version_cap = self.VERSION_ALIASES.get(upgrade_level,
upgrade_level)
serializer = objects_base.NovaObjectSerializer()
self.client = self.get_client(target, version_cap, serializer)
def _determine_version_cap(self, target):
service_version = objects.Service.get_minimum_version(
context.get_admin_context(), 'nova-compute')
history = service_obj.SERVICE_VERSION_HISTORY
try:
version_cap = history[service_version]['compute_rpc']
except IndexError:
LOG.error(_LE('Failed to extract compute RPC version from '
'service history because I am too '
'old (minimum version is now %(version)i)'),
{'version': service_version})
raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION,
minver=service_version)
except KeyError:
LOG.error(_LE('Failed to extract compute RPC version from '
'service history for version %(version)i'),
{'version': service_version})
return target.version
LOG.info(_LI('Automatically selected compute RPC version %(rpc)s '
'from minimum service version %(service)i'),
{'rpc': version_cap,
'service': service_version})
return version_cap
def _compat_ver(self, current, legacy):
if self.client.can_send_version(current):
return current
else:
return legacy
# Cells overrides this
def get_client(self, target, version_cap, serializer):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
:param aggregate:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
instance=instance, network_id=network_id)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'attach_interface',
instance=instance, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, bdm):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'attach_volume', instance=instance, bdm=bdm)
def change_instance_metadata(self, ctxt, instance, diff):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'change_instance_metadata',
instance=instance, diff=diff)
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
version = '4.0'
cctxt = self.client.prepare(server=destination, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
version = '4.0'
source = _compute_host(None, instance)
cctxt = self.client.prepare(server=source, version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data, host=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance,
data=data)
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
reservations=reservations)
def detach_interface(self, ctxt, instance, port_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
instance=instance, volume_id=volume_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def get_console_output(self, ctxt, instance, tail_length):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_console_output',
instance=instance, tail_length=tail_length)
def get_console_pool_info(self, ctxt, console_type, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
def get_console_topic(self, ctxt, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_diagnostics', instance=instance)
def get_instance_diagnostics(self, ctxt, instance):
# TODO(danms): This needs to be fixed for objects
instance_p = jsonutils.to_primitive(instance)
kwargs = {'instance': instance_p}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs)
def get_vnc_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_spice_console',
instance=instance, console_type=console_type)
def get_rdp_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_rdp_console',
instance=instance, console_type=console_type)
def get_mks_console(self, ctxt, instance, console_type):
version = '4.3'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_mks_console',
instance=instance, console_type=console_type)
def get_serial_console(self, ctxt, instance, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'get_serial_console',
instance=instance, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'validate_console_port',
instance=instance, port=port,
console_type=console_type)
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode.
:param ctxt: request context
:param host_param: This value is placed in the message as the 'host'
parameter for the remote method.
:param mode: The maintenance mode to set on the remote host.
:param host: This is the host to send the message to.
'''
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
def host_power_action(self, ctxt, action, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_network_info(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migration, migrate_data=None):
args = {'migration': migration}
version = '4.2'
if not self.client.can_send_version(version):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'live_migration', instance=instance,
dest=dest, block_migration=block_migration,
migrate_data=migrate_data, **args)
def pause_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'post_live_migration_at_destination',
instance=instance, block_migration=block_migration)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None,
clean_shutdown=True):
image_p = jsonutils.to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type,
'image': image_p,
'reservations': reservations,
'request_spec': request_spec,
'filter_properties': filter_properties,
'node': node,
'clean_shutdown': clean_shutdown}
version = '4.1'
if not self.client.can_send_version(version):
version = '4.0'
msg_args['instance_type'] = objects_base.obj_to_primitive(
instance_type)
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
reboot_type=reboot_type)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None, node=None,
preserve_ephemeral=False, migration=None, limits=None,
kwargs=None):
# NOTE(danms): kwargs is only here for cells compatibility, don't
# actually send it to compute
extra = {'preserve_ephemeral': preserve_ephemeral,
'migration': migration,
'scheduled_node': node,
'limits': limits}
version = '4.5'
if not self.client.can_send_version(version):
version = '4.0'
extra.pop('migration')
extra.pop('scheduled_node')
extra.pop('limits')
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
recreate=recreate, on_shared_storage=on_shared_storage,
**extra)
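# NOTE: the can_send_version() checks used by several methods in this
# module implement rolling-upgrade compatibility: the newest pinned RPC
# version is tried first and, when older computes are still present, the
# call falls back to 4.0, typically dropping or down-converting the
# newer-only arguments (as rebuild_instance does above).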
def refresh_provider_fw_rules(self, ctxt, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove a host from an aggregate.
:param ctxt: request context
:param aggregate: The aggregate to remove the host from.
:param host_param: This value is placed in the message as the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password,
rescue_image_ref=None, clean_shutdown=True):
version = '4.0'
msg_args = {'rescue_password': rescue_password,
'clean_shutdown': clean_shutdown,
'rescue_image_ref': rescue_image_ref,
'instance': instance,
}
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
def reset_network(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None, clean_shutdown=True):
msg_args = {'instance': instance, 'migration': migration,
'image': image, 'reservations': reservations,
'instance_type': instance_type,
'clean_shutdown': clean_shutdown,
}
version = '4.1'
if not self.client.can_send_version(version):
msg_args['instance_type'] = objects_base.obj_to_primitive(
instance_type)
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def resume_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks=True,
migrate_data=None):
version = '4.0'
extra = {'destroy_disks': destroy_disks,
'migrate_data': migrate_data,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance, **extra)
def set_admin_password(self, ctxt, instance, new_pass):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'set_admin_password',
instance=instance, new_pass=new_pass)
def set_host_enabled(self, ctxt, enabled, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'swap_volume',
instance=instance, old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
def get_host_uptime(self, ctxt, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id,
disk_bus=None, device_type=None):
kw = {'instance': instance, 'device': device,
'volume_id': volume_id, 'disk_bus': disk_bus,
'device_type': device_type}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw)
if not isinstance(volume_bdm, objects.BlockDeviceMapping):
volume_bdm = objects.BlockDeviceMapping.get_by_volume_id(
ctxt, volume_id)
return volume_bdm
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'backup_instance',
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation)
def snapshot_instance(self, ctxt, instance, image_id):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id)
def start_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
msg_args = {'instance': instance,
'clean_shutdown': clean_shutdown}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', **msg_args)
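# NOTE: do_cast above switches between fire-and-forget delivery (cast)
# and a synchronous round-trip (call); confirm_resize uses the same
# pattern via its `cast` argument.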
def suspend_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None,
delete_type=None):
# NOTE(rajesht): The `delete_type` parameter is passed only because
# the method signature has to match the `terminate_instance()` method
# of the cells rpcapi.
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unrescue_instance', instance=instance)
def soft_delete_instance(self, ctxt, instance, reservations=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
def restore_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'restore_instance', instance=instance)
def shelve_instance(self, ctxt, instance, image_id=None,
clean_shutdown=True):
msg_args = {'instance': instance, 'image_id': image_id,
'clean_shutdown': clean_shutdown}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_instance', **msg_args)
def shelve_offload_instance(self, ctxt, instance,
clean_shutdown=True):
msg_args = {'instance': instance, 'clean_shutdown': clean_shutdown}
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'shelve_offload_instance', **msg_args)
def unshelve_instance(self, ctxt, instance, host, image=None,
filter_properties=None, node=None):
version = '4.0'
msg_kwargs = {
'instance': instance,
'image': image,
'filter_properties': filter_properties,
'node': node,
}
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,
volume_id=volume_id, create_info=create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
def external_instance_event(self, ctxt, instances, events):
cctxt = self.client.prepare(
server=_compute_host(None, instances[0]),
version='4.0')
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping, node=node,
limits=limits)
def quiesce_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'quiesce_instance', instance=instance)
def unquiesce_instance(self, ctxt, instance, mapping=None):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unquiesce_instance', instance=instance,
mapping=mapping)
def refresh_instance_security_rules(self, ctxt, host, instance):
version = '4.4'
if not self.client.can_send_version(version):
version = '4.0'
instance = objects_base.obj_to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance)
| apporc/nova | nova/compute/rpcapi.py | Python | apache-2.0 | 45,182 |
"""Module that is responsible for parsing parameterized header values
encoded in accordance to rfc2231 (new style) or rfc1342 (old style)
"""
from collections import deque
from itertools import groupby
import regex as re
import six
from six.moves import urllib_parse
from flanker.mime.message import charsets
from flanker.mime.message.headers import encodedword
_PARAM_STYLE_OLD = 'old'
_PARAM_STYLE_NEW = 'new'
def decode(header):
"""Accepts parameterized header value (encoded in accordance to
rfc2231 (new style) or rfc1342 (old style)
and returns tuple:
value, {'key': u'val'}
returns None in case of any failure
"""
if six.PY3 and isinstance(header, six.binary_type):
header = header.decode('utf-8')
value, rest = split(encodedword.unfold(header))
if value is None:
return None, {}
return value, decode_parameters(rest)
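# Illustrative usage (a sketch; the exact value types follow the helpers
# below -- plain unencoded parameters come back as plain strings):
#   decode("text/plain; charset=us-ascii")
#   -> ('text/plain', {'charset': 'us-ascii'})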
def is_parametrized(name, value):
return name in ('Content-Type',
'Content-Disposition',
'Content-Transfer-Encoding')
def fix_content_type(value, default=None):
"""Content-Type value may be badly broken"""
if not value:
return default or ('text', 'plain')
values = value.lower().split('/')
if len(values) >= 2:
return values[:2]
elif len(values) == 1:
if values[0] == 'text':
return 'text', 'plain'
elif values[0] == 'html':
return 'text', 'html'
return 'application', 'octet-stream'
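# Illustrative behaviour (a sketch derived from the branches above; note
# the two-part case returns a list while the fallbacks return tuples):
#   fix_content_type('TEXT/HTML') -> ['text', 'html']
#   fix_content_type('html')      -> ('text', 'html')
#   fix_content_type('')          -> ('text', 'plain')
#   fix_content_type('bogus')     -> ('application', 'octet-stream')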
def split(header):
"""Splits value part and parameters part,
e.g.
split("MULTIPART/MIXED;boundary=hal_9000")
becomes:
["multipart/mixed", "boundary=hal_9000"]
"""
match = _RE_HEADER_VALUE.match(header)
if not match:
return (None, None)
return match.group(1).lower(), header[match.end():]
def decode_parameters(string):
"""Parameters can be splitted into several parts, e.g.
title*0*=us-ascii'en'This%20is%20even%20more%20
title*1*=%2A%2A%2Afun%2A%2A%2A%20
title*2="isn't it!"
decode them to the dictionary with keys and values"""
parameters = collect_parameters(string)
groups = {}
for k, parts in groupby(parameters, get_key):
groups[k] = concatenate(list(parts))
return groups
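# For the continuation example in the docstring above, the decoded
# result would be expected to be (a sketch):
#   {'title': u"This is even more ***fun*** isn't it!"}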
def collect_parameters(rest):
"""Scans the string and collects parts
that look like parameter, returns deque of strings
"""
parameters = deque()
p, rest = match_parameter(rest)
while p:
parameters.append(p)
p, rest = match_parameter(rest)
return parameters
def concatenate(parts):
""" Concatenates splitted parts of a parameter in a single parameter,
e.g.
URL*0="ftp://";
URL*1="cs.utk.edu/pub/moore/bulk-mailer/bulk-mailer.tar"
becomes:
URL="ftp://cs.utk.edu/pub/moore/bulk-mailer/bulk-mailer.tar"
"""
part = parts[0]
if is_old_style(part):
# old-style parameters do not support any continuations
return encodedword.mime_to_unicode(get_value(part))
return ''.join(decode_new_style(p) for p in partition(parts))
def match_parameter(rest):
for match in (match_old, match_new):
p, rest = match(rest)
if p:
return p, rest
return None, rest
def match_old(rest):
match = _RE_OLD_STYLE_PARAM.match(rest)
if match:
name = match.group('name')
value = match.group('value')
return parameter(_PARAM_STYLE_OLD, name, value), rest[match.end():]
return None, rest
def match_new(rest):
match = _RE_NEW_STYLE_PARAM.match(rest)
if match:
name = parse_parameter_name(match.group('name'))
value = match.group('value')
return parameter(_PARAM_STYLE_NEW, name, value), rest[match.end():]
return None, rest
def reverse(string):
"""Native reverse of a string looks a little bit cryptic,
just a readable wrapper"""
return string[::-1]
def parse_parameter_name(key):
"""New style parameter names can be splitted into parts,
e.g.
title*0* means that it's the first part that is encoded
title*1* means that it's the second part that is encoded
title*2 means that it is the third part that is unencoded
title means single unencoded
title* means single encoded part
I found it easier to match against a reversed string,
as regexp is simpler
"""
m = _RE_REVERSE_CONTINUATION.match(reverse(key))
key = reverse(m.group('key'))
part = reverse(m.group('part')) if m.group('part') else None
encoded = m.group('encoded')
return key, part, encoded
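# Illustrative results (a sketch; note the 'part' component keeps the
# '*' separator because matching is done on the reversed string):
#   parse_parameter_name('title')    -> ('title', None, None)
#   parse_parameter_name('title*')   -> ('title', None, '*')
#   parse_parameter_name('title*2')  -> ('title', '*2', None)
#   parse_parameter_name('title*1*') -> ('title', '*1', '*')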
def decode_new_style(parameter):
"""Decodes parameter values, quoted or percent encoded, to unicode"""
if is_quoted(parameter):
return unquote(parameter)
if is_encoded(parameter):
return decode_charset(parameter)
return get_value(parameter)
def partition(parts):
"""Partitions the parts in accordance to the algo here:
http://tools.ietf.org/html/rfc2231#section-4.1
"""
encoded = deque()
for part in parts:
if is_encoded(part):
encoded.append(part)
continue
if encoded:
yield join_parameters(encoded)
encoded = deque()
yield part
if encoded:
yield join_parameters(encoded)
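# e.g. given parts [encoded*0*, encoded*1*, plain*2] this yields one
# joined parameter for the encoded run, then the plain part unchanged
# (a sketch of the rfc2231 section 4.1 grouping).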
def decode_charset(parameter):
"""Decodes things like:
"us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20"
to unicode """
v = get_value(parameter)
parts = v.split("'", 2)
if len(parts) != 3:
return v
charset, language, val = parts
val = urllib_parse.unquote(val)
return charsets.convert_to_unicode(charset, val)
def unquote(parameter):
"""Simply removes quotes"""
return get_value(parameter).strip('"')
def parameter(ptype, key, value):
"""Parameter is stored as a tuple,
and below are conventional
"""
return (ptype, key, value)
def is_quoted(part):
return get_value(part)[0] == '"'
def is_new_style(parameter):
return parameter[0] == _PARAM_STYLE_NEW
def is_old_style(parameter):
return parameter[0] == _PARAM_STYLE_OLD
def is_encoded(part):
return part[1][2] == '*'
def get_key(parameter):
if is_old_style(parameter):
return parameter[1].lower()
else:
return parameter[1][0].lower()
def get_value(parameter):
return parameter[2]
def join_parameters(parts):
joined = "".join(get_value(p) for p in parts)
for p in parts:
return parameter(p[0], p[1], joined)
# used to split header value and parameters
_RE_HEADER_VALUE = re.compile(r'''
# don't care about the spaces
^[\ \t]*
# main type and sub type, or any other value
([^\ \t;]+)
# grab the trailing spaces and semicolons
[\ \t;]*''', re.IGNORECASE | re.VERBOSE)
_RE_OLD_STYLE_PARAM = re.compile(r'''
# according to rfc1342, a param value can be an encoded-word,
# and that is actually very common, so detect this form first
^
# skip spaces
[\ \t]*
# parameter name
(?P<name>
[^\x00-\x1f\s\(\)<>@,;:\\"/\[\]\?=]+
)
# skip spaces
[\ \t]*
=
# skip spaces
[\ \t]*
# optional quoting sign
"?
# skip spaces
[\ \t]*
# and a glorious encoded-word sequence
(?P<value>
=\?
.*? # non-greedy to match the end sequence chars
\?=
)
# ends with optional quoting sign that we ignore
"?
''', re.IGNORECASE | re.VERBOSE)
_RE_NEW_STYLE_PARAM = re.compile(r'''
# Here we grab anything that looks like a parameter
^
# skip spaces
[\ \t]*
# parameter name
(?P<name>
[^\x00-\x1f\s\(\)<>@,;:\\"/\[\]\?=]+
)
# skip spaces
[\ \t]*
=
# skip spaces
[\ \t]*
(?P<value>
(?:
"(?:
# so this works for unicode too
[^\x00-\x10\x12-\x19\x22\x5c\x7f]
|
(?:\\[\x21-\x7e\t\ ])
)+"
)
|
# any (US-ASCII) CHAR except SPACE, CTLs, or tspecials
[^\x00-\x1f\s\(\)<>@,;:\\"/\[\]\?=]+
)
# skip spaces
[\ \t]*
;?
''', re.IGNORECASE | re.VERBOSE)
_RE_REVERSE_CONTINUATION = re.compile(
r'^(?P<encoded>\*)?(?P<part>\d+\*)?(?P<key>.*)')
| mailgun/flanker | flanker/mime/message/headers/parametrized.py | Python | apache-2.0 | 8,312 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for TensorFlow "Eager" Mode's Tensor class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import sys
import unittest
import numpy as np
import six
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
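# Test helper: builds an ops.EagerTensor directly (bypassing
# convert_to_tensor) so tests can pin the device and dtype explicitly
# and surface low-level construction errors.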
def _create_tensor(value, device=None, dtype=None):
context.ensure_initialized()
ctx = context.context()
if device is None:
device = ctx.device_name
if dtype is not None:
dtype = dtype.as_datatype_enum
try:
return ops.EagerTensor(value, device=device, dtype=dtype)
except core._NotOkStatusException as e: # pylint: disable=protected-access
raise core._status_to_exception(e.code, e.message)
class TFETensorTest(test_util.TensorFlowTestCase):
def testScalarTensor(self):
t = _create_tensor(3, dtype=dtypes.int32)
self.assertAllEqual(t, _create_tensor(np.array(3)))
self.assertEqual(dtypes.int32, t.dtype)
self.assertEqual(0, t.shape.ndims)
self.assertAllEqual([], t.shape.as_list())
self.assertIn("tf.Tensor", str(t))
self.assertIn("tf.Tensor", repr(t))
def testBadConstructorArgs(self):
context.ensure_initialized()
ctx = context.context()
device = ctx.device_name
# Missing device.
with self.assertRaisesRegex(TypeError, r".*argument 'device' \(pos 2\).*"):
ops.EagerTensor(1)
# Bad dtype type.
with self.assertRaisesRegex(TypeError,
"Expecting a DataType value for dtype. Got"):
ops.EagerTensor(1, device=device, dtype="1")
# Following errors happen when trying to copy to GPU.
if not test_util.is_gpu_available():
self.skipTest("No GPUs found")
with ops.device("/device:GPU:0"):
# Bad device.
with self.assertRaisesRegex(TypeError, "Error parsing device argument"):
ops.EagerTensor(1.0, device=1)
def testNumpyValue(self):
values = np.array([3.0])
t = _create_tensor(values)
self.assertAllEqual(values, t)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNumpyDtypeSurvivesThroughTensorConversion(self):
scalar_creators = [np.int32, np.int64, np.float32, np.float64]
conversion_functions = [ops.convert_to_tensor, constant_op.constant]
for scalar_creator in scalar_creators:
for conversion_function in conversion_functions:
np_val = scalar_creator(3)
tensor_val = conversion_function(np_val)
self.assertEqual(tensor_val.numpy().dtype, np_val.dtype)
self.assertEqual(tensor_val.numpy(), np_val)
def testNumpyValueWithCast(self):
values = np.array([3.0], dtype=np.float32)
t = _create_tensor(values, dtype=dtypes.float64)
self.assertAllEqual(values, t)
ctx = context.context()
# Bad dtype value.
with self.assertRaisesRegex(TypeError, "Invalid dtype argument value"):
ops.EagerTensor(values, device=ctx.device_name, dtype=12345)
def testNumpyOrderHandling(self):
n = np.array([[1, 2], [3, 4]], order="F")
t = _create_tensor(n)
self.assertAllEqual([[1, 2], [3, 4]], t)
def testNumpyArrayDtype(self):
tensor = constant_op.constant([1.0, 2.0, 3.0])
numpy_tensor = np.asarray(tensor, dtype=np.int32)
self.assertAllEqual(numpy_tensor, [1, 2, 3])
def testNdimsAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
def testLenAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
with self.assertRaises(TypeError):
len(numpy_tensor)
with self.assertRaisesRegex(TypeError, r"Scalar tensor has no `len[(][)]`"):
len(tensor)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(len(numpy_tensor), len(tensor))
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(len(numpy_tensor), len(tensor))
def testCopy(self):
t = constant_op.constant(1.0)
tt = copy.copy(t)
self.assertAllEqual(tt, 1.0)
del tt
tt = copy.deepcopy(t)
self.assertAllEqual(tt, 1.0)
del tt
self.assertAllEqual(t, 1.0)
def testConstantDtype(self):
self.assertEqual(
constant_op.constant(1, dtype=np.int64).dtype, dtypes.int64)
def testTensorAndNumpyMatrix(self):
expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)
actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]])
self.assertAllEqual(expected, actual)
self.assertEqual(np.float32, actual.dtype)
self.assertEqual(dtypes.float32, actual.dtype)
self.assertAllEqual([2, 2], actual.shape.as_list())
def testFloatDowncast(self):
# Unless explicitly specified, float64->float32
t = _create_tensor(3.0)
self.assertEqual(dtypes.float32, t.dtype)
t = _create_tensor(3.0, dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t.dtype)
def testBool(self):
self.assertFalse(bool(_create_tensor(False)))
self.assertFalse(bool(_create_tensor([False])))
self.assertFalse(bool(_create_tensor([[False]])))
self.assertFalse(bool(_create_tensor([0])))
self.assertFalse(bool(_create_tensor([0.])))
self.assertTrue(bool(_create_tensor([1])))
self.assertTrue(bool(_create_tensor([1.])))
@unittest.skipUnless(six.PY2, "long has been removed in PY3")
def testLong(self):
self.assertEqual(long(_create_tensor(long(42))), 42)
def testIndex(self):
self.assertEqual([42][_create_tensor(0)], 42)
with self.assertRaises(TypeError):
_ = [42][_create_tensor([0])]
def testIntDowncast(self):
t = _create_tensor(3)
self.assertEqual(dtypes.int32, t.dtype)
t = _create_tensor(3, dtype=dtypes.int64)
self.assertEqual(dtypes.int64, t.dtype)
t = _create_tensor(2**33)
self.assertEqual(dtypes.int64, t.dtype)
def testTensorCreationFailure(self):
with self.assertRaises(ValueError):
# Should fail because each row of the Python object has a different
# number of columns.
self.assertEqual(None, _create_tensor([[1], [1, 2]]))
def testMultiLineTensorStr(self):
t = _create_tensor(np.eye(3))
tensor_str = str(t)
self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str)
self.assertIn(str(t), tensor_str)
def testMultiLineTensorRepr(self):
t = _create_tensor(np.eye(3))
tensor_repr = repr(t)
self.assertTrue(tensor_repr.startswith("<"))
self.assertTrue(tensor_repr.endswith(">"))
self.assertIn("shape=%s, dtype=%s, numpy=\n%r" %
(t.shape, t.dtype.name, t.numpy()), tensor_repr)
def testTensorStrReprObeyNumpyPrintOptions(self):
orig_threshold = np.get_printoptions()["threshold"]
orig_edgeitems = np.get_printoptions()["edgeitems"]
np.set_printoptions(threshold=2, edgeitems=1)
t = _create_tensor(np.arange(10, dtype=np.int32))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t)))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t)))
# Clean up: reset to previous printoptions.
np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
def testZeroDimTensorStr(self):
t = _create_tensor(42)
self.assertIn("42, shape=(), dtype=int32", str(t))
def testZeroDimTensorRepr(self):
t = _create_tensor(42)
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("shape=(), dtype=int32, numpy=42", repr(t))
def testZeroSizeTensorStr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertIn("[], shape=(0,), dtype=float32", str(t))
def testZeroSizeTensorRepr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("shape=(0,), dtype=float32, numpy=%r" % t.numpy(), repr(t))
def testStringTensor(self):
t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]])
t = _create_tensor(t_np_orig)
t_np = t.numpy()
self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig))
def testIterateOverTensor(self):
l = [[1, 2], [3, 4]]
t = _create_tensor(l)
for list_element, tensor_element in zip(l, t):
self.assertAllEqual(list_element, tensor_element.numpy())
def testIterateOverScalarTensorRaises(self):
t = _create_tensor(1)
with self.assertRaisesRegex(TypeError,
"Cannot iterate over a scalar tensor"):
iter(t)
@test_util.run_gpu_only
def testStringTensorOnGPU(self):
with ops.device("/device:GPU:0"):
t = _create_tensor("test string")
self.assertIn("CPU", t.device)
self.assertIn("CPU", t.backing_device)
def testInvalidUTF8ProducesReasonableError(self):
if sys.version_info[0] < 3:
self.skipTest("Test is only valid in python3.")
with self.assertRaises(UnicodeDecodeError):
io_ops.read_file(b"\xff")
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferredDtypeIsRespected(self):
self.assertEqual(
ops.convert_to_tensor(0.5, preferred_dtype=dtypes.int32).dtype,
dtypes.float32)
self.assertEqual(
ops.convert_to_tensor(0.5, preferred_dtype=dtypes.float64).dtype,
dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testCompatibility(self):
integer_types = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
# Floats are not compatible with ints
for t in integer_types:
with self.assertRaises(TypeError):
constant_op.constant(0.5, dtype=t)
# Ints compatible with floats
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float16)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float32)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float64)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.bfloat16)), 5.0)
# Ints and floats are compatible with complex types
self.assertEqual(
constant_op.constant([[1.0]], dtype=dtypes.complex128).dtype,
dtypes.complex128)
self.assertEqual(
constant_op.constant([[1]], dtype=dtypes.complex128).dtype,
dtypes.complex128)
# Quantized types are not compatible with floats
quantized_types = [dtypes.qint16, dtypes.qint32, dtypes.qint8,
dtypes.quint16, dtypes.quint8]
for t in quantized_types:
with self.assertRaises(TypeError):
constant_op.constant(0.5, dtype=t)
# TODO(b/118402529): quantized types are broken in eager.
@test_util.run_in_graph_and_eager_modes
def testCConvertToTensor(self):
with self.assertRaises(TypeError):
_ = constant_op.constant(0) < 0.5
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorAllowsOverflow(self):
_ = ops.convert_to_tensor(123456789, dtype=dtypes.uint8)
@test_util.assert_no_new_pyobjects_executing_eagerly
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNumpyZeroDim(self):
for np_type, dtype in [(np.int32, dtypes.int32),
(np.half, dtypes.half),
(np.float32, dtypes.float32)]:
x = ops.convert_to_tensor([np.array(65, dtype=np_type),
np.array(16, dtype=np_type)])
self.assertEqual(x.dtype, dtype)
self.assertAllEqual(x, [65, 16])
@test_util.assert_no_new_pyobjects_executing_eagerly
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNumpyScalar(self):
x = ops.convert_to_tensor(
[np.array(321, dtype=np.int).item(),
np.array(16, dtype=np.int).item()])
self.assertAllEqual(x, [321, 16])
def testEagerTensorError(self):
with self.assertRaisesRegex(TypeError,
"Cannot convert .* to EagerTensor of dtype .*"):
_ = ops.convert_to_tensor(1., dtype=dtypes.int32)
def testEagerLargeConstant(self):
for t in [dtypes.uint64, dtypes.uint32, dtypes.int32, dtypes.int64]:
self.assertEqual(
constant_op.constant(t.max, dtype=t).numpy(), t.max)
self.assertEqual(
constant_op.constant(t.min, dtype=t).numpy(), t.min)
def test_numpyIsView(self):
t = constant_op.constant([0.0])
t._numpy()[0] = 42.0
self.assertAllClose(t, constant_op.constant([42.0]))
def test_numpyFailsForResource(self):
v = variables.Variable(42)
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Cannot convert .+ resource"):
v._handle._numpy()
def testMemoryviewFailsForResource(self):
v = variables.Variable(42)
with self.assertRaisesRegex(BufferError, "Cannot convert .+ resource"):
np.asarray(memoryview(v._handle))
def testMemoryviewIsReadonly(self):
t = constant_op.constant([0.0])
self.assertTrue(memoryview(t).readonly)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMemoryviewScalar(self):
t = constant_op.constant(42.0)
self.assertAllEqual(
np.array(memoryview(t)), np.array(42.0, dtype=np.float32))
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMemoryviewEmpty(self):
t = constant_op.constant([], dtype=np.float32)
self.assertAllEqual(np.array(memoryview(t)), np.array([]))
@test_util.run_gpu_only
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMemoryviewCopyToCPU(self):
with ops.device("/device:GPU:0"):
t = constant_op.constant([0.0])
self.assertAllEqual(
np.array(memoryview(t)), np.array([0.0], dtype=np.float32))
def testResourceTensorCopy(self):
if not test_util.is_gpu_available():
self.skipTest("GPU only")
with ops.device("GPU:0"):
v = resource_variable_ops.ResourceVariable(1.)
read_handle_on_gpu = resource_variable_ops.read_variable_op(
v.handle, dtypes.float32)
handle_on_cpu = v.handle.cpu()
read_handle_on_cpu = resource_variable_ops.read_variable_op(
handle_on_cpu, dtypes.float32)
self.assertAllEqual(read_handle_on_cpu, read_handle_on_gpu)
class TFETensorUtilTest(test_util.TensorFlowTestCase):
def testListOfThree(self):
t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
t2 = _create_tensor([[1, 2, 5], [3, 4, 5]], dtype=dtypes.int32)
t3 = _create_tensor([[1], [3], [5], [6]], dtype=dtypes.int32)
r = pywrap_tfe.TFE_Py_TensorShapeSlice([t1, t2, t3], 0)
self.assertAllEqual(np.array([3, 2, 4]), r.numpy())
r = pywrap_tfe.TFE_Py_TensorShapeSlice([t1, t2, t3], 1)
self.assertAllEqual(np.array([2, 3, 1]), r.numpy())
def testEmptyTensorList(self):
a = pywrap_tfe.TFE_Py_TensorShapeSlice([], 0)
self.assertTrue(isinstance(a, ops.EagerTensor))
self.assertEqual(0, a.numpy().size)
def testTensorListContainsNonTensors(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegex(
TypeError,
r"Expected a list of EagerTensors but element 1 has type \"str\""):
pywrap_tfe.TFE_Py_TensorShapeSlice([t1, "abc"], 0)
with self.assertRaisesRegex(
TypeError,
r"Expected a list of EagerTensors but element 0 has type \"int\""):
pywrap_tfe.TFE_Py_TensorShapeSlice([2, t1], 0)
def testTensorListNotList(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegex(
TypeError,
r"tensors argument must be a list or a tuple. Got.*EagerTensor"):
pywrap_tfe.TFE_Py_TensorShapeSlice(t1, -2)
def testNegativeSliceDim(self):
t1 = _create_tensor([1, 2], dtype=dtypes.int32)
with self.assertRaisesRegex(
ValueError, r"Slice dimension must be non-negative. Got -2"):
pywrap_tfe.TFE_Py_TensorShapeSlice([t1], -2)
def testUnicode(self):
self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf")
def testFloatTensor(self):
self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype)
self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype)
def testSliceDimOutOfRange(self):
t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
t2 = _create_tensor([1, 2], dtype=dtypes.int32)
t3 = _create_tensor(2, dtype=dtypes.int32)
with self.assertRaisesRegex(
IndexError,
r"Slice dimension \(2\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 2"):
pywrap_tfe.TFE_Py_TensorShapeSlice([t1], 2)
with self.assertRaisesRegex(
IndexError,
r"Slice dimension \(1\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 1"):
pywrap_tfe.TFE_Py_TensorShapeSlice([t2], 1)
with self.assertRaisesRegex(
IndexError,
r"Slice dimension \(1\) must be smaller than rank of all tensors, "
"but tensor at index 1 has rank 1"):
pywrap_tfe.TFE_Py_TensorShapeSlice([t1, t2], 1)
with self.assertRaisesRegex(
IndexError,
r"Slice dimension \(0\) must be smaller than rank of all tensors, "
"but tensor at index 0 has rank 0"):
pywrap_tfe.TFE_Py_TensorShapeSlice([t3], 0)
with self.assertRaisesRegex(
IndexError,
r"Slice dimension \(0\) must be smaller than rank of all tensors, "
"but tensor at index 2 has rank 0"):
pywrap_tfe.TFE_Py_TensorShapeSlice([t2, t1, t3], 0)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testTensorDir(self):
t = array_ops.ones(1)
t.test_attr = "Test"
instance_dir = dir(t)
type_dir = dir(ops.EagerTensor)
# Monkey patched attributes should show up in dir(t)
self.assertIn("test_attr", instance_dir)
instance_dir.remove("test_attr")
self.assertEqual(instance_dir, type_dir)
def testNonRectangularPackAsConstant(self):
l = [array_ops.zeros((10, 1)).numpy(), array_ops.zeros(1).numpy()]
with self.assertRaisesRegex(ValueError, "non-rectangular Python sequence"):
constant_op.constant(l)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFloatAndIntAreConvertibleToComplex(self):
a = [[1., 1], [1j, 2j]]
np_value = np.array(a, dtype=np.complex128)
tf_value = ops.convert_to_tensor(a, dtype=dtypes.complex128)
self.assertAllEqual(tf_value.numpy(), np_value)
if __name__ == "__main__":
test.main()
| aldian/tensorflow | tensorflow/python/eager/tensor_test.py | Python | apache-2.0 | 20,269 |
#! /usr/bin/env python
"""Sorting algorithms visualizer using Tkinter.
This module comprises three ``components'':
- an array visualizer with methods that implement basic sorting
operations (compare, swap) as well as methods for ``annotating'' the
sorting algorithm (e.g. to show the pivot element);
- a number of sorting algorithms (currently quicksort, insertion sort,
selection sort and bubble sort, as well as a randomization function),
all using the array visualizer for its basic operations and with calls
to its annotation methods;
- and a ``driver'' class which can be used as a Grail applet or as a
stand-alone application.
"""
from Tkinter import *
from Canvas import Line, Rectangle
import random
XGRID = 10
YGRID = 10
WIDTH = 6
class Array:
def __init__(self, master, data=None):
self.master = master
self.frame = Frame(self.master)
self.frame.pack(fill=X)
self.label = Label(self.frame)
self.label.pack()
self.canvas = Canvas(self.frame)
self.canvas.pack()
self.report = Label(self.frame)
self.report.pack()
self.left = Line(self.canvas, 0, 0, 0, 0)
self.right = Line(self.canvas, 0, 0, 0, 0)
self.pivot = Line(self.canvas, 0, 0, 0, 0)
self.items = []
self.size = self.maxvalue = 0
if data:
self.setdata(data)
def setdata(self, data):
olditems = self.items
self.items = []
for item in olditems:
item.delete()
self.size = len(data)
self.maxvalue = max(data)
self.canvas.config(width=(self.size+1)*XGRID,
height=(self.maxvalue+1)*YGRID)
for i in range(self.size):
self.items.append(ArrayItem(self, i, data[i]))
self.reset("Sort demo, size %d" % self.size)
speed = "normal"
def setspeed(self, speed):
self.speed = speed
def destroy(self):
self.frame.destroy()
in_mainloop = 0
stop_mainloop = 0
def cancel(self):
self.stop_mainloop = 1
if self.in_mainloop:
self.master.quit()
def step(self):
if self.in_mainloop:
self.master.quit()
Cancelled = "Array.Cancelled" # Exception
def wait(self, msecs):
if self.speed == "fastest":
msecs = 0
elif self.speed == "fast":
msecs = msecs/10
elif self.speed == "single-step":
msecs = 1000000000
if not self.stop_mainloop:
self.master.update()
id = self.master.after(msecs, self.master.quit)
self.in_mainloop = 1
self.master.mainloop()
self.master.after_cancel(id)
self.in_mainloop = 0
if self.stop_mainloop:
self.stop_mainloop = 0
self.message("Cancelled")
raise Array.Cancelled
def getsize(self):
return self.size
def show_partition(self, first, last):
for i in range(self.size):
item = self.items[i]
if first <= i < last:
item.item.config(fill='red')
else:
item.item.config(fill='orange')
self.hide_left_right_pivot()
def hide_partition(self):
for i in range(self.size):
item = self.items[i]
item.item.config(fill='red')
self.hide_left_right_pivot()
def show_left(self, left):
if not 0 <= left < self.size:
self.hide_left()
return
x1, y1, x2, y2 = self.items[left].position()
self.left.coords([(x1-2, 0), (x1-2, 9999)])
self.master.update()
def show_right(self, right):
if not 0 <= right < self.size:
self.hide_right()
return
x1, y1, x2, y2 = self.items[right].position()
self.right.coords(((x2+2, 0), (x2+2, 9999)))
self.master.update()
def hide_left_right_pivot(self):
self.hide_left()
self.hide_right()
self.hide_pivot()
def hide_left(self):
self.left.coords(((0, 0), (0, 0)))
def hide_right(self):
self.right.coords(((0, 0), (0, 0)))
def show_pivot(self, pivot):
x1, y1, x2, y2 = self.items[pivot].position()
self.pivot.coords(((0, y1-2), (9999, y1-2)))
def hide_pivot(self):
self.pivot.coords(((0, 0), (0, 0)))
def swap(self, i, j):
if i == j: return
self.countswap()
item = self.items[i]
other = self.items[j]
self.items[i], self.items[j] = other, item
item.swapwith(other)
def compare(self, i, j):
self.countcompare()
item = self.items[i]
other = self.items[j]
return item.compareto(other)
def reset(self, msg):
self.ncompares = 0
self.nswaps = 0
self.message(msg)
self.updatereport()
self.hide_partition()
def message(self, msg):
self.label.config(text=msg)
def countswap(self):
self.nswaps = self.nswaps + 1
self.updatereport()
def countcompare(self):
self.ncompares = self.ncompares + 1
self.updatereport()
def updatereport(self):
text = "%d cmps, %d swaps" % (self.ncompares, self.nswaps)
self.report.config(text=text)
class ArrayItem:
def __init__(self, array, index, value):
self.array = array
self.index = index
self.value = value
x1, y1, x2, y2 = self.position()
self.item = Rectangle(array.canvas, x1, y1, x2, y2,
fill='red', outline='black', width=1)
self.item.bind('<Button-1>', self.mouse_down)
self.item.bind('<Button1-Motion>', self.mouse_move)
self.item.bind('<ButtonRelease-1>', self.mouse_up)
def delete(self):
item = self.item
self.array = None
self.item = None
item.delete()
def mouse_down(self, event):
self.lastx = event.x
self.lasty = event.y
self.origx = event.x
self.origy = event.y
self.item.tkraise()
def mouse_move(self, event):
self.item.move(event.x - self.lastx, event.y - self.lasty)
self.lastx = event.x
self.lasty = event.y
def mouse_up(self, event):
i = self.nearestindex(event.x)
if i >= self.array.getsize():
i = self.array.getsize() - 1
if i < 0:
i = 0
other = self.array.items[i]
here = self.index
self.array.items[here], self.array.items[i] = other, self
self.index = i
x1, y1, x2, y2 = self.position()
self.item.coords(((x1, y1), (x2, y2)))
other.setindex(here)
def setindex(self, index):
nsteps = steps(self.index, index)
if not nsteps: return
if self.array.speed == "fastest":
nsteps = 0
oldpts = self.position()
self.index = index
newpts = self.position()
trajectory = interpolate(oldpts, newpts, nsteps)
self.item.tkraise()
for pts in trajectory:
self.item.coords((pts[:2], pts[2:]))
self.array.wait(50)
def swapwith(self, other):
nsteps = steps(self.index, other.index)
if not nsteps: return
if self.array.speed == "fastest":
nsteps = 0
myoldpts = self.position()
otheroldpts = other.position()
self.index, other.index = other.index, self.index
mynewpts = self.position()
othernewpts = other.position()
myfill = self.item['fill']
otherfill = other.item['fill']
self.item.config(fill='green')
other.item.config(fill='yellow')
self.array.master.update()
if self.array.speed == "single-step":
self.item.coords((mynewpts[:2], mynewpts[2:]))
other.item.coords((othernewpts[:2], othernewpts[2:]))
self.array.master.update()
self.item.config(fill=myfill)
other.item.config(fill=otherfill)
self.array.wait(0)
return
mytrajectory = interpolate(myoldpts, mynewpts, nsteps)
othertrajectory = interpolate(otheroldpts, othernewpts, nsteps)
if self.value > other.value:
self.item.tkraise()
other.item.tkraise()
else:
other.item.tkraise()
self.item.tkraise()
try:
for i in range(len(mytrajectory)):
mypts = mytrajectory[i]
otherpts = othertrajectory[i]
self.item.coords((mypts[:2], mypts[2:]))
other.item.coords((otherpts[:2], otherpts[2:]))
self.array.wait(50)
finally:
mypts = mytrajectory[-1]
otherpts = othertrajectory[-1]
self.item.coords((mypts[:2], mypts[2:]))
other.item.coords((otherpts[:2], otherpts[2:]))
self.item.config(fill=myfill)
other.item.config(fill=otherfill)
def compareto(self, other):
myfill = self.item['fill']
otherfill = other.item['fill']
outcome = cmp(self.value, other.value)
if outcome < 0:
myflash = 'white'
otherflash = 'black'
elif outcome > 0:
myflash = 'black'
otherflash = 'white'
else:
myflash = otherflash = 'grey'
try:
self.item.config(fill=myflash)
other.item.config(fill=otherflash)
self.array.wait(500)
finally:
self.item.config(fill=myfill)
other.item.config(fill=otherfill)
return outcome
def position(self):
x1 = (self.index+1)*XGRID - WIDTH/2
x2 = x1+WIDTH
y2 = (self.array.maxvalue+1)*YGRID
y1 = y2 - (self.value)*YGRID
return x1, y1, x2, y2
def nearestindex(self, x):
return int(round(float(x)/XGRID)) - 1
# Subroutines that don't need an object
def steps(here, there):
nsteps = abs(here - there)
if nsteps <= 3:
nsteps = nsteps * 3
elif nsteps <= 5:
nsteps = nsteps * 2
elif nsteps > 10:
nsteps = 10
return nsteps
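# Illustrative values (derived from the thresholds above):
#   steps(0, 2)  -> 6   (short hops get extra animation frames)
#   steps(0, 7)  -> 7
#   steps(0, 20) -> 10  (long hops are capped)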
def interpolate(oldpts, newpts, n):
if len(oldpts) != len(newpts):
raise ValueError, "can't interpolate arrays of different length"
pts = [0]*len(oldpts)
res = [tuple(oldpts)]
for i in range(1, n):
for k in range(len(pts)):
pts[k] = oldpts[k] + (newpts[k] - oldpts[k])*i/n
res.append(tuple(pts))
res.append(tuple(newpts))
return res
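# e.g. interpolate((0, 0), (10, 10), 2) -> [(0, 0), (5, 5), (10, 10)]
# (integer arithmetic, matching the Python 2 division above).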
# Various (un)sorting algorithms
def uniform(array):
size = array.getsize()
array.setdata([(size+1)/2] * size)
array.reset("Uniform data, size %d" % size)
def distinct(array):
size = array.getsize()
array.setdata(range(1, size+1))
array.reset("Distinct data, size %d" % size)
def randomize(array):
array.reset("Randomizing")
n = array.getsize()
for i in range(n):
j = random.randint(0, n-1)
array.swap(i, j)
array.message("Randomized")
def insertionsort(array):
size = array.getsize()
array.reset("Insertion sort")
for i in range(1, size):
j = i-1
while j >= 0:
if array.compare(j, j+1) <= 0:
break
array.swap(j, j+1)
j = j-1
array.message("Sorted")
def selectionsort(array):
size = array.getsize()
array.reset("Selection sort")
try:
for i in range(size):
array.show_partition(i, size)
for j in range(i+1, size):
if array.compare(i, j) > 0:
array.swap(i, j)
array.message("Sorted")
finally:
array.hide_partition()
def bubblesort(array):
size = array.getsize()
array.reset("Bubble sort")
for i in range(size):
for j in range(1, size):
if array.compare(j-1, j) > 0:
array.swap(j-1, j)
array.message("Sorted")
def quicksort(array):
size = array.getsize()
array.reset("Quicksort")
try:
stack = [(0, size)]
while stack:
first, last = stack[-1]
del stack[-1]
array.show_partition(first, last)
if last-first < 5:
array.message("Insertion sort")
for i in range(first+1, last):
j = i-1
while j >= first:
if array.compare(j, j+1) <= 0:
break
array.swap(j, j+1)
j = j-1
continue
array.message("Choosing pivot")
j, i, k = first, (first+last)/2, last-1
if array.compare(k, i) < 0:
array.swap(k, i)
if array.compare(k, j) < 0:
array.swap(k, j)
if array.compare(j, i) < 0:
array.swap(j, i)
pivot = j
array.show_pivot(pivot)
array.message("Pivot at left of partition")
array.wait(1000)
left = first
right = last
while 1:
array.message("Sweep right pointer")
right = right-1
array.show_right(right)
while right > first and array.compare(right, pivot) >= 0:
right = right-1
array.show_right(right)
array.message("Sweep left pointer")
left = left+1
array.show_left(left)
while left < last and array.compare(left, pivot) <= 0:
left = left+1
array.show_left(left)
if left > right:
array.message("End of partition")
break
array.message("Swap items")
array.swap(left, right)
array.message("Swap pivot back")
array.swap(pivot, right)
n1 = right-first
n2 = last-left
if n1 > 1: stack.append((first, right))
if n2 > 1: stack.append((left, last))
array.message("Sorted")
finally:
array.hide_partition()
def demosort(array):
while 1:
for alg in [quicksort, insertionsort, selectionsort, bubblesort]:
randomize(array)
alg(array)
# Sort demo class -- usable as a Grail applet
class SortDemo:
def __init__(self, master, size=15):
self.master = master
self.size = size
self.busy = 0
self.array = Array(self.master)
self.botframe = Frame(master)
self.botframe.pack(side=BOTTOM)
self.botleftframe = Frame(self.botframe)
self.botleftframe.pack(side=LEFT, fill=Y)
self.botrightframe = Frame(self.botframe)
self.botrightframe.pack(side=RIGHT, fill=Y)
self.b_qsort = Button(self.botleftframe,
text="Quicksort", command=self.c_qsort)
self.b_qsort.pack(fill=X)
self.b_isort = Button(self.botleftframe,
text="Insertion sort", command=self.c_isort)
self.b_isort.pack(fill=X)
self.b_ssort = Button(self.botleftframe,
text="Selection sort", command=self.c_ssort)
self.b_ssort.pack(fill=X)
self.b_bsort = Button(self.botleftframe,
text="Bubble sort", command=self.c_bsort)
self.b_bsort.pack(fill=X)
# Terrible hack to overcome limitation of OptionMenu...
class MyIntVar(IntVar):
def __init__(self, master, demo):
self.demo = demo
IntVar.__init__(self, master)
def set(self, value):
IntVar.set(self, value)
if str(value) != '0':
self.demo.resize(value)
self.v_size = MyIntVar(self.master, self)
self.v_size.set(size)
sizes = [1, 2, 3, 4] + range(5, 55, 5)
if self.size not in sizes:
sizes.append(self.size)
sizes.sort()
self.m_size = apply(OptionMenu,
(self.botleftframe, self.v_size) + tuple(sizes))
self.m_size.pack(fill=X)
self.v_speed = StringVar(self.master)
self.v_speed.set("normal")
self.m_speed = OptionMenu(self.botleftframe, self.v_speed,
"single-step", "normal", "fast", "fastest")
self.m_speed.pack(fill=X)
self.b_step = Button(self.botleftframe,
text="Step", command=self.c_step)
self.b_step.pack(fill=X)
self.b_randomize = Button(self.botrightframe,
text="Randomize", command=self.c_randomize)
self.b_randomize.pack(fill=X)
self.b_uniform = Button(self.botrightframe,
text="Uniform", command=self.c_uniform)
self.b_uniform.pack(fill=X)
self.b_distinct = Button(self.botrightframe,
text="Distinct", command=self.c_distinct)
self.b_distinct.pack(fill=X)
self.b_demo = Button(self.botrightframe,
text="Demo", command=self.c_demo)
self.b_demo.pack(fill=X)
self.b_cancel = Button(self.botrightframe,
text="Cancel", command=self.c_cancel)
self.b_cancel.pack(fill=X)
self.b_cancel.config(state=DISABLED)
self.b_quit = Button(self.botrightframe,
text="Quit", command=self.c_quit)
self.b_quit.pack(fill=X)
def resize(self, newsize):
if self.busy:
self.master.bell()
return
self.size = newsize
self.array.setdata(range(1, self.size+1))
def c_qsort(self):
self.run(quicksort)
def c_isort(self):
self.run(insertionsort)
def c_ssort(self):
self.run(selectionsort)
def c_bsort(self):
self.run(bubblesort)
def c_demo(self):
self.run(demosort)
def c_randomize(self):
self.run(randomize)
def c_uniform(self):
self.run(uniform)
def c_distinct(self):
self.run(distinct)
def run(self, func):
if self.busy:
self.master.bell()
return
self.busy = 1
self.array.setspeed(self.v_speed.get())
self.b_cancel.config(state=NORMAL)
try:
func(self.array)
except Array.Cancelled:
pass
self.b_cancel.config(state=DISABLED)
self.busy = 0
def c_cancel(self):
if not self.busy:
self.master.bell()
return
self.array.cancel()
def c_step(self):
if not self.busy:
self.master.bell()
return
self.v_speed.set("single-step")
self.array.setspeed("single-step")
self.array.step()
def c_quit(self):
if self.busy:
self.array.cancel()
self.master.after_idle(self.master.quit)
# Main program -- for stand-alone operation outside Grail
def main():
root = Tk()
demo = SortDemo(root)
root.protocol('WM_DELETE_WINDOW', demo.c_quit)
root.mainloop()
if __name__ == '__main__':
main()
| xbmc/atv2 | xbmc/lib/libPython/Python/Demo/tkinter/guido/sortvisu.py | Python | gpl-2.0 | 19,342 |
#!/usr/bin/env python
from __future__ import print_function
import os
import os.path as osp
import sys
try:
import caffe
except ImportError:
print('Cannot import caffe. Please install it.')
quit(1)
import chainer.serializers as S
import fcn
here = osp.dirname(osp.abspath(__file__))
sys.path.insert(0, osp.join(here, '../../fcn/external/fcn.berkeleyvision.org'))
def caffe_to_chainermodel(model, caffe_prototxt, caffemodel_path,
chainermodel_path):
os.chdir(osp.dirname(caffe_prototxt))
net = caffe.Net(caffe_prototxt, caffemodel_path, caffe.TEST)
for name, param in net.params.iteritems():
try:
layer = getattr(model, name)
except AttributeError:
print('Skipping caffe layer: %s' % name)
continue
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W: %s %s' % (param[0].data.shape, layer.W.data.shape))
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b: %s %s' % (param[1].data.shape, layer.b.data.shape))
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_npz(chainermodel_path, model)
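# Hypothetical standalone invocation (illustrative paths, not shipped
# defaults; main() below derives the real ones):
#   caffe_to_chainermodel(fcn.models.FCN32s(),
#                         'voc-fcn32s/deploy.prototxt',
#                         'fcn32s-heavy-pascal.caffemodel',
#                         'fcn32s_from_caffe.npz')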
def main():
for model_name in ['FCN8s', 'FCN8sAtOnce', 'FCN16s', 'FCN32s']:
print('[caffe_to_chainermodel.py] converting model: %s' % model_name)
# get model
model = getattr(fcn.models, model_name)()
if model_name == 'FCN8sAtOnce':
model_name = 'fcn8s-atonce'
else:
model_name = model_name.lower()
# get caffemodel
caffe_prototxt = osp.join(
here, '../..',
'fcn/external/fcn.berkeleyvision.org/voc-%s/deploy.prototxt' %
model_name)
caffemodel = osp.expanduser(
'~/data/models/caffe/%s-heavy-pascal.caffemodel' % model_name)
if not osp.exists(caffemodel):
file = osp.join(osp.dirname(caffe_prototxt), 'caffemodel-url')
url = open(file).read().strip()
fcn.data.cached_download(url, caffemodel)
# convert caffemodel to chainermodel
chainermodel = osp.expanduser(
'~/data/models/chainer/%s_from_caffe.npz' % model_name)
if not osp.exists(chainermodel):
caffe_to_chainermodel(model, caffe_prototxt,
caffemodel, chainermodel)
if __name__ == '__main__':
main()
| wkentaro/fcn | examples/voc/caffe_to_chainermodel.py | Python | mit | 2,601 |
import os
import urlparse
class Config(object):
'''Default configuration object.'''
DEBUG = False
TESTING = False
PORT = int(os.environ.get('PORT', 5000))
class ProductionConfig(Config):
'''Configuration object specific to production environments.'''
REDIS_URL = os.environ.get('REDISTOGO_URL')
if REDIS_URL:
url = urlparse.urlparse(REDIS_URL)
REDIS_HOST = url.hostname
REDIS_PORT = url.port
REDIS_PASSWORD = url.password
MONGOLAB_URI = os.environ.get('MONGOLAB_URI')
if MONGOLAB_URI:
url = urlparse.urlparse(MONGOLAB_URI)
MONGODB_USER = url.username
MONGODB_PASSWORD = url.password
MONGODB_HOST = url.hostname
MONGODB_PORT = url.port
MONGODB_DB = url.path[1:]
class DevelopmentConfig(Config):
'''Configuration object specific to development environments.'''
DEBUG = True
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_DB = os.environ.get('DBNAME', 'taarifa_backend')
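# Sketch of how ProductionConfig derives its settings (the URL below is a
# hypothetical example value for REDISTOGO_URL):
#
#     >>> import urlparse
#     >>> url = urlparse.urlparse('redis://user:secret@example.com:9876/')
#     >>> url.hostname, url.port, url.password
#     ('example.com', 9876, 'secret')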
| taarifa/taarifa_backend | config.py | Python | bsd-3-clause | 1,024 |
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Binding - Interactive API
==================================
Provides the start and stop functions for using PyCOMPSs interactively.
"""
import os
import sys
import logging
from tempfile import mkdtemp
import time
import pycompss.util.context as context
import pycompss.runtime.binding as binding
import pycompss.util.interactive.helpers as interactive_helpers
from pycompss.runtime.binding import get_log_path
from pycompss.runtime.binding import pending_to_synchronize
from pycompss.runtime.commons import RUNNING_IN_SUPERCOMPUTER
from pycompss.util.environment.configuration import prepare_environment
from pycompss.util.environment.configuration import prepare_loglevel_graph_for_monitoring # noqa: E501
from pycompss.util.environment.configuration import updated_variables_in_sc
from pycompss.util.environment.configuration import prepare_tracing_environment
from pycompss.util.environment.configuration import check_infrastructure_variables # noqa: E501
from pycompss.util.environment.configuration import create_init_config_file
from pycompss.util.environment.configuration import setup_logger
# Storage imports
from pycompss.util.storages.persistent import init_storage
from pycompss.util.storages.persistent import stop_storage
# Streaming imports
from pycompss.streams.environment import init_streaming
from pycompss.streams.environment import stop_streaming
# GLOBAL VARIABLES
APP_PATH = 'InteractiveMode'
# Warning! The name should start with 'InteractiveMode' because @task checks
# it explicitly. If changed, it is necessary to update the task decorator.
PERSISTENT_STORAGE = False
STREAMING = False
LOG_PATH = '/tmp/'
GRAPHING = False
def start(log_level='off',
debug=False,
o_c=False,
graph=False,
trace=False,
monitor=None,
project_xml=None,
resources_xml=None,
summary=False,
task_execution='compss',
storage_impl=None,
storage_conf=None,
streaming_backend=None,
streaming_master_name=None,
streaming_master_port=None,
task_count=50,
app_name='Interactive',
uuid=None,
base_log_dir=None,
specific_log_dir=None,
extrae_cfg=None,
comm='NIO',
conn='es.bsc.compss.connectors.DefaultSSHConnector',
master_name='',
master_port='',
scheduler='es.bsc.compss.scheduler.' +
'loadbalancing.LoadBalancingScheduler',
jvm_workers='-Xms1024m,-Xmx1024m,-Xmn400m',
cpu_affinity='automatic',
gpu_affinity='automatic',
fpga_affinity='automatic',
fpga_reprogram='',
profile_input='',
profile_output='',
scheduler_config='',
external_adaptation=False,
propagate_virtual_environment=True,
mpi_worker=False,
verbose=False
):
"""
Start the runtime in interactive mode.
:param log_level: Logging level [ 'off' | 'info' | 'debug' ]
(default: 'off')
:param debug: Debug mode [ True | False ]
(default: False) (overrides log-level)
:param o_c: Objects to string conversion [ True | False ]
(default: False)
:param graph: Generate graph [ True | False ]
(default: False)
:param trace: Generate trace
[ True | False | 'scorep' | 'arm-map' | 'arm-ddt' ]
(default: False)
:param monitor: Monitor refresh rate
(default: None)
:param project_xml: Project xml file path
(default: None)
:param resources_xml: Resources xml file path
(default: None)
:param summary: Execution summary [ True | False ]
(default: False)
:param task_execution: Task execution
(default: 'compss')
:param storage_impl: Storage implementation path
(default: None)
:param storage_conf: Storage configuration file path
(default: None)
:param streaming_backend: Streaming backend
(default: None)
:param streaming_master_name: Streaming master name
(default: None)
:param streaming_master_port: Streaming master port
(default: None)
:param task_count: Task count
(default: 50)
:param app_name: Application name
(default: Interactive_date)
:param uuid: UUID
(default: None)
:param base_log_dir: Base logging directory
(default: None)
:param specific_log_dir: Specific logging directory
(default: None)
:param extrae_cfg: Extrae configuration file path
(default: None)
:param comm: Communication library
(default: NIO)
:param conn: Connector
(default: DefaultSSHConnector)
:param master_name: Master Name
(default: '')
:param master_port: Master port
(default: '')
:param scheduler: Scheduler
(default: es.bsc.compss.scheduler.
loadbalancing.LoadBalancingScheduler)
:param jvm_workers: Java VM parameters
(default: '-Xms1024m,-Xmx1024m,-Xmn400m')
:param cpu_affinity: CPU Core affinity
(default: 'automatic')
:param gpu_affinity: GPU affinity
(default: 'automatic')
:param fpga_affinity: FPGA affinity
(default: 'automatic')
:param fpga_reprogram: FPGA reprogram command
(default: '')
:param profile_input: Input profile
(default: '')
:param profile_output: Output profile
(default: '')
:param scheduler_config: Scheduler configuration
(default: '')
:param external_adaptation: External adaptation [ True | False ]
(default: False)
:param propagate_virtual_environment: Propagate virtual environment
[ True | False ]
(default: True)
:param mpi_worker: Use the MPI worker [ True | False ]
(default: False)
:param verbose: Verbose mode [ True | False ]
(default: False)
:return: None
"""
# Export global variables
global GRAPHING
GRAPHING = graph
__export_globals__()
interactive_helpers.DEBUG = debug
__show_flower__()
# Let the Python binding know we are at master
context.set_pycompss_context(context.MASTER)
# Then we can import the appropriate start and stop functions from the API
from pycompss.api.api import compss_start
##############################################################
# INITIALIZATION
##############################################################
# TODO: Check that input values are valid
# Initial dictionary with the user defined parameters
all_vars = {'log_level': log_level,
'debug': debug,
'o_c': o_c,
'graph': graph,
'trace': trace,
'monitor': monitor,
'project_xml': project_xml,
'resources_xml': resources_xml,
'summary': summary,
'task_execution': task_execution,
'storage_impl': storage_impl,
'storage_conf': storage_conf,
'streaming_backend': streaming_backend,
'streaming_master_name': streaming_master_name,
'streaming_master_port': streaming_master_port,
'task_count': task_count,
'app_name': app_name,
'uuid': uuid,
'base_log_dir': base_log_dir,
'specific_log_dir': specific_log_dir,
'extrae_cfg': extrae_cfg,
'comm': comm,
'conn': conn,
'master_name': master_name,
'master_port': master_port,
'scheduler': scheduler,
'jvm_workers': jvm_workers,
'cpu_affinity': cpu_affinity,
'gpu_affinity': gpu_affinity,
'fpga_affinity': fpga_affinity,
'fpga_reprogram': fpga_reprogram,
'profile_input': profile_input,
'profile_output': profile_output,
'scheduler_config': scheduler_config,
'external_adaptation': external_adaptation,
'propagate_virtual_environment': propagate_virtual_environment,
'mpi_worker': mpi_worker}
# Prepare the environment
env_vars = prepare_environment(True, o_c, storage_impl,
None, debug, trace, mpi_worker)
all_vars.update(env_vars)
# Update the log level and graph values if monitoring is enabled
monitoring_vars = prepare_loglevel_graph_for_monitoring(monitor,
graph,
debug,
log_level)
all_vars.update(monitoring_vars)
# Check if running in supercomputer and update the variables accordingly
# with the defined in the launcher and exported in environment variables.
if RUNNING_IN_SUPERCOMPUTER:
updated_vars = updated_variables_in_sc()
if verbose:
print("- Overridden project xml with: " +
updated_vars['project_xml'])
print("- Overridden resources xml with: " +
updated_vars['resources_xml'])
print("- Overridden master name with: " +
updated_vars['master_name'])
print("- Overridden master port with: " +
updated_vars['master_port'])
print("- Overridden uuid with: " +
updated_vars['uuid'])
print("- Overridden base log dir with: " +
updated_vars['base_log_dir'])
print("- Overridden specific log dir with: " +
updated_vars['specific_log_dir'])
print("- Overridden storage conf with: " +
updated_vars['storage_conf'])
print("- Overridden log level with: " +
str(updated_vars['log_level']))
print("- Overridden debug with: " +
str(updated_vars['debug']))
print("- Overridden trace with: " +
str(updated_vars['trace']))
all_vars.update(updated_vars)
# Update the tracing environment if set and set the appropriate trace
# integer value
tracing_vars = prepare_tracing_environment(all_vars['trace'],
all_vars['extrae_lib'],
all_vars['ld_library_path'])
all_vars['trace'], all_vars['ld_library_path'] = tracing_vars
# Update the infrastructure variables if necessary
inf_vars = check_infrastructure_variables(all_vars['project_xml'],
all_vars['resources_xml'],
all_vars['compss_home'],
all_vars['app_name'],
all_vars['file_name'],
all_vars['external_adaptation'])
all_vars.update(inf_vars)
# With all this information, create the configuration file for the
# runtime start
create_init_config_file(**all_vars)
##############################################################
# RUNTIME START
##############################################################
print("* - Starting COMPSs runtime... *")
sys.stdout.flush() # Force flush
compss_start()
global LOG_PATH
LOG_PATH = get_log_path()
binding.temp_dir = mkdtemp(prefix='pycompss', dir=LOG_PATH + '/tmpFiles/')
print("* - Log path : " + LOG_PATH)
major_version = all_vars['major_version']
compss_home = all_vars['compss_home']
logger = setup_logger(debug, log_level, major_version,
compss_home, LOG_PATH)
__print_setup__(verbose, all_vars)
logger.debug("--- START ---")
logger.debug("PyCOMPSs Log path: %s" % LOG_PATH)
logger.debug("Starting storage")
global PERSISTENT_STORAGE
PERSISTENT_STORAGE = init_storage(all_vars['storage_conf'], logger)
logger.debug("Starting streaming")
global STREAMING
STREAMING = init_streaming(all_vars['streaming_backend'],
all_vars['streaming_master_name'],
all_vars['streaming_master_port'],
logger)
# MAIN EXECUTION
# let the user write an interactive application
print("* - PyCOMPSs Runtime started... Have fun! *")
print("******************************************************")
def __show_flower__():
"""
Shows the flower and version through stdout.
:return: None
"""
print("******************************************************") # noqa
print("*************** PyCOMPSs Interactive *****************") # noqa
print("******************************************************") # noqa
print("* .-~~-.--. _____ __ *") # noqa
print("* : ) |____ \ / / *") # noqa
print("* .~ ~ -.\ /.- ~~ . ___) | / /__ *") # noqa
print("* > `. .' < / ___/ / _ \ *") # noqa
print("* ( .- -. ) | |___ _ | |_| ) *") # noqa
print("* `- -.-~ `- -' ~-.- -' |_____| |_| \_____/ *") # noqa
print("* ( : ) _ _ .-: *") # noqa
print("* ~--. : .--~ .-~ .-~ } *") # noqa
print("* ~-.-^-.-~ \_ .~ .-~ .~ *") # noqa
print("* \ \ ' \ '_ _ -~ *") # noqa
print("* \`.\`. // *") # noqa
print("* . - ~ ~-.__\`.\`-.// *") # noqa
print("* .-~ . - ~ }~ ~ ~-.~-. *") # noqa
print("* .' .-~ .-~ :/~-.~-./: *") # noqa
print("* /_~_ _ . - ~ ~-.~-._ *") # noqa
print("* ~-.< *") # noqa
print("******************************************************") # noqa
def __print_setup__(verbose, all_vars):
"""
Print the setup variables through stdout (only if verbose is True).
However, it shows them through the logger.
:param verbose: Verbose mode [True | False]
:param all_vars: Dictionary containing all variables.
:return: None
"""
logger = logging.getLogger(__name__)
output = ""
output += "******************************************************\n"
output += " CONFIGURATION: \n"
for k, v in sorted(all_vars.items()):
output += ' - {0:20} : {1} \n'.format(k, v)
output += "******************************************************"
if verbose:
print(output)
logger.debug(output)
def stop(sync=False):
"""
Runtime stop.
:param sync: Scope variables synchronization [ True | False ]
(default: False)
:return: None
"""
from pycompss.api.api import compss_stop
print("****************************************************")
print("*************** STOPPING PyCOMPSs ******************")
print("****************************************************")
logger = logging.getLogger(__name__)
if sync:
sync_msg = "Synchronizing all future objects left on the user scope."
print(sync_msg)
logger.debug(sync_msg)
from pycompss.api.api import compss_wait_on
ipython = globals()['__builtins__']['get_ipython']()
# import pprint
# pprint.pprint(ipython.__dict__, width=1)
raw_code = ipython.__dict__['user_ns']
for k in raw_code:
obj_k = raw_code[k]
if not k.startswith('_'): # not internal objects
if type(obj_k) == binding.Future:
print("Found a future object: %s" % str(k))
logger.debug("Found a future object: %s" % (k,))
ipython.__dict__['user_ns'][k] = compss_wait_on(obj_k)
elif obj_k in pending_to_synchronize.values():
print("Found an object to synchronize: %s" % str(k))
logger.debug("Found an object to synchronize: %s" % (k,))
ipython.__dict__['user_ns'][k] = compss_wait_on(obj_k)
else:
pass
else:
print("Warning: some of the variables used with PyCOMPSs may")
print(" have not been brought to the master.")
if STREAMING:
logger.debug("Stopping streaming")
stop_streaming(logger)
if PERSISTENT_STORAGE:
logger.debug("Stopping persistent storage")
stop_storage()
compss_stop()
__clean_temp_files__()
# Let the Python binding know we are not at master anymore
context.set_pycompss_context(context.OUT_OF_SCOPE)
print("****************************************************")
logger.debug("--- END ---")
# os._exit(00) # Explicit kernel restart # breaks Jupyter-notebook
# --- Execution finished ---
def __show_current_graph__(fit=False):
"""
Show current graph.
:param fit: Fit to width [ True | False ] (default: False)
:return: None
"""
if GRAPHING:
return __show_graph__(name='current_graph', fit=fit)
else:
print('Oops! Graph is not enabled in this execution.')
print(' Please, enable it by setting the graph flag when' +
' starting PyCOMPSs.')
def __show_complete_graph__(fit=False):
"""
Show complete graph.
:param fit: Fit to width [ True | False ] (default: False)
:return: None
"""
if GRAPHING:
return __show_graph__(name='complete_graph', fit=fit)
else:
print('Oops! Graph is not enabled in this execution.')
print(' Please, enable it by setting the graph flag when' +
' starting PyCOMPSs.')
def __show_graph__(name='complete_graph', fit=False):
"""
Show graph.
:param name: Graph to show (default: 'complete_graph')
:param fit: Fit to width [ True | False ] (default: False)
:return: None
"""
try:
from graphviz import Source
except ImportError:
print('Oops! graphviz is not available.')
raise
monitor_file = open(LOG_PATH + '/monitor/' + name + '.dot', 'r')
text = monitor_file.read()
monitor_file.close()
if fit:
try:
# Convert to png and show full picture
filename = LOG_PATH + '/monitor/' + name
extension = 'png'
import os
if os.path.exists(filename + '.' + extension):
os.remove(filename + '.' + extension)
s = Source(text, filename=filename, format=extension)
s.render()
from IPython.display import Image
image = Image(filename=filename + '.' + extension)
return image
except Exception:
print('Oops! Failed rendering the graph.')
raise
else:
return Source(text)
# ########################################################################### #
# ########################################################################### #
# ########################################################################### #
def __export_globals__():
"""
Export globals into interactive environment.
:return: None
"""
# Super ugly, but I see no other way to define the APP_PATH across the
# interactive execution without making the user define it explicitly.
# It is necessary to define only one APP_PATH because the two decorators
# need to access the same information.
# If the file is created per task, the constraint will not be able to work.
# Get ipython globals
ipython = globals()['__builtins__']['get_ipython']()
# import pprint
# pprint.pprint(ipython.__dict__, width=1)
# Extract user globals from ipython
user_globals = ipython.__dict__['ns_table']['user_global']
# Inject APP_PATH variable to user globals so that task and constraint
# decorators can get it.
temp_app_filename = os.getcwd() + '/' + "InteractiveMode_"
temp_app_filename += str(time.strftime('%d%m%y_%H%M%S')) + '.py'
user_globals['APP_PATH'] = temp_app_filename
global APP_PATH
APP_PATH = temp_app_filename
def __clean_temp_files__():
"""
Remove any temporary files that may exist.
Currently: APP_PATH, which contains the file path where all interactive
code required by the worker is.
:return: None
"""
try:
if os.path.exists(APP_PATH):
os.remove(APP_PATH)
if os.path.exists(APP_PATH + 'c'):
os.remove(APP_PATH + 'c')
except OSError:
print("[ERROR] An error has occurred when cleaning temporary files.")
| mF2C/COMPSs | compss/programming_model/bindings/python/src/pycompss/interactive.py | Python | apache-2.0 | 22,419 |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 18:59:02 2017
@author: Administrator
"""
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
from heapq import heappop, heappush
class Solution:
"""
@param lists: a list of ListNode
@return: The head of one sorted list.
"""
def mergeKLists(self, lists):
if not lists:
return None
trav = dummy = ListNode(-1)
heap = []
for ll in lists:
if ll:
self.heappushNode(heap, ll)
while heap:
node = heappop(heap)[-1]
trav.next = node
trav = trav.next
if trav.next:
self.heappushNode(heap, trav.next)
return dummy.next
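# Usage sketch (ListNode is supplied by the judge, as noted in the header
# comment above):
#
#     a = ListNode(1, ListNode(4))
#     b = ListNode(2, ListNode(3))
#     head = Solution().mergeKLists([a, b])
#     # traversing head yields 1 -> 2 -> 3 -> 4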
def heappushNode(self, heap, node):
# id(node) breaks ties between equal values, so ListNode objects are
# never compared directly (which would fail on Python 3).
heappush(heap, (node.val, id(node), node))
def my_mergeKLists(self, lists):
# Alternative approach: fold the lists together one at a time with
# repeated two-way merges.
len_list = len(lists)
if len_list == 0:
return None
ret = lists[0]
for i in range(1, len_list):
if ret is None:
ret = lists[i]
elif lists[i] is None:
continue
else:
ret = self.mergeTwolist(ret, lists[i])
return ret
def mergeTwolist(self, first, second):
ret = ListNode(None)
head = ret
while first and second:
if first.val <= second.val:
ret.val = first.val
ret.next = ListNode(None)
ret = ret.next
first = first.next
if first is None:
while second:
ret.val = second.val
second = second.next
if second:
ret.next = ListNode(None)
ret = ret.next
break
else:
ret.val = second.val
ret.next = ListNode(None)
second = second.next
ret = ret.next
if second is None:
while first:
ret.val = first.val
first = first.next
if first:
ret.next = ListNode(None)
ret = ret.next
break
return head | NanguangChou/leetcode_python | 104 合并K个排序链表.py | Python | apache-2.0 | 2,554 |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
"""
This module contains the execution logic for Workload Automation. It defines the
following actors:
WorkloadSpec: Identifies the workload to be run and defines parameters under
which it should be executed.
Executor: Responsible for the overall execution process. It instantiates
and/or initialises the other actors, does any necessary validation
and kicks off the whole process.
Execution Context: Provides information about the current state of run
execution to instrumentation.
RunInfo: Information about the current run.
Runner: This executes workload specs that are passed to it. It goes through
stages of execution, emitting an appropriate signal at each step to
allow instrumentation to do its stuff.
"""
import os
import uuid
import logging
import subprocess
import random
from copy import copy
from datetime import datetime
from contextlib import contextmanager
from collections import Counter, defaultdict, OrderedDict
from itertools import izip_longest
import wlauto.core.signal as signal
from wlauto.core import instrumentation
from wlauto.core.bootstrap import settings
from wlauto.core.extension import Artifact
from wlauto.core.configuration import RunConfiguration
from wlauto.core.extension_loader import ExtensionLoader
from wlauto.core.resolver import ResourceResolver
from wlauto.core.result import ResultManager, IterationResult, RunResult
from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
DeviceError, DeviceNotRespondingError)
from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, merge_dicts, format_duration
# The maximum number of reboot attempts for an iteration.
MAX_REBOOT_ATTEMPTS = 3
# If something went wrong during device initialization, wait this
# long (in seconds) before retrying. This is necessary, as retrying
# immediately may not give the device enough time to recover to be able
# to reboot.
REBOOT_DELAY = 3
class RunInfo(object):
"""
Information about the current run, such as its unique ID, run
time, etc.
"""
def __init__(self, config):
self.config = config
self.uuid = uuid.uuid4()
self.start_time = None
self.end_time = None
self.duration = None
self.project = config.project
self.project_stage = config.project_stage
self.run_name = config.run_name or "{}_{}".format(os.path.split(settings.output_directory)[1],
datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S"))
self.notes = None
self.device_properties = {}
def to_dict(self):
d = copy(self.__dict__)
d['uuid'] = str(self.uuid)
del d['config']
d = merge_dicts(d, self.config.to_dict())
return d
class ExecutionContext(object):
"""
Provides a context for instrumentation. Keeps track of things like
current workload and iteration.
This class also provides two status members that can be used by workloads
and instrumentation to keep track of arbitrary state. ``result``
is reset on each new iteration of a workload; run_status is maintained
throughout a Workload Automation run.
"""
# These are the artifacts generated by the core framework.
default_run_artifacts = [
Artifact('runlog', 'run.log', 'log', mandatory=True,
description='The log for the entire run.'),
]
@property
def current_iteration(self):
if self.current_job:
spec_id = self.current_job.spec.id
return self.job_iteration_counts[spec_id]
else:
return None
@property
def job_status(self):
if not self.current_job:
return None
return self.current_job.result.status
@property
def workload(self):
return getattr(self.spec, 'workload', None)
@property
def spec(self):
return getattr(self.current_job, 'spec', None)
@property
def result(self):
return getattr(self.current_job, 'result', self.run_result)
def __init__(self, device, config):
self.device = device
self.config = config
self.reboot_policy = config.reboot_policy
self.output_directory = None
self.current_job = None
self.resolver = None
self.last_error = None
self.run_info = None
self.run_result = None
self.run_output_directory = settings.output_directory
self.host_working_directory = settings.meta_directory
self.iteration_artifacts = None
self.run_artifacts = copy(self.default_run_artifacts)
self.job_iteration_counts = defaultdict(int)
self.aborted = False
self.runner = None
if settings.agenda:
self.run_artifacts.append(Artifact('agenda',
os.path.join(self.host_working_directory,
os.path.basename(settings.agenda)),
'meta',
mandatory=True,
description='Agenda for this run.'))
for i, filepath in enumerate(settings.loaded_files, 1):
name = 'config_{}'.format(i)
path = os.path.join(self.host_working_directory,
name + os.path.splitext(filepath)[1])
self.run_artifacts.append(Artifact(name,
path,
kind='meta',
mandatory=True,
description='Config file used for the run.'))
def initialize(self):
if not os.path.isdir(self.run_output_directory):
os.makedirs(self.run_output_directory)
self.output_directory = self.run_output_directory
self.resolver = ResourceResolver(self.config)
self.run_info = RunInfo(self.config)
self.run_result = RunResult(self.run_info, self.run_output_directory)
def next_job(self, job):
"""Invoked by the runner when starting a new iteration of workload execution."""
self.current_job = job
self.job_iteration_counts[self.spec.id] += 1
if not self.aborted:
outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration]))
self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name))
self.iteration_artifacts = [wa for wa in self.workload.artifacts]
self.current_job.result.iteration = self.current_iteration
self.current_job.result.output_directory = self.output_directory
def end_job(self):
if self.current_job.result.status == IterationResult.ABORTED:
self.aborted = True
self.current_job = None
self.output_directory = self.run_output_directory
def add_metric(self, *args, **kwargs):
self.result.add_metric(*args, **kwargs)
def add_classifiers(self, **kwargs):
self.result.classifiers.update(kwargs)
def add_artifact(self, name, path, kind, *args, **kwargs):
if self.current_job is None:
self.add_run_artifact(name, path, kind, *args, **kwargs)
else:
self.add_iteration_artifact(name, path, kind, *args, **kwargs)
def add_run_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.run_output_directory)
self.run_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
def add_iteration_artifact(self, name, path, kind, *args, **kwargs):
path = _check_artifact_path(path, self.output_directory)
self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
def get_artifact(self, name):
if self.iteration_artifacts:
for art in self.iteration_artifacts:
if art.name == name:
return art
for art in self.run_artifacts:
if art.name == name:
return art
return None
def _check_artifact_path(path, rootpath):
if path.startswith(rootpath):
return os.path.abspath(path)
rootpath = os.path.abspath(rootpath)
full_path = os.path.join(rootpath, path)
if not os.path.isfile(full_path):
raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
return full_path
class Executor(object):
"""
The ``Executor``'s job is to set up the execution context and pass it to a ``Runner``
along with a loaded run specification. Once the ``Runner`` has done its thing,
the ``Executor`` performs some final reporting before returning.
The initial context setup involves combining configuration from various sources,
loading of required workloads, loading and installation of instruments and result
processors, etc. Static validation of the combined configuration is also performed.
"""
# pylint: disable=R0915
def __init__(self):
self.logger = logging.getLogger('Executor')
self.error_logged = False
self.warning_logged = False
self.config = None
self.ext_loader = None
self.device = None
self.context = None
def execute(self, agenda, selectors=None): # NOQA
"""
Execute the run specified by an agenda. Optionally, selectors may be used to only
execute a subset of the specified agenda.
Params::
:agenda: an ``Agenda`` instance to be executed.
:selectors: A dict mapping selector name to the corresponding values.
**Selectors**
Currently, the following selectors are supported:
ids
The value must be a sequence of workload specification IDs to be executed. Note
that if sections are specified in the agenda, the workload specification ID will
be a combination of the section and workload IDs.
"""
signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
self.logger.info('Initializing')
self.ext_loader = ExtensionLoader(packages=settings.extension_packages,
paths=settings.extension_paths)
self.logger.debug('Loading run configuration.')
self.config = RunConfiguration(self.ext_loader)
for filepath in settings.get_config_paths():
self.config.load_config(filepath)
self.config.set_agenda(agenda, selectors)
self.config.finalize()
config_outfile = os.path.join(settings.meta_directory, 'run_config.json')
with open(config_outfile, 'w') as wfh:
self.config.serialize(wfh)
self.logger.debug('Initialising device configuration.')
if not self.config.device:
raise ConfigError('Make sure a device is specified in the config.')
self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config)
self.device.validate()
self.context = ExecutionContext(self.device, self.config)
self.logger.debug('Loading resource discoverers.')
self.context.initialize()
self.context.resolver.load()
self.context.add_artifact('run_config', config_outfile, 'meta')
self.logger.debug('Installing instrumentation')
for name, params in self.config.instrumentation.iteritems():
instrument = self.ext_loader.get_instrument(name, self.device, **params)
instrumentation.install(instrument)
instrumentation.validate()
self.logger.debug('Installing result processors')
result_manager = ResultManager()
for name, params in self.config.result_processors.iteritems():
processor = self.ext_loader.get_result_processor(name, **params)
result_manager.install(processor)
result_manager.validate()
self.logger.debug('Loading workload specs')
for workload_spec in self.config.workload_specs:
workload_spec.load(self.device, self.ext_loader)
workload_spec.workload.init_resources(self.context)
workload_spec.workload.validate()
if self.config.flashing_config:
if not self.device.flasher:
msg = 'flashing_config specified for {} device that does not support flashing.'
raise ConfigError(msg.format(self.device.name))
self.logger.debug('Flashing the device')
self.device.flasher.flash(self.device)
self.logger.info('Running workloads')
runner = self._get_runner(result_manager)
runner.init_queue(self.config.workload_specs)
runner.run()
self.execute_postamble()
def execute_postamble(self):
"""
This happens after the run has completed. The overall results of the run are
summarised to the user.
"""
result = self.context.run_result
counter = Counter()
for ir in result.iteration_results:
counter[ir.status] += 1
self.logger.info('Done.')
self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration)))
status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values()))
parts = []
for status in IterationResult.values:
if status in counter:
parts.append('{} {}'.format(counter[status], status))
self.logger.info(status_summary + ', '.join(parts))
self.logger.info('Results can be found in {}'.format(settings.output_directory))
if self.error_logged:
self.logger.warn('There were errors during execution.')
self.logger.warn('Please see {}'.format(settings.log_file))
elif self.warning_logged:
self.logger.warn('There were warnings during execution.')
self.logger.warn('Please see {}'.format(settings.log_file))
def _get_runner(self, result_manager):
if not self.config.execution_order or self.config.execution_order == 'by_iteration':
if self.config.reboot_policy == 'each_spec':
self.logger.info('each_spec reboot policy with the default by_iteration execution order is '
'equivalent to each_iteration policy.')
runnercls = ByIterationRunner
elif self.config.execution_order in ['classic', 'by_spec']:
runnercls = BySpecRunner
elif self.config.execution_order == 'by_section':
runnercls = BySectionRunner
elif self.config.execution_order == 'random':
runnercls = RandomRunner
else:
raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
return runnercls(self.device, self.context, result_manager)
def _error_signalled_callback(self):
self.error_logged = True
signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
def _warning_signalled_callback(self):
self.warning_logged = True
signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
class RunnerJob(object):
"""
Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration
specified by ``RunnerJobDescription.number_of_iterations``.
"""
def __init__(self, spec, retry=0):
self.spec = spec
self.retry = retry
self.iteration = None
self.result = IterationResult(self.spec)
class Runner(object):
"""
This class is responsible for actually performing a workload automation
run. The main responsibility of this class is to emit appropriate signals
at the various stages of the run to allow things like traces and other
instrumentation to hook into the process.
This is an abstract base class that defines each step of the run, but not
the order in which those steps are executed, which is left to the concrete
derived classes.
"""
class _RunnerError(Exception):
"""Internal runner error."""
pass
@property
def config(self):
return self.context.config
@property
def current_job(self):
if self.job_queue:
return self.job_queue[0]
return None
@property
def previous_job(self):
if self.completed_jobs:
return self.completed_jobs[-1]
return None
@property
def next_job(self):
if self.job_queue:
if len(self.job_queue) > 1:
return self.job_queue[1]
return None
@property
def spec_changed(self):
if self.previous_job is None and self.current_job is not None: # Start of run
return True
if self.previous_job is not None and self.current_job is None: # End of run
return True
return self.current_job.spec.id != self.previous_job.spec.id
@property
def spec_will_change(self):
if self.current_job is None and self.next_job is not None: # Start of run
return True
if self.current_job is not None and self.next_job is None: # End of run
return True
return self.current_job.spec.id != self.next_job.spec.id
def __init__(self, device, context, result_manager):
self.device = device
self.context = context
self.result_manager = result_manager
self.logger = logging.getLogger('Runner')
self.job_queue = []
self.completed_jobs = []
self._initial_reset = True
def init_queue(self, specs):
raise NotImplementedError()
def run(self): # pylint: disable=too-many-branches
self._send(signal.RUN_START)
self._initialize_run()
try:
while self.job_queue:
try:
self._init_job()
self._run_job()
except KeyboardInterrupt:
self.current_job.result.status = IterationResult.ABORTED
raise
except Exception, e: # pylint: disable=broad-except
self.current_job.result.status = IterationResult.FAILED
self.current_job.result.add_event(e.message)
if isinstance(e, DeviceNotRespondingError):
self.logger.info('Device appears to be unresponsive.')
if self.context.reboot_policy.can_reboot and self.device.can('reset_power'):
self.logger.info('Attempting to hard-reset the device...')
try:
self.device.boot(hard=True)
self.device.connect()
except DeviceError: # hard_boot not implemented for the device.
raise e
else:
raise e
else: # not a DeviceNotRespondingError
self.logger.error(e)
finally:
self._finalize_job()
except KeyboardInterrupt:
self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).')
# Skip through the remaining jobs.
while self.job_queue:
self.context.next_job(self.current_job)
self.current_job.result.status = IterationResult.ABORTED
self._finalize_job()
except DeviceNotRespondingError:
self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.')
self.context.aborted = True
while self.job_queue:
self.context.next_job(self.current_job)
self.current_job.result.status = IterationResult.SKIPPED
self._finalize_job()
instrumentation.enable_all()
self._finalize_run()
self._process_results()
self.result_manager.finalize(self.context)
self._send(signal.RUN_END)
def _initialize_run(self):
self.context.runner = self
self.context.run_info.start_time = datetime.utcnow()
self._connect_to_device()
self.logger.info('Initializing device')
self.device.initialize(self.context)
self.logger.info('Initializing workloads')
for workload_spec in self.context.config.workload_specs:
workload_spec.workload.initialize(self.context)
props = self.device.get_properties(self.context)
self.context.run_info.device_properties = props
self.result_manager.initialize(self.context)
self._send(signal.RUN_INIT)
if instrumentation.check_failures():
raise InstrumentError('Detected failure(s) during instrumentation initialization.')
def _connect_to_device(self):
if self.context.reboot_policy.perform_initial_boot:
try:
self.device.connect()
except DeviceError: # device may be offline
if self.device.can('reset_power'):
with self._signal_wrap('INITIAL_BOOT'):
self.device.boot(hard=True)
else:
raise DeviceError('Cannot connect to device for initial reboot; '
'and device does not support hard reset.')
else: # successfully connected
self.logger.info('\tBooting device')
with self._signal_wrap('INITIAL_BOOT'):
self._reboot_device()
else:
self.logger.info('Connecting to device')
self.device.connect()
def _init_job(self):
self.current_job.result.status = IterationResult.RUNNING
self.context.next_job(self.current_job)
def _run_job(self): # pylint: disable=too-many-branches
spec = self.current_job.spec
if not spec.enabled:
self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration)
self.current_job.result.status = IterationResult.SKIPPED
return
self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration)
if spec.flash:
if not self.context.reboot_policy.can_reboot:
raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.')
if not self.device.can('flash'):
raise DeviceError('Device does not support flashing.')
self._flash_device(spec.flash)
elif not self.completed_jobs:
# Never reboot on the very first job of a run, as we would have done
# the initial reboot if a reboot was needed.
pass
elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed:
self.logger.debug('Rebooting on spec change.')
self._reboot_device()
elif self.context.reboot_policy.reboot_on_each_iteration:
self.logger.debug('Rebooting on iteration.')
self._reboot_device()
instrumentation.disable_all()
instrumentation.enable(spec.instrumentation)
self.device.start()
if self.spec_changed:
self._send(signal.WORKLOAD_SPEC_START)
self._send(signal.ITERATION_START)
try:
setup_ok = False
with self._handle_errors('Setting up device parameters'):
self.device.set_runtime_parameters(spec.runtime_parameters)
setup_ok = True
if setup_ok:
with self._handle_errors('running {}'.format(spec.workload.name)):
self.current_job.result.status = IterationResult.RUNNING
self._run_workload_iteration(spec.workload)
else:
self.logger.info('\tSkipping the rest of the iterations for this spec.')
spec.enabled = False
except KeyboardInterrupt:
self._send(signal.ITERATION_END)
self._send(signal.WORKLOAD_SPEC_END)
raise
else:
self._send(signal.ITERATION_END)
if self.spec_will_change or not spec.enabled:
self._send(signal.WORKLOAD_SPEC_END)
finally:
self.device.stop()
def _finalize_job(self):
self.context.run_result.iteration_results.append(self.current_job.result)
job = self.job_queue.pop(0)
job.iteration = self.context.current_iteration
if job.result.status in self.config.retry_on_status:
if job.retry >= self.config.max_retries:
self.logger.error('Exceeded maximum number of retries. Abandoning job.')
else:
self.logger.info('Job status was {}. Retrying...'.format(job.result.status))
retry_job = RunnerJob(job.spec, job.retry + 1)
self.job_queue.insert(0, retry_job)
self.completed_jobs.append(job)
self.context.end_job()
def _finalize_run(self):
self.logger.info('Finalizing workloads')
for workload_spec in self.context.config.workload_specs:
workload_spec.workload.finalize(self.context)
self.logger.info('Finalizing.')
self._send(signal.RUN_FIN)
with self._handle_errors('Disconnecting from the device'):
self.device.disconnect()
info = self.context.run_info
info.end_time = datetime.utcnow()
info.duration = info.end_time - info.start_time
def _process_results(self):
self.logger.info('Processing overall results')
with self._signal_wrap('OVERALL_RESULTS_PROCESSING'):
if instrumentation.check_failures():
self.context.run_result.non_iteration_errors = True
self.result_manager.process_run_result(self.context.run_result, self.context)
def _run_workload_iteration(self, workload):
self.logger.info('\tSetting up')
with self._signal_wrap('WORKLOAD_SETUP'):
try:
workload.setup(self.context)
except:
self.logger.info('\tSkipping the rest of the iterations for this spec.')
self.current_job.spec.enabled = False
raise
try:
self.logger.info('\tExecuting')
with self._handle_errors('Running workload'):
with self._signal_wrap('WORKLOAD_EXECUTION'):
workload.run(self.context)
self.logger.info('\tProcessing result')
self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE)
try:
if self.current_job.result.status != IterationResult.FAILED:
with self._handle_errors('Processing workload result',
on_error_status=IterationResult.PARTIAL):
workload.update_result(self.context)
self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE)
if self.current_job.result.status == IterationResult.RUNNING:
self.current_job.result.status = IterationResult.OK
finally:
self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE)
finally:
self.logger.info('\tTearing down')
with self._handle_errors('Tearing down workload',
on_error_status=IterationResult.NONCRITICAL):
with self._signal_wrap('WORKLOAD_TEARDOWN'):
workload.teardown(self.context)
self.result_manager.add_result(self.current_job.result, self.context)
def _flash_device(self, flashing_params):
with self._signal_wrap('FLASHING'):
self.device.flash(**flashing_params)
self.device.connect()
def _reboot_device(self):
with self._signal_wrap('BOOT'):
for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
if reboot_attempts:
self.logger.info('\tRetrying...')
with self._handle_errors('Rebooting device'):
self.device.boot(**self.current_job.spec.boot_parameters)
break
else:
raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
self.device.connect()
def _send(self, s):
signal.send(s, self, self.context)
def _take_screenshot(self, filename):
if self.context.output_directory:
filepath = os.path.join(self.context.output_directory, filename)
else:
filepath = os.path.join(settings.output_directory, filename)
self.device.capture_screen(filepath)
@contextmanager
def _handle_errors(self, action, on_error_status=IterationResult.FAILED):
try:
if action is not None:
self.logger.debug(action)
yield
except (KeyboardInterrupt, DeviceNotRespondingError):
raise
except (WAError, TimeoutError), we:
self.device.ping()
if self.current_job:
self.current_job.result.status = on_error_status
self.current_job.result.add_event(str(we))
try:
self._take_screenshot('error.png')
except Exception, e: # pylint: disable=W0703
# We're already in error state, so the fact that taking a
# screenshot failed is not surprising...
pass
if action:
action = action[0].lower() + action[1:]
self.logger.error('Error while {}:\n\t{}'.format(action, we))
except Exception, e: # pylint: disable=W0703
error_text = '{}("{}")'.format(e.__class__.__name__, e)
if self.current_job:
self.current_job.result.status = on_error_status
self.current_job.result.add_event(error_text)
self.logger.error('Error while {}'.format(action))
self.logger.error(error_text)
if isinstance(e, subprocess.CalledProcessError):
self.logger.error('Got:')
self.logger.error(e.output)
tb = get_traceback()
self.logger.error(tb)
@contextmanager
def _signal_wrap(self, signal_name):
"""Wraps the suite in before/after signals, ensuring
that the after signal is always sent."""
before_signal = getattr(signal, 'BEFORE_' + signal_name)
success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name)
after_signal = getattr(signal, 'AFTER_' + signal_name)
try:
self._send(before_signal)
yield
self._send(success_signal)
finally:
self._send(after_signal)
class BySpecRunner(Runner):
"""
This is the "classic" implementation that executes all iterations of a workload
spec before proceeding onto the next spec.
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] # pylint: disable=unused-variable
self.job_queue = [j for spec_jobs in jobs for j in spec_jobs]
class BySectionRunner(Runner):
"""
Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
If multiple sections were specified in the agenda, this will run all specs for the first section
followed by all specs for the second section, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
class ByIterationRunner(Runner):
"""
Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2, C1, C2...
If multiple sections were specified in the agenda, this will run all sections for the first global
spec first, followed by all sections for the second spec, etc.
e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
"""
def init_queue(self, specs):
sections = OrderedDict()
for s in specs:
if s.section_id not in sections:
sections[s.section_id] = []
sections[s.section_id].append(s)
specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
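# Sketch of the izip_longest interleaving used above, assuming two specs A and
# B with two iterations each: izip_longest([A1, A2], [B1, B2]) yields
# (A1, B1), (A2, B2), which flattens to A1, B1, A2, B2; the trailing 'if j'
# filter drops the None padding inserted when iteration counts differ.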
class RandomRunner(Runner):
"""
This will run specs in a random order.
"""
def init_queue(self, specs):
jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs] # pylint: disable=unused-variable
all_jobs = [j for spec_jobs in jobs for j in spec_jobs]
random.shuffle(all_jobs)
self.job_queue = all_jobs
| chase-qi/workload-automation | wlauto/core/execution.py | Python | apache-2.0 | 34,418 |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
class MiscTests(unittest.TestCase):
def setUp(self):
self.N = 20
self.N_new = 50
self.D = 1
self.X = np.random.uniform(-3., 3., (self.N, 1))
self.Y = np.sin(self.X) + np.random.randn(self.N, self.D) * 0.05
self.X_new = np.random.uniform(-3., 3., (self.N_new, 1))
def test_setXY(self):
m = GPy.models.GPRegression(self.X, self.Y)
m.set_XY(np.vstack([self.X, np.random.rand(1,self.X.shape[1])]), np.vstack([self.Y, np.random.rand(1,self.Y.shape[1])]))
m._trigger_params_changed()
self.assertTrue(m.checkgrad())
m.predict(m.X)
def test_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.GPRegression(self.X, self.Y, kernel=k)
m.randomize()
m.likelihood.variance = .5
Kinv = np.linalg.pinv(k.K(self.X) + np.eye(self.N) * m.likelihood.variance)
K_hat = k.K(self.X_new) - k.K(self.X_new, self.X).dot(Kinv).dot(k.K(self.X, self.X_new))
mu_hat = k.K(self.X_new, self.X).dot(Kinv).dot(m.Y_normalized)
mu, covar = m._raw_predict(self.X_new, full_cov=True)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(covar.shape, (self.N_new, self.N_new))
np.testing.assert_almost_equal(K_hat, covar)
np.testing.assert_almost_equal(mu_hat, mu)
mu, var = m._raw_predict(self.X_new)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(var.shape, (self.N_new, 1))
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
np.testing.assert_almost_equal(mu_hat, mu)
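# For reference, the closed form checked in test_raw_predict above is the
# standard GP predictive posterior (see e.g. Rasmussen & Williams, GPML Ch. 2):
#     mu*  = K(X*, X) [K(X, X) + sigma^2 I]^{-1} y
#     cov* = K(X*, X*) - K(X*, X) [K(X, X) + sigma^2 I]^{-1} K(X, X*)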
def test_normalizer(self):
k = GPy.kern.RBF(1)
Y = self.Y
mu, std = Y.mean(0), Y.std(0)
m = GPy.models.GPRegression(self.X, Y, kernel=k, normalizer=True)
m.optimize()
assert(m.checkgrad())
k = GPy.kern.RBF(1)
m2 = GPy.models.GPRegression(self.X, (Y-mu)/std, kernel=k, normalizer=False)
m2[:] = m[:]
mu1, var1 = m.predict(m.X, full_cov=True)
mu2, var2 = m2.predict(m2.X, full_cov=True)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
mu1, var1 = m.predict(m.X, full_cov=False)
mu2, var2 = m2.predict(m2.X, full_cov=False)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
q50n = m.predict_quantiles(m.X, (50,))
q50 = m2.predict_quantiles(m2.X, (50,))
np.testing.assert_allclose(q50n[0], (q50[0]*std)+mu)
def check_jacobian(self):
try:
import autograd.numpy as np, autograd as ag, GPy, matplotlib.pyplot as plt
from GPy.models import GradientChecker, GPRegression
except:
raise self.skipTest("autograd not available to check gradients")
def k(X, X2, alpha=1., lengthscale=None):
if lengthscale is None:
lengthscale = np.ones(X.shape[1])
exp = 0.
for q in range(X.shape[1]):
exp += ((X[:, [q]] - X2[:, [q]].T)/lengthscale[q])**2
#exp = np.sqrt(exp)
return alpha * np.exp(-.5*exp)
dk = ag.elementwise_grad(lambda x, x2: k(x, x2, alpha=ke.variance.values, lengthscale=ke.lengthscale.values))
dkdk = ag.elementwise_grad(dk, argnum=1)
ke = GPy.kern.RBF(1, ARD=True)
#ke.randomize()
ke.variance = .2#.randomize()
ke.lengthscale[:] = .5
ke.randomize()
X = np.linspace(-1, 1, 1000)[:,None]
X2 = np.array([[0.]]).T
np.testing.assert_allclose(ke.gradients_X([[1.]], X, X), dk(X, X))
np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X).sum(0), dkdk(X, X))
np.testing.assert_allclose(ke.gradients_X([[1.]], X, X2), dk(X, X2))
np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X2).sum(0), dkdk(X, X2))
m = GPRegression(self.X, self.Y)
def f(x):
m.X[:] = x
return m.log_likelihood()
def df(x):
m.X[:] = x
return m.kern.gradients_X(m.grad_dict['dL_dK'], X)
def ddf(x):
m.X[:] = x
return m.kern.gradients_XX(m.grad_dict['dL_dK'], X).sum(0)
gc = GradientChecker(f, df, self.X)
gc2 = GradientChecker(df, ddf, self.X)
assert(gc.checkgrad())
assert(gc2.checkgrad())
def test_sparse_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
m.randomize()
Z = m.Z[:]
# Not easy to check if woodbury_inv is correct in itself as it requires a large derivation and expression
Kinv = m.posterior.woodbury_inv
K_hat = k.K(self.X_new) - k.K(self.X_new, Z).dot(Kinv).dot(k.K(Z, self.X_new))
mu, covar = m._raw_predict(self.X_new, full_cov=True)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(covar.shape, (self.N_new, self.N_new))
np.testing.assert_almost_equal(K_hat, covar)
# np.testing.assert_almost_equal(mu_hat, mu)
mu, var = m._raw_predict(self.X_new)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(var.shape, (self.N_new, 1))
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
# np.testing.assert_almost_equal(mu_hat, mu)
def test_likelihood_replicate(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[''].values()
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m['']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.Gaussian_noise.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m['.*var'] = 2
m2['.*var'] = m['.*var']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
def test_likelihood_set(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
def test_missing_data(self):
from GPy import kern
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.examples.dimensionality_reduction import _simulate_matern
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, False)
Y = Ylist[0]
inan = np.random.binomial(1, .9, size=Y.shape).astype(bool) # ~90% missing data
Ymissing = Y.copy()
Ymissing[inan] = np.nan
k = kern.Linear(Q, ARD=True) + kern.White(Q, np.exp(-2)) # + kern.bias(Q)
m = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
assert(m.checkgrad())
mul, varl = m.predict(m.X)
k = kern.RBF(Q, ARD=True) + kern.White(Q, np.exp(-2)) # + kern.bias(Q)
m2 = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
assert(m2.checkgrad())
m2.kern.rbf.lengthscale[:] = 1e6
m2.X[:] = m.X.param_array
m2.likelihood[:] = m.likelihood[:]
m2.kern.white[:] = m.kern.white[:]
mu, var = m.predict(m.X)
np.testing.assert_allclose(mul, mu)
np.testing.assert_allclose(varl, var)
q50 = m.predict_quantiles(m.X, (50,))
np.testing.assert_allclose(mul, q50[0])
def test_likelihood_replicate_kern(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[''] = m.kern[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[:] = m.kern[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[''] = m.kern['']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[:] = m.kern[''].values()
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
def test_big_model(self):
m = GPy.examples.dimensionality_reduction.mrd_simulation(optimize=0, plot=0, plot_sim=0)
m.X.fix()
print(m)
m.unfix()
m.checkgrad()
print(m)
m.fix()
print(m)
m.inducing_inputs.unfix()
print(m)
m.checkgrad()
m.unfix()
m.checkgrad()
m.checkgrad()
print(m)
def test_model_set_params(self):
m = GPy.models.GPRegression(self.X, self.Y)
lengthscale = np.random.uniform()
m.kern.lengthscale = lengthscale
np.testing.assert_equal(m.kern.lengthscale, lengthscale)
m.kern.lengthscale *= 1
m['.*var'] -= .1
np.testing.assert_equal(m.kern.lengthscale, lengthscale)
m.optimize()
print(m)
def test_model_updates(self):
Y1 = np.random.normal(0, 1, (40, 13))
Y2 = np.random.normal(0, 1, (40, 6))
m = GPy.models.MRD([Y1, Y2], 5)
self.count = 0
m.add_observer(self, self._count_updates, -2000)
m.update_model(False)
m['.*Gaussian'] = .001
self.assertEquals(self.count, 0)
m['.*Gaussian'].constrain_bounded(0,.01)
self.assertEquals(self.count, 0)
m.Z.fix()
self.assertEquals(self.count, 0)
m.update_model(True)
self.assertEquals(self.count, 1)
def _count_updates(self, me, which):
self.count+=1
def test_model_optimize(self):
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
m = GPy.models.GPRegression(X, Y)
m.optimize()
print(m)
class GradientTests(np.testing.TestCase):
def setUp(self):
######################################
# # 1 dimensional example
# sample inputs and outputs
self.X1D = np.random.uniform(-3., 3., (20, 1))
self.Y1D = np.sin(self.X1D) + np.random.randn(20, 1) * 0.05
######################################
# # 2 dimensional example
# sample inputs and outputs
self.X2D = np.random.uniform(-3., 3., (40, 2))
self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(40, 1) * 0.05
def check_model(self, kern, model_type='GPRegression', dimension=1, uncertain_inputs=False):
# Get the correct gradients
if dimension == 1:
X = self.X1D
Y = self.Y1D
else:
X = self.X2D
Y = self.Y2D
# Get model type (GPRegression, SparseGPRegression, etc)
model_fit = getattr(GPy.models, model_type)
# optionally add white noise: kern = kern + GPy.kern.White(dimension)
if uncertain_inputs:
m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
else:
m = model_fit(X, Y, kernel=kern)
m.randomize()
# check the gradients of the randomized model
self.assertTrue(m.checkgrad())
def test_GPRegression_rbf_1d(self):
''' Testing the GP regression with rbf kernel on 1d data '''
rbf = GPy.kern.RBF(1)
self.check_model(rbf, model_type='GPRegression', dimension=1)
def test_GPRegression_rbf_2D(self):
''' Testing the GP regression with rbf kernel on 2d data '''
rbf = GPy.kern.RBF(2)
self.check_model(rbf, model_type='GPRegression', dimension=2)
def test_GPRegression_rbf_ARD_2D(self):
''' Testing the GP regression with rbf ARD kernel on 2d data '''
k = GPy.kern.RBF(2, ARD=True)
self.check_model(k, model_type='GPRegression', dimension=2)
def test_GPRegression_mlp_1d(self):
''' Testing the GP regression with mlp kernel on 1d data '''
mlp = GPy.kern.MLP(1)
self.check_model(mlp, model_type='GPRegression', dimension=1)
# TODO:
# def test_GPRegression_poly_1d(self):
# ''' Testing the GP regression with polynomial kernel with white kernel on 1d data '''
# mlp = GPy.kern.Poly(1, degree=5)
# self.check_model(mlp, model_type='GPRegression', dimension=1)
def test_GPRegression_matern52_1D(self):
''' Testing the GP regression with matern52 kernel on 1d data '''
matern52 = GPy.kern.Matern52(1)
self.check_model(matern52, model_type='GPRegression', dimension=1)
def test_GPRegression_matern52_2D(self):
''' Testing the GP regression with matern52 kernel on 2d data '''
matern52 = GPy.kern.Matern52(2)
self.check_model(matern52, model_type='GPRegression', dimension=2)
def test_GPRegression_matern52_ARD_2D(self):
''' Testing the GP regression with matern52 ARD kernel on 2d data '''
matern52 = GPy.kern.Matern52(2, ARD=True)
self.check_model(matern52, model_type='GPRegression', dimension=2)
def test_GPRegression_matern32_1D(self):
''' Testing the GP regression with matern32 kernel on 1d data '''
matern32 = GPy.kern.Matern32(1)
self.check_model(matern32, model_type='GPRegression', dimension=1)
def test_GPRegression_matern32_2D(self):
''' Testing the GP regression with matern32 kernel on 2d data '''
matern32 = GPy.kern.Matern32(2)
self.check_model(matern32, model_type='GPRegression', dimension=2)
def test_GPRegression_matern32_ARD_2D(self):
''' Testing the GP regression with matern32 ARD kernel on 2d data '''
matern32 = GPy.kern.Matern32(2, ARD=True)
self.check_model(matern32, model_type='GPRegression', dimension=2)
def test_GPRegression_exponential_1D(self):
''' Testing the GP regression with exponential kernel on 1d data '''
exponential = GPy.kern.Exponential(1)
self.check_model(exponential, model_type='GPRegression', dimension=1)
def test_GPRegression_exponential_2D(self):
''' Testing the GP regression with exponential kernel on 2d data '''
exponential = GPy.kern.Exponential(2)
self.check_model(exponential, model_type='GPRegression', dimension=2)
def test_GPRegression_exponential_ARD_2D(self):
''' Testing the GP regression with exponential ARD kernel on 2d data '''
exponential = GPy.kern.Exponential(2, ARD=True)
self.check_model(exponential, model_type='GPRegression', dimension=2)
def test_GPRegression_bias_kern_1D(self):
''' Testing the GP regression with bias kernel on 1d data '''
bias = GPy.kern.Bias(1)
self.check_model(bias, model_type='GPRegression', dimension=1)
def test_GPRegression_bias_kern_2D(self):
''' Testing the GP regression with bias kernel on 2d data '''
bias = GPy.kern.Bias(2)
self.check_model(bias, model_type='GPRegression', dimension=2)
def test_GPRegression_linear_kern_1D_ARD(self):
''' Testing the GP regression with linear ARD kernel on 1d data '''
linear = GPy.kern.Linear(1, ARD=True)
self.check_model(linear, model_type='GPRegression', dimension=1)
def test_GPRegression_linear_kern_2D_ARD(self):
''' Testing the GP regression with linear ARD kernel on 2d data '''
linear = GPy.kern.Linear(2, ARD=True)
self.check_model(linear, model_type='GPRegression', dimension=2)
def test_GPRegression_linear_kern_1D(self):
''' Testing the GP regression with linear kernel on 1d data '''
linear = GPy.kern.Linear(1)
self.check_model(linear, model_type='GPRegression', dimension=1)
def test_GPRegression_linear_kern_2D(self):
''' Testing the GP regression with linear kernel on 2d data '''
linear = GPy.kern.Linear(2)
self.check_model(linear, model_type='GPRegression', dimension=2)
def test_SparseGPRegression_rbf_white_kern_1d(self):
''' Testing the sparse GP regression with rbf kernel on 1d data '''
rbf = GPy.kern.RBF(1)
self.check_model(rbf, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_white_kern_2D(self):
''' Testing the sparse GP regression with rbf kernel on 2d data '''
rbf = GPy.kern.RBF(2)
self.check_model(rbf, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
''' Testing the sparse GP regression with rbf + linear + white kernel on 1d data '''
rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1) + GPy.kern.White(1, 1e-5)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
''' Testing the sparse GP regression with rbf + linear kernel on 2d data '''
rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear kernel on 2d data with uncertain inputs'''
rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2)
raise unittest.SkipTest("This is not implemented yet!")
self.check_model(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1)
def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self):
''' Testing the sparse GP regression with rbf, linear kernel on 1d data with uncertain inputs'''
rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1)
raise unittest.SkipTest("This is not implemented yet!")
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=1)
def test_GPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.GPLVM(Y, input_dim, kernel=k)
self.assertTrue(m.checkgrad())
def test_BCGPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.BCGPLVM(Y, input_dim, kernel=k)
self.assertTrue(m.checkgrad())
def test_GPLVM_rbf_linear_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k)
self.assertTrue(m.checkgrad())
def test_GP_EP_probit(self):
N = 20
X = np.hstack([np.random.normal(5, 2, N // 2), np.random.normal(10, 2, N // 2)])[:, None]
Y = np.hstack([np.ones(N // 2), np.zeros(N // 2)])[:, None]
kernel = GPy.kern.RBF(1)
m = GPy.models.GPClassification(X, Y, kernel=kernel)
self.assertTrue(m.checkgrad())
def test_sparse_EP_DTC_probit(self):
N = 20
X = np.hstack([np.random.normal(5, 2, N // 2), np.random.normal(10, 2, N // 2)])[:, None]
Y = np.hstack([np.ones(N // 2), np.zeros(N // 2)])[:, None]
Z = np.linspace(0, 15, 4)[:, None]
kernel = GPy.kern.RBF(1)
m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, Z=Z)
self.assertTrue(m.checkgrad())
def test_multioutput_regression_1D(self):
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
X = np.vstack((X1, X2))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.RBF(1)
m = GPy.models.GPCoregionalizedRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel=k1)
#import ipdb;ipdb.set_trace()
#m.constrain_fixed('.*rbf_var', 1.)
self.assertTrue(m.checkgrad())
def test_multioutput_sparse_regression_1D(self):
X1 = np.random.rand(500, 1) * 8
X2 = np.random.rand(300, 1) * 5
X = np.vstack((X1, X2))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.RBF(1)
m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel=k1)
self.assertTrue(m.checkgrad())
def test_gp_heteroscedastic_regression(self):
num_obs = 25
X = np.random.randint(0, 140, num_obs)
X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
m = GPy.models.GPHeteroscedasticRegression(X, Y, kern)
self.assertTrue(m.checkgrad())
def test_sparse_gp_heteroscedastic_regression(self):
num_obs = 25
X = np.random.randint(0, 140, num_obs)
X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
Y_metadata = {'output_index':np.arange(num_obs)[:,None]}
noise_terms = np.unique(Y_metadata['output_index'].flatten())
likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for j in noise_terms]
likelihood = GPy.likelihoods.MixedNoise(likelihoods_list=likelihoods_list)
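# One Gaussian noise term per output index: MixedNoise routes each datum to
# its own likelihood via Y_metadata['output_index'].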
m = GPy.core.SparseGP(X, Y, X[np.random.choice(num_obs, 10)],
kern, likelihood,
inference_method=GPy.inference.latent_function_inference.VarDTC(),
Y_metadata=Y_metadata)
self.assertTrue(m.checkgrad())
def test_gp_kronecker_gaussian(self):
np.random.seed(0)
N1, N2 = 30, 20
X1 = np.random.randn(N1, 1)
X2 = np.random.randn(N2, 1)
X1.sort(0); X2.sort(0)
k1 = GPy.kern.RBF(1) # + GPy.kern.White(1)
k2 = GPy.kern.RBF(1) # + GPy.kern.White(1)
Y = np.random.randn(N1, N2)
Y = Y - Y.mean(0)
Y = Y / Y.std(0)
m = GPy.models.GPKroneckerGaussianRegression(X1, X2, Y, k1, k2)
# build the model the dumb way
assert (N1 * N2 < 1000), "too much data for standard GPs!"
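# A GP with the separable kernel k1(x1)*k2(x2) over the full grid X1 x X2 is
# mathematically identical to the Kronecker model, so once the parameters are
# shared the log-likelihoods and gradients must match exactly.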
yy, xx = np.meshgrid(X2, X1)
Xgrid = np.vstack((xx.flatten(order='F'), yy.flatten(order='F'))).T
kg = GPy.kern.RBF(1, active_dims=[0]) * GPy.kern.RBF(1, active_dims=[1])
mm = GPy.models.GPRegression(Xgrid, Y.reshape(-1, 1, order='F'), kernel=kg)
m.randomize()
mm[:] = m[:]
self.assertTrue(np.allclose(m.log_likelihood(), mm.log_likelihood()))
self.assertTrue(np.allclose(m.gradient, mm.gradient))
X1test = np.random.randn(100, 1)
X2test = np.random.randn(100, 1)
mean1, var1 = m.predict(X1test, X2test)
yy, xx = np.meshgrid(X2test, X1test)
Xgrid = np.vstack((xx.flatten(order='F'), yy.flatten(order='F'))).T
mean2, var2 = mm.predict(Xgrid)
self.assertTrue( np.allclose(mean1, mean2) )
self.assertTrue( np.allclose(var1, var2) )
def test_gp_VGPC(self):
num_obs = 25
X = np.random.randint(0, 140, num_obs)
X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.models.GPVariationalGaussianApproximation(X, Y, kernel=kern, likelihood=lik)
self.assertTrue(m.checkgrad())
if __name__ == "__main__":
print("Running unit tests, please be (very) patient...")
unittest.main()
| ptonner/GPy | GPy/testing/model_tests.py | Python | bsd-3-clause | 25,915 |
"""
Django settings for bp_mgmt project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
GIT_DIR = BASE_DIR
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Generate a new secret key by executing this in a Python shell:
# >>> import random, string
# >>> "".join([random.SystemRandom().choice(
# string.digits + string.ascii_letters + string.punctuation
# ) for i in range(50)])
SECRET_KEY = ' '
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
'bp_cupid',
'bp_setup',
'kombu.transport.django',
'djcelery',
'actstream',
'django_ace',
'rest_framework',
'simple_history',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
)
if DEBUG:
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
AUTH_LDAP_URI = ''
AUTH_LDAP_BASE_DN = ''
else:
AUTHENTICATION_BACKENDS = (
'django_auth_ldap3.backends.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
# TODO: change this according to your needs:
AUTH_LDAP_URI = ''
AUTH_LDAP_BASE_DN = ''
SITE_ID = 1
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ROOT_URLCONF = 'bp_mgmt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bp_mgmt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
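# A production-style alternative, for illustration only (the engine is one of
# Django 1.8's built-in backends; name, user and connection details below are
# placeholders, not project defaults):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'bp_mgmt',
#         'USER': 'bp_mgmt',
#         'PASSWORD': '',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }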
# Caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/django_cache',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-MESSAGE_TAGS
from django.contrib.messages import constants as message_constants
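# Render Django's ERROR message level with the 'danger' tag (the Bootstrap
# alert class) so error messages pick up the right styling in templates.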
MESSAGE_TAGS = {
message_constants.ERROR: 'danger'
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
# Celery stuff:
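# The 'django://' broker URL stores the queue in the Django database via
# kombu.transport.django (listed in INSTALLED_APPS above); task results go to
# the database through djcelery's backend.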
BROKER_URL = 'django://'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'level':'INFO',
},
'bp_cupid': {
'handlers': ['console'],
'level': 'DEBUG',
},
'bp_setup': {
'handlers': ['console'],
'level': 'DEBUG',
},
'django_auth_ldap3': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser'
]
}
| nerdoc/bp_mgmt | bp_mgmt/settings.py | Python | agpl-3.0 | 5,012 |
"""Initiazlization file for twopoppy"""
__all__ = ['const', 'model', 'args', 'wrapper', 'model_wrapper']
#
# get version
#
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
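# e.g. after `pip install .`, twopoppy.__version__ reports the installed
# distribution's version; in a bare source checkout the attribute is simply
# left unset.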
from .wrapper import model_wrapper
from .args import args
from . import const
from . import model
from . import wrapper
| birnstiel/two-pop-py | twopoppy/__init__.py | Python | gpl-3.0 | 436 |
import unittest
import zc.buildout.testing
class TestShellBuildout(unittest.TestCase):
def setUp(self):
zc.buildout.testing.buildoutSetUp(self)
zc.buildout.testing.install_develop('yt.recipe.shell', self)
def tearDown(self):
zc.buildout.testing.buildoutTearDown(self)
| toumorokoshi/yt.recipe.shell | yt/recipe/shell_tests.py | Python | mit | 297 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('licenses', '__first__'),
('elos', '0035_elo_description'),
]
operations = [
migrations.AddField(
model_name='elo',
name='license',
field=models.ForeignKey(related_name='elos', blank=True, to='licenses.License', null=True),
),
]
| yrchen/CommonRepo | commonrepo/elos/migrations/0036_elo_license.py | Python | apache-2.0 | 480 |
"""Support for ESPHome binary sensors."""
import logging
from typing import Optional
from aioesphomeapi import BinarySensorInfo, BinarySensorState
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import EsphomeEntity, platform_async_setup_entry
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up ESPHome binary sensors based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="binary_sensor",
info_type=BinarySensorInfo,
entity_type=EsphomeBinarySensor,
state_type=BinarySensorState,
)
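# platform_async_setup_entry is the integration's shared helper: it pairs the
# aioesphomeapi info/state types with the entity class below and adds entities
# as the device reports them.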
class EsphomeBinarySensor(EsphomeEntity, BinarySensorDevice):
"""A binary sensor implementation for ESPHome."""
@property
def _static_info(self) -> BinarySensorInfo:
return super()._static_info
@property
def _state(self) -> Optional[BinarySensorState]:
return super()._state
@property
def is_on(self) -> Optional[bool]:
"""Return true if the binary sensor is on."""
if self._static_info.is_status_binary_sensor:
# Status binary sensors indicate the connected state,
# so in their case what is usually _availability_ is the state itself.
return self._entry_data.available
if self._state is None:
return None
if self._state.missing_state:
return None
return self._state.state
@property
def device_class(self) -> str:
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._static_info.device_class
@property
def available(self) -> bool:
"""Return True if entity is available."""
if self._static_info.is_status_binary_sensor:
return True
return super().available
| qedi-r/home-assistant | homeassistant/components/esphome/binary_sensor.py | Python | apache-2.0 | 1,893 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention-based sequence-to-sequence model with dynamic RNN support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import model
import model_helper
__all__ = ["AttentionModel"]
class AttentionModel(model.Model):
"""Sequence-to-sequence dynamic model with attention.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. It implements the model described in the
(Luong et al., EMNLP 2015) paper: https://arxiv.org/pdf/1508.04025v5.pdf.
This class also allows to use GRU cells in addition to LSTM cells with
support for dropout.
"""
def __init__(self,
hparams,
mode,
features,
scope=None,
extra_args=None):
self.has_attention = hparams.attention_architecture and hparams.attention
# Set attention_mechanism_fn
if self.has_attention:
if extra_args and extra_args.attention_mechanism_fn:
self.attention_mechanism_fn = extra_args.attention_mechanism_fn
else:
self.attention_mechanism_fn = create_attention_mechanism
super(AttentionModel, self).__init__(
hparams=hparams,
mode=mode,
features=features,
scope=scope,
extra_args=extra_args)
def _prepare_beam_search_decoder_inputs(
self, beam_width, memory, source_sequence_length, encoder_state):
memory = tf.contrib.seq2seq.tile_batch(
memory, multiplier=beam_width)
source_sequence_length = tf.contrib.seq2seq.tile_batch(
source_sequence_length, multiplier=beam_width)
encoder_state = tf.contrib.seq2seq.tile_batch(
encoder_state, multiplier=beam_width)
batch_size = self.batch_size * beam_width
return memory, source_sequence_length, encoder_state, batch_size
def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,
source_sequence_length):
"""Build a RNN cell with attention mechanism that can be used by decoder."""
# No Attention
if not self.has_attention:
return super(AttentionModel, self)._build_decoder_cell(
hparams, encoder_outputs, encoder_state, source_sequence_length)
elif hparams.attention_architecture != "standard":
raise ValueError(
"Unknown attention architecture %s" % hparams.attention_architecture)
num_units = hparams.num_units
num_layers = self.num_decoder_layers
num_residual_layers = self.num_decoder_residual_layers
infer_mode = hparams.infer_mode
dtype = self.dtype
# Ensure memory is batch-major
if self.time_major:
memory = tf.transpose(encoder_outputs, [1, 0, 2])
else:
memory = encoder_outputs
if (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode == "beam_search"):
memory, source_sequence_length, encoder_state, batch_size = (
self._prepare_beam_search_decoder_inputs(
hparams.beam_width, memory, source_sequence_length,
encoder_state))
else:
batch_size = self.batch_size
# Attention
attention_mechanism = self.attention_mechanism_fn(
hparams.attention, num_units, memory, source_sequence_length, self.mode)
cell = model_helper.create_rnn_cell(
unit_type=hparams.unit_type,
num_units=num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=hparams.forget_bias,
dropout=hparams.dropout,
mode=self.mode,
single_cell_fn=self.single_cell_fn,
global_step=self.global_step)
# Only generate alignment in greedy INFER mode.
alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER and
infer_mode != "beam_search")
cell = tf.contrib.seq2seq.AttentionWrapper(
cell,
attention_mechanism,
attention_layer_size=num_units,
alignment_history=alignment_history,
output_attention=hparams.output_attention,
name="attention")
if hparams.pass_hidden_state:
decoder_initial_state = cell.zero_state(batch_size, dtype).clone(
cell_state=encoder_state)
else:
decoder_initial_state = cell.zero_state(batch_size, dtype)
return cell, decoder_initial_state
def create_attention_mechanism(attention_option, num_units, memory,
source_sequence_length, mode):
"""Create attention mechanism based on the attention_option."""
del mode # unused
score_mask_value = tf.convert_to_tensor(
tf.as_dtype(memory.dtype).as_numpy_dtype(-np.inf))
# Mechanism
if attention_option == "luong":
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units, memory, memory_sequence_length=source_sequence_length,
score_mask_value=score_mask_value)
elif attention_option == "scaled_luong":
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units,
memory,
memory_sequence_length=source_sequence_length,
score_mask_value=score_mask_value,
scale=True)
elif attention_option == "bahdanau":
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units, memory, memory_sequence_length=source_sequence_length,
score_mask_value=score_mask_value)
elif attention_option == "normed_bahdanau":
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
num_units,
memory,
memory_sequence_length=source_sequence_length,
score_mask_value=score_mask_value,
normalize=True,
dtype=memory.dtype)
else:
raise ValueError("Unknown attention option %s" % attention_option)
return attention_mechanism
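# Illustrative check of create_attention_mechanism, kept commented out (it
# assumes a TF 1.x install where tf.contrib is available, matching the imports
# above), showing the expected call signature and tensor shapes:
#
#   memory = tf.zeros([4, 7, 16])        # [batch, src_len, num_units]
#   lengths = tf.constant([7, 5, 6, 7])  # true source length per batch row
#   mech = create_attention_mechanism(
#       "scaled_luong", 16, memory, lengths, tf.contrib.learn.ModeKeys.TRAIN)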
| mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/staging/models/rough/nmt/attention_model.py | Python | apache-2.0 | 6,486 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os, gc
import sys
from . import common
from time import time, sleep
from proton import *
from .common import pump, Skipped
from proton.reactor import Reactor
from proton._compat import str2bin
# older versions of gc do not provide the garbage list
if not hasattr(gc, "garbage"):
gc.garbage=[]
# future test areas
# + different permutations of setup
# - creating deliveries and calling input/output before opening the session/link
# + shrinking output_size down to something small? should the engine buffer?
# + resuming
# - locally and remotely created deliveries with the same tag
# Jython 2.5 needs this:
try:
bytes()
except:
bytes = str
# and this...
try:
bytearray()
except:
def bytearray(x):
return str2bin('\x00') * x
OUTPUT_SIZE = 10*1024
class Test(common.Test):
def __init__(self, *args):
common.Test.__init__(self, *args)
self._wires = []
def connection(self):
c1 = Connection()
c2 = Connection()
t1 = Transport()
t1.bind(c1)
t2 = Transport()
t2.bind(c2)
self._wires.append((c1, t1, c2, t2))
mask1 = 0
mask2 = 0
for cat in ("TRACE_FRM", "TRACE_RAW"):
trc = os.environ.get("PN_%s" % cat)
if trc and trc.lower() in ("1", "2", "yes", "true"):
mask1 = mask1 | getattr(Transport, cat)
if trc == "2":
mask2 = mask2 | getattr(Transport, cat)
t1.trace(mask1)
t2.trace(mask2)
return c1, c2
def link(self, name, max_frame=None, idle_timeout=None):
c1, c2 = self.connection()
if max_frame:
c1.transport.max_frame_size = max_frame[0]
c2.transport.max_frame_size = max_frame[1]
if idle_timeout:
# idle_timeout in seconds expressed as float
c1.transport.idle_timeout = idle_timeout[0]
c2.transport.idle_timeout = idle_timeout[1]
c1.open()
c2.open()
ssn1 = c1.session()
ssn1.open()
self.pump()
ssn2 = c2.session_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
ssn2.open()
self.pump()
snd = ssn1.sender(name)
rcv = ssn2.receiver(name)
return snd, rcv
def cleanup(self):
self._wires = []
def pump(self, buffer_size=OUTPUT_SIZE):
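# Shuttle pending bytes between each bound transport pair in both
# directions, simulating the network in-process.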
for c1, t1, c2, t2 in self._wires:
pump(t1, t2, buffer_size)
class ConnectionTest(Test):
def setup(self):
gc.enable()
self.c1, self.c2 = self.connection()
def cleanup(self):
# release resources created by this class
super(ConnectionTest, self).cleanup()
self.c1 = None
self.c2 = None
def teardown(self):
self.cleanup()
gc.collect()
assert not gc.garbage
def test_open_close(self):
assert self.c1.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.c1.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.c1.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
self.c2.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.c2.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_simultaneous_open_close(self):
assert self.c1.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.c1.open()
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.c1.close()
self.c2.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.c2.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_capabilities(self):
self.c1.offered_capabilities = Array(UNDESCRIBED, Data.SYMBOL,
symbol("O_one"),
symbol("O_two"),
symbol("O_three"))
self.c1.desired_capabilities = Array(UNDESCRIBED, Data.SYMBOL,
symbol("D_one"),
symbol("D_two"),
symbol("D_three"))
self.c1.open()
assert self.c2.remote_offered_capabilities is None
assert self.c2.remote_desired_capabilities is None
self.pump()
assert self.c2.remote_offered_capabilities == self.c1.offered_capabilities, \
(self.c2.remote_offered_capabilities, self.c1.offered_capabilities)
assert self.c2.remote_desired_capabilities == self.c1.desired_capabilities, \
(self.c2.remote_desired_capabilities, self.c1.desired_capabilities)
def test_condition(self):
self.c1.open()
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
self.c1.condition = cond
self.c1.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
rcond = self.c2.remote_condition
assert rcond == cond, (rcond, cond)
def test_properties(self, p1={symbol("key"): symbol("value")}, p2=None):
self.c1.properties = p1
self.c2.properties = p2
self.c1.open()
self.c2.open()
self.pump()
assert self.c2.remote_properties == p1, (self.c2.remote_properties, p1)
assert self.c1.remote_properties == p2, (self.c2.remote_properties, p2)
# The proton implementation limits channel_max to 32767.
# If I set the application's limit lower than that, I should
# get my wish. If I set it higher -- not.
def test_channel_max_low(self, value=1234):
self.c1.transport.channel_max = value
self.c1.open()
self.pump()
assert self.c1.transport.channel_max == value, (self.c1.transport.channel_max, value)
def test_channel_max_high(self, value=65535):
self.c1.transport.channel_max = value
self.c1.open()
self.pump()
if "java" in sys.platform:
assert self.c1.transport.channel_max == 65535, (self.c1.transport.channel_max, value)
else:
assert self.c1.transport.channel_max == 32767, (self.c1.transport.channel_max, value)
def test_channel_max_raise_and_lower(self):
if "java" in sys.platform:
upper_limit = 65535
else:
upper_limit = 32767
# It's OK to lower the max below upper_limit.
self.c1.transport.channel_max = 12345
assert self.c1.transport.channel_max == 12345
# But it won't let us raise the limit above PN_IMPL_CHANNEL_MAX.
self.c1.transport.channel_max = 65535
assert self.c1.transport.channel_max == upper_limit
# send the OPEN frame
self.c1.open()
self.pump()
# Now it's too late to make any change, because
# we have already sent the OPEN frame.
try:
self.c1.transport.channel_max = 666
assert False, "expected session exception"
except:
pass
assert self.c1.transport.channel_max == upper_limit
def test_channel_max_limits_sessions(self):
return  # disabled: the assertions below are currently skipped
# This is an index -- so max number of channels should be 1.
self.c1.transport.channel_max = 0
self.c1.open()
self.c2.open()
ssn_0 = self.c2.session()
assert ssn_0 != None
ssn_0.open()
self.pump()
try:
ssn_1 = self.c2.session()
assert False, "expected session exception"
except SessionException:
pass
def test_cleanup(self):
self.c1.open()
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
t1 = self.c1.transport
t2 = self.c2.transport
c2 = self.c2
self.c1.close()
# release all references to C1, except that held by the transport
self.cleanup()
gc.collect()
# transport should flush last state from C1:
pump(t1, t2)
assert c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
def test_user_config(self):
if "java" in sys.platform:
raise Skipped("Unsupported API")
self.c1.user = "vindaloo"
self.c1.password = "secret"
self.c1.open()
self.pump()
self.c2.user = "leela"
self.c2.password = "trustno1"
self.c2.open()
self.pump()
assert self.c1.user == "vindaloo", self.c1.user
assert self.c1.password == None, self.c1.password
assert self.c2.user == "leela", self.c2.user
assert self.c2.password == None, self.c2.password
class SessionTest(Test):
def setup(self):
gc.enable()
self.c1, self.c2 = self.connection()
self.ssn = self.c1.session()
self.c1.open()
self.c2.open()
def cleanup(self):
# release resources created by this class
super(SessionTest, self).cleanup()
self.c1 = None
self.c2 = None
self.ssn = None
def teardown(self):
self.cleanup()
gc.collect()
assert not gc.garbage
def test_open_close(self):
assert self.ssn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.ssn.open()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
self.pump()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
assert ssn != None
assert ssn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
ssn.open()
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
self.pump()
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
ssn.close()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.pump()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
self.ssn.close()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
self.pump()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_simultaneous_close(self):
self.ssn.open()
self.pump()
ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
assert ssn != None
ssn.open()
self.pump()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.ssn.close()
ssn.close()
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_closing_connection(self):
self.ssn.open()
self.pump()
self.c1.close()
self.pump()
self.ssn.close()
self.pump()
def test_condition(self):
self.ssn.open()
self.pump()
ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
assert ssn != None
ssn.open()
self.pump()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
self.ssn.condition = cond
self.ssn.close()
self.pump()
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
rcond = ssn.remote_condition
assert rcond == cond, (rcond, cond)
def test_cleanup(self):
snd, rcv = self.link("test-link")
snd.open()
rcv.open()
self.pump()
snd_ssn = snd.session
rcv_ssn = rcv.session
assert rcv_ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.ssn = None
snd_ssn.close()
snd_ssn.free()
del snd_ssn
gc.collect()
self.pump()
assert rcv_ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
def test_reopen_on_same_session_without_free(self):
"""
confirm that a link is correctly opened when attaching to a previously
closed link *that has not been freed yet* on the same session
"""
self.ssn.open()
self.pump()
ssn2 = self.c2.session_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
ssn2.open()
self.pump()
snd = self.ssn.sender("test-link")
rcv = ssn2.receiver("test-link")
assert snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
snd.open()
rcv.open()
self.pump()
assert snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
snd.close()
rcv.close()
self.pump()
assert snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
snd = self.ssn.sender("test-link")
rcv = ssn2.receiver("test-link")
assert snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
snd.open()
rcv.open()
self.pump()
assert snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
def test_set_get_outgoing_window(self):
assert self.ssn.outgoing_window == 2147483647
self.ssn.outgoing_window = 1024
assert self.ssn.outgoing_window == 1024
class LinkTest(Test):
def setup(self):
gc.enable()
self.snd, self.rcv = self.link("test-link")
def cleanup(self):
# release resources created by this class
super(LinkTest, self).cleanup()
self.snd = None
self.rcv = None
def teardown(self):
self.cleanup()
gc.collect()
assert not gc.garbage, gc.garbage
def test_open_close(self):
assert self.snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.snd.open()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
self.rcv.open()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.snd.close()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
self.rcv.close()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_simultaneous_open_close(self):
assert self.snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.snd.open()
self.rcv.open()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.snd.close()
self.rcv.close()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_multiple(self):
rcv = self.snd.session.receiver("second-rcv")
assert rcv.name == "second-rcv"
self.snd.open()
rcv.open()
self.pump()
c2 = self.rcv.session.connection
l = c2.link_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
while l:
l.open()
l = l.next(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
self.pump()
assert self.snd
assert rcv
self.snd.close()
rcv.close()
ssn = rcv.session
conn = ssn.connection
ssn.close()
conn.close()
self.pump()
def test_closing_session(self):
self.snd.open()
self.rcv.open()
ssn1 = self.snd.session
self.pump()
ssn1.close()
self.pump()
self.snd.close()
self.pump()
def test_closing_connection(self):
self.snd.open()
self.rcv.open()
ssn1 = self.snd.session
c1 = ssn1.connection
self.pump()
c1.close()
self.pump()
self.snd.close()
self.pump()
def assertEqualTermini(self, t1, t2):
assert t1.type == t2.type, (t1.type, t2.type)
assert t1.address == t2.address, (t1.address, t2.address)
assert t1.durability == t2.durability, (t1.durability, t2.durability)
assert t1.expiry_policy == t2.expiry_policy, (t1.expiry_policy, t2.expiry_policy)
assert t1.timeout == t2.timeout, (t1.timeout, t2.timeout)
assert t1.dynamic == t2.dynamic, (t1.dynamic, t2.dynamic)
for attr in ["properties", "capabilities", "outcomes", "filter"]:
d1 = getattr(t1, attr)
d2 = getattr(t2, attr)
assert d1.format() == d2.format(), (attr, d1.format(), d2.format())
def _test_source_target(self, config_source, config_target):
if config_source is None:
self.snd.source.type = Terminus.UNSPECIFIED
else:
config_source(self.snd.source)
if config_target is None:
self.snd.target.type = Terminus.UNSPECIFIED
else:
config_target(self.snd.target)
self.snd.open()
self.pump()
self.assertEqualTermini(self.rcv.remote_source, self.snd.source)
self.assertEqualTermini(self.rcv.remote_target, self.snd.target)
self.rcv.target.copy(self.rcv.remote_target)
self.rcv.source.copy(self.rcv.remote_source)
self.rcv.open()
self.pump()
self.assertEqualTermini(self.snd.remote_target, self.snd.target)
self.assertEqualTermini(self.snd.remote_source, self.snd.source)
def test_source_target(self):
self._test_source_target(TerminusConfig(address="source"),
TerminusConfig(address="target"))
def test_source(self):
self._test_source_target(TerminusConfig(address="source"), None)
def test_target(self):
self._test_source_target(None, TerminusConfig(address="target"))
def test_coordinator(self):
self._test_source_target(None, TerminusConfig(type=Terminus.COORDINATOR))
def test_source_target_full(self):
self._test_source_target(TerminusConfig(address="source",
timeout=3,
dist_mode=Terminus.DIST_MODE_MOVE,
filter=[("int", 1), ("symbol", "two"), ("string", "three")],
capabilities=["one", "two", "three"]),
TerminusConfig(address="source",
timeout=7,
capabilities=[]))
def test_distribution_mode(self):
self._test_source_target(TerminusConfig(address="source",
dist_mode=Terminus.DIST_MODE_COPY),
TerminusConfig(address="target"))
assert self.rcv.remote_source.distribution_mode == Terminus.DIST_MODE_COPY
assert self.rcv.remote_target.distribution_mode == Terminus.DIST_MODE_UNSPECIFIED
def test_dynamic_link(self):
self._test_source_target(TerminusConfig(address=None, dynamic=True), None)
assert self.rcv.remote_source.dynamic
assert self.rcv.remote_source.address is None
def test_condition(self):
self.snd.open()
self.rcv.open()
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
self.snd.condition = cond
self.snd.close()
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
rcond = self.rcv.remote_condition
assert rcond == cond, (rcond, cond)
def test_settle_mode(self):
self.snd.snd_settle_mode = Link.SND_UNSETTLED
assert self.snd.snd_settle_mode == Link.SND_UNSETTLED
self.rcv.rcv_settle_mode = Link.RCV_SECOND
assert self.rcv.rcv_settle_mode == Link.RCV_SECOND
assert self.snd.remote_rcv_settle_mode != Link.RCV_SECOND
assert self.rcv.remote_snd_settle_mode != Link.SND_UNSETTLED
self.snd.open()
self.rcv.open()
self.pump()
assert self.snd.remote_rcv_settle_mode == Link.RCV_SECOND
assert self.rcv.remote_snd_settle_mode == Link.SND_UNSETTLED
def test_cleanup(self):
snd, rcv = self.link("test-link")
snd.open()
rcv.open()
self.pump()
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
snd.close()
snd.free()
del snd
gc.collect()
self.pump()
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
class TerminusConfig:
def __init__(self, type=None, address=None, timeout=None, durability=None,
filter=None, capabilities=None, dynamic=False, dist_mode=None):
self.address = address
self.timeout = timeout
self.durability = durability
self.filter = filter
self.capabilities = capabilities
self.dynamic = dynamic
self.dist_mode = dist_mode
self.type = type
def __call__(self, terminus):
if self.type is not None:
terminus.type = self.type
if self.address is not None:
terminus.address = self.address
if self.timeout is not None:
terminus.timeout = self.timeout
if self.durability is not None:
terminus.durability = self.durability
if self.capabilities is not None:
terminus.capabilities.put_array(False, Data.SYMBOL)
terminus.capabilities.enter()
for c in self.capabilities:
terminus.capabilities.put_symbol(c)
if self.filter is not None:
terminus.filter.put_map()
terminus.filter.enter()
for (t, v) in self.filter:
setter = getattr(terminus.filter, "put_%s" % t)
setter(v)
if self.dynamic:
terminus.dynamic = True
if self.dist_mode is not None:
terminus.distribution_mode = self.dist_mode
class TransferTest(Test):
def setup(self):
gc.enable()
self.snd, self.rcv = self.link("test-link")
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
def cleanup(self):
# release resources created by this class
super(TransferTest, self).cleanup()
self.c1 = None
self.c2 = None
self.snd = None
self.rcv = None
def teardown(self):
self.cleanup()
gc.collect()
assert not gc.garbage
def test_work_queue(self):
assert self.c1.work_head is None
self.snd.delivery("tag")
assert self.c1.work_head is None
self.rcv.flow(1)
self.pump()
d = self.c1.work_head
assert d is not None
tag = d.tag
assert tag == "tag", tag
assert d.writable
n = self.snd.send(str2bin("this is a test"))
assert self.snd.advance()
assert self.c1.work_head is None
self.pump()
d = self.c2.work_head
assert d.tag == "tag"
assert d.readable
def test_multiframe(self):
self.rcv.flow(1)
self.snd.delivery("tag")
msg = str2bin("this is a test")
n = self.snd.send(msg)
assert n == len(msg)
self.pump()
d = self.rcv.current
assert d
assert d.tag == "tag", repr(d.tag)
assert d.readable
binary = self.rcv.recv(1024)
assert binary == msg, (binary, msg)
binary = self.rcv.recv(1024)
assert binary == str2bin("")
msg = str2bin("this is more")
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1024)
assert binary == msg, (binary, msg)
binary = self.rcv.recv(1024)
assert binary is None
def test_disposition(self):
self.rcv.flow(1)
self.pump()
sd = self.snd.delivery("tag")
msg = str2bin("this is a test")
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
rd = self.rcv.current
assert rd is not None
assert rd.tag == sd.tag
rmsg = self.rcv.recv(1024)
assert rmsg == msg
rd.update(Delivery.ACCEPTED)
self.pump()
rdisp = sd.remote_state
ldisp = rd.local_state
assert rdisp == ldisp == Delivery.ACCEPTED, (rdisp, ldisp)
assert sd.updated
sd.update(Delivery.ACCEPTED)
self.pump()
assert sd.local_state == rd.remote_state == Delivery.ACCEPTED
sd.settle()
def test_delivery_id_ordering(self):
self.rcv.flow(1024)
self.pump(buffer_size=64*1024)
# fill up the delivery buffer on the sender
for m in range(1024):
sd = self.snd.delivery("tag%s" % m)
msg = ("message %s" % m).encode('ascii')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump(buffer_size=64*1024)
# receive a session-window's worth of messages and accept them
for m in range(1024):
rd = self.rcv.current
assert rd is not None, m
assert rd.tag == ("tag%s" % m), (rd.tag, m)
msg = self.rcv.recv(1024)
assert msg == ("message %s" % m).encode('ascii'), (msg, m)
rd.update(Delivery.ACCEPTED)
rd.settle()
self.pump(buffer_size=64*1024)
# add some new deliveries
for m in range(1024, 1450):
sd = self.snd.delivery("tag%s" % m)
msg = ("message %s" % m).encode('ascii')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
# handle all disposition changes to sent messages
d = self.c1.work_head
while d:
next_d = d.work_next
if d.updated:
d.update(Delivery.ACCEPTED)
d.settle()
d = next_d
# submit some more deliveries
for m in range(1450, 1500):
sd = self.snd.delivery("tag%s" % m)
msg = ("message %s" % m).encode('ascii')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump(buffer_size=64*1024)
self.rcv.flow(1024)
self.pump(buffer_size=64*1024)
# verify the remaining messages can be received and accepted
for m in range(1024, 1500):
rd = self.rcv.current
assert rd is not None, m
assert rd.tag == ("tag%s" % m), (rd.tag, m)
msg = self.rcv.recv(1024)
assert msg == ("message %s" % m).encode('ascii'), (msg, m)
rd.update(Delivery.ACCEPTED)
rd.settle()
def test_cleanup(self):
self.rcv.flow(10)
self.pump()
for x in range(10):
self.snd.delivery("tag%d" % x)
msg = str2bin("this is a test")
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.snd.close()
self.snd.free()
self.snd = None
gc.collect()
self.pump()
for x in range(10):
rd = self.rcv.current
assert rd is not None
assert rd.tag == "tag%d" % x
rmsg = self.rcv.recv(1024)
assert self.rcv.advance()
assert rmsg == msg
# close of snd should've settled:
assert rd.settled
rd.settle()
class MaxFrameTransferTest(Test):
def setup(self):
pass
def cleanup(self):
# release resources created by this class
super(MaxFrameTransferTest, self).cleanup()
self.c1 = None
self.c2 = None
self.snd = None
self.rcv = None
def teardown(self):
self.cleanup()
def message(self, size):
parts = []
for i in range(size):
parts.append(str(i))
return "/".join(parts)[:size].encode("utf-8")
def testMinFrame(self):
"""
Configure receiver to support minimum max-frame as defined by AMQP-1.0.
Verify transfer of messages larger than 512.
"""
self.snd, self.rcv = self.link("test-link", max_frame=[0,512])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
assert self.rcv.session.connection.transport.max_frame_size == 512
assert self.snd.session.connection.transport.remote_max_frame_size == 512
self.rcv.flow(1)
self.snd.delivery("tag")
msg = self.message(513)
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(513)
assert binary == msg
binary = self.rcv.recv(1024)
assert binary == None
def testOddFrame(self):
"""
Test an odd-sized max-frame limit with data that requires multiple frames
to be transferred.
"""
self.snd, self.rcv = self.link("test-link", max_frame=[0,521])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
assert self.rcv.session.connection.transport.max_frame_size == 521
assert self.snd.session.connection.transport.remote_max_frame_size == 521
self.rcv.flow(2)
self.snd.delivery("tag")
msg = ("X" * 1699).encode('utf-8')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1699)
assert binary == msg
binary = self.rcv.recv(1024)
assert binary == None
self.rcv.advance()
self.snd.delivery("gat")
msg = self.message(1426)
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1426)
assert binary == msg
self.pump()
binary = self.rcv.recv(1024)
assert binary == None
def testBigMessage(self):
"""
Test transferring a big message.
"""
self.snd, self.rcv = self.link("test-link")
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
self.rcv.flow(2)
self.snd.delivery("tag")
msg = self.message(1024*256)
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1024*256)
assert binary == msg
binary = self.rcv.recv(1024)
assert binary == None
class IdleTimeoutTest(Test):
def setup(self):
pass
def cleanup(self):
# release resources created by this class
super(IdleTimeoutTest, self).cleanup()
self.snd = None
self.rcv = None
self.c1 = None
self.c2 = None
def teardown(self):
self.cleanup()
def message(self, size):
parts = []
for i in range(size):
parts.append(str(i))
return "/".join(parts)[:size]
def testGetSet(self):
"""
Verify the configuration and negotiation of the idle timeout.
"""
self.snd, self.rcv = self.link("test-link", idle_timeout=[1.0,2.0])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
# proton advertises 1/2 the configured timeout to the peer:
assert self.rcv.session.connection.transport.idle_timeout == 2.0
assert self.rcv.session.connection.transport.remote_idle_timeout == 0.5
assert self.snd.session.connection.transport.idle_timeout == 1.0
assert self.snd.session.connection.transport.remote_idle_timeout == 1.0
def testTimeout(self):
"""
Verify the AMQP Connection idle timeout.
"""
# snd will time out the connection if no frame is received within 1.0s
self.snd, self.rcv = self.link("test-link", idle_timeout=[1.0,0])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
t_snd = self.snd.session.connection.transport
t_rcv = self.rcv.session.connection.transport
assert t_rcv.idle_timeout == 0.0
# proton advertises 1/2 the timeout (see spec)
assert t_rcv.remote_idle_timeout == 0.5
assert t_snd.idle_timeout == 1.0
assert t_snd.remote_idle_timeout == 0.0
sndr_frames_in = t_snd.frames_input
rcvr_frames_out = t_rcv.frames_output
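# The tick arithmetic below: snd times the peer out 1.0s after the last
# received frame, while rcv, having seen snd advertise 0.5s, schedules a
# keepalive every 0.25s.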
# at t+1msec, nothing should happen:
clock = 0.001
assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.251, "deadline to send keepalive"
self.pump()
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
# at one tick from expected idle frame send, nothing should happen:
clock = 0.250
assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.251, "deadline to send keepalive"
self.pump()
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
# this should cause rcvr to expire and send a keepalive
clock = 0.251
assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.501, "deadline to send keepalive"
self.pump()
sndr_frames_in += 1
rcvr_frames_out += 1
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
assert rcvr_frames_out == t_rcv.frames_output, "unexpected frame"
# since a keepalive was received, sndr will rebase its clock against this tick:
# and the receiver should not change its deadline
clock = 0.498
assert t_snd.tick(clock) == 1.498, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.501, "deadline to send keepalive"
self.pump()
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
# now expire sndr
clock = 1.499
t_snd.tick(clock)
self.pump()
assert self.c2.state & Endpoint.REMOTE_CLOSED
assert self.c2.remote_condition.name == "amqp:resource-limit-exceeded"
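  # A note on the deadline arithmetic above (derived from the asserts, not
  # from transport internals): snd's local deadline is always
  # last-received-time + 1.0 sec (1.001, then 1.498 after rebasing at
  # t=0.498), while rcv schedules a keepalive every 0.25 sec -- half of the
  # 0.5 sec that snd advertised -- hence the 0.251 and 0.501 deadlines.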
class CreditTest(Test):
def setup(self):
self.snd, self.rcv = self.link("test-link", max_frame=(16*1024, 16*1024))
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
def cleanup(self):
# release resources created by this class
super(CreditTest, self).cleanup()
self.c1 = None
self.snd = None
self.c2 = None
self.rcv2 = None
self.snd2 = None
def teardown(self):
self.cleanup()
def testCreditSender(self, count=1024):
credit = self.snd.credit
assert credit == 0, credit
self.rcv.flow(10)
self.pump()
credit = self.snd.credit
assert credit == 10, credit
self.rcv.flow(count)
self.pump()
credit = self.snd.credit
assert credit == 10 + count, credit
def testCreditReceiver(self):
self.rcv.flow(10)
self.pump()
assert self.rcv.credit == 10, self.rcv.credit
d = self.snd.delivery("tag")
assert d
assert self.snd.advance()
self.pump()
assert self.rcv.credit == 10, self.rcv.credit
assert self.rcv.queued == 1, self.rcv.queued
c = self.rcv.current
assert c.tag == "tag", c.tag
assert self.rcv.advance()
assert self.rcv.credit == 9, self.rcv.credit
assert self.rcv.queued == 0, self.rcv.queued
def _testBufferingOnClose(self, a, b):
for i in range(10):
d = self.snd.delivery("tag-%s" % i)
assert d
d.settle()
self.pump()
assert self.snd.queued == 10
endpoints = {"connection": (self.c1, self.c2),
"session": (self.snd.session, self.rcv.session),
"link": (self.snd, self.rcv)}
local_a, remote_a = endpoints[a]
local_b, remote_b = endpoints[b]
remote_b.close()
self.pump()
assert local_b.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
local_a.close()
self.pump()
assert remote_a.state & Endpoint.REMOTE_CLOSED
assert self.snd.queued == 10
def testBufferingOnCloseLinkLink(self):
self._testBufferingOnClose("link", "link")
def testBufferingOnCloseLinkSession(self):
self._testBufferingOnClose("link", "session")
def testBufferingOnCloseLinkConnection(self):
self._testBufferingOnClose("link", "connection")
def testBufferingOnCloseSessionLink(self):
self._testBufferingOnClose("session", "link")
def testBufferingOnCloseSessionSession(self):
self._testBufferingOnClose("session", "session")
def testBufferingOnCloseSessionConnection(self):
self._testBufferingOnClose("session", "connection")
def testBufferingOnCloseConnectionLink(self):
self._testBufferingOnClose("connection", "link")
def testBufferingOnCloseConnectionSession(self):
self._testBufferingOnClose("connection", "session")
def testBufferingOnCloseConnectionConnection(self):
self._testBufferingOnClose("connection", "connection")
def testFullDrain(self):
assert self.rcv.credit == 0
assert self.snd.credit == 0
self.rcv.drain(10)
assert self.rcv.draining()
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
assert self.rcv.draining()
self.snd.drained()
assert self.rcv.credit == 10
assert self.snd.credit == 0
assert self.rcv.draining()
self.pump()
assert self.rcv.credit == 0
assert self.snd.credit == 0
assert not self.rcv.draining()
drained = self.rcv.drained()
assert drained == 10, drained
def testPartialDrain(self):
self.rcv.drain(2)
assert self.rcv.draining()
self.pump()
d = self.snd.delivery("tag")
assert d
assert self.snd.advance()
self.snd.drained()
assert self.rcv.draining()
self.pump()
assert not self.rcv.draining()
c = self.rcv.current
assert self.rcv.queued == 1, self.rcv.queued
assert c.tag == d.tag, c.tag
assert self.rcv.advance()
assert not self.rcv.current
assert self.rcv.credit == 0, self.rcv.credit
assert not self.rcv.draining()
drained = self.rcv.drained()
assert drained == 1, drained
def testDrainFlow(self):
assert self.rcv.credit == 0
assert self.snd.credit == 0
self.rcv.drain(10)
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
self.snd.drained()
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 0
assert self.snd.credit == 0
self.rcv.flow(10)
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
self.snd.drained()
assert self.rcv.credit == 10
assert self.snd.credit == 10
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
drained = self.rcv.drained()
assert drained == 10, drained
def testNegative(self):
assert self.snd.credit == 0
d = self.snd.delivery("tag")
assert d
assert self.snd.advance()
self.pump()
assert self.rcv.credit == 0
assert self.rcv.queued == 0
self.rcv.flow(1)
assert self.rcv.credit == 1
assert self.rcv.queued == 0
self.pump()
assert self.rcv.credit == 1
assert self.rcv.queued == 1, self.rcv.queued
c = self.rcv.current
assert c
assert c.tag == "tag"
assert self.rcv.advance()
assert self.rcv.credit == 0
assert self.rcv.queued == 0
def testDrainZero(self):
assert self.snd.credit == 0
assert self.rcv.credit == 0
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 0
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
self.snd.drained()
self.pump()
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 0
self.rcv.drain(0)
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
self.pump()
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
self.snd.drained()
assert self.snd.credit == 0
assert self.rcv.credit == 10
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 0
self.pump()
assert self.snd.credit == 0
assert self.rcv.credit == 0
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 10
def testDrainOrder(self):
""" Verify drain/drained works regardless of ordering. See PROTON-401
"""
assert self.snd.credit == 0
assert self.rcv.credit == 0
assert self.rcv.queued == 0
#self.rcv.session.connection.transport.trace(Transport.TRACE_FRM)
#self.snd.session.connection.transport.trace(Transport.TRACE_FRM)
## verify that a sender that has reached the drain state will respond
## promptly to a drain issued by the peer.
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
sd = self.snd.delivery("tagA")
assert sd
n = self.snd.send(str2bin("A"))
assert n == 1
self.pump()
self.snd.advance()
# done sending, so signal that we are drained:
self.snd.drained()
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
self.rcv.drain(0)
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
data = self.rcv.recv(10)
assert data == str2bin("A"), data
self.rcv.advance()
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 9, self.rcv.credit
self.snd.drained()
self.pump()
assert self.snd.credit == 0, self.snd.credit
assert self.rcv.credit == 0, self.rcv.credit
# verify that a drain requested by the peer is not "acknowledged" until
# after the sender has completed sending its pending messages
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
sd = self.snd.delivery("tagB")
assert sd
n = self.snd.send(str2bin("B"))
assert n == 1
self.snd.advance()
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
self.rcv.drain(0)
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
sd = self.snd.delivery("tagC")
assert sd
n = self.snd.send(str2bin("C"))
assert n == 1
self.snd.advance()
self.pump()
assert self.snd.credit == 8, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
# now that the sender has finished sending everything, it can signal
# drained
self.snd.drained()
self.pump()
assert self.snd.credit == 0, self.snd.credit
assert self.rcv.credit == 2, self.rcv.credit
data = self.rcv.recv(10)
assert data == str2bin("B"), data
self.rcv.advance()
data = self.rcv.recv(10)
assert data == str2bin("C"), data
self.rcv.advance()
self.pump()
assert self.snd.credit == 0, self.snd.credit
assert self.rcv.credit == 0, self.rcv.credit
def testPushback(self, count=10):
assert self.snd.credit == 0
assert self.rcv.credit == 0
self.rcv.flow(count)
self.pump()
for i in range(count):
d = self.snd.delivery("tag%s" % i)
assert d
self.snd.advance()
assert self.snd.queued == count
assert self.rcv.queued == 0
self.pump()
assert self.snd.queued == 0
assert self.rcv.queued == count
d = self.snd.delivery("extra")
self.snd.advance()
assert self.snd.queued == 1
assert self.rcv.queued == count
self.pump()
assert self.snd.queued == 1
assert self.rcv.queued == count
def testHeadOfLineBlocking(self):
self.snd2 = self.snd.session.sender("link-2")
self.rcv2 = self.rcv.session.receiver("link-2")
self.snd2.open()
self.rcv2.open()
self.pump()
assert self.snd2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.rcv.flow(5)
self.rcv2.flow(10)
self.pump()
assert self.snd.credit == 5
assert self.snd2.credit == 10
for i in range(10):
tag = "test %d" % i
self.snd.delivery( tag )
self.snd.send( tag.encode("ascii") )
assert self.snd.advance()
self.snd2.delivery( tag )
self.snd2.send( tag.encode("ascii") )
assert self.snd2.advance()
self.pump()
for i in range(5):
b = self.rcv.recv( 512 )
assert self.rcv.advance()
b = self.rcv2.recv( 512 )
assert self.rcv2.advance()
for i in range(5):
b = self.rcv2.recv( 512 )
assert self.rcv2.advance()
class SessionCreditTest(Test):
def teardown(self):
self.cleanup()
def testBuffering(self, count=32, size=1024, capacity=16*1024, max_frame=1024):
snd, rcv = self.link("test-link", max_frame=(max_frame, max_frame))
rcv.session.incoming_capacity = capacity
snd.open()
rcv.open()
rcv.flow(count)
self.pump()
assert count > 0
total_bytes = count * size
assert snd.session.outgoing_bytes == 0, snd.session.outgoing_bytes
assert rcv.session.incoming_bytes == 0, rcv.session.incoming_bytes
assert snd.queued == 0, snd.queued
assert rcv.queued == 0, rcv.queued
data = bytes(bytearray(size))
idx = 0
while snd.credit:
d = snd.delivery("tag%s" % idx)
assert d
n = snd.send(data)
assert n == size, (n, size)
assert snd.advance()
self.pump()
idx += 1
assert idx == count, (idx, count)
assert snd.session.outgoing_bytes < total_bytes, (snd.session.outgoing_bytes, total_bytes)
assert rcv.session.incoming_bytes < capacity, (rcv.session.incoming_bytes, capacity)
assert snd.session.outgoing_bytes + rcv.session.incoming_bytes == total_bytes, \
(snd.session.outgoing_bytes, rcv.session.incoming_bytes, total_bytes)
if snd.session.outgoing_bytes > 0:
available = rcv.session.incoming_capacity - rcv.session.incoming_bytes
assert available < max_frame, (available, max_frame)
for i in range(count):
d = rcv.current
assert d, i
pending = d.pending
before = rcv.session.incoming_bytes
assert rcv.advance()
after = rcv.session.incoming_bytes
assert before - after == pending, (before, after, pending)
      snd_before = snd.session.outgoing_bytes
      self.pump()
      snd_after = snd.session.outgoing_bytes
assert rcv.session.incoming_bytes < capacity
if snd_before > 0:
assert capacity - after <= max_frame
assert snd_before > snd_after
if snd_after > 0:
available = rcv.session.incoming_capacity - rcv.session.incoming_bytes
assert available < max_frame, available
def testBufferingSize16(self):
self.testBuffering(size=16)
def testBufferingSize256(self):
self.testBuffering(size=256)
def testBufferingSize512(self):
self.testBuffering(size=512)
def testBufferingSize2048(self):
self.testBuffering(size=2048)
def testBufferingSize1025(self):
self.testBuffering(size=1025)
def testBufferingSize1023(self):
self.testBuffering(size=1023)
def testBufferingSize989(self):
self.testBuffering(size=989)
def testBufferingSize1059(self):
self.testBuffering(size=1059)
def testCreditWithBuffering(self):
snd, rcv = self.link("test-link", max_frame=(1024, 1024))
rcv.session.incoming_capacity = 64*1024
snd.open()
rcv.open()
rcv.flow(128)
self.pump()
assert snd.credit == 128, snd.credit
assert rcv.queued == 0, rcv.queued
idx = 0
while snd.credit:
d = snd.delivery("tag%s" % idx)
snd.send(("x"*1024).encode('ascii'))
assert d
assert snd.advance()
self.pump()
idx += 1
assert idx == 128, idx
assert rcv.queued < 128, rcv.queued
rcv.flow(1)
self.pump()
assert snd.credit == 1, snd.credit
class SettlementTest(Test):
def setup(self):
self.snd, self.rcv = self.link("test-link")
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
def cleanup(self):
# release resources created by this class
super(SettlementTest, self).cleanup()
self.c1 = None
self.snd = None
self.c2 = None
self.rcv2 = None
self.snd2 = None
def teardown(self):
self.cleanup()
def testSettleCurrent(self):
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10, self.snd.credit
d = self.snd.delivery("tag")
e = self.snd.delivery("tag2")
assert d
assert e
c = self.snd.current
assert c.tag == "tag", c.tag
c.settle()
c = self.snd.current
assert c.tag == "tag2", c.tag
c.settle()
c = self.snd.current
assert not c
self.pump()
c = self.rcv.current
assert c
assert c.tag == "tag", c.tag
assert c.settled
c.settle()
c = self.rcv.current
assert c
assert c.tag == "tag2", c.tag
assert c.settled
c.settle()
c = self.rcv.current
assert not c
def testUnsettled(self):
self.rcv.flow(10)
self.pump()
assert self.snd.unsettled == 0, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
d = self.snd.delivery("tag")
assert d
assert self.snd.unsettled == 1, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
assert self.snd.advance()
self.pump()
assert self.snd.unsettled == 1, self.snd.unsettled
assert self.rcv.unsettled == 1, self.rcv.unsettled
c = self.rcv.current
assert c
c.settle()
assert self.snd.unsettled == 1, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
def testMultipleUnsettled(self, count=1024, size=1024):
self.rcv.flow(count)
self.pump()
assert self.snd.unsettled == 0, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
unsettled = []
for i in range(count):
sd = self.snd.delivery("tag%s" % i)
assert sd
n = self.snd.send(("x"*size).encode('ascii'))
assert n == size, n
assert self.snd.advance()
self.pump()
rd = self.rcv.current
assert rd, "did not receive delivery %s" % i
n = rd.pending
b = self.rcv.recv(n)
assert len(b) == n, (b, n)
rd.update(Delivery.ACCEPTED)
assert self.rcv.advance()
self.pump()
unsettled.append(rd)
assert self.rcv.unsettled == count
for rd in unsettled:
rd.settle()
def testMultipleUnsettled2K1K(self):
self.testMultipleUnsettled(2048, 1024)
def testMultipleUnsettled4K1K(self):
self.testMultipleUnsettled(4096, 1024)
def testMultipleUnsettled1K2K(self):
self.testMultipleUnsettled(1024, 2048)
def testMultipleUnsettled2K2K(self):
self.testMultipleUnsettled(2048, 2048)
def testMultipleUnsettled4K2K(self):
self.testMultipleUnsettled(4096, 2048)
class PipelineTest(Test):
def setup(self):
self.c1, self.c2 = self.connection()
def cleanup(self):
# release resources created by this class
super(PipelineTest, self).cleanup()
self.c1 = None
self.c2 = None
def teardown(self):
self.cleanup()
def test(self):
ssn = self.c1.session()
snd = ssn.sender("sender")
self.c1.open()
ssn.open()
snd.open()
for i in range(10):
d = snd.delivery("delivery-%s" % i)
snd.send(str2bin("delivery-%s" % i))
d.settle()
snd.close()
ssn.close()
self.c1.close()
self.pump()
state = self.c2.state
assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
ssn2 = self.c2.session_head(Endpoint.LOCAL_UNINIT)
assert ssn2
    state = ssn2.state
assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
rcv = self.c2.link_head(Endpoint.LOCAL_UNINIT)
assert rcv
state = rcv.state
assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
self.c2.open()
ssn2.open()
rcv.open()
rcv.flow(10)
assert rcv.queued == 0, rcv.queued
self.pump()
assert rcv.queued == 10, rcv.queued
state = rcv.state
assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
state = ssn2.state
assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
state = self.c2.state
assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
for i in range(rcv.queued):
d = rcv.current
assert d
assert d.tag == "delivery-%s" % i
d.settle()
assert rcv.queued == 0, rcv.queued
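  # The test above exercises AMQP pipelining: every frame from open through
  # close is written by c1 before the peer processes any of them (a single
  # pump()), and c2 must still be able to accept the session and link and
  # drain all ten deliveries from its backlog.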
class ServerTest(Test):
def testKeepalive(self):
""" Verify that idle frames are sent to keep a Connection alive
"""
if "java" in sys.platform:
raise Skipped()
idle_timeout = self.delay
server = common.TestServer()
server.start()
class Program:
def on_reactor_init(self, event):
self.conn = event.reactor.connection()
self.conn.hostname = "%s:%s" % (server.host, server.port)
self.conn.open()
self.old_count = None
event.reactor.schedule(3 * idle_timeout, self)
def on_connection_bound(self, event):
event.transport.idle_timeout = idle_timeout
def on_connection_remote_open(self, event):
self.old_count = event.transport.frames_input
def on_timer_task(self, event):
assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection terminated"
assert self.conn.transport.frames_input > self.old_count, "No idle frames received"
self.conn.close()
Reactor(Program()).run()
server.stop()
def testIdleTimeout(self):
""" Verify that a Connection is terminated properly when Idle frames do not
arrive in a timely manner.
"""
if "java" in sys.platform:
raise Skipped()
idle_timeout = self.delay
server = common.TestServer(idle_timeout=idle_timeout)
server.start()
class Program:
def on_reactor_init(self, event):
self.conn = event.reactor.connection()
self.conn.hostname = "%s:%s" % (server.host, server.port)
self.conn.open()
self.remote_condition = None
self.old_count = None
# verify the connection stays up even if we don't explicitly send stuff
# wait up to 3x the idle timeout
event.reactor.schedule(3 * idle_timeout, self)
def on_connection_bound(self, event):
self.transport = event.transport
def on_connection_remote_open(self, event):
self.old_count = event.transport.frames_output
def on_connection_remote_close(self, event):
assert self.conn.remote_condition
assert self.conn.remote_condition.name == "amqp:resource-limit-exceeded"
self.remote_condition = self.conn.remote_condition
def on_timer_task(self, event):
assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection terminated"
assert self.conn.transport.frames_output > self.old_count, "No idle frames sent"
# now wait to explicitly cause the other side to expire:
sleep(3 * idle_timeout)
p = Program()
Reactor(p).run()
assert p.remote_condition
assert p.remote_condition.name == "amqp:resource-limit-exceeded"
server.stop()
class NoValue:
def __init__(self):
pass
def apply(self, dlv):
pass
def check(self, dlv):
assert dlv.data == None
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == None
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class RejectValue:
def __init__(self, condition):
self.condition = condition
def apply(self, dlv):
dlv.condition = self.condition
def check(self, dlv):
assert dlv.data == None, dlv.data
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == self.condition, (dlv.condition, self.condition)
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class ReceivedValue:
def __init__(self, section_number, section_offset):
self.section_number = section_number
self.section_offset = section_offset
def apply(self, dlv):
dlv.section_number = self.section_number
dlv.section_offset = self.section_offset
def check(self, dlv):
assert dlv.data == None, dlv.data
assert dlv.section_number == self.section_number, (dlv.section_number, self.section_number)
assert dlv.section_offset == self.section_offset
assert dlv.condition == None
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class ModifiedValue:
def __init__(self, failed, undeliverable, annotations):
self.failed = failed
self.undeliverable = undeliverable
self.annotations = annotations
def apply(self, dlv):
dlv.failed = self.failed
dlv.undeliverable = self.undeliverable
dlv.annotations = self.annotations
def check(self, dlv):
assert dlv.data == None, dlv.data
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == None
assert dlv.failed == self.failed
assert dlv.undeliverable == self.undeliverable
assert dlv.annotations == self.annotations, (dlv.annotations, self.annotations)
class CustomValue:
def __init__(self, data):
self.data = data
def apply(self, dlv):
dlv.data = self.data
def check(self, dlv):
assert dlv.data == self.data, (dlv.data, self.data)
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == None
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class DeliveryTest(Test):
def teardown(self):
self.cleanup()
def testDisposition(self, count=1, tag="tag%i", type=Delivery.ACCEPTED, value=NoValue()):
snd, rcv = self.link("test-link")
snd.open()
rcv.open()
snd_deliveries = []
for i in range(count):
d = snd.delivery(tag % i)
snd_deliveries.append(d)
snd.advance()
rcv.flow(count)
self.pump()
rcv_deliveries = []
for i in range(count):
d = rcv.current
assert d.tag == (tag % i)
rcv_deliveries.append(d)
rcv.advance()
for d in rcv_deliveries:
value.apply(d.local)
d.update(type)
self.pump()
for d in snd_deliveries:
assert d.remote_state == type
assert d.remote.type == type
value.check(d.remote)
value.apply(d.local)
d.update(type)
self.pump()
for d in rcv_deliveries:
assert d.remote_state == type
assert d.remote.type == type
value.check(d.remote)
for d in snd_deliveries:
d.settle()
self.pump()
for d in rcv_deliveries:
assert d.settled, d.settled
d.settle()
def testReceived(self):
self.testDisposition(type=Disposition.RECEIVED, value=ReceivedValue(1, 2))
def testRejected(self):
self.testDisposition(type=Disposition.REJECTED, value=RejectValue(Condition(symbol("foo"))))
def testReleased(self):
self.testDisposition(type=Disposition.RELEASED)
def testModified(self):
self.testDisposition(type=Disposition.MODIFIED,
value=ModifiedValue(failed=True, undeliverable=True,
annotations={"key": "value"}))
def testCustom(self):
self.testDisposition(type=0x12345, value=CustomValue([1, 2, 3]))
class CollectorTest(Test):
def setup(self):
self.collector = Collector()
def drain(self):
result = []
while True:
e = self.collector.peek()
if e:
result.append(e)
self.collector.pop()
else:
break
return result
def expect(self, *types):
return self.expect_oneof(types)
def expect_oneof(self, *sequences):
events = self.drain()
types = tuple([e.type for e in events])
for alternative in sequences:
if types == alternative:
if len(events) == 1:
return events[0]
elif len(events) > 1:
return events
else:
return
assert False, "actual events %s did not match any of the expected sequences: %s" % (events, sequences)
def expect_until(self, *types):
events = self.drain()
etypes = tuple([e.type for e in events[-len(types):]])
    assert etypes == types, "actual events %s did not end with the expected sequence: %s" % (events, types)
class EventTest(CollectorTest):
def teardown(self):
self.cleanup()
def testEndpointEvents(self):
c1, c2 = self.connection()
c1.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
self.pump()
self.expect()
c2.open()
self.pump()
self.expect(Event.CONNECTION_REMOTE_OPEN)
self.pump()
self.expect()
ssn = c2.session()
snd = ssn.sender("sender")
ssn.open()
snd.open()
self.expect()
self.pump()
self.expect(Event.SESSION_INIT, Event.SESSION_REMOTE_OPEN,
Event.LINK_INIT, Event.LINK_REMOTE_OPEN)
c1.open()
ssn2 = c1.session()
ssn2.open()
rcv = ssn2.receiver("receiver")
rcv.open()
self.pump()
self.expect(Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT,
Event.SESSION_INIT, Event.SESSION_LOCAL_OPEN,
Event.TRANSPORT, Event.LINK_INIT, Event.LINK_LOCAL_OPEN,
Event.TRANSPORT)
rcv.close()
self.expect(Event.LINK_LOCAL_CLOSE, Event.TRANSPORT)
self.pump()
rcv.free()
del rcv
self.expect(Event.LINK_FINAL)
ssn2.free()
del ssn2
self.pump()
c1.free()
c1.transport.unbind()
self.expect_oneof((Event.SESSION_FINAL, Event.LINK_FINAL, Event.SESSION_FINAL,
Event.CONNECTION_UNBOUND, Event.CONNECTION_FINAL),
(Event.CONNECTION_UNBOUND, Event.SESSION_FINAL, Event.LINK_FINAL,
Event.SESSION_FINAL, Event.CONNECTION_FINAL))
def testConnectionINIT_FINAL(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
c.free()
self.expect(Event.CONNECTION_FINAL)
def testSessionINIT_FINAL(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
s = c.session()
self.expect(Event.SESSION_INIT)
s.free()
self.expect(Event.SESSION_FINAL)
c.free()
self.expect(Event.CONNECTION_FINAL)
def testLinkINIT_FINAL(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
s = c.session()
self.expect(Event.SESSION_INIT)
r = s.receiver("asdf")
self.expect(Event.LINK_INIT)
r.free()
self.expect(Event.LINK_FINAL)
c.free()
self.expect(Event.SESSION_FINAL, Event.CONNECTION_FINAL)
def testFlowEvents(self):
snd, rcv = self.link("test-link")
snd.session.connection.collect(self.collector)
rcv.open()
rcv.flow(10)
self.pump()
self.expect(Event.CONNECTION_INIT, Event.SESSION_INIT,
Event.LINK_INIT, Event.LINK_REMOTE_OPEN, Event.LINK_FLOW)
rcv.flow(10)
self.pump()
self.expect(Event.LINK_FLOW)
return snd, rcv
def testDeliveryEvents(self):
snd, rcv = self.link("test-link")
rcv.session.connection.collect(self.collector)
rcv.open()
rcv.flow(10)
self.pump()
self.expect(Event.CONNECTION_INIT, Event.SESSION_INIT,
Event.LINK_INIT, Event.LINK_LOCAL_OPEN, Event.TRANSPORT)
snd.delivery("delivery")
snd.send(str2bin("Hello World!"))
snd.advance()
self.pump()
self.expect()
snd.open()
self.pump()
self.expect(Event.LINK_REMOTE_OPEN, Event.DELIVERY)
rcv.session.connection.transport.unbind()
rcv.session.connection.free()
self.expect(Event.CONNECTION_UNBOUND, Event.TRANSPORT, Event.LINK_FINAL,
Event.SESSION_FINAL, Event.CONNECTION_FINAL)
def testDeliveryEventsDisp(self):
snd, rcv = self.testFlowEvents()
snd.open()
dlv = snd.delivery("delivery")
snd.send(str2bin("Hello World!"))
assert snd.advance()
self.expect(Event.LINK_LOCAL_OPEN, Event.TRANSPORT)
self.pump()
self.expect(Event.LINK_FLOW)
rdlv = rcv.current
assert rdlv != None
assert rdlv.tag == "delivery"
rdlv.update(Delivery.ACCEPTED)
self.pump()
event = self.expect(Event.DELIVERY)
assert event.context == dlv, (dlv, event.context)
def testConnectionBOUND_UNBOUND(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
t = Transport()
t.bind(c)
self.expect(Event.CONNECTION_BOUND)
t.unbind()
self.expect(Event.CONNECTION_UNBOUND, Event.TRANSPORT)
def testTransportERROR_CLOSE(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
t = Transport()
t.bind(c)
self.expect(Event.CONNECTION_BOUND)
assert t.condition is None
t.push(str2bin("asdf"))
self.expect(Event.TRANSPORT_ERROR, Event.TRANSPORT_TAIL_CLOSED)
assert t.condition is not None
assert t.condition.name == "amqp:connection:framing-error"
assert "AMQP header mismatch" in t.condition.description
p = t.pending()
assert p > 0
t.pop(p)
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.TRANSPORT_CLOSED)
def testTransportCLOSED(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
t = Transport()
t.bind(c)
c.open()
self.expect(Event.CONNECTION_BOUND, Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT)
c2 = Connection()
t2 = Transport()
t2.bind(c2)
c2.open()
c2.close()
pump(t, t2)
self.expect(Event.CONNECTION_REMOTE_OPEN, Event.CONNECTION_REMOTE_CLOSE,
Event.TRANSPORT_TAIL_CLOSED)
c.close()
pump(t, t2)
self.expect(Event.CONNECTION_LOCAL_CLOSE, Event.TRANSPORT,
Event.TRANSPORT_HEAD_CLOSED, Event.TRANSPORT_CLOSED)
def testLinkDetach(self):
c1 = Connection()
c1.collect(self.collector)
t1 = Transport()
t1.bind(c1)
c1.open()
s1 = c1.session()
s1.open()
l1 = s1.sender("asdf")
l1.open()
l1.detach()
self.expect_until(Event.LINK_LOCAL_DETACH, Event.TRANSPORT)
c2 = Connection()
c2.collect(self.collector)
t2 = Transport()
t2.bind(c2)
pump(t1, t2)
self.expect_until(Event.LINK_REMOTE_DETACH)
class PeerTest(CollectorTest):
def setup(self):
CollectorTest.setup(self)
self.connection = Connection()
self.connection.collect(self.collector)
self.transport = Transport()
self.transport.bind(self.connection)
self.peer = Connection()
self.peer_transport = Transport()
self.peer_transport.bind(self.peer)
self.peer_transport.trace(Transport.TRACE_OFF)
def pump(self):
pump(self.transport, self.peer_transport)
class TeardownLeakTest(PeerTest):
def doLeak(self, local, remote):
self.connection.open()
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND,
Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT)
ssn = self.connection.session()
ssn.open()
self.expect(Event.SESSION_INIT, Event.SESSION_LOCAL_OPEN, Event.TRANSPORT)
snd = ssn.sender("sender")
snd.open()
self.expect(Event.LINK_INIT, Event.LINK_LOCAL_OPEN, Event.TRANSPORT)
self.pump()
self.peer.open()
self.peer.session_head(0).open()
self.peer.link_head(0).open()
self.pump()
self.expect_oneof((Event.CONNECTION_REMOTE_OPEN, Event.SESSION_REMOTE_OPEN,
Event.LINK_REMOTE_OPEN, Event.LINK_FLOW),
(Event.CONNECTION_REMOTE_OPEN, Event.SESSION_REMOTE_OPEN,
Event.LINK_REMOTE_OPEN))
if local:
snd.close() # ha!!
self.expect(Event.LINK_LOCAL_CLOSE, Event.TRANSPORT)
ssn.close()
self.expect(Event.SESSION_LOCAL_CLOSE, Event.TRANSPORT)
self.connection.close()
self.expect(Event.CONNECTION_LOCAL_CLOSE, Event.TRANSPORT)
if remote:
self.peer.link_head(0).close() # ha!!
self.peer.session_head(0).close()
self.peer.close()
self.pump()
if remote:
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.LINK_REMOTE_CLOSE,
Event.SESSION_REMOTE_CLOSE, Event.CONNECTION_REMOTE_CLOSE,
Event.TRANSPORT_TAIL_CLOSED, Event.TRANSPORT_CLOSED)
else:
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.SESSION_REMOTE_CLOSE,
Event.CONNECTION_REMOTE_CLOSE, Event.TRANSPORT_TAIL_CLOSED,
Event.TRANSPORT_CLOSED)
self.connection.free()
self.expect(Event.LINK_FINAL, Event.SESSION_FINAL)
self.transport.unbind()
self.expect(Event.CONNECTION_UNBOUND, Event.CONNECTION_FINAL)
def testLocalRemoteLeak(self):
self.doLeak(True, True)
def testLocalLeak(self):
self.doLeak(True, False)
def testRemoteLeak(self):
self.doLeak(False, True)
def testLeak(self):
self.doLeak(False, False)
class IdleTimeoutEventTest(PeerTest):
def half_pump(self):
p = self.transport.pending()
    if p > 0:
self.transport.pop(p)
def testTimeoutWithZombieServer(self, expectOpenCloseFrames=True):
self.transport.idle_timeout = self.delay
self.connection.open()
self.half_pump()
self.transport.tick(time())
sleep(self.delay*2)
self.transport.tick(time())
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND,
Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT,
Event.TRANSPORT_ERROR, Event.TRANSPORT_TAIL_CLOSED)
assert self.transport.capacity() < 0
if expectOpenCloseFrames:
assert self.transport.pending() > 0
self.half_pump()
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.TRANSPORT_CLOSED)
assert self.transport.pending() < 0
def testTimeoutWithZombieServerAndSASL(self):
sasl = self.transport.sasl()
self.testTimeoutWithZombieServer(expectOpenCloseFrames=False)
class DeliverySegFaultTest(Test):
def testDeliveryAfterUnbind(self):
conn = Connection()
t = Transport()
ssn = conn.session()
snd = ssn.sender("sender")
dlv = snd.delivery("tag")
dlv.settle()
del dlv
t.bind(conn)
t.unbind()
dlv = snd.delivery("tag")
class SaslEventTest(CollectorTest):
def testAnonymousNoInitialResponse(self):
if "java" in sys.platform:
raise Skipped()
conn = Connection()
conn.collect(self.collector)
transport = Transport(Transport.SERVER)
transport.bind(conn)
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND)
transport.push(str2bin('AMQP\x03\x01\x00\x00\x00\x00\x00 \x02\x01\x00\x00\x00SA'
'\xd0\x00\x00\x00\x10\x00\x00\x00\x02\xa3\tANONYMOUS@'
'AMQP\x00\x01\x00\x00'))
self.expect(Event.TRANSPORT)
for i in range(1024):
p = transport.pending()
self.drain()
p = transport.pending()
self.expect()
def testPipelinedServerReadFirst(self):
if "java" in sys.platform:
raise Skipped()
conn = Connection()
conn.collect(self.collector)
transport = Transport(Transport.CLIENT)
s = transport.sasl()
s.allowed_mechs("ANONYMOUS PLAIN")
transport.bind(conn)
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND)
transport.push(str2bin('AMQP\x03\x01\x00\x00\x00\x00\x00\x1c\x02\x01\x00\x00\x00S@'
'\xc0\x0f\x01\xe0\x0c\x01\xa3\tANONYMOUS\x00\x00\x00\x10'
'\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00AMQP\x00\x01\x00'
'\x00'))
self.expect(Event.TRANSPORT)
p = transport.pending()
bytes = transport.peek(p)
transport.pop(p)
server = Transport(Transport.SERVER)
server.push(bytes)
assert server.sasl().outcome == SASL.OK
def testPipelinedServerWriteFirst(self):
if "java" in sys.platform:
raise Skipped()
conn = Connection()
conn.collect(self.collector)
transport = Transport(Transport.CLIENT)
s = transport.sasl()
s.allowed_mechs("ANONYMOUS")
transport.bind(conn)
p = transport.pending()
bytes = transport.peek(p)
transport.pop(p)
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND, Event.TRANSPORT)
transport.push(str2bin('AMQP\x03\x01\x00\x00\x00\x00\x00\x1c\x02\x01\x00\x00\x00S@'
'\xc0\x0f\x01\xe0\x0c\x01\xa3\tANONYMOUS\x00\x00\x00\x10'
'\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00AMQP\x00\x01\x00'
'\x00'))
self.expect(Event.TRANSPORT)
p = transport.pending()
bytes = transport.peek(p)
transport.pop(p)
# XXX: the bytes above appear to be correct, but we don't get any
# sort of event indicating that the transport is authenticated
| clemensv/qpid-proton | tests/python/proton_tests/engine.py | Python | apache-2.0 | 75,989 |
from app import app
from app.litlink import API, page_meta
from flask import render_template, jsonify, request
@app.route('/')
def index():
    '''The site's index page.'''
    page = page_meta(title='Home', scripts=['litlink.js'])
    return render_template(
        'index.html',
        page=page
    )
@app.route('/shorten', methods=['POST'])
def shorten():
    '''Takes a link provided in the POST body of the request and returns a
    shortened URL for it.'''
    if not request.form.get('link'):
        response, status = API.error(1)
    else:
        response, status = {'url': 'Hello world'}, 200
    return jsonify(response), status
return jsonify(response), status | p810/litlink | app/routes.py | Python | gpl-2.0 | 608 |
# -*- coding: utf-8 -*-
'''
These preprocessing utils would greatly benefit
from a fast Cython rewrite.
'''
from __future__ import absolute_import
import string, sys
import numpy as np
from six.moves import range
from six.moves import zip
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
def base_filter():
f = string.punctuation
f = f.replace("'", '')
f += '\t\n'
return f
def text_to_word_sequence(text, filters=base_filter(), lower=True, split=" "):
    '''filters: sequence of characters to filter out
'''
if lower:
text = text.lower()
text = text.translate(maketrans(filters, split*len(filters)))
seq = text.split(split)
return [_f for _f in seq if _f]
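# A quick illustration of the defaults above: punctuation (minus the
# apostrophe, which base_filter() strips from the filter set) is replaced by
# the split character, text is lower-cased, and empty tokens are dropped.
#
# >>> text_to_word_sequence("Hello, world! Don't stop.")
# ['hello', 'world', "don't", 'stop']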
def one_hot(text, n, filters=base_filter(), lower=True, split=" "):
seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split)
return [(abs(hash(w))%(n-1)+1) for w in seq]
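# one_hot() hashes each token into the range [1, n-1], so the indices depend
# on the interpreter's hash() (e.g. PYTHONHASHSEED) and collisions are
# possible; the values below are illustrative only. Repeated words map to the
# same index.
#
# >>> one_hot("the cat sat on the mat", 1000)
# [531, 763, 112, 404, 531, 277]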
class Tokenizer(object):
def __init__(self, nb_words=None, filters=base_filter(), lower=True, split=" "):
self.word_counts = {}
self.word_docs = {}
self.filters = filters
self.split = split
self.lower = lower
self.nb_words = nb_words
self.document_count = 0
def fit_on_texts(self, texts):
'''
required before using texts_to_sequences or texts_to_matrix
@param texts: can be a list or a generator (for memory-efficiency)
'''
self.document_count = 0
for text in texts:
self.document_count += 1
seq = text_to_word_sequence(text, self.filters, self.lower, self.split)
for w in seq:
if w in self.word_counts:
self.word_counts[w] += 1
else:
self.word_counts[w] = 1
for w in set(seq):
if w in self.word_docs:
self.word_docs[w] += 1
else:
self.word_docs[w] = 1
wcounts = list(self.word_counts.items())
wcounts.sort(key = lambda x: x[1], reverse=True)
sorted_voc = [wc[0] for wc in wcounts]
self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc)+1)))))
self.index_docs = {}
for w, c in list(self.word_docs.items()):
self.index_docs[self.word_index[w]] = c
def fit_on_sequences(self, sequences):
'''
required before using sequences_to_matrix
(if fit_on_texts was never called)
'''
self.document_count = len(sequences)
self.index_docs = {}
for seq in sequences:
seq = set(seq)
for i in seq:
if i not in self.index_docs:
self.index_docs[i] = 1
else:
self.index_docs[i] += 1
def texts_to_sequences(self, texts):
'''
Transform each text in texts in a sequence of integers.
Only top "nb_words" most frequent words will be taken into account.
Only words known by the tokenizer will be taken into account.
Returns a list of sequences.
'''
res = []
for vect in self.texts_to_sequences_generator(texts):
res.append(vect)
return res
def texts_to_sequences_generator(self, texts):
'''
Transform each text in texts in a sequence of integers.
Only top "nb_words" most frequent words will be taken into account.
Only words known by the tokenizer will be taken into account.
Yields individual sequences.
'''
nb_words = self.nb_words
for text in texts:
seq = text_to_word_sequence(text, self.filters, self.lower, self.split)
vect = []
for w in seq:
i = self.word_index.get(w)
if i is not None:
if nb_words and i >= nb_words:
pass
else:
vect.append(i)
yield vect
def texts_to_matrix(self, texts, mode="binary"):
'''
modes: binary, count, tfidf, freq
'''
sequences = self.texts_to_sequences(texts)
return self.sequences_to_matrix(sequences, mode=mode)
def sequences_to_matrix(self, sequences, mode="binary"):
'''
modes: binary, count, tfidf, freq
'''
if not self.nb_words:
if self.word_index:
nb_words = len(self.word_index)
else:
raise Exception("Specify a dimension (nb_words argument), or fit on some text data first")
else:
nb_words = self.nb_words
if mode == "tfidf" and not self.document_count:
raise Exception("Fit the Tokenizer on some data before using tfidf mode")
X = np.zeros((len(sequences), nb_words))
for i, seq in enumerate(sequences):
if not seq:
                continue
counts = {}
for j in seq:
if j >= nb_words:
                    continue
if j not in counts:
counts[j] = 1.
else:
counts[j] += 1
for j, c in list(counts.items()):
if mode == "count":
X[i][j] = c
elif mode == "freq":
X[i][j] = c/len(seq)
elif mode == "binary":
X[i][j] = 1
elif mode == "tfidf":
tf = np.log(c/len(seq))
df = (1 + np.log(1 + self.index_docs.get(j, 0)/(1 + self.document_count)))
X[i][j] = tf / df
else:
raise Exception("Unknown vectorization mode: " + str(mode))
return X
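# End-to-end usage sketch. Indices are assigned by descending frequency (the
# most frequent word gets 1); ties are broken arbitrarily, so the exact
# numbers below are illustrative:
#
# >>> tok = Tokenizer(nb_words=10)
# >>> tok.fit_on_texts(["the cat sat", "the cat ran", "a dog barked"])
# >>> tok.word_index['the'], tok.word_index['cat']
# (1, 2)
# >>> tok.texts_to_sequences(["the cat"])
# [[1, 2]]
# >>> tok.texts_to_matrix(["the cat"], mode="binary").shape
# (1, 10)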
| zhangxujinsh/keras | keras/preprocessing/text.py | Python | mit | 5,920 |
"""Map file definitions for postfix."""
from modoboa.core.commands.postfix_maps import registry
class RelayDomainsMap(object):
"""Map file to list all relay domains."""
filename = "sql-relaydomains.cf"
mysql = (
"SELECT name FROM postfix_relay_domains_relaydomain "
"WHERE name='%s' AND enabled=1"
)
postgres = (
"SELECT name FROM postfix_relay_domains_relaydomain "
"WHERE name='%s' AND enabled"
)
sqlite = (
"SELECT name FROM postfix_relay_domains_relaydomain "
"WHERE name='%s' AND enabled=1"
)
class RelayDomainsTransportMap(object):
"""A transport map for relay domains."""
filename = "sql-relaydomains-transport.cf"
mysql = (
"SELECT CONCAT(srv.name, ':[', rdom.target_host, ']') "
"FROM postfix_relay_domains_service AS srv "
"INNER JOIN postfix_relay_domains_relaydomain AS rdom "
"ON rdom.service_id=srv.id WHERE rdom.enabled=1 AND rdom.name='%s'"
)
postgres = (
"SELECT srv.name || ':[' || rdom.target_host || ']' "
"FROM postfix_relay_domains_service AS srv "
"INNER JOIN postfix_relay_domains_relaydomain AS rdom "
"ON rdom.service_id=srv.id WHERE rdom.enabled AND rdom.name='%s'"
)
sqlite = (
"SELECT srv.name || ':[' || rdom.target_host || ']' "
"FROM postfix_relay_domains_service AS srv "
"INNER JOIN postfix_relay_domains_relaydomain AS rdom "
"ON rdom.service_id=srv.id WHERE rdom.enabled=1 AND rdom.name='%s'"
)
class RelayDomainAliasesTransportMap(object):
"""A transport map for relay domain aliases."""
filename = "sql-relaydomain-aliases-transport.cf"
mysql = (
"SELECT CONCAT(srv.name, ':[', rdom.target_host, ']') "
"FROM postfix_relay_domains_service AS srv "
"INNER JOIN postfix_relay_domains_relaydomain AS rdom "
"ON rdom.service_id=srv.id "
"INNER JOIN postfix_relay_domains_relaydomainalias AS rdomalias "
"ON rdom.id=rdomalias.target_id WHERE rdom.enabled=1 "
"AND rdomalias.enabled=1 AND rdomalias.name='%s'"
)
postgres = (
"SELECT srv.name || ':[' || rdom.target_host || ']' "
"FROM postfix_relay_domains_service AS srv "
"INNER JOIN postfix_relay_domains_relaydomain AS rdom "
"ON rdom.service_id=srv.id "
"INNER JOIN postfix_relay_domains_relaydomainalias AS rdomalias "
"ON rdom.id=rdomalias.target_id WHERE rdom.enabled "
"AND rdomalias.enabled AND rdomalias.name='%s'"
)
sqlite = (
"SELECT srv.name || ':[' || rdom.target_host || ']' "
"FROM postfix_relay_domains_service AS srv "
"INNER JOIN postfix_relay_domains_relaydomain AS rdom "
"ON rdom.service_id=srv.id "
"INNER JOIN postfix_relay_domains_relaydomainalias AS rdomalias "
"ON rdom.id=rdomalias.target_id WHERE rdom.enabled=1 "
"AND rdomalias.enabled=1 AND rdomalias.name='%s'"
)
class RelayRecipientVerification(object):
"""A map file to enable recipient verification."""
filename = "sql-relay-recipient-verification.cf"
mysql = (
"SELECT 'reject_unverified_recipient' "
"FROM postfix_relay_domains_relaydomain "
"WHERE verify_recipients=1 AND name='%d'"
)
postgres = (
"SELECT 'reject_unverified_recipient' "
"FROM postfix_relay_domains_relaydomain "
"WHERE verify_recipients AND name='%d'"
)
sqlite = (
"SELECT 'reject_unverified_recipient' "
"FROM postfix_relay_domains_relaydomain "
"WHERE verify_recipients=1 AND name='%d'"
)
registry.add_files([
RelayDomainsMap, RelayDomainsTransportMap, RelayDomainAliasesTransportMap,
RelayRecipientVerification
])
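# For reference: modoboa's map-file generator renders each class above into a
# Postfix SQL lookup table. A sketch of the resulting sql-relaydomains.cf for
# the MySQL backend, assuming the standard Postfix mysql: table format (the
# connection values are filled in by modoboa's generator, not by this module):
#
#   user = modoboa
#   password = <secret>
#   dbname = modoboa
#   hosts = 127.0.0.1
#   query = SELECT name FROM postfix_relay_domains_relaydomain
#     WHERE name='%s' AND enabled=1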
| disko/modoboa-admin-relaydomains | modoboa_admin_relaydomains/postfix_maps.py | Python | mit | 3,795 |
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from cinder.openstack.common import log as logging
from cinder.volume import configuration
brcd_zone_opts = [
cfg.StrOpt('fc_fabric_address',
default='',
help='Management IP of fabric'),
cfg.StrOpt('fc_fabric_user',
default='',
help='Fabric user ID'),
cfg.StrOpt('fc_fabric_password',
default='',
help='Password for user',
secret=True),
cfg.IntOpt('fc_fabric_port',
default=22,
help='Connecting port'),
cfg.StrOpt('zoning_policy',
default='initiator-target',
help='overridden zoning policy'),
cfg.BoolOpt('zone_activate',
default=True,
help='overridden zoning activation state'),
cfg.StrOpt('zone_name_prefix',
default=None,
help='overridden zone name prefix'),
cfg.StrOpt('principal_switch_wwn',
default=None,
help='Principal switch WWN of the fabric'),
]
CONF = cfg.CONF
CONF.register_opts(brcd_zone_opts, 'BRCD_FABRIC_EXAMPLE')
LOG = logging.getLogger(__name__)
def load_fabric_configurations(fabric_names):
fabric_configs = {}
for fabric_name in fabric_names:
config = configuration.Configuration(brcd_zone_opts, fabric_name)
LOG.debug("Loaded FC fabric config %s" % fabric_name)
fabric_configs[fabric_name] = config
return fabric_configs
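# A hedged usage sketch. Fabric names are normally taken from the zone
# manager's fc_fabric_names option (defined elsewhere in cinder, not in this
# module); each name selects a like-named section of cinder.conf:
#
#   [BRCD_FABRIC_EXAMPLE]
#   fc_fabric_address = 10.0.0.10
#   fc_fabric_user = admin
#   fc_fabric_password = password
#
#   configs = load_fabric_configurations(['BRCD_FABRIC_EXAMPLE'])
#   address = configs['BRCD_FABRIC_EXAMPLE'].fc_fabric_address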
| Akrog/cinder | cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py | Python | apache-2.0 | 2,207 |
"""
A simple file-system like interface that supports
both the regular filesystem and zipfiles
"""
__all__ = ('FileIO', 'ReadOnlyIO')
import os, time, zipfile
class FileIO (object):
"""
A simple interface that makes it possible
to write simple filesystem structures using
the interface that's exposed by the zipfile
module.
"""
def __init__(self, prefix):
self.prefix = prefix
def writestr(self, path, data):
"""
Write 'data' into file at 'path',
using read-only file permissions.
"""
while path.startswith('/'):
path = path[1:]
        fname = os.path.join(self.prefix, path)
dirname = os.path.dirname(fname)
        if not os.path.exists(dirname):
            os.makedirs(dirname, mode=0755)
fp = open(fname, 'wb')
fp.write(data)
fp.close()
os.chmod(fname, 0444)
class ReadOnlyIO (object):
"""
A minimal read-only interface to the filesystem.
This interface transparently deals with zipfiles
(that is, ``io.read('/foo.zip/bar')`` extracts
the contents of ``bar`` from the zipfile.
This interface is designed to be useful for py2app
and is not intended to be fast or generally useful.
"""
def read(self, path):
"""
Return the contents of ``path``
"""
zf, zp = self._zippath(path)
if zf is None:
fp = open(path, 'rb')
data = fp.read()
fp.close()
return data
else:
zf = zipfile.ZipFile(zf, 'r')
return zf.read(zp)
def get_mtime(self, path):
"""
Return the ``mtime`` attribute of ``path``.
"""
zf, zp = self._zippath(path)
if zf is None:
return os.stat(path).st_mtime
else:
zf = zipfile.ZipFile(zf)
info = zf.getinfo(zp)
return time.mktime(info.date_time + (0, 0, 0))
def exists(self, path):
"""
Return True if ``path`` exists
"""
return self.is_file(path) or self.is_dir(path) or self.is_symlink(path)
def is_dir(self, path):
"""
Return True if ``path`` exists and is a directory
"""
zf, zp = self._zippath(path, strict=False)
if zf is None:
return os.path.isdir(path)
        return bool(self.listdir(path))
def is_symlink(self, path):
"""
Return True if ``path`` exists and is a symbolic link
"""
zf, zp = self._zippath(path, strict=False)
if zf is not None:
return False
return os.path.islink(path)
def readlink(self, path):
zf, zp = self._zippath(path)
if zf is None:
return os.readlink(path)
raise IOError("%r is not a symlink"%(path,))
def is_file(self, path):
"""
Return True if ``path`` exists and is a regular file
"""
try:
            zf, zp = self._zippath(path, strict=True)
except IOError:
return False
if zf is None:
            return os.path.isfile(path)
else:
# 'strict==True' hence the object must
# exist in the zipfile and should therefore
# be a file and not a directory or link.
return True
def listdir(self, path):
"""
Return the contents of directory at ``path``.
NOTE: if ``path`` is in a zipfile this will
not raise an error if the directory does not
exist.
"""
zf, zp = self._zippath(path, strict=False)
if zf is None:
return os.listdir(path)
else:
_zf = zf
zf = zipfile.ZipFile(zf, 'r')
            rest = zp + '/'
result = set()
for nm in zf.namelist():
if nm == rest:
raise IOError("%r is not a directory in %r"%(path, _zf))
if nm.startswith(rest):
result.add(nm[len(rest):].split('/')[0])
return list(result)
def _zippath(self, path, strict=True):
"""
Return either ``(zipfilename, zippath)`` or ``(None, path)``
If ``zipfilename`` is not None is points to a zipfile
that may contain the file as ``zippath``. Otherwise
the file is definitely not in a zipfile
Raises ``IOError`` when the file doesn't exist, but won't
check if the file exists in the zipfile unless ``strict``
is True.
"""
if os.path.exists(path):
return (None, path)
else:
            curpath, rest = path, ''
while curpath and not os.path.exists(curpath):
curpath, r = os.path.split(curpath)
rest = os.path.join(r, rest)
if not curpath:
raise IOError("file %r does not exist"%(path,))
try:
zf = zipfile.ZipFile(curpath)
except zipfile.BadZipfile:
raise IOError("bad zipfile %r for %r"%(curpath, path))
if rest.endswith('/'):
rest = rest[:-1]
if strict:
try:
zf.getinfo(rest)
except KeyError:
raise IOError("file %r does not exist in %r", path, curpath)
return curpath, rest
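# Resolution example for the transparent zipfile handling described above,
# assuming /opt/app.zip exists on disk while /opt/app.zip/lib/mod.py is a
# member of the archive rather than a real file:
#
# >>> io = ReadOnlyIO()
# >>> io._zippath('/opt/app.zip/lib/mod.py')
# ('/opt/app.zip', 'lib/mod.py')
# >>> io.read('/opt/app.zip/lib/mod.py')  # returns the bytes of lib/mod.py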
| kamitchell/py2app | py2app/simpleio.py | Python | mit | 5,394 |
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
The event module implements the classes that make up the event system.
The Event class and its subclasses are used to represent "stuff that happens".
The EventEmitter class provides an interface to connect to events and
to emit events. The EmitterGroup groups EventEmitter objects.
For more information see http://github.com/vispy/vispy/wiki/API_Events
"""
from __future__ import division
from collections import OrderedDict
import inspect
import traceback
import weakref
from .logs import logger, _handle_exception
class Event(object):
"""Class describing events that occur and can be reacted to with callbacks.
Each event instance contains information about a single event that has
occurred such as a key press, mouse motion, timer activation, etc.
Subclasses: :class:`KeyEvent`, :class:`MouseEvent`, :class:`TouchEvent`,
:class:`StylusEvent`
The creation of events and passing of events to the appropriate callback
functions is the responsibility of :class:`EventEmitter` instances.
Note that each event object has an attribute for each of the input
arguments listed below.
Parameters
----------
type : str
String indicating the event type (e.g. mouse_press, key_release)
native : object (optional)
The native GUI event object
**kwargs : keyword arguments
All extra keyword arguments become attributes of the event object.
"""
def __init__(self, type, native=None, **kwargs):
# stack of all sources this event has been emitted through
self._sources = []
self._handled = False
self._blocked = False
# Store args
self._type = type
self._native = native
for k, v in kwargs.items():
setattr(self, k, v)
@property
def source(self):
"""The object that the event applies to (i.e. the source of the event).
"""
return self._sources[-1] if self._sources else None
@property
def sources(self):
""" List of objects that the event applies to (i.e. are or have
been a source of the event). Can contain multiple objects in case
the event traverses a hierarchy of objects.
"""
return self._sources
def _push_source(self, source):
self._sources.append(source)
def _pop_source(self):
return self._sources.pop()
@property
def type(self):
        # No docstring; documented in class docstring
return self._type
@property
def native(self):
        # No docstring; documented in class docstring
return self._native
@property
def handled(self):
"""This boolean property indicates whether the event has already been
acted on by an event handler. Since many handlers may have access to
the same events, it is recommended that each check whether the event
has already been handled as well as set handled=True if it decides to
act on the event.
"""
return self._handled
@handled.setter
def handled(self, val):
self._handled = bool(val)
@property
def blocked(self):
"""This boolean property indicates whether the event will be delivered
to event callbacks. If it is set to True, then no further callbacks
will receive the event. When possible, it is recommended to use
Event.handled rather than Event.blocked.
"""
return self._blocked
@blocked.setter
def blocked(self, val):
self._blocked = bool(val)
def __repr__(self):
# Try to generate a nice string representation of the event that
# includes the interesting properties.
# need to keep track of depth because it is
# very difficult to avoid excessive recursion.
global _event_repr_depth
_event_repr_depth += 1
try:
if _event_repr_depth > 2:
return "<...>"
attrs = []
for name in dir(self):
if name.startswith('_'):
continue
# select only properties
if not hasattr(type(self), name) or \
not isinstance(getattr(type(self), name), property):
continue
attr = getattr(self, name)
attrs.append("%s=%s" % (name, attr))
return "<%s %s>" % (self.__class__.__name__, " ".join(attrs))
finally:
_event_repr_depth -= 1
def __str__(self):
"""Shorter string representation"""
return self.__class__.__name__
_event_repr_depth = 0
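# Minimal illustration of the Event interface documented above: extra keyword
# arguments become attributes, and handled/blocked both start out False.
#
# >>> ev = Event('mouse_press', pos=(10, 20))
# >>> ev.type, ev.pos, ev.handled, ev.blocked
# ('mouse_press', (10, 20), False, False)
# >>> ev.handled = True  # a callback marks the event as acted upon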
class EventEmitter(object):
"""Encapsulates a list of event callbacks.
Each instance of EventEmitter represents the source of a stream of similar
events, such as mouse click events or timer activation events. For
example, the following diagram shows the propagation of a mouse click
event to the list of callbacks that are registered to listen for that
event::
User clicks |Canvas creates
mouse on |MouseEvent: |'mouse_press' EventEmitter: |callbacks in sequence: # noqa
Canvas | | | # noqa
-->|event = MouseEvent(...) -->|Canvas.events.mouse_press(event) -->|callback1(event) # noqa
| | -->|callback2(event) # noqa
| | -->|callback3(event) # noqa
Callback functions may be added or removed from an EventEmitter using
:func:`connect() <vispy.event.EventEmitter.connect>` or
:func:`disconnect() <vispy.event.EventEmitter.disconnect>`.
Calling an instance of EventEmitter will cause each of its callbacks
to be invoked in sequence. All callbacks are invoked with a single
argument which will be an instance of :class:`Event <vispy.event.Event>`.
EventEmitters are generally created by an EmitterGroup instance.
Parameters
----------
source : object
The object that the generated events apply to. All emitted Events will
have their .source property set to this value.
type : str or None
String indicating the event type (e.g. mouse_press, key_release)
event_class : subclass of Event
The class of events that this emitter will generate.
"""
def __init__(self, source=None, type=None, event_class=Event):
self._callbacks = []
self._callback_refs = []
# count number of times this emitter is blocked for each callback.
self._blocked = {None: 0}
# used to detect emitter loops
self._emitting = False
self.source = source
self.default_args = {}
if type is not None:
self.default_args['type'] = type
assert inspect.isclass(event_class)
self.event_class = event_class
self._ignore_callback_errors = True
self.print_callback_errors = 'reminders'
@property
def ignore_callback_errors(self):
"""Whether exceptions during callbacks will be caught by the emitter
This allows it to continue invoking other callbacks if an error
occurs.
"""
return self._ignore_callback_errors
@ignore_callback_errors.setter
def ignore_callback_errors(self, val):
self._ignore_callback_errors = val
@property
def print_callback_errors(self):
"""Print a message and stack trace if a callback raises an exception
Valid values are "first" (only show first instance), "reminders" (show
complete first instance, then counts), "always" (always show full
traceback), or "never".
This assumes ignore_callback_errors=True. These will be raised as
warnings, so ensure that the vispy logging level is set to at
least "warning".
"""
return self._print_callback_errors
@print_callback_errors.setter
def print_callback_errors(self, val):
if val not in ('first', 'reminders', 'always', 'never'):
raise ValueError('print_callback_errors must be "first", '
'"reminders", "always", or "never"')
self._print_callback_errors = val
@property
def callback_refs(self):
"""The set of callback references"""
return tuple(self._callback_refs)
@property
def callbacks(self):
"""The set of callbacks"""
return tuple(self._callbacks)
@property
def source(self):
"""The object that events generated by this emitter apply to"""
return None if self._source is None else self._source(
) # get object behind weakref
@source.setter
def source(self, s):
if s is None:
self._source = None
else:
self._source = weakref.ref(s)
def connect(self, callback, ref=False, position='first',
before=None, after=None):
"""Connect this emitter to a new callback.
Parameters
----------
callback : function | tuple
*callback* may be either a callable object or a tuple
(object, attr_name) where object.attr_name will point to a
callable object. Note that only a weak reference to ``object``
will be kept.
ref : bool | str
Reference used to identify the callback in ``before``/``after``.
If True, the callback ref will automatically determined (see
Notes). If False, the callback cannot be referred to by a string.
If str, the given string will be used. Note that if ``ref``
is not unique in ``callback_refs``, an error will be thrown.
position : str
If ``'first'``, the first eligible position is used (that
meets the before and after criteria), ``'last'`` will use
the last position.
before : str | callback | list of str or callback | None
List of callbacks that the current callback should precede.
Can be None if no before-criteria should be used.
after : str | callback | list of str or callback | None
List of callbacks that the current callback should follow.
Can be None if no after-criteria should be used.
Notes
-----
If ``ref=True``, the callback reference will be determined from:
1. If ``callback`` is a ``tuple``, the second element in the tuple.
2. The ``__name__`` attribute.
3. The ``__class__.__name__`` attribute.
The current list of callback refs can be obtained using
``event.callback_refs``. Callbacks can be referred to by either
their string reference (if given), or by the actual callback that
was attached (e.g., ``(canvas, 'swap_buffers')``).
If the specified callback is already connected, then the request is
ignored.
If before is None and after is None (default), the new callback will
be added to the beginning of the callback list. Thus the
callback that is connected _last_ will be the _first_ to receive
events from the emitter.
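Examples
--------
A minimal sketch (``print_type`` is a hypothetical callback)::
def print_type(event):
print(event.type)
emitter = EventEmitter(source=None, type='demo')
emitter.connect(print_type)
emitter()  # invokes print_type with an Event instance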
"""
callbacks = self.callbacks
callback_refs = self.callback_refs
callback = self._normalize_cb(callback)
if callback in callbacks:
return
# deal with the ref
if isinstance(ref, bool):
if ref:
if isinstance(callback, tuple):
ref = callback[1]
elif hasattr(callback, '__name__'): # function
ref = callback.__name__
else: # Method, or other
ref = callback.__class__.__name__
else:
ref = None
elif not isinstance(ref, str):
raise TypeError('ref must be a bool or string')
if ref is not None and ref in self._callback_refs:
raise ValueError('ref "%s" is not unique' % ref)
# positions
if position not in ('first', 'last'):
raise ValueError('position must be "first" or "last", not %s'
% position)
# bounds
bounds = list() # upper & lower bnds (inclusive) of possible cb locs
for ri, criteria in enumerate((before, after)):
if criteria is None or criteria == []:
bounds.append(len(callback_refs) if ri == 0 else 0)
else:
if not isinstance(criteria, list):
criteria = [criteria]
for c in criteria:
count = sum([(c == cn or c == cc) for cn, cc
in zip(callback_refs, callbacks)])
if count != 1:
raise ValueError('criteria "%s" is in the current '
'callback list %s times:\n%s\n%s'
% (c, count,
callback_refs, callbacks))
matches = [ci for ci, (cn, cc) in enumerate(zip(callback_refs,
callbacks))
if (cc in criteria or cn in criteria)]
bounds.append(matches[0] if ri == 0 else (matches[-1] + 1))
if bounds[0] < bounds[1]: # i.e., "place before" < "place after"
raise RuntimeError('cannot place callback before "%s" '
'and after "%s" for callbacks: %s'
% (before, after, callback_refs))
idx = bounds[1] if position == 'first' else bounds[0] # 'last'
# actually add the callback
self._callbacks.insert(idx, callback)
self._callback_refs.insert(idx, ref)
return callback # allows connect to be used as a decorator
def disconnect(self, callback=None):
"""Disconnect a callback from this emitter.
If no callback is specified, then *all* callbacks are removed.
If the callback was not already connected, then the call does nothing.
"""
if callback is None:
self._callbacks = []
self._callback_refs = []
else:
callback = self._normalize_cb(callback)
if callback in self._callbacks:
idx = self._callbacks.index(callback)
self._callbacks.pop(idx)
self._callback_refs.pop(idx)
def _normalize_cb(self, callback):
# dereference methods into a (self, method_name) pair so that we can
# make the connection without making a strong reference to the
# instance.
if inspect.ismethod(callback):
callback = (callback.__self__, callback.__name__)
# always use a weak ref
if (isinstance(callback, tuple) and not
isinstance(callback[0], weakref.ref)):
callback = (weakref.ref(callback[0]),) + callback[1:]
return callback
def __call__(self, *args, **kwargs):
""" __call__(**kwargs)
Invoke all callbacks for this emitter.
Emit a new event object, created with the given keyword
arguments, which must match with the input arguments of the
corresponding event class. Note that the 'type' argument is
filled in by the emitter.
Alternatively, the emitter can also be called with an Event
instance as the only argument. In this case, the specified
Event will be used rather than generating a new one. This allows
customized Event instances to be emitted and also allows EventEmitters
to be chained by connecting one directly to another.
Note that the same Event instance is sent to all callbacks.
This allows some level of communication between the callbacks
(notably, via Event.handled) but also requires that callbacks
be careful not to inadvertently modify the Event.
"""
# This is a VERY highly used method; must be fast!
blocked = self._blocked
if self._emitting:
raise RuntimeError('EventEmitter loop detected!')
# create / massage event as needed
event = self._prepare_event(*args, **kwargs)
# Add our source to the event; remove it after all callbacks have been
# invoked.
event._push_source(self.source)
self._emitting = True
try:
if blocked.get(None, 0) > 0: # this is the same as self.blocked()
return event
rem = []
for cb in self._callbacks[:]:
if isinstance(cb, tuple):
obj = cb[0]()
if obj is None:
rem.append(cb)
continue
cb = getattr(obj, cb[1], None)
if cb is None:
continue
if blocked.get(cb, 0) > 0:
continue
self._invoke_callback(cb, event)
if event.blocked:
break
# remove callbacks to dead objects
for cb in rem:
self.disconnect(cb)
finally:
self._emitting = False
if event._pop_source() != self.source:
raise RuntimeError("Event source-stack mismatch.")
return event
def _invoke_callback(self, cb, event):
try:
cb(event)
except Exception:
_handle_exception(self.ignore_callback_errors,
self.print_callback_errors,
self, cb_event=(cb, event))
def _prepare_event(self, *args, **kwargs):
# When emitting, this method is called to create or otherwise alter
# an event before it is sent to callbacks. Subclasses may extend
# this method to make custom modifications to the event.
if len(args) == 1 and not kwargs and isinstance(args[0], Event):
event = args[0]
# Ensure that the given event matches what we want to emit
assert isinstance(event, self.event_class)
elif not args:
args = self.default_args.copy()
args.update(kwargs)
event = self.event_class(**args)
else:
raise ValueError("Event emitters can be called with an Event "
"instance or with keyword arguments only.")
return event
def blocked(self, callback=None):
"""Return boolean indicating whether the emitter is blocked for
the given callback.
"""
return self._blocked.get(callback, 0) > 0
def block(self, callback=None):
"""Block this emitter. Any attempts to emit an event while blocked
will be silently ignored. If *callback* is given, then the emitter
is only blocked for that specific callback.
Calls to block are cumulative; the emitter must be unblocked the same
number of times as it is blocked.
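For example (a sketch)::
emitter.block()
emitter.block()
emitter.unblock()  # still blocked; the block count is now 1
emitter.unblock()  # events flow again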
"""
self._blocked[callback] = self._blocked.get(callback, 0) + 1
def unblock(self, callback=None):
""" Unblock this emitter. See :func:`event.EventEmitter.block`.
Note: Use of ``unblock(None)`` only reverses the effect of
``block(None)``; it does not unblock callbacks that were explicitly
blocked using ``block(callback)``.
"""
if callback not in self._blocked or self._blocked[callback] == 0:
raise RuntimeError("Cannot unblock %s for callback %s; emitter "
"was not previously blocked." %
(self, callback))
b = self._blocked[callback] - 1
if b == 0 and callback is not None:
del self._blocked[callback]
else:
self._blocked[callback] = b
def blocker(self, callback=None):
"""Return an EventBlocker to be used in 'with' statements
Notes
-----
For example, one could do::
with emitter.blocker():
pass # ..do stuff; no events will be emitted..
"""
return EventBlocker(self, callback)
class WarningEmitter(EventEmitter):
"""
EventEmitter subclass used to allow deprecated events to be used with a
warning message.
"""
def __init__(self, message, *args, **kwargs):
self._message = message
self._warned = False
EventEmitter.__init__(self, *args, **kwargs)
def connect(self, cb, *args, **kwargs):
self._warn(cb)
return EventEmitter.connect(self, cb, *args, **kwargs)
def _invoke_callback(self, cb, event):
self._warn(cb)
return EventEmitter._invoke_callback(self, cb, event)
def _warn(self, cb):
if self._warned:
return
# don't warn about unimplemented connections
if isinstance(cb, tuple) and getattr(cb[0], cb[1], None) is None:
return
traceback.print_stack()
logger.warning(self._message)
self._warned = True
class EmitterGroup(EventEmitter):
"""EmitterGroup instances manage a set of related
:class:`EventEmitters <vispy.event.EventEmitter>`.
Its primary purpose is to provide organization for objects
that make use of multiple emitters and to reduce the boilerplate code
needed to initialize those emitters with default connections.
EmitterGroup instances are usually stored as an 'events' attribute on
objects that use multiple emitters. For example::
EmitterGroup EventEmitter
| |
Canvas.events.mouse_press
Canvas.events.resized
Canvas.events.key_press
EmitterGroup is also a subclass of
:class:`EventEmitter <vispy.event.EventEmitter>`,
allowing it to emit its own
events. Any callback that connects directly to the EmitterGroup will
receive *all* of the events generated by the group's emitters.
Parameters
----------
source : object
The object that the generated events apply to.
auto_connect : bool
If *auto_connect* is True (default), then one connection will
be made for each emitter that looks like
:func:`emitter.connect((source, 'on_' + event_name))
<vispy.event.EventEmitter.connect>`.
This provides a simple mechanism for automatically connecting a large
group of emitters to default callbacks.
emitters : keyword arguments
See the :func:`add <vispy.event.EmitterGroup.add>` method.
"""
def __init__(self, source=None, auto_connect=True, **emitters):
EventEmitter.__init__(self, source)
self.auto_connect = auto_connect
self.auto_connect_format = "on_%s"
self._emitters = OrderedDict()
# whether the sub-emitters have been connected to the group:
self._emitters_connected = False
self.add(**emitters)
def __getitem__(self, name):
"""
Return the emitter assigned to the specified name.
Note that emitters may also be retrieved as an attribute of the
EmitterGroup.
"""
return self._emitters[name]
def __setitem__(self, name, emitter):
"""
Alias for EmitterGroup.add(name=emitter)
"""
self.add(**{name: emitter})
def add(self, auto_connect=None, **kwargs):
""" Add one or more EventEmitter instances to this emitter group.
Each keyword argument may be specified as either an EventEmitter
instance or an Event subclass, in which case an EventEmitter will be
generated automatically::
# This statement:
group.add(mouse_press=MouseEvent,
mouse_release=MouseEvent)
# ..is equivalent to this statement:
group.add(mouse_press=EventEmitter(group.source, 'mouse_press',
MouseEvent),
mouse_release=EventEmitter(group.source, 'mouse_release',
MouseEvent))
"""
if auto_connect is None:
auto_connect = self.auto_connect
# check all names before adding anything
for name in kwargs:
if name in self._emitters:
raise ValueError(
"EmitterGroup already has an emitter named '%s'" %
name)
elif hasattr(self, name):
raise ValueError("The name '%s' cannot be used as an emitter; "
"it is already an attribute of EmitterGroup"
% name)
# add each emitter specified in the keyword arguments
for name, emitter in kwargs.items():
if emitter is None:
emitter = Event
if inspect.isclass(emitter) and issubclass(emitter, Event):
emitter = EventEmitter(
source=self.source,
type=name,
event_class=emitter)
elif not isinstance(emitter, EventEmitter):
raise Exception('Emitter must be specified as either an '
'EventEmitter instance or Event subclass. '
'(got %s=%s)' % (name, emitter))
# give this emitter the same source as the group.
emitter.source = self.source
setattr(self, name, emitter)
self._emitters[name] = emitter
if auto_connect and self.source is not None:
emitter.connect((self.source, self.auto_connect_format % name))
# If emitters are connected to the group already, then this one
# should be connected as well.
if self._emitters_connected:
emitter.connect(self)
@property
def emitters(self):
""" List of current emitters in this group.
"""
return self._emitters
def __iter__(self):
"""
Iterates over the names of emitters in this group.
"""
for k in self._emitters:
yield k
def block_all(self):
""" Block all emitters in this group.
"""
self.block()
for em in self._emitters.values():
em.block()
def unblock_all(self):
""" Unblock all emitters in this group.
"""
self.unblock()
for em in self._emitters.values():
em.unblock()
def connect(self, callback, ref=False, position='first',
before=None, after=None):
""" Connect the callback to the event group. The callback will receive
events from *all* of the emitters in the group.
See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>`
for arguments.
"""
self._connect_emitters(True)
return EventEmitter.connect(self, callback, ref, position,
before, after)
def disconnect(self, callback=None):
""" Disconnect the callback from this group. See
:func:`connect() <vispy.event.EmitterGroup.connect>` and
:func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
more information.
"""
ret = EventEmitter.disconnect(self, callback)
if len(self._callbacks) == 0:
self._connect_emitters(False)
return ret
def _connect_emitters(self, connect):
# Connect/disconnect all sub-emitters from the group. This allows the
# group to emit an event whenever _any_ of the sub-emitters emit,
# while simultaneously eliminating the overhead if nobody is listening.
if connect:
for emitter in self:
self[emitter].connect(self)
else:
for emitter in self:
self[emitter].disconnect(self)
self._emitters_connected = connect
@property
def ignore_callback_errors(self):
# note: super(EventEmitter, self) would skip EventEmitter in the MRO,
# so read the flag via the parent property directly (mirrors fset below)
return EventEmitter.ignore_callback_errors.fget(self)
@ignore_callback_errors.setter
def ignore_callback_errors(self, ignore):
EventEmitter.ignore_callback_errors.fset(self, ignore)
for emitter in self._emitters.values():
if isinstance(emitter, EventEmitter):
emitter.ignore_callback_errors = ignore
elif isinstance(emitter, EmitterGroup):
emitter.ignore_callback_errors_all(ignore)
class EventBlocker(object):
""" Represents a block for an EventEmitter to be used in a context
manager (i.e. 'with' statement).
"""
def __init__(self, target, callback=None):
self.target = target
self.callback = callback
def __enter__(self):
self.target.block(self.callback)
def __exit__(self, *args):
self.target.unblock(self.callback)
| Eric89GXL/vispy | vispy/util/event.py | Python | bsd-3-clause | 29,289 |
__version__ = "0.2.6"
| grocsvs/grocsvs | src/grocsvs/__init__.py | Python | mit | 22 |
"""
Django Extensions additional model fields
"""
import re
import six
import string
import warnings
try:
import uuid
HAS_UUID = True
except ImportError:
HAS_UUID = False
try:
import shortuuid
HAS_SHORT_UUID = True
except ImportError:
HAS_SHORT_UUID = False
from django.core.exceptions import ImproperlyConfigured
from django.db.models import DateTimeField, CharField, SlugField
from django.utils.crypto import get_random_string
from django.template.defaultfilters import slugify
try:
from django.utils.timezone import now as datetime_now
assert datetime_now
except ImportError:
import datetime
datetime_now = datetime.datetime.now
try:
from django.utils.encoding import force_unicode # NOQA
except ImportError:
from django.utils.encoding import force_text as force_unicode # NOQA
MAX_UNIQUE_QUERY_ATTEMPTS = 100
class UniqueFieldMixin(object):
def check_is_bool(self, attrname):
if not isinstance(getattr(self, attrname), bool):
raise ValueError("'{}' argument must be True or False".format(attrname))
def get_queryset(self, model_cls, slug_field):
for field, model in model_cls._meta.get_fields_with_model():
if model and field == slug_field:
return model._default_manager.all()
return model_cls._default_manager.all()
def find_unique(self, model_instance, field, iterator, *args):
# exclude the current model instance from the queryset used in finding
# next valid hash
queryset = self.get_queryset(model_instance.__class__, field)
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
# form a kwarg dict used to implement any unique_together constraints
kwargs = {}
for params in model_instance._meta.unique_together:
if self.attname in params:
for param in params:
kwargs[param] = getattr(model_instance, param, None)
new = six.next(iterator)
kwargs[self.attname] = new
while not new or queryset.filter(**kwargs):
new = six.next(iterator)
kwargs[self.attname] = new
setattr(model_instance, self.attname, new)
return new
class AutoSlugField(UniqueFieldMixin, SlugField):
""" AutoSlugField
By default, sets editable=False, blank=True.
Required arguments:
populate_from
Specifies which field or list of fields the slug is populated from.
Optional arguments:
separator
Defines the used separator (default: '-')
overwrite
If set to True, overwrites the slug on every save (default: False)
Inspired by SmileyChris' Unique Slugify snippet:
http://www.djangosnippets.org/snippets/690/
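Example usage (a sketch; ``Article`` is a hypothetical model)::
class Article(models.Model):
title = models.CharField(max_length=100)
slug = AutoSlugField(populate_from='title')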
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('editable', False)
populate_from = kwargs.pop('populate_from', None)
if populate_from is None:
raise ValueError("missing 'populate_from' argument")
else:
self._populate_from = populate_from
self.slugify_function = kwargs.pop('slugify_function', slugify)
self.separator = kwargs.pop('separator', six.u('-'))
self.overwrite = kwargs.pop('overwrite', False)
self.check_is_bool('overwrite')
self.allow_duplicates = kwargs.pop('allow_duplicates', False)
self.check_is_bool('allow_duplicates')
super(AutoSlugField, self).__init__(*args, **kwargs)
def _slug_strip(self, value):
"""
Cleans up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
def slugify_func(self, content):
if content:
return self.slugify_function(content)
return ''
def slug_generator(self, original_slug, start):
yield original_slug
for i in range(start, MAX_UNIQUE_QUERY_ATTEMPTS):
slug = original_slug
end = '%s%s' % (self.separator, i)
end_len = len(end)
if self.slug_len and len(slug) + end_len > self.slug_len:
slug = slug[:self.slug_len - end_len]
slug = self._slug_strip(slug)
slug = '%s%s' % (slug, end)
yield slug
raise RuntimeError('max slug attempts for %s exceeded (%s)' %
(original_slug, MAX_UNIQUE_QUERY_ATTEMPTS))
def create_slug(self, model_instance, add):
# get fields to populate from and slug field to set
if not isinstance(self._populate_from, (list, tuple)):
self._populate_from = (self._populate_from, )
slug_field = model_instance._meta.get_field(self.attname)
if add or self.overwrite:
# slugify the original field content and set next step to 2
slug_for_field = lambda field: self.slugify_func(getattr(model_instance, field))
slug = self.separator.join(map(slug_for_field, self._populate_from))
start = 2
else:
# get slug from the current model instance
slug = getattr(model_instance, self.attname)
# model_instance is being modified, and overwrite is False,
# so instead of doing anything, just return the current slug
return slug
# strip slug depending on max_length attribute of the slug field
# and clean-up
self.slug_len = slug_field.max_length
if self.slug_len:
slug = slug[:self.slug_len]
slug = self._slug_strip(slug)
original_slug = slug
if self.allow_duplicates:
return slug
return super(AutoSlugField, self).find_unique(
model_instance, slug_field, self.slug_generator(original_slug, start))
def pre_save(self, model_instance, add):
value = force_unicode(self.create_slug(model_instance, add))
return value
def get_internal_type(self):
return "SlugField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = '%s.AutoSlugField' % self.__module__
args, kwargs = introspector(self)
kwargs.update({
'populate_from': repr(self._populate_from),
'separator': repr(self.separator),
'overwrite': repr(self.overwrite),
'allow_duplicates': repr(self.allow_duplicates),
})
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(AutoSlugField, self).deconstruct()
kwargs['populate_from'] = self._populate_from
if not self.separator == six.u('-'):
kwargs['separator'] = self.separator
if self.overwrite is not False:
kwargs['overwrite'] = True
if self.allow_duplicates is not False:
kwargs['allow_duplicates'] = True
return name, path, args, kwargs
class RandomCharField(UniqueFieldMixin, CharField):
""" RandomCharField
By default, sets editable=False, blank=True, unique=False.
Required arguments:
length
Specifies the length of the field
Optional arguments:
unique
If set to True, duplicate entries are not allowed (default: False)
lowercase
If set to True, lowercase the alpha characters (default: False)
uppercase
If set to True, uppercase the alpha characters (default: False)
include_alpha
If set to True, include alpha characters (default: True)
include_digits
If set to True, include digit characters (default: True)
include_punctuation
If set to True, include punctuation characters (default: False)
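Example usage (a sketch; ``Coupon`` is a hypothetical model)::
class Coupon(models.Model):
code = RandomCharField(length=8, uppercase=True, unique=True)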
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('editable', False)
self.length = kwargs.pop('length', None)
if self.length is None:
raise ValueError("missing 'length' argument")
kwargs['max_length'] = self.length
self.lowercase = kwargs.pop('lowercase', False)
self.check_is_bool('lowercase')
self.uppercase = kwargs.pop('uppercase', False)
self.check_is_bool('uppercase')
if self.uppercase and self.lowercase:
raise ValueError("the 'lowercase' and 'uppercase' arguments are mutually exclusive")
self.include_digits = kwargs.pop('include_digits', True)
self.check_is_bool('include_digits')
self.include_alpha = kwargs.pop('include_alpha', True)
self.check_is_bool('include_alpha')
self.include_punctuation = kwargs.pop('include_punctuation', False)
self.check_is_bool('include_punctuation')
# Set unique=False unless it's been set manually.
if 'unique' not in kwargs:
kwargs['unique'] = False
super(RandomCharField, self).__init__(*args, **kwargs)
def random_char_generator(self, chars):
for i in range(MAX_UNIQUE_QUERY_ATTEMPTS):
yield ''.join(get_random_string(self.length, chars))
raise RuntimeError('max random character attempts exceeded (%s)' %
MAX_UNIQUE_QUERY_ATTEMPTS)
def pre_save(self, model_instance, add):
if not add and getattr(model_instance, self.attname) != '':
return getattr(model_instance, self.attname)
population = ''
if self.include_alpha:
if self.lowercase:
population += string.ascii_lowercase
elif self.uppercase:
population += string.ascii_uppercase
else:
population += string.ascii_letters
if self.include_digits:
population += string.digits
if self.include_punctuation:
population += string.punctuation
random_chars = self.random_char_generator(population)
if not self.unique:
return random_chars
return super(RandomCharField, self).find_unique(
model_instance,
model_instance._meta.get_field(self.attname),
random_chars,
)
def get_internal_type(self):
return "CharField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = '%s.RandomCharField' % self.__module__
args, kwargs = introspector(self)
kwargs.update({
'lowercase': repr(self.lowercase),
'include_digits': repr(self.include_digits),
'include_alpha': repr(self.include_alpha),
'include_punctuation': repr(self.include_punctuation),
'length': repr(self.length),
'unique': repr(self.unique),
})
del kwargs['max_length']
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(RandomCharField, self).deconstruct()
kwargs['length'] = self.length
del kwargs['max_length']
if self.lowercase is True:
kwargs['lowercase'] = self.lowercase
if self.uppercase is True:
kwargs['uppercase'] = self.uppercase
if self.include_alpha is False:
kwargs['include_alpha'] = self.include_alpha
if self.include_digits is False:
kwargs['include_digits'] = self.include_digits
if self.include_punctuation is True:
kwargs['include_punctuation'] = self.include_punctuation
if self.unique is True:
kwargs['unique'] = self.unique
return name, path, args, kwargs
class CreationDateTimeField(DateTimeField):
""" CreationDateTimeField
By default, sets editable=False, blank=True, auto_now_add=True
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
kwargs.setdefault('blank', True)
kwargs.setdefault('auto_now_add', True)
DateTimeField.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "DateTimeField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.DateTimeField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(CreationDateTimeField, self).deconstruct()
if self.editable is not False:
kwargs['editable'] = True
if self.blank is not True:
kwargs['blank'] = False
if self.auto_now_add is not False:
kwargs['auto_now_add'] = True
return name, path, args, kwargs
class ModificationDateTimeField(CreationDateTimeField):
""" ModificationDateTimeField
By default, sets editable=False, blank=True, auto_now=True
Sets value to now every time the object is saved.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('auto_now', True)
DateTimeField.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "DateTimeField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.DateTimeField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ModificationDateTimeField, self).deconstruct()
if self.auto_now is not False:
kwargs['auto_now'] = True
return name, path, args, kwargs
class UUIDVersionError(Exception):
pass
class UUIDField(CharField):
""" UUIDField
By default uses UUID version 4 (randomly generated UUID).
The field support all uuid versions which are natively supported by the uuid python module, except version 2.
For more information see: http://docs.python.org/lib/module-uuid.html
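Example usage (a sketch; ``Ticket`` is a hypothetical model)::
class Ticket(models.Model):
uuid = UUIDField()  # auto-generated version-4 UUID on first save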
"""
DEFAULT_MAX_LENGTH = 36
def __init__(self, verbose_name=None, name=None, auto=True, version=4, node=None, clock_seq=None, namespace=None, uuid_name=None, *args, **kwargs):
warnings.warn("Django 1.8 features a native UUIDField, this UUIDField will be removed after Django 1.7 becomes unsupported.", DeprecationWarning)
if not HAS_UUID:
raise ImproperlyConfigured("'uuid' module is required for UUIDField. (Do you have Python 2.5 or higher installed ?)")
kwargs.setdefault('max_length', self.DEFAULT_MAX_LENGTH)
if auto:
self.empty_strings_allowed = False
kwargs['blank'] = True
kwargs.setdefault('editable', False)
self.auto = auto
self.version = version
self.node = node
self.clock_seq = clock_seq
self.namespace = namespace
self.uuid_name = uuid_name or name
super(UUIDField, self).__init__(verbose_name=verbose_name, *args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return uuid.uuid4()
elif self.version == 1:
return uuid.uuid1(self.node, self.clock_seq)
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
return uuid.uuid3(self.namespace, self.uuid_name)
elif self.version == 5:
return uuid.uuid5(self.namespace, self.uuid_name)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
def pre_save(self, model_instance, add):
value = super(UUIDField, self).pre_save(model_instance, add)
if self.auto and add and value is None:
value = force_unicode(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
else:
if self.auto and not value:
value = force_unicode(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
def formfield(self, **kwargs):
if self.auto:
return None
return super(UUIDField, self).formfield(**kwargs)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.CharField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
if kwargs.get('max_length', None) == self.DEFAULT_MAX_LENGTH:
del kwargs['max_length']
if self.auto is not True:
kwargs['auto'] = self.auto
if self.version != 4:
kwargs['version'] = self.version
if self.node is not None:
kwargs['node'] = self.node
if self.clock_seq is not None:
kwargs['clock_seq'] = self.clock_seq
if self.namespace is not None:
kwargs['namespace'] = self.namespace
if self.uuid_name is not None:
kwargs['uuid_name'] = self.uuid_name
return name, path, args, kwargs
class PostgreSQLUUIDField(UUIDField):
def __init__(self, *args, **kwargs):
warnings.warn("Django 1.8 features a native UUIDField, this UUIDField will be removed after Django 1.7 becomes unsupported.", DeprecationWarning)
super(PostgreSQLUUIDField, self).__init__(*args, **kwargs)
def db_type(self, connection=None):
return "UUID"
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, six.integer_types):
value = uuid.UUID(int=value)
elif isinstance(value, (six.string_types, six.binary_type)):
if len(value) == 16:
value = uuid.UUID(bytes=value)
else:
value = uuid.UUID(value)
return super(PostgreSQLUUIDField, self).get_db_prep_value(
value, connection, prepared=False)
class ShortUUIDField(UUIDField):
""" ShortUUIDField
Generates concise (22 characters instead of 36), unambiguous, URL-safe UUIDs.
Based on `shortuuid`: https://github.com/stochastic-technologies/shortuuid
"""
DEFAULT_MAX_LENGTH = 22
def __init__(self, *args, **kwargs):
if not HAS_SHORT_UUID:
raise ImproperlyConfigured("'shortuuid' module is required for ShortUUIDField. (Is the 'shortuuid' package installed?)")
# set the default max_length before calling __init__; doing it
# afterwards would have no effect on the already-constructed field
kwargs.setdefault('max_length', self.DEFAULT_MAX_LENGTH)
super(ShortUUIDField, self).__init__(*args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return shortuuid.uuid()
elif self.version == 1:
return shortuuid.uuid()
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
raise UUIDVersionError("UUID version 3 is not supported.")
elif self.version == 5:
return shortuuid.uuid(name=self.namespace)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django_extensions/db/fields/__init__.py | Python | agpl-3.0 | 20,102 |
from datetime import datetime
from django.db import models
from tagging.models import Tag, TaggedItem
from django.contrib.contenttypes.models import ContentType
class PostImageManager(models.Manager):
"""
Post Image Manager
"""
# use for related fields
use_for_related_fields = True
def get_gallery_images(self):
"""
Get gallery images
Gallery images are PostImages that have a non-null gallery position
"""
return self.get_query_set().filter(gallery_position__isnull=False)
class PostManager(models.Manager):
"""
Post Manager
"""
# use for related fields
use_for_related_fields = True
def build_query(self, require_published=True, year=None, month=None,
category_slug=None, series_slug=None, tag=None, require_featured=False):
# Initial posts by require published indicator
if require_published:
posts = self.get_query_set().filter(is_published=True,
publish_date__lt=datetime.now)
else:
posts = self.get_query_set()
# featured
if require_featured:
posts = posts.filter(is_featured=True)
# date
if year:
posts = posts.filter(publish_date__year=year)
if month:
posts = posts.filter(publish_date__month=month)
#category and series
if category_slug:
posts = posts.filter(categories__slug=category_slug)
if series_slug:
posts = posts.filter(series__slug=series_slug)
# tag
if tag:
# return posts filtered by the tag
return TaggedItem.objects.get_by_model(posts, [tag,])
else:
return posts
def get_published_posts(self):
"""
Get published posts
"""
return self.build_query(require_published=True)
def get_featured_posts(self):
"""
Get featured posts
"""
return self.build_query(require_published=True, require_featured=True)
def get_post_archive(self, require_published=True, year=None, month=None,
category_slug=None, tag=None):
"""
Return a Post Archive
A blog post archive is a tuple of (year, months[]),
each month containing a tuple of (month, days[]),
each day containing a tuple of (day, posts[])
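For example, a single post published on 2011-03-15 would produce,
roughly: [(2011, [(3, [(15, [<Post: ...>])])])]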
"""
# This was originally done as a dictionary
# but python dictionaries can't guarantee sort order.
posts = self.build_query(require_published=require_published, year=year,
month=month, category_slug=category_slug, tag=tag)
post_archive = {}
for post in posts.order_by('-publish_date'):
if post.publish_date.year not in post_archive:
post_archive[post.publish_date.year] = {}
if post.publish_date.month not in post_archive[post.publish_date.year]:
post_archive[post.publish_date.year][post.publish_date.month] = {}
if post.publish_date.day not in post_archive[post.publish_date.year][post.publish_date.month]:
post_archive[post.publish_date.year][post.publish_date.month][post.publish_date.day] = []
post_archive[post.publish_date.year][post.publish_date.month][post.publish_date.day].append(post)
# Now that all of that lifting is done, convert the dictionaries into tuples with lists
sorted_years = [(k,[]) for k in sorted(post_archive.keys(),
reverse=True)]
for sorted_year in sorted_years:
sorted_months = [(k,[]) for k in sorted(post_archive[sorted_year[0]],
reverse=True)]
sorted_year[1].extend(sorted_months)
for sorted_month in sorted_months:
sorted_days = [(k,[]) for k in sorted(
post_archive[sorted_year[0]][sorted_month[0]], reverse=True)]
sorted_month[1].extend(sorted_days)
for sorted_day in sorted_days:
sorted_day[1].extend(
post_archive[sorted_year[0]][sorted_month[0]][sorted_day[0]])
return sorted_years
@classmethod
def get_tags_in_use(cls):
"""
Return the tags in use
"""
return Tag.objects.filter(
id__in=TaggedItem.objects.filter(
content_type=ContentType.objects.get(
app_label='blogyall',
model=cls
)
).values('tag_id')
)
class PublishedPostManager(PostManager):
"""
Published Post Manager
"""
def get_query_set(self):
return super(PublishedPostManager, self).get_query_set().filter(is_published=True)
| davisd/django-blogyall | blog/managers.py | Python | bsd-3-clause | 4,842 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST data to TFRecords of TF-Example protos.
This module downloads the MNIST data, uncompresses it, reads the files
that make up the MNIST data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
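Example invocation (a sketch; the flag below is defined in this module):
$ python download_and_convert_mnist.py --dataset_dir=/tmp/mnist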
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy as np
from six.moves import urllib
import tensorflow as tf
import dataset_utils
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'dataset_dir',
'/tmp/mnist',
'The directory where the output TFRecords and temporary files are saved.')
# The URLs where the MNIST data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
_TRAIN_LABELS_FILENAME = 'train-labels-idx1-ubyte.gz'
_TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
_TEST_LABELS_FILENAME = 't10k-labels-idx1-ubyte.gz'
_IMAGE_SIZE = 28
_NUM_CHANNELS = 1
# The names of the classes.
_CLASS_NAMES = [
'zero',
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
]
def _extract_images(filename, num_images):
"""Extract the images into a numpy array.
Args:
filename: The path to an MNIST images file.
num_images: The number of images in the file.
Returns:
A numpy array of shape [number_of_images, height, width, channels].
"""
print('Extracting images from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
_IMAGE_SIZE * _IMAGE_SIZE * num_images * _NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
return data
def _extract_labels(filename, num_labels):
"""Extract the labels into a vector of int64 label IDs.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A numpy array of shape [number_of_labels]
"""
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _add_to_tfrecord(data_filename, labels_filename, num_images,
tfrecord_writer):
"""Loads data from the binary MNIST files and writes files to a TFRecord.
Args:
data_filename: The filename of the MNIST images.
labels_filename: The filename of the MNIST labels.
num_images: The number of images in the dataset.
tfrecord_writer: The TFRecord writer to use for writing.
"""
images = _extract_images(data_filename, num_images)
labels = _extract_labels(labels_filename, num_images)
shape = (_IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
with tf.Graph().as_default():
image = tf.placeholder(dtype=tf.uint8, shape=shape)
encoded_png = tf.image.encode_png(image)
with tf.Session('') as sess:
for j in range(num_images):
sys.stdout.write('\r>> Converting image %d/%d' % (j + 1, num_images))
sys.stdout.flush()
png_string = sess.run(encoded_png, feed_dict={image: images[j]})
example = dataset_utils.image_to_tfexample(
png_string, 'png'.encode(), _IMAGE_SIZE, _IMAGE_SIZE, labels[j])
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_%s.tfrecord' % (dataset_dir, split_name)
def _download_dataset(dataset_dir):
"""Downloads MNIST locally.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
if not os.path.exists(filepath):
print('Downloading file %s...' % filename)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %.1f%%' % (
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(_DATA_URL + filename,
filepath,
_progress)
print()
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
for filename in [_TRAIN_DATA_FILENAME,
_TRAIN_LABELS_FILENAME,
_TEST_DATA_FILENAME,
_TEST_LABELS_FILENAME]:
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
def run(args):
"""Runs the download and conversion operation.
Args:
args: Unused command-line arguments passed by tf.app.run; the
output directory is read from FLAGS.dataset_dir.
"""
dataset_dir = FLAGS.dataset_dir
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
training_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_dataset(dataset_dir)
# First, process the training data:
with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TRAIN_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TRAIN_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 60000, tfrecord_writer)
# Next, process the testing data:
with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
data_filename = os.path.join(dataset_dir, _TEST_DATA_FILENAME)
labels_filename = os.path.join(dataset_dir, _TEST_LABELS_FILENAME)
_add_to_tfrecord(data_filename, labels_filename, 10000, tfrecord_writer)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the MNIST dataset!')
if __name__ == '__main__':
tf.app.run(main=run)
| mnuke/tf-slim-mnist | datasets/download_and_convert_mnist.py | Python | apache-2.0 | 7,628 |
from tkinter import *
from logic import *
from random import *
SIZE = 500
GRID_LEN = 4
GRID_PADDING = 10
BACKGROUND_COLOR_GAME = "#92877d"
BACKGROUND_COLOR_CELL_EMPTY = "#9e948a"
BACKGROUND_COLOR_DICT = {2: "#eee4da", 4: "#ede0c8", 8: "#f2b179", 16: "#f59563", \
32: "#f67c5f", 64: "#f65e3b", 128: "#edcf72", 256: "#edcc61", \
512: "#edc850", 1024: "#edc53f", 2048: "#edc22e"}
CELL_COLOR_DICT = {2: "#776e65", 4: "#776e65", 8: "#f9f6f2", 16: "#f9f6f2", \
32: "#f9f6f2", 64: "#f9f6f2", 128: "#f9f6f2", 256: "#f9f6f2", \
512: "#f9f6f2", 1024: "#f9f6f2", 2048: "#f9f6f2"}
FONT = ("Verdana", 40, "bold")
KEY_UP_ALT = "\'\\uf700\'"
KEY_DOWN_ALT = "\'\\uf701\'"
KEY_LEFT_ALT = "\'\\uf702\'"
KEY_RIGHT_ALT = "\'\\uf703\'"
KEY_UP = "'w'"
KEY_DOWN = "'s'"
KEY_LEFT = "'a'"
KEY_RIGHT = "'d'"
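# Note: the key constants above are quoted string literals because
# key_down() matches them against repr(event.char), not event.char.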
class GameGrid(Frame):
def __init__(self):
Frame.__init__(self)
self.grid()
self.master.title('2048')
self.master.bind("<Key>", self.key_down)
# self.gamelogic = gamelogic
self.commands = {KEY_UP: up, KEY_DOWN: down, KEY_LEFT: left, KEY_RIGHT: right,
KEY_UP_ALT: up, KEY_DOWN_ALT: down, KEY_LEFT_ALT: left, KEY_RIGHT_ALT: right}
self.grid_cells = []
self.init_grid()
self.init_matrix()
self.update_grid_cells()
self.mainloop()
def init_grid(self):
background = Frame(self, bg=BACKGROUND_COLOR_GAME, width=SIZE, height=SIZE)
background.grid()
for i in range(GRID_LEN):
grid_row = []
for j in range(GRID_LEN):
cell = Frame(background, bg=BACKGROUND_COLOR_CELL_EMPTY, width=SIZE / GRID_LEN, height=SIZE / GRID_LEN)
cell.grid(row=i, column=j, padx=GRID_PADDING, pady=GRID_PADDING)
# font = Font(size=FONT_SIZE, family=FONT_FAMILY, weight=FONT_WEIGHT)
t = Label(master=cell, text="", bg=BACKGROUND_COLOR_CELL_EMPTY, justify=CENTER, font=FONT, width=4,
height=2)
t.grid()
grid_row.append(t)
self.grid_cells.append(grid_row)
def gen(self):
return randint(0, GRID_LEN - 1)
def init_matrix(self):
self.matrix = new_game(4)
self.matrix = add_two(self.matrix)
self.matrix = add_two(self.matrix)
def update_grid_cells(self):
for i in range(GRID_LEN):
for j in range(GRID_LEN):
new_number = self.matrix[i][j]
if new_number == 0:
self.grid_cells[i][j].configure(text="", bg=BACKGROUND_COLOR_CELL_EMPTY)
else:
self.grid_cells[i][j].configure(text=str(new_number), bg=BACKGROUND_COLOR_DICT[new_number],
fg=CELL_COLOR_DICT[new_number])
self.update_idletasks()
def key_down(self, event):
key = repr(event.char)
if key in self.commands:
self.matrix, done = self.commands[key](self.matrix)
if done:
self.matrix = add_two(self.matrix)
self.update_grid_cells()
done = False
if game_state(self.matrix) == 'win':
self.grid_cells[1][1].configure(text="You", bg=BACKGROUND_COLOR_CELL_EMPTY)
self.grid_cells[1][2].configure(text="Win!", bg=BACKGROUND_COLOR_CELL_EMPTY)
if game_state(self.matrix) == 'lose':
self.grid_cells[1][1].configure(text="You", bg=BACKGROUND_COLOR_CELL_EMPTY)
self.grid_cells[1][2].configure(text="Lose!", bg=BACKGROUND_COLOR_CELL_EMPTY)
def generate_next(self):
index = (self.gen(), self.gen())
while self.matrix[index[0]][index[1]] != 0:
index = (self.gen(), self.gen())
self.matrix[index[0]][index[1]] = 2
gamegrid = GameGrid()
| memogame/tic-tac-toe | 2048_python/puzzle.py | Python | mit | 3,988 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# setup must come from setuptools; re-importing it from distutils.core
# afterwards would shadow it and break options such as install_requires
from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
ext_modules=[
Extension("enm_cython",
["./epidemic_network_modelling/enm_cython.pyx"],
# libraries=["m"],
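# Caution: "/openmp" is an MSVC flag while "-fopenmp" targets GCC/Clang;
# passing both to a single compiler may trigger warnings or errors, so
# the flags likely need to be chosen per platform.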
extra_compile_args = ["-O3", "-ffast-math", "-march=native", "/openmp", "-fopenmp" ],
extra_link_args=['-fopenmp']
)
]
setup(
name='epidemic_network_modelling',
version='0.1.0',
description="A program that looks at questions associated with epidemic modelling on networks. ",
long_description=readme + '\n\n' + history,
author="Unni Kurumbail",
author_email='ukurumba@u.rochester.edu',
url='https://github.com/ukurumba/epidemic_network_modelling',
packages=[
'epidemic_network_modelling',
],
package_dir={'epidemic_network_modelling':
'epidemic_network_modelling'},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='epidemic_network_modelling',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
cmdclass = {'build_ext': build_ext},
ext_modules=ext_modules,
test_suite='tests',
tests_require=test_requirements
)
| ukurumba/epidemic_network_modelling | setup.py | Python | mit | 2,161 |
# GridCal
# Copyright (C) 2022 Santiago Peñate Vera
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
class Wire(EditableDevice):
def __init__(self, name='', idtag=None, gmr=0.01, r=0.01, x=0.0, max_current=1):
"""
Wire definition
:param name: Name of the wire type
:param gmr: Geometric Mean Radius (m)
:param r: Resistance per unit length (Ohm / km)
:param x: Reactance per unit length (Ohm / km)
:param max_current: Maximum current of the conductor in (kA)
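Example (a sketch with illustrative values)::
wire = Wire(name='ACSR 100', gmr=0.0089, r=0.12, x=0.38, max_current=0.5)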
"""
EditableDevice.__init__(self,
name=name,
idtag=idtag,
active=True,
device_type=DeviceType.WireDevice,
editable_headers={'name': GCProp('', str, "Name of the conductor"),
'idtag': GCProp('', str, 'Unique ID'),
'r': GCProp('Ohm/km', float, "resistance of the conductor"),
'x': GCProp('Ohm/km', float, "reactance of the conductor"),
'gmr': GCProp('m', float, "Geometric Mean Radius of the conductor"),
'max_current': GCProp('kA', float, "Maximum current of the conductor")
},
non_editable_attributes=list(),
properties_with_profile={})
# self.wire_name = name
self.r = r
self.x = x
self.gmr = gmr
self.max_current = max_current
def copy(self):
"""
Copy of the wire
:return:
"""
# name='', idtag=None, gmr=0.01, r=0.01, x=0.0, max_current=1
return Wire(name=self.name, gmr=self.gmr, r=self.r, x=self.x, max_current=self.max_current)
| SanPen/GridCal | src/GridCal/Engine/Devices/wire.py | Python | lgpl-3.0 | 2,759 |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import mox
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova import exception
from nova.objects import external_event as external_event_obj
from nova import test
from nova.virt import fake
from nova.virt import virtapi
class VirtAPIBaseTest(test.NoDBTestCase, test.APICoverage):
cover_api = virtapi.VirtAPI
def setUp(self):
super(VirtAPIBaseTest, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.set_up_virtapi()
def set_up_virtapi(self):
self.virtapi = virtapi.VirtAPI()
def assertExpected(self, method, *args, **kwargs):
self.assertRaises(NotImplementedError,
getattr(self.virtapi, method), self.context,
*args, **kwargs)
def test_provider_fw_rule_get_all(self):
self.assertExpected('provider_fw_rule_get_all')
def test_agent_build_get_by_triple(self):
self.assertExpected('agent_build_get_by_triple',
'fake-hv', 'gnu/hurd', 'fake-arch')
def test_wait_for_instance_event(self):
self.assertExpected('wait_for_instance_event',
'instance', ['event'])
class FakeVirtAPITest(VirtAPIBaseTest):
cover_api = fake.FakeVirtAPI
def set_up_virtapi(self):
self.virtapi = fake.FakeVirtAPI()
def assertExpected(self, method, *args, **kwargs):
if method == 'wait_for_instance_event':
run = False
with self.virtapi.wait_for_instance_event(*args, **kwargs):
run = True
self.assertTrue(run)
return
self.mox.StubOutWithMock(db, method)
if method in ('aggregate_metadata_add', 'aggregate_metadata_delete',
'security_group_rule_get_by_security_group'):
# NOTE(danms): FakeVirtAPI will convert the first argument to
# argument['id'], so expect that in the actual db call
e_args = tuple([args[0]['id']] + list(args[1:]))
elif method == 'security_group_get_by_instance':
e_args = tuple([args[0]['uuid']] + list(args[1:]))
else:
e_args = args
getattr(db, method)(self.context, *e_args, **kwargs).AndReturn(
'it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
class FakeCompute(object):
def __init__(self):
self.conductor_api = mox.MockAnything()
self.db = mox.MockAnything()
self._events = []
self.instance_events = mock.MagicMock()
self.instance_events.prepare_for_instance_event.side_effect = \
self._prepare_for_instance_event
def _event_waiter(self):
event = mock.MagicMock()
event.status = 'completed'
return event
def _prepare_for_instance_event(self, instance, event_name):
m = mock.MagicMock()
m.instance = instance
m.event_name = event_name
m.wait.side_effect = self._event_waiter
self._events.append(m)
return m
class ComputeVirtAPITest(VirtAPIBaseTest):
cover_api = compute_manager.ComputeVirtAPI
def set_up_virtapi(self):
self.compute = FakeCompute()
self.virtapi = compute_manager.ComputeVirtAPI(self.compute)
def assertExpected(self, method, *args, **kwargs):
self.mox.StubOutWithMock(self.compute.conductor_api, method)
getattr(self.compute.conductor_api, method)(
self.context, *args, **kwargs).AndReturn('it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
def test_wait_for_instance_event(self):
and_i_ran = ''
event_1_tag = external_event_obj.InstanceExternalEvent.make_key(
'event1')
event_2_tag = external_event_obj.InstanceExternalEvent.make_key(
'event2', 'tag')
events = {
'event1': event_1_tag,
('event2', 'tag'): event_2_tag,
}
with self.virtapi.wait_for_instance_event('instance', events.keys()):
and_i_ran = 'I ran so far a-waa-y'
self.assertEqual('I ran so far a-waa-y', and_i_ran)
self.assertEqual(2, len(self.compute._events))
for event in self.compute._events:
self.assertEqual('instance', event.instance)
self.assertIn(event.event_name, events.values())
event.wait.assert_called_once_with()
def test_wait_for_instance_event_failed(self):
def _failer():
event = mock.MagicMock()
event.status = 'failed'
return event
@mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
def do_test():
with self.virtapi.wait_for_instance_event('instance', ['foo']):
pass
self.assertRaises(exception.NovaException, do_test)
def test_wait_for_instance_event_failed_callback(self):
def _failer():
event = mock.MagicMock()
event.status = 'failed'
return event
@mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
def do_test():
callback = mock.MagicMock()
with self.virtapi.wait_for_instance_event('instance', ['foo'],
error_callback=callback):
pass
callback.assert_called_with('foo', 'instance')
do_test()
def test_wait_for_instance_event_timeout(self):
class TestException(Exception):
pass
def _failer():
raise TestException()
@mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
@mock.patch('eventlet.timeout.Timeout')
def do_test(timeout):
with self.virtapi.wait_for_instance_event('instance', ['foo']):
pass
self.assertRaises(TestException, do_test)
| tanglei528/nova | nova/tests/compute/test_virtapi.py | Python | apache-2.0 | 6,743 |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import os
import argparse
import matplotlib.pyplot as plt
from molmod.constants import boltzmann
from molmod.io.xyz import XYZFile
from molmod.ic import bend_angle
from molmod.ic import bond_length
from scipy.optimize import curve_fit
import json
def main(file_name, parameters, start_step, end_step, temp):
"""
Loads molecular geometries and generates files with the time evolution
of the given angles and bonds. Writes force parameters for a
harmonic oscillator that reproduces the behavior of each bond/angle,
to use as input for biased simulations.
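The parameters file is expected to contain a JSON list of atom groups
with 1-based indices, e.g. [[1, 2], [10, 11, 12]] for one bond and
one angle.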
"""
# Timestep in fs
timestep = 0.5
# Create output directory
out_dir = "output_txt/"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Create trajectory object and store the geometries in numpy array
xyz_file = XYZFile(file_name)
geometries = xyz_file.geometries[start_step:end_step]
# Read atom list from input file (input is 1-based indexing)
with open(parameters, 'r') as f:
atoms_input = json.load(f)
atoms = [np.array(a) - 1 for a in atoms_input]
# Calculate bonds and angles
time = (np.arange(geometries.shape[0]) + 1) * timestep
bonds_angles = [get_bonds_angles(geometries, i) for i in atoms]
labels = [convert_label(i) for i in atoms_input]
# Compute histograms and saves results
for i, qty in enumerate(bonds_angles):
all_distr, coefficients = generate_histogram(qty, temp)
np.savetxt("{}{}-hist.dat".format(out_dir, labels[i]), all_distr)
np.savetxt("{}{}-time.dat".format(out_dir,
labels[i]), np.stack((time, qty)).transpose())
np.savetxt("{}{}-coeff.dat".format(out_dir,
labels[i]),
coefficients,
fmt='%1.3f',
header='x0, sigma, k, R2')
plot_all(all_distr, qty, coefficients, atoms_input[i], time)
# Store in a pandas dataframe for further analysis (to do)
all_data = pd.DataFrame(
data=np.stack(bonds_angles).transpose(),
index=time,
columns=labels)
all_data.to_csv("{}all_data.dat".format(out_dir), sep='\t')
def generate_histogram(colvar, temp):
"""
Calculates a histogram from a quantity evolution during the simulation
"""
# Define the histogram and shift the bins to have data on the centres
hist, bin_edges = np.histogram(colvar, bins=50, density=True)
bin_centres = (bin_edges[:-1] + bin_edges[1:]) / 2
histogram = np.stack((bin_centres, hist))
gaussian_distr, oscillator_distr, coefficients = fit_distribution(
histogram, temp)
all_distr = np.stack(
(bin_centres,
hist,
gaussian_distr,
oscillator_distr),
axis=1)
return all_distr, coefficients
def fit_distribution(data, temp, p0=[2, 0.1]):
"""
    Takes a histogram and bins of the same shape and returns a fitted gaussian
    distribution and the force constant of the corresponding harmonic oscillator
"""
bins, hist = data[0], data[1]
coeff, var_matrix = curve_fit(gaussian_distribution, bins, hist, p0=p0)
# Fit a gaussian distribution to the bond/angle distribution
# We do not fit directly the oscillator distribution because sometimes the
# fit fails
gauss_fit = gaussian_distribution(bins, *coeff)
    # Obtain the force constant for the oscillator that generates this
    # distribution and append it to the coefficient list.
    # sigma = sqrt(kb * T / k)  =>  k = kb * T / sigma**2
kb = boltzmann # kb in hartree
k = (temp * kb) / (coeff[1]**2)
# Calculate R^2
residuals = hist - gauss_fit
ss_res = np.sum(residuals**2)
ss_tot = np.sum((hist - np.mean(hist))**2)
r_squared = 1 - (ss_res / ss_tot)
all_coefficients = np.append(coeff, [k, r_squared])
# Check if k replicates the real distribution
coeff_distr = k, coeff[0], temp
osc_distr = oscillator_distribution(bins, *coeff_distr)
return gauss_fit, osc_distr, all_coefficients
def gaussian_distribution(x, *p):
"""
Defines a gaussian function to fit data. A = 1 / np.sqrt(2 * np.pi * sigma**2)
"""
mu, sigma = p
return (1 / (np.sqrt(2 * np.pi) * sigma)) * \
np.exp(-(x - mu)**2 / (2. * sigma**2))
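# Quick sanity check for gaussian_distribution (illustrative values): the
# returned density should integrate to ~1 over a sufficiently wide grid.
#   >>> x = np.linspace(-5, 5, 1001)
#   >>> np.trapz(gaussian_distribution(x, 0.0, 1.0), x)  # ~1.0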
def oscillator_distribution(x, *p):
"""
Define the probability distribution function at given k and T for testing purposes.
This distribution should be the same as the gaussian one.
"""
k, x0, temp = p
kb = boltzmann
return(1 / (np.sqrt(2 * np.pi * kb * temp / k)) *
np.exp(-(k / (2 * kb * temp)) * (x - x0)**2))
def get_bonds_angles(geometries, atoms):
"""
    This function takes an array with the geometries and a list with a group of atoms,
    and returns the bond or angle evolution during the simulation
"""
number_of_steps, number_of_atoms, _ = geometries.shape
colvar = np.empty(number_of_steps)
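    # Note: only pairs (bonds) and triples (angles) are handled below; for any
    # other group size the np.empty buffer is returned uninitialized.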
if len(atoms) == 2:
for frame in range(number_of_steps):
colvar[frame] = bond_length(geometries[frame, atoms])[0]
elif len(atoms) == 3:
for frame in range(number_of_steps):
colvar[frame] = bend_angle(geometries[frame, atoms])[0]
return colvar
def convert_label(colvar):
"""
    Generates a label from the list of atoms, e.g. [70, 170] --> bond_70_170
"""
if len(colvar) == 3:
label = "angle_"
else:
label = "bond_"
lab = '_'.join(str(x) for x in colvar)
return label + lab
def plot_all(all_distr, qty, coefficients, atoms, time):
"""
Plots the time evolution and the distribution + fit
"""
plt.style.use('default')
fig, (p1, p2) = plt.subplots(1, 2, figsize=(
10, 2.5), gridspec_kw={'width_ratios': [3, 1]})
# Define names for the axes and plots depending on the atoms
unit = 'Angle (rad)' if len(atoms) == 3 else 'Bond (a.u.)'
namefile = convert_label(atoms)
name = ' '.join(namefile.split('_'))
# Plot with the time evolution of the bond/length
p1.set_xlabel('Time (ps)')
p1.set_ylabel(unit)
p1.set_title('Time evolution of {}'.format(name))
p1.plot(time * 0.001, qty)
# Plot with the distribution + fit
p2.set_xlabel('Distribution')
p2.axes.get_yaxis().set_visible(False)
p2.plot(all_distr[:, 1], all_distr[:, 0])
p2.plot(all_distr[:, 2], all_distr[:, 0])
# Annotate the values for the distribution
textstr = '\n'.join((
r'$\mu=%.2f$' % (coefficients[0]),
r'$\sigma=%.2f$' % (coefficients[1]),
r'$k=%.2f$' % (coefficients[2])))
p2.text(0.7, 0.95, textstr, transform=p2.transAxes, fontsize=10,
verticalalignment='top', bbox=dict(color='orange', alpha=0.7))
plt.tight_layout()
plt.subplots_adjust(wspace=0.02)
plt.savefig("{}.png".format(namefile))
if __name__ == "__main__":
msg = "angle_bond -i <path/to/trajectory> -p <parameter file> -st <start frame> -et <end frame> -t <temperature>"
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('-i', required=True, help='path to the xyz trajectory')
parser.add_argument(
'-p',
required=False,
default='atoms.txt',
help='path to the parameters file')
parser.add_argument(
'-st',
required=False,
default=0,
type=int,
help='starting time of the simulation (default=0)')
    parser.add_argument('-et', required=False, default=-1,
                        type=int, help='ending time of the simulation')
    parser.add_argument('-t', required=False, default=298, type=float, help='temperature')
args = parser.parse_args()
main(args.i, args.p, args.st, args.et, args.t)
| ccaratelli/insertion_deletion | angle_bond.py | Python | gpl-3.0 | 7,768 |
import datetime
from django.forms.utils import flatatt, pretty_name
from django.forms.widgets import Textarea, TextInput
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
__all__ = ('BoundField',)
@html_safe
class BoundField:
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __str__(self):
"""Render this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
@cached_property
def subwidgets(self):
"""
Most widgets yield a single subwidget, but others like RadioSelect and
CheckboxSelectMultiple produce one subwidget for each choice.
This property is cached so that only one database query occurs when
rendering ModelChoiceFields.
"""
id_ = self.field.widget.attrs.get('id') or self.auto_id
attrs = {'id': id_} if id_ else {}
attrs = self.build_widget_attrs(attrs)
return [
BoundWidget(self.field.widget, widget, self.form.renderer)
for widget in self.field.widget.subwidgets(self.html_name, self.value(), attrs=attrs)
]
def __bool__(self):
# BoundField evaluates to True even if it doesn't have subwidgets.
return True
def __iter__(self):
return iter(self.subwidgets)
def __len__(self):
return len(self.subwidgets)
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, (int, slice)):
raise TypeError(
'BoundField indices must be integers or slices, not %s.'
% type(idx).__name__
)
return self.subwidgets[idx]
@property
def errors(self):
"""
Return an ErrorList (empty if there are no errors) for this field.
"""
return self.form.errors.get(self.name, self.form.error_class())
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Render the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If a widget isn't specified, use the
field's default widget.
"""
widget = widget or self.field.widget
if self.field.localize:
widget.is_localized = True
attrs = attrs or {}
attrs = self.build_widget_attrs(attrs, widget)
if self.auto_id and 'id' not in widget.attrs:
attrs.setdefault('id', self.html_initial_id if only_initial else self.auto_id)
return widget.render(
name=self.html_initial_name if only_initial else self.html_name,
value=self.value(),
attrs=attrs,
renderer=self.form.renderer,
)
def as_text(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"""Return a string of HTML for representing this as a <textarea>."""
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Return the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
def value(self):
"""
Return the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
data = self.initial
if self.form.is_bound:
data = self.field.bound_data(self.data, data)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wrap the given contents in a <label>, if the field has an ID attribute.
contents should be mark_safe'd to avoid HTML escaping. If contents
aren't given, use the field's HTML-escaped label.
If attrs are given, use them as HTML attributes on the <label> tag.
label_suffix overrides the form's label_suffix.
"""
contents = contents or self.label
if label_suffix is None:
label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
else self.form.label_suffix)
# Only add the suffix if the label does not end in punctuation.
# Translators: If found as last label character, these punctuation
# characters will prevent the default label_suffix to be appended to the label
if label_suffix and contents and contents[-1] not in _(':?.!'):
contents = format_html('{}{}', contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = {**(attrs or {}), 'for': id_for_label}
if self.field.required and hasattr(self.form, 'required_css_class'):
attrs = attrs or {}
if 'class' in attrs:
attrs['class'] += ' ' + self.form.required_css_class
else:
attrs['class'] = self.form.required_css_class
attrs = flatatt(attrs) if attrs else ''
contents = format_html('<label{}>{}</label>', attrs, contents)
else:
contents = conditional_escape(contents)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Return a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"""Return True if this BoundField's widget is hidden."""
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculate and return the ID attribute for this BoundField, if the
associated Form has specified auto_id. Return an empty string otherwise.
"""
auto_id = self.form.auto_id # Boolean or string
if auto_id and '%s' in str(auto_id):
return auto_id % self.html_name
elif auto_id:
return self.html_name
return ''
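    # For example (illustrative): with the default form auto_id of 'id_%s' and
    # an html_name of 'subject', this property returns 'id_subject'.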
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
@cached_property
def initial(self):
data = self.form.get_initial_for_field(self.field, self.name)
# If this is an auto-generated default date, nix the microseconds for
# standardized handling. See #22502.
if (isinstance(data, (datetime.datetime, datetime.time)) and
not self.field.widget.supports_microseconds):
data = data.replace(microsecond=0)
return data
def build_widget_attrs(self, attrs, widget=None):
widget = widget or self.field.widget
attrs = dict(attrs) # Copy attrs to avoid modifying the argument.
if widget.use_required_attribute(self.initial) and self.field.required and self.form.use_required_attribute:
attrs['required'] = True
if self.field.disabled:
attrs['disabled'] = True
return attrs
@html_safe
class BoundWidget:
"""
A container class used for iterating over widgets. This is useful for
widgets that have choices. For example, the following can be used in a
template:
{% for radio in myform.beatles %}
<label for="{{ radio.id_for_label }}">
{{ radio.choice_label }}
<span class="radio">{{ radio.tag }}</span>
</label>
{% endfor %}
"""
def __init__(self, parent_widget, data, renderer):
self.parent_widget = parent_widget
self.data = data
self.renderer = renderer
def __str__(self):
return self.tag(wrap_label=True)
def tag(self, wrap_label=False):
context = {'widget': {**self.data, 'wrap_label': wrap_label}}
return self.parent_widget._render(self.template_name, context, self.renderer)
@property
def template_name(self):
if 'template_name' in self.data:
return self.data['template_name']
return self.parent_widget.template_name
@property
def id_for_label(self):
return 'id_%s_%s' % (self.data['name'], self.data['index'])
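    # For example (illustrative): a widget named 'beatles' at index 0 yields
    # 'id_beatles_0'.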
@property
def choice_label(self):
return self.data['label']
| georgemarshall/django | django/forms/boundfield.py | Python | bsd-3-clause | 10,103 |
# -*- coding: utf-8 -*-
# import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time
from selenium.webdriver.support.ui import Select
"""
Usage:
python sele_chrome.py
"""
#Config Sele_Chrome at DAIWA
"""
Do not change this config.
If you use this code for NIKKO, please copy this file.
"""
url = "https://lzone.daiwa.co.jp/lzone/"
username = "shinichiro.ueno@gci.jp"
password = "gcigci"
ticker = 3382
period_from = "2011/04/01"
period_to = "2017/04/28"
file_type = 2
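# NOTE: period_to and file_type are currently unused below; the end date is
# left at the site default and the document type is hard-coded to "2".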
#Set WebDriver Chrome
driver = webdriver.Chrome(executable_path = '/home/gci/Downloads/chromedriver') #PATH
#wait = WebDriverWait(driver,10) #Prototype
#Run
driver.get(url)
print driver.current_url
time.sleep(5)
driver.find_element_by_id('ticker').send_keys(Keys.ENTER)
driver.find_element_by_id('input-text').send_keys(ticker)
driver.find_element_by_id('input-btn-se').send_keys(Keys.ENTER)
time.sleep(3)
driver.save_screenshot('DAIWA_test1.png')
print driver.current_url
driver.save_screenshot('DAIWA_test2.png')
driver.find_element_by_css_selector('input[name="memberId"]').send_keys(username)
driver.find_element_by_css_selector('input[name="passWord"]').send_keys(password)
driver.save_screenshot('DAIWA_test3.png')
driver.find_element_by_id('image-btn_ok').send_keys(Keys.ENTER)
driver.save_screenshot('DAIWA_test4.png')
time.sleep(3)
driver.find_element_by_id('ticker').send_keys(Keys.ENTER)
driver.find_element_by_id('input-text').send_keys(ticker)
driver.find_element_by_id('input-btn-ad').send_keys(Keys.ENTER)
driver.save_screenshot('DAIWA_test5.png')
time.sleep(3)
driver.save_screenshot('DAIWA_test6.png')
elements = driver.find_elements_by_css_selector("input[type ='radio'][value='equity']")
for element in elements:
element.click()
driver.find_element_by_name('model.tickerCd').send_keys(ticker)
#driver.find_element_by_id('model_periodFrom').send_keys(Keys.CONTROL,"a")
driver.find_element_by_id('date1').send_keys(10*Keys.BACKSPACE)
driver.find_element_by_id('date1').send_keys(period_from)
select = Select(driver.find_element_by_name('model.docType'))
select.select_by_value("2")
time.sleep(2)
#all_options = element.find_elements_by_tag_name("option")
#all_options[1].click()
driver.find_element_by_id('image-btn_search').send_keys(Keys.ENTER)
time.sleep(10)
driver.save_screenshot('DAIWA_test7.png')
driver.close()
| oniwan/GCI | sele_chrome.py | Python | mit | 2,504 |
from django.test import TestCase
from geotrek.infrastructure.factories import InfrastructureFactory, SignageFactory
from geotrek.maintenance.factories import InterventionFactory, ProjectFactory
from geotrek.core.factories import TopologyFactory, PathAggregationFactory
from geotrek.land.factories import (SignageManagementEdgeFactory, WorkManagementEdgeFactory,
CompetenceEdgeFactory)
from geotrek.zoning.factories import (CityEdgeFactory, DistrictEdgeFactory,
RestrictedAreaEdgeFactory)
class ProjectTest(TestCase):
def test_helpers(self):
i1 = InterventionFactory.create()
i2 = InterventionFactory.create()
i3 = InterventionFactory.create()
sign = SignageFactory.create()
i1.set_infrastructure(sign)
p1 = sign.paths.get()
infra = InfrastructureFactory.create()
i2.set_infrastructure(infra)
p2 = infra.paths.get()
t = TopologyFactory.create(no_path=True)
PathAggregationFactory.create(topo_object=t, path=p1)
i3.topology = t
proj = ProjectFactory.create()
self.assertItemsEqual(proj.paths.all(), [])
self.assertEquals(proj.signages, [])
self.assertEquals(proj.infrastructures, [])
proj.interventions.add(i1)
self.assertItemsEqual(proj.paths.all(), [p1])
self.assertEquals(proj.signages, [sign])
self.assertEquals(proj.infrastructures, [])
proj.interventions.add(i2)
self.assertItemsEqual(proj.paths.all(), [p1, p2])
self.assertEquals(proj.signages, [sign])
self.assertEquals(proj.infrastructures, [infra])
proj.interventions.add(i3)
self.assertItemsEqual(proj.paths.all(), [p1, p2])
self.assertEquals(proj.signages, [sign])
self.assertEquals(proj.infrastructures, [infra])
def test_deleted_intervention(self):
i1 = InterventionFactory.create()
sign = SignageFactory.create()
i1.set_infrastructure(sign)
proj = ProjectFactory.create()
proj.interventions.add(i1)
self.assertEquals(proj.signages, [sign])
i1.delete()
self.assertEquals(proj.signages, [])
def test_deleted_infrastructure(self):
i1 = InterventionFactory.create()
infra = InfrastructureFactory.create()
i1.set_infrastructure(infra)
proj = ProjectFactory.create()
proj.interventions.add(i1)
self.assertEquals(proj.infrastructures, [infra])
infra.delete()
self.assertEquals(proj.infrastructures, [])
class ProjectLandTest(TestCase):
def setUp(self):
self.intervention = InterventionFactory.create()
self.project = ProjectFactory.create()
self.project.interventions.add(self.intervention)
self.project.interventions.add(InterventionFactory.create())
infra = InfrastructureFactory.create()
self.intervention.set_infrastructure(infra)
self.intervention.save()
path = infra.paths.get()
self.signagemgt = SignageManagementEdgeFactory.create(no_path=True)
self.signagemgt.add_path(path, start=0.3, end=0.7)
self.workmgt = WorkManagementEdgeFactory.create(no_path=True)
self.workmgt.add_path(path, start=0.3, end=0.7)
self.competencemgt = CompetenceEdgeFactory.create(no_path=True)
self.competencemgt.add_path(path, start=0.3, end=0.7)
self.cityedge = CityEdgeFactory.create(no_path=True)
self.cityedge.add_path(path, start=0.3, end=0.7)
self.districtedge = DistrictEdgeFactory.create(no_path=True)
self.districtedge.add_path(path, start=0.3, end=0.7)
self.restricted = RestrictedAreaEdgeFactory.create(no_path=True)
self.restricted.add_path(path, start=0.3, end=0.7)
def test_project_has_signage_management(self):
self.assertIn(self.signagemgt, self.intervention.signage_edges)
self.assertIn(self.signagemgt, self.project.signage_edges)
def test_project_has_work_management(self):
self.assertIn(self.workmgt, self.intervention.work_edges)
self.assertIn(self.workmgt, self.project.work_edges)
def test_project_has_competence_management(self):
self.assertIn(self.competencemgt, self.intervention.competence_edges)
self.assertIn(self.competencemgt, self.project.competence_edges)
def test_project_has_city_management(self):
self.assertIn(self.cityedge, self.intervention.city_edges)
self.assertIn(self.cityedge, self.project.city_edges)
self.assertIn(self.cityedge.city, self.project.cities)
def test_project_has_district_management(self):
self.assertIn(self.districtedge, self.intervention.district_edges)
self.assertIn(self.districtedge, self.project.district_edges)
self.assertIn(self.districtedge.district, self.project.districts)
def test_project_has_restricted_management(self):
self.assertIn(self.restricted, self.intervention.area_edges)
self.assertIn(self.restricted, self.project.area_edges)
self.assertIn(self.restricted.restricted_area, self.project.areas)
| mabhub/Geotrek | geotrek/maintenance/tests/test_project.py | Python | bsd-2-clause | 5,195 |
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# Helper modules to configure freevo using wxPython
# -----------------------------------------------------------------------
# $Id$
#
# Notes:
# Work-in-progress
# Todo:
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
import sys, os
import re
from pprint import pprint, pformat
import config
import event
from helpers import plugins
#print('config=%s' % pformat(dir(config)))
#print('our_locals=%s' % (pformat(config.our_locals),))
print('freevo_config is %r' % (os.environ['FREEVO_CONFIG'],))
print('local_conf is %r' % (config.overridefile,))
def parse_freevo_config(filename):
"""
Parse the file for variables.
"""
items = {}
try:
fd = open(filename)
except IOError, why:
print why
raise SystemExit
# create a list of non-blank lines
#lines = [i.strip() for i in fd.readlines() if i.strip() ]
# create a list of stripped lines
lines = [i.rstrip() for i in fd.readlines() ]
fd.close()
# skip change set
for i in range(len(lines)):
if lines[i] == '# ======================================================================':
i += 1
break
#print i
#print lines[i:i+1]
for j in range(len(lines[i:])):
if lines[i+j] == '# ======================================================================':
i += j + 1
break
#print j
#print i
note_pat = re.compile('^#\s*(.*)$')
tip_pat = re.compile('^([A-Z]\w+)\s*=\s*[^#]+#\s*(.*)$')
tipmore_pat = re.compile('^\s+#\s*(.*)$')
var_pat = re.compile('^\s*([A-Z]\w+)\s*=\s*[^#]+$')
comment_pat = re.compile('^\s*#\s*[=-]+$')
plugin_pat = re.compile('^\s*plugin.*$')
# now we should be at the first config line
    notes = []
    tips = []
    var = None  # most recently matched config variable; guards the tip back-fill
for j in range(len(lines[i:])):
line = lines[i+j]
if not line:
continue
#print 'line=%s' % line
comment_mat = comment_pat.match(line)
plugin_mat = plugin_pat.match(line)
if comment_mat or plugin_mat:
continue
# what do we do with if/else blocks?
note_mat = note_pat.match(line)
tip_mat = tip_pat.match(line)
tipmore_mat = tipmore_pat.match(line)
var_mat = var_pat.match(line)
#print('note_mat=%r tip_mat=%r tipmore_mat=%r var_mat=%r' % (note_mat, tip_mat, tipmore_mat, var_mat))
if note_mat:
note = note_mat.group(1)
if note:
notes.append(note)
elif tipmore_mat:
tip = tipmore_mat.group(1)
if tip:
tips.append(tip)
elif tip_mat:
if tips:
if var:
tip, note = items[var]
note = ' '.join([tip] + tips)
tip = ''
items[var] = (tip, note)
else:
print 'no var for %r' % (tips,)
var = tip_mat.group(1)
tip = tip_mat.group(2)
if tip and not notes:
notes = [tip]
tip = ''
items[var] = (tip, ' '.join(notes))
notes = []
tips = []
elif var_mat:
if tips:
if var:
tip, note = items[var]
note = ' '.join([tip] + tips)
tip = ''
items[var] = (tip, note)
else:
print 'no var for %r' % (tips,)
var = var_mat.group(1)
items[var] = ('', ' '.join(notes))
notes = []
tips = []
else:
#print('***=%s' % line)
pass
return items
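# Illustrative (hypothetical) freevo_config fragment that parse_freevo_config
# expects: leading '#' lines become a variable's note, and a trailing comment
# on the assignment line becomes its tip:
#   # Enable the on-screen display
#   OSD_ENABLED = 1  # set to 0 to turn the OSD off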
def build_config(doc_items):
config_var_pat = re.compile('^[A-Z].*$')
items = []
for var in dir(config):
if config_var_pat.match(var) is None:
continue
if var in ('LOCAL_CONF_CHANGES', 'EVENTS'):
continue
#print 'var=%r' % (var,)
if var in doc_items:
tip = doc_items[var][0]
note = doc_items[var][1]
else:
tip = ''
note = ''
if var in config.our_locals:
overridden = True
value = config.our_locals[var]
else:
overridden = False
try:
value = eval('config.%s' % var)
except AttributeError, why:
print '%r: %s' % (var, why)
value = None
if isinstance(value, event.Event):
continue
#print('%s%r=%r' % ('*** ' if overridden else ' ', var, value))
items.append((var, value, overridden, tip, note))
return items
def build_plugin_list():
return plugins.parse_plugins()
def main():
print 'building documentation from freevo_config...'
doc_items = parse_freevo_config(os.environ['FREEVO_CONFIG'])
#print('doc_items=\n%s' % pformat(doc_items))
print 'building configuration...'
cfg_items = build_config(doc_items)
#print('cfg_items=\n%s' % pformat(cfg_items))
print 'building plug-in configuration...'
plugin_items = build_plugin_list()
if __name__ == '__main__':
try:
main()
    except StandardError, why:
print why
| freevo/freevo1 | src/helpers/wxconfig.py | Python | gpl-2.0 | 6,335 |
import os
import sys
import time
import logging
import datetime
import numpy as np
from data import *
from time import clock
from parameters import *
from collections import defaultdict
spike_generators = {} # dict name_part : spikegenerator
spike_detectors = {} # dict name_part : spikedetector
multimeters = {} # dict name_part : multimeter
startsimulate = 0
endsimulate = 0
save_path = ""
txt_result_path = "" # path for txt results
all_parts = tuple() # tuple of all parts
MaxSynapses = 4000 # max synapses
SYNAPSES = 0 # synapse number
NEURONS = 0 # neurons number
times = [] # store time simulation
logging.basicConfig(format='%(name)s.%(levelname)s: %(message)s.', level=logging.DEBUG)
logger = logging.getLogger('function')
def getAllParts():
return all_parts
def generate_neurons(NNumber):
global NEURONS, all_parts
logger.debug("* * * Start generate neurons")
parts_no_dopa = PGI + BNST + Amy + Thalamus + PVN + LTD + PrH + AcbCore + Prefrontalcortex + AcbShell +\
Motorcortex + (LC[LC_Ach],LC[LC_GABA], RN[RN_a1], RN[RN_a2], VTA[VTA_a1])
parts_with_dopa = (LC[LC_NA_0], LC[LC_D1], LC[LC_D2], LC[LC_NA_1], RN[RN_5HT], Ab[Ab_NA], VTA[VTA_DA_0],\
VTA[VTA_DA_1], Aa[Aa_NA])
all_parts = tuple(sorted(parts_no_dopa + parts_with_dopa))
NN_coef = float(NNumber) / sum(item[k_NN] for item in all_parts)
for part in all_parts:
part[k_NN] = NN_minimal if int(part[k_NN] * NN_coef) < NN_minimal else int(part[k_NN] * NN_coef)
NEURONS = sum(item[k_NN] for item in all_parts)
logger.debug('Initialized: {0} neurons'.format(NEURONS))
# Init neuron models with our parameters
nest.SetDefaults('iaf_psc_exp', iaf_neuronparams)
nest.SetDefaults('iaf_psc_alpha', iaf_neuronparams)
# Parts without dopamine
for part in parts_no_dopa:
part[k_model] = 'iaf_psc_exp'
# Parts with dopamine
for part in parts_with_dopa:
part[k_model] = 'iaf_psc_alpha'
# Creating neurons
for part in all_parts:
part[k_IDs] = nest.Create(part[k_model], part[k_NN])
logger.debug("{0} [{1}, {2}] {3} neurons".format(part[k_name], part[k_IDs][0], part[k_IDs][-1:][0], part[k_NN]))
def log_connection(pre, post, syn_type, weight):
global SYNAPSES
connections = pre[k_NN] * post[k_NN] if post[k_NN] < MaxSynapses else pre[k_NN] * MaxSynapses
SYNAPSES += connections
logger.debug("{0} -> {1} ({2}) w[{3}] // "
"{4}x{5}={6} synapses".format(pre[k_name], post[k_name], syn_type[:-8], weight, pre[k_NN],
MaxSynapses if post[k_NN] > MaxSynapses else post[k_NN], connections))
def connect(pre, post, syn_type=GABA, weight_coef=1):
# Set new weight value (weight_coef * basic weight)
nest.SetDefaults(synapses[syn_type][model], {'weight': weight_coef * synapses[syn_type][basic_weight]})
# Create dictionary of connection rules
conn_dict = {'rule': 'fixed_outdegree',
'outdegree': MaxSynapses if post[k_NN] > MaxSynapses else post[k_NN],
'multapses': True}
# Connect PRE IDs neurons with POST IDs neurons, add Connection and Synapse specification
nest.Connect(pre[k_IDs], post[k_IDs], conn_spec=conn_dict, syn_spec=synapses[syn_type][model])
# Show data of new connection
log_connection(pre, post, synapses[syn_type][model], nest.GetDefaults(synapses[syn_type][model])['weight'])
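# Typical call (illustrative, using parts defined in generate_neurons):
#   connect(LC[LC_GABA], RN[RN_a1], syn_type=GABA, weight_coef=2)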
def connect_generator(part, startTime=1, stopTime=T, rate=250, coef_part=1):
name = part[k_name]
# Add to spikeGenerators dict a new generator
spike_generators[name] = nest.Create('poisson_generator', 1, {'rate' : float(rate),
'start': float(startTime),
'stop' : float(stopTime)})
# Create dictionary of connection rules
conn_dict = {'rule': 'fixed_outdegree',
'outdegree': int(part[k_NN] * coef_part)}
# Connect generator and part IDs with connection specification and synapse specification
nest.Connect(spike_generators[name], part[k_IDs], conn_spec=conn_dict, syn_spec=static_syn)
# Show data of new generator
logger.debug("Generator => {0}. Element #{1}".format(name, spike_generators[name][0]))
def connect_detector(part):
name = part[k_name]
# Init number of neurons which will be under detector watching
number = part[k_NN] if part[k_NN] < N_detect else N_detect
# Add to spikeDetectors a new detector
spike_detectors[name] = nest.Create('spike_detector', params=detector_param)
# Connect N first neurons ID of part with detector
nest.Connect(part[k_IDs][:number], spike_detectors[name])
# Show data of new detector
logger.debug("Detector => {0}. Tracing {1} neurons".format(name, number))
def connect_multimeter(part):
name = part[k_name]
multimeters[name] = nest.Create('multimeter', params=multimeter_param) # ToDo add count of multimeters
nest.Connect(multimeters[name], (part[k_IDs][:N_volt]))
logger.debug("Multimeter => {0}. On {1}".format(name, part[k_IDs][:N_volt]))
def f_name_gen(path, name):
    """Generate the full file name of an image."""
    return "{0}{1}{2}.png".format(path, name, "+dopa" if dopamine_flag else "")
def simulate():
global startsimulate, endsimulate, save_path
begin = 0
save_path = "../results/output-{0}/".format(NEURONS)
if not os.path.exists(save_path):
os.makedirs(save_path)
nest.PrintNetwork()
logger.debug('* * * Simulating')
startsimulate = datetime.datetime.now()
for t in np.arange(0, T, dt):
print "SIMULATING [{0}, {1}]".format(t, t + dt)
nest.Simulate(dt)
end = clock()
times.append("{0:10.1f} {1:8.1f} "
"{2:10.1f} {3:4.1f} {4}\n".format(begin, end - begin, end, t, datetime.datetime.now().time()))
begin = end
print "COMPLETED {0}%\n".format(t/dt)
endsimulate = datetime.datetime.now()
logger.debug('* * * Simulation completed successfully')
def get_log(startbuild, endbuild):
logger.info("Number of neurons : {}".format(NEURONS))
logger.info("Number of synapses : {}".format(SYNAPSES))
logger.info("Building time : {}".format(endbuild - startbuild))
logger.info("Simulation time : {}".format(endsimulate - startsimulate))
logger.info("Dopamine : {}".format('YES' if dopamine_flag else 'NO'))
logger.info("Noise : {}".format('YES' if generator_flag else 'NO'))
def save(GUI):
global txt_result_path
if GUI:
import pylab as pl
import nest.raster_plot
import nest.voltage_trace
logger.debug("Saving IMAGES into {0}".format(save_path))
N_events_gen = len(spike_generators)
for key in spike_detectors:
try:
nest.raster_plot.from_device(spike_detectors[key], hist=True)
pl.savefig(f_name_gen(save_path, "spikes_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print("From {0} is NOTHING".format(key))
N_events_gen -= 1
for key in multimeters:
try:
nest.voltage_trace.from_device(multimeters[key])
pl.savefig(f_name_gen(save_path, "volt_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print("From {0} is NOTHING".format(key))
print "Results {0}/{1}".format(N_events_gen, len(spike_detectors))
print "Results {0}/{1}".format(N_events_gen, len(spike_detectors))
txt_result_path = save_path + 'txt/'
logger.debug("Saving TEXT into {0}".format(txt_result_path))
if not os.path.exists(txt_result_path):
os.mkdir(txt_result_path)
for key in spike_detectors:
save_spikes(spike_detectors[key], name=key)
#for key in multimeters:
# save_voltage(multimeters[key], name=key)
with open(txt_result_path + 'timeSimulation.txt', 'w') as f:
for item in times:
f.write(item)
def save_spikes(detec, name, hist=False):
title = "Raster plot from device '%i'" % detec[0]
ev = nest.GetStatus(detec, "events")[0]
ts = ev["times"]
gids = ev["senders"]
data = defaultdict(list)
if len(ts):
with open("{0}@spikes_{1}.txt".format(txt_result_path, name), 'w') as f:
f.write("Name: {0}, Title: {1}, Hist: {2}\n".format(name, title, "True" if hist else "False"))
for num in range(0, len(ev["times"])):
data[round(ts[num], 1)].append(gids[num])
for key in sorted(data.iterkeys()):
f.write("{0:>5} : {1:>4} : {2}\n".format(key, len(data[key]), sorted(data[key])))
else:
print "Spikes in {0} is NULL".format(name)
def save_voltage(detec, name):
title = "Membrane potential"
ev = nest.GetStatus(detec, "events")[0]
with open("{0}@voltage_{1}.txt".format(txt_result_path, name), 'w') as f:
f.write("Name: {0}, Title: {1}\n".format(name, title))
print int(T / multimeter_param['interval'])
for line in range(0, int(T / multimeter_param['interval'])):
for index in range(0, N_volt):
print "{0} {1} ".format(ev["times"][line], ev["V_m"][line])
#f.write("\n")
print "\n" | vitaliykomarov/NEUCOGAR | nest/noradrenaline/scripts/func.py | Python | gpl-2.0 | 9,514 |
from itertools import dropwhile, takewhile, islice
import re
import subprocess
from thefuck.utils import replace_command, for_app
from thefuck.specific.sudo import sudo_support
@sudo_support
@for_app('docker')
def match(command):
return 'is not a docker command' in command.stderr
def get_docker_commands():
proc = subprocess.Popen('docker', stdout=subprocess.PIPE)
lines = [line.decode('utf-8') for line in proc.stdout.readlines()]
lines = dropwhile(lambda line: not line.startswith('Commands:'), lines)
lines = islice(lines, 1, None)
lines = list(takewhile(lambda line: line != '\n', lines))
return [line.strip().split(' ')[0] for line in lines]
@sudo_support
def get_new_command(command):
wrong_command = re.findall(
r"docker: '(\w+)' is not a docker command.", command.stderr)[0]
return replace_command(command, wrong_command, get_docker_commands())
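# Illustrative behaviour: a typo such as `docker pss` makes the daemon print
# "docker: 'pss' is not a docker command.", so match() fires and
# get_new_command() proposes the closest real subcommands, e.g. `docker ps`.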
| redreamality/thefuck | thefuck/rules/docker_not_command.py | Python | mit | 904 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from .base import BaseAPITestCase
from contentcuration.models import Task
class TaskAPITestCase(BaseAPITestCase):
"""
Test that the Task API endpoints work properly. Note that since various APIs may create a task,
for the unit tests we manually create the db Task object.
"""
def setUp(self):
super(TaskAPITestCase, self).setUp()
self.task_url = '/api/task'
self.task_data = {
'status': 'STARTED',
'task_type': 'YOUTUBE_IMPORT',
'task_id': 'just_a_test',
'user': self.user.pk,
'metadata': {}
}
def create_new_task(self, type, metadata):
"""
Create a new Task object in the DB to simulate the creation of a Celery task and test the Task API.
:param type: A string with a task name constant.
:param metadata: A dictionary containing information about the task. See create_async_task docs for more details.
:return: The created Task object
"""
return Task.objects.create(task_type=type, metadata=metadata, status="STARTED", user=self.user)
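    # Illustrative call:
    #   task = self.create_new_task(type='YOUTUBE_IMPORT',
    #                               metadata={'channel': self.channel.id})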
def test_get_task(self):
"""
Ensure that GET operations using a Task ID return information about the specified task.
"""
task = self.create_new_task(type='YOUTUBE_IMPORT', metadata={'channel': self.channel.id})
url = '{}/{}'.format(self.task_url, task.id)
response = self.get(url)
self.assertEqual(response.data['status'], 'STARTED')
self.assertEqual(response.data['task_type'], 'YOUTUBE_IMPORT')
self.assertEqual(response.data['metadata'], {'channel': self.channel.id})
def test_get_task_list(self):
task = self.create_new_task(type='YOUTUBE_IMPORT', metadata={'channel': self.channel.id})
url = '{}'.format(self.task_url)
response = self.get(url)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['status'], 'STARTED')
self.assertEqual(response.data[0]['task_type'], 'YOUTUBE_IMPORT')
self.assertEqual(response.data[0]['metadata'], {'channel': self.channel.id})
def test_get_empty_task_list(self):
url = '{}'.format(self.task_url)
response = self.get(url)
self.assertEqual(len(response.data), 0)
def test_cannot_create_task(self):
"""
Tasks are created when Celery operations are started. It is not possible to manually create tasks via
the API, so ensure the API does not create a task.
"""
response = self.post(self.task_url, data=self.task_data)
self.assertEqual(response.status_code, 405)
def test_cannot_update_task(self):
"""
Task state is managed by the Celery async task that created it, so make sure we cannot update the task state
via API.
"""
task = self.create_new_task(type='NONE', metadata={})
url = '{}/{}'.format(self.task_url, task.id)
response = self.put(url, data=self.task_data)
self.assertEqual(response.status_code, 405)
def test_delete_task(self):
"""
Ensure that a call to DELETE the specified task results in its deletion.
"""
task = self.create_new_task(type='YOUTUBE_IMPORT', metadata={'channel': self.channel.id})
url = '{}/{}'.format(self.task_url, task.id)
response = self.get(url)
self.assertEqual(response.status_code, 200)
response = self.delete(url)
self.assertEqual(response.status_code, 204)
response = self.get(url)
self.assertEqual(response.status_code, 404)
| DXCanas/content-curation | contentcuration/contentcuration/tests/test_task_api.py | Python | mit | 3,713 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
from copy import deepcopy
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type
from unittest import mock
from urllib.parse import ParseResult, urlparse
import pytest
import yaml
from _pytest._code import ExceptionInfo
from botocore.exceptions import ClientError
from freezegun import freeze_time
from moto.core import ACCOUNT_ID
from moto.core.exceptions import AWSError
from moto.eks.exceptions import (
InvalidParameterException,
InvalidRequestException,
ResourceInUseException,
ResourceNotFoundException,
)
from moto.eks.models import (
CLUSTER_EXISTS_MSG,
CLUSTER_IN_USE_MSG,
CLUSTER_NOT_FOUND_MSG,
CLUSTER_NOT_READY_MSG,
FARGATE_PROFILE_EXISTS_MSG,
FARGATE_PROFILE_NEEDS_SELECTOR_MSG,
FARGATE_PROFILE_NOT_FOUND_MSG,
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
FARGATE_PROFILE_TOO_MANY_LABELS,
LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG,
LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG,
NODEGROUP_EXISTS_MSG,
NODEGROUP_NOT_FOUND_MSG,
)
from airflow.providers.amazon.aws.hooks.eks import EKSHook
from ..utils.eks_test_constants import (
DEFAULT_CONN_ID,
DEFAULT_NAMESPACE,
DISK_SIZE,
FROZEN_TIME,
INSTANCE_TYPES,
LAUNCH_TEMPLATE,
MAX_FARGATE_LABELS,
NON_EXISTING_CLUSTER_NAME,
NON_EXISTING_FARGATE_PROFILE_NAME,
NON_EXISTING_NODEGROUP_NAME,
PACKAGE_NOT_PRESENT_MSG,
PARTITION,
POD_EXECUTION_ROLE_ARN,
REGION,
REMOTE_ACCESS,
BatchCountSize,
ClusterAttributes,
ClusterInputs,
ErrorAttributes,
FargateProfileAttributes,
FargateProfileInputs,
NodegroupAttributes,
NodegroupInputs,
PossibleTestResults,
RegExTemplates,
ResponseAttributes,
)
from ..utils.eks_test_utils import (
attributes_to_test,
generate_clusters,
generate_dict,
generate_fargate_profiles,
generate_nodegroups,
iso_date,
region_matches_partition,
)
try:
from moto import mock_eks
except ImportError:
mock_eks = None
@pytest.fixture(scope="function")
def cluster_builder():
"""A fixture to generate a batch of EKS Clusters on the mocked backend for testing."""
class ClusterTestDataFactory:
"""A Factory class for building the Cluster objects."""
def __init__(self, count: int, minimal: bool) -> None:
# Generate 'count' number of Cluster objects.
self.cluster_names: List[str] = generate_clusters(
eks_hook=eks_hook, num_clusters=count, minimal=minimal
)
self.existing_cluster_name: str = self.cluster_names[0]
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_cluster() for the first Cluster.
self.cluster_describe_output: Dict = eks_hook.describe_cluster(name=self.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
# Generate a list of the Cluster attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=ClusterInputs, cluster_name=self.existing_cluster_name
)
def _execute(
count: Optional[int] = 1, minimal: Optional[bool] = True
) -> Tuple[EKSHook, ClusterTestDataFactory]:
return eks_hook, ClusterTestDataFactory(count=count, minimal=minimal)
mock_eks().start()
eks_hook = EKSHook(
aws_conn_id=DEFAULT_CONN_ID,
region_name=REGION,
)
yield _execute
mock_eks().stop()
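# Illustrative use of the fixture inside a test:
#   eks_hook, test_data = cluster_builder(count=2, minimal=False)
#   assert test_data.existing_cluster_name in eks_hook.list_clusters()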
@pytest.fixture(scope="function")
def fargate_profile_builder(cluster_builder):
"""A fixture to generate a batch of EKS Fargate profiles on the mocked backend for testing."""
class FargateProfileTestDataFactory:
"""A Factory class for building the Fargate profile objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name = cluster.existing_cluster_name
# Generate 'count' number of FargateProfile objects.
self.fargate_profile_names = generate_fargate_profiles(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_profiles=count,
minimal=minimal,
)
# Get the name of the first generated profile.
self.existing_fargate_profile_name: str = self.fargate_profile_names[0]
self.nonexistent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_fargate_profiles() for the first profile.
self.fargate_describe_output: Dict = eks_hook.describe_fargate_profile(
clusterName=self.cluster_name, fargateProfileName=self.existing_fargate_profile_name
)[ResponseAttributes.FARGATE_PROFILE]
# Generate a list of the Fargate Profile attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=FargateProfileInputs,
cluster_name=self.cluster_name,
fargate_profile_name=self.existing_fargate_profile_name,
)
def _execute(
count: Optional[int] = 1, minimal: Optional[bool] = True
) -> Tuple[EKSHook, FargateProfileTestDataFactory]:
return eks_hook, FargateProfileTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
@pytest.fixture(scope="function")
def nodegroup_builder(cluster_builder):
"""A fixture to generate a batch of EKS Managed Nodegroups on the mocked backend for testing."""
class NodegroupTestDataFactory:
"""A Factory class for building the Nodegroup objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name: str = cluster.existing_cluster_name
# Generate 'count' number of Nodegroup objects.
self.nodegroup_names: List[str] = generate_nodegroups(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_nodegroups=count,
minimal=minimal,
)
# Get the name of the first generated Nodegroup.
self.existing_nodegroup_name: str = self.nodegroup_names[0]
self.nonexistent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_nodegroup() for the first Nodegroup.
self.nodegroup_describe_output: Dict = eks_hook.describe_nodegroup(
clusterName=self.cluster_name, nodegroupName=self.existing_nodegroup_name
)[ResponseAttributes.NODEGROUP]
# Generate a list of the Nodegroup attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=NodegroupInputs,
cluster_name=self.cluster_name,
nodegroup_name=self.existing_nodegroup_name,
)
def _execute(
count: Optional[int] = 1, minimal: Optional[bool] = True
) -> Tuple[EKSHook, NodegroupTestDataFactory]:
return eks_hook, NodegroupTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
@pytest.mark.skipif(mock_eks is None, reason=PACKAGE_NOT_PRESENT_MSG)
class TestEKSHooks:
def test_hook(self, cluster_builder) -> None:
eks_hook, _ = cluster_builder()
assert eks_hook.get_conn() is not None
assert eks_hook.aws_conn_id == DEFAULT_CONN_ID
assert eks_hook.region_name == REGION
###
# This specific test does not use the fixture since
# it is intended to verify that there are no clusters
# in the list at initialization, which means the mock
# decorator must be used manually in this one case.
###
@mock_eks
def test_list_clusters_returns_empty_by_default(self) -> None:
eks_hook: EKSHook = EKSHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
result: List = eks_hook.list_clusters()
assert isinstance(result, list)
assert len(result) == 0
def test_list_clusters_returns_sorted_cluster_names(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.cluster_names)
result: List = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_clusters_returns_all_results(
self, cluster_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.cluster_names)
result: List = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result)
def test_create_cluster_throws_exception_when_cluster_exists(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_EXISTS_MSG.format(
clusterName=generated_test_data.existing_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_cluster(
name=generated_test_data.existing_cluster_name, **dict(ClusterInputs.REQUIRED)
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new cluster was created.
len_after_test: int = len(eks_hook.list_clusters())
assert len_after_test == initial_batch_size
def test_create_cluster_generates_valid_cluster_arn(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_names,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=generated_test_data.cluster_describe_output[ClusterAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_cluster_generates_valid_cluster_created_timestamp(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_time: str = generated_test_data.cluster_describe_output[ClusterAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_cluster_generates_valid_cluster_endpoint(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_endpoint: str = generated_test_data.cluster_describe_output[ClusterAttributes.ENDPOINT]
assert_is_valid_uri(result_endpoint)
def test_create_cluster_generates_valid_oidc_identity(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_issuer: str = generated_test_data.cluster_describe_output[ClusterAttributes.IDENTITY][
ClusterAttributes.OIDC
][ClusterAttributes.ISSUER]
assert_is_valid_uri(result_issuer)
def test_create_cluster_saves_provided_parameters(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.cluster_describe_output[key] == expected_value
def test_describe_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_returns_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_cluster_removes_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)
result_cluster_list: List = eks_hook.list_clusters()
assert len(result_cluster_list) == (initial_batch_size - 1)
assert generated_test_data.existing_cluster_name not in result_cluster_list
def test_delete_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify nothing was deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == initial_batch_size
def test_list_nodegroups_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_nodegroups_returns_sorted_nodegroup_names(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.nodegroup_names)
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_nodegroups_returns_all_results(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.nodegroup_names)
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_nodegroup_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EKSHook = EKSHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=non_existent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=non_existent_cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_nodegroup_throws_exception_when_nodegroup_already_exists(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = NODEGROUP_EXISTS_MSG.format(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
**dict(NodegroupInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_throws_exception_when_cluster_not_active(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_generates_valid_nodegroup_arn(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.nodegroup_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=generated_test_data.nodegroup_describe_output[NodegroupAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_created_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: str = generated_test_data.nodegroup_describe_output[NodegroupAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_modified_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: str = generated_test_data.nodegroup_describe_output[NodegroupAttributes.MODIFIED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_nodegroup_generates_valid_autoscaling_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_asg_name: str = result_resources[NodegroupAttributes.AUTOSCALING_GROUPS][0][
NodegroupAttributes.NAME
]
assert RegExTemplates.NODEGROUP_ASG_NAME_PATTERN.match(result_asg_name)
def test_create_nodegroup_generates_valid_security_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_security_group: str = result_resources[NodegroupAttributes.REMOTE_ACCESS_SG]
assert RegExTemplates.NODEGROUP_SECURITY_GROUP_NAME_PATTERN.match(result_security_group)
def test_create_nodegroup_saves_provided_parameters(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.nodegroup_describe_output[key] == expected_value
def test_describe_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_nodegroup_throws_exception_when_nodegroup_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_throws_exception_when_nodegroups_exist(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_IN_USE_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no clusters were deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == BatchCountSize.SINGLE
def test_delete_nodegroup_removes_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
result_nodegroup_list: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert len(result_nodegroup_list) == (initial_batch_size - 1)
assert generated_test_data.existing_nodegroup_name not in result_nodegroup_list
def test_delete_nodegroup_returns_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)[ResponseAttributes.NODEGROUP]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_nodegroup_throws_exception_when_nodegroup_not_found(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test: int = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
# If launch_template is specified, you can not specify instanceTypes, diskSize, or remoteAccess.
test_cases = [
# Happy Paths
(LAUNCH_TEMPLATE, None, None, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, None, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, None, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, None, None, PossibleTestResults.SUCCESS),
# Unhappy Paths
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
]
@pytest.mark.parametrize(
"launch_template, instance_types, disk_size, remote_access, expected_result",
test_cases,
)
def test_create_nodegroup_handles_launch_template_combinations(
self,
cluster_builder,
launch_template,
instance_types,
disk_size,
remote_access,
expected_result,
):
eks_hook, generated_test_data = cluster_builder()
nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = InvalidParameterException
expected_message: str = ""
test_inputs = dict(
deepcopy(
# Required Constants
NodegroupInputs.REQUIRED
# Required Variables
+ [
(
ClusterAttributes.CLUSTER_NAME,
generated_test_data.existing_cluster_name,
),
(NodegroupAttributes.NODEGROUP_NAME, nodegroup_name),
]
# Test Case Values
+ [_ for _ in [launch_template, instance_types, disk_size, remote_access] if _]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: Dict = eks_hook.create_nodegroup(**test_inputs)[ResponseAttributes.NODEGROUP]
for key, expected_value in test_inputs.items():
assert result[key] == expected_value
else:
if launch_template and disk_size:
expected_message = LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG
elif launch_template and remote_access:
expected_message = LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG
# Docs say this combination throws an exception but testing shows that
# instanceTypes overrides the launchTemplate instance values instead.
# Leaving here for easier correction if/when that gets fixed.
elif launch_template and instance_types:
pass
if expected_message:
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
def test_list_fargate_profiles_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_fargate_profiles_returns_sorted_profile_names(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.fargate_profile_names)
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_fargate_profiles_returns_all_results(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.fargate_profile_names)
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_fargate_profile_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EKSHook = EKSHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(clusterName=non_existent_cluster_name)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=non_existent_cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_fargate_profile_throws_exception_when_fargate_profile_already_exists(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = FARGATE_PROFILE_EXISTS_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_throws_exception_when_cluster_not_active(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED),
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_generates_valid_profile_arn(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.fargate_profile_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.FARGATE_PROFILE_ARN,
arn_under_test=generated_test_data.fargate_describe_output[FargateProfileAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_fargate_profile_generates_valid_created_timestamp(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
result_time: str = generated_test_data.fargate_describe_output[FargateProfileAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_fargate_profile_saves_provided_parameters(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.fargate_describe_output[key] == expected_value
def test_describe_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_fargate_profile_throws_exception_when_profile_not_found(
self, fargate_profile_builder
) -> None:
client, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
client.describe_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_removes_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(initial_batch_size)
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
result_fargate_profile_list: List = eks_hook.list_fargate_profiles(
clusterName=generated_test_data.cluster_name
)
assert len(result_fargate_profile_list) == (initial_batch_size - 1)
assert generated_test_data.existing_fargate_profile_name not in result_fargate_profile_list
def test_delete_fargate_profile_returns_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_throws_exception_when_fargate_profile_not_found(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
# The following Selector test cases have all been verified against the AWS API using cURL.
selector_formatting_test_cases = [
# Format is ([Selector(s), expected_message, expected_result])
# Happy Paths
# Selector with a Namespace and no Labels
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and an empty collection of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 0),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and one valid Label
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 1),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and the maximum number of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Two valid Selectors
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{FargateProfileAttributes.NAMESPACE: f'{DEFAULT_NAMESPACE}_2'},
],
None,
PossibleTestResults.SUCCESS,
),
# Unhappy Cases
# No Selectors provided
([], FARGATE_PROFILE_NEEDS_SELECTOR_MSG, PossibleTestResults.FAILURE),
# Empty Selector / Selector without a Namespace or Labels
([{}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Selector with labels but no Namespace
(
[{FargateProfileAttributes.LABELS: generate_dict("label", 1)}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Selector with Namespace but too many Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
}
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
# Valid Selector followed by Empty Selector
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}, {}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Valid Selector
(
[{}, {FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Empty Selector
([{}, {}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Valid Selector followed by Selector with Namespace but too many Labels
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
},
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
]
@pytest.mark.parametrize(
"selectors, expected_message, expected_result",
selector_formatting_test_cases,
)
@mock_eks
def test_create_fargate_selectors(self, cluster_builder, selectors, expected_message, expected_result):
client, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = InvalidParameterException
test_inputs = dict(
deepcopy(
# Required Constants
[POD_EXECUTION_ROLE_ARN]
# Required Variables
+ [
(ClusterAttributes.CLUSTER_NAME, cluster_name),
(FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name),
]
# Test Case Values
+ [(FargateProfileAttributes.SELECTORS, selectors)]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: List = client.create_fargate_profile(**test_inputs)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in test_inputs.items():
assert result[key] == expected_value
else:
with pytest.raises(ClientError) as raised_exception:
client.create_fargate_profile(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
class TestEKSHook:
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn')
@pytest.mark.parametrize(
"aws_conn_id, region_name, expected_args",
[
[
'test-id',
'test-region',
[
'-m',
'airflow.providers.amazon.aws.utils.eks_get_token',
'--region-name',
'test-region',
'--aws-conn-id',
'test-id',
'--cluster-name',
'test-cluster',
],
],
[
None,
'test-region',
[
'-m',
'airflow.providers.amazon.aws.utils.eks_get_token',
'--region-name',
'test-region',
'--cluster-name',
'test-cluster',
],
],
[
None,
None,
['-m', 'airflow.providers.amazon.aws.utils.eks_get_token', '--cluster-name', 'test-cluster'],
],
],
)
def test_generate_config_file(self, mock_conn, aws_conn_id, region_name, expected_args):
mock_conn.describe_cluster.return_value = {
'cluster': {'certificateAuthority': {'data': 'test-cert'}, 'endpoint': 'test-endpoint'}
}
hook = EKSHook(aws_conn_id=aws_conn_id, region_name=region_name)
with hook.generate_config_file(
eks_cluster_name='test-cluster', pod_namespace='k8s-namespace'
) as config_file:
config = yaml.safe_load(Path(config_file).read_text())
assert config == {
'apiVersion': 'v1',
'kind': 'Config',
'clusters': [
{
'cluster': {'server': 'test-endpoint', 'certificate-authority-data': 'test-cert'},
'name': 'test-cluster',
}
],
'contexts': [
{
'context': {'cluster': 'test-cluster', 'namespace': 'k8s-namespace', 'user': 'aws'},
'name': 'aws',
}
],
'current-context': 'aws',
'preferences': {},
'users': [
{
'name': 'aws',
'user': {
'exec': {
'apiVersion': 'client.authentication.k8s.io/v1alpha1',
'args': expected_args,
'command': sys.executable,
'env': [{'name': 'AIRFLOW__LOGGING__LOGGING_LEVEL', 'value': 'fatal'}],
'interactiveMode': 'Never',
}
},
}
],
}
@mock.patch('airflow.providers.amazon.aws.hooks.eks.RequestSigner')
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn')
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.get_session')
def test_fetch_access_token_for_cluster(self, mock_get_session, mock_conn, mock_signer):
mock_signer.return_value.generate_presigned_url.return_value = 'http://example.com'
mock_get_session.return_value.region_name = 'us-east-1'
hook = EKSHook()
token = hook.fetch_access_token_for_cluster(eks_cluster_name='test-cluster')
mock_signer.assert_called_once_with(
service_id=mock_conn.meta.service_model.service_id,
region_name='us-east-1',
signing_name='sts',
signature_version='v4',
credentials=mock_get_session.return_value.get_credentials.return_value,
event_emitter=mock_get_session.return_value.events,
)
mock_signer.return_value.generate_presigned_url.assert_called_once_with(
request_dict={
'method': 'GET',
'url': 'https://sts.us-east-1.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15',
'body': {},
'headers': {'x-k8s-aws-id': 'test-cluster'},
'context': {},
},
region_name='us-east-1',
expires_in=60,
operation_name='',
)
assert token == 'k8s-aws-v1.aHR0cDovL2V4YW1wbGUuY29t'
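# The expected token above is 'k8s-aws-v1.' plus the base64url-encoded
# presigned URL with any '=' padding stripped; a quick sketch of that
# derivation using the URL mocked in this test:
#
#   import base64
#   encoded = base64.urlsafe_b64encode(b'http://example.com').decode().rstrip('=')
#   assert 'k8s-aws-v1.' + encoded == 'k8s-aws-v1.aHR0cDovL2V4YW1wbGUuY29t'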
# Helper functions for repeated assert combinations.
def assert_all_arn_values_are_valid(expected_arn_values, pattern, arn_under_test) -> None:
"""
Applies regex `pattern` to `arn_under_test` and asserts
that each group matches the provided expected value.
A list entry of None in 'expected_arn_values' asserts that the
corresponding group exists without checking it against a specific value.
"""
findall: List = pattern.findall(arn_under_test)[0]
# Walk the regex groups and the expected values from the right so that
# expected_arn_values.pop(), which consumes from the end of the list,
# stays aligned with the group currently being checked.
for value in reversed(findall):
expected_value = expected_arn_values.pop()
if expected_value:
assert value in expected_value
else:
assert value
assert region_matches_partition(findall[1], findall[0])
def assert_client_error_exception_thrown(
expected_exception: Type[AWSError], expected_msg: str, raised_exception: ExceptionInfo
) -> None:
"""
Asserts that the raised exception is of the expected type
and the resulting message matches the expected format.
"""
response = raised_exception.value.response[ErrorAttributes.ERROR]
assert response[ErrorAttributes.CODE] == expected_exception.TYPE
assert response[ErrorAttributes.MESSAGE] == expected_msg
def assert_result_matches_expected_list(
result: List, expected_result: List, expected_len: Optional[int] = None
) -> None:
assert result == expected_result
# Compare against expected_len when provided, otherwise against the
# expected list's own length.
assert len(result) == (expected_len if expected_len is not None else len(expected_result))
def assert_is_valid_uri(value: str) -> None:
result: ParseResult = urlparse(value)
assert all([result.scheme, result.netloc, result.path])
assert REGION in value
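# Illustrative example (assuming the test REGION is part of the endpoint):
#   assert_is_valid_uri('https://eks.us-east-1.amazonaws.com/clusters')
# passes because scheme, netloc and path are all non-empty and REGION
# appears in the value.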
| apache/incubator-airflow | tests/providers/amazon/aws/hooks/test_eks.py | Python | apache-2.0 | 56,294 |
"""Let's Encrypt user-supplied configuration."""
import copy
import os
from six.moves.urllib import parse # pylint: disable=import-error
import zope.interface
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
class NamespaceConfig(object):
"""Configuration wrapper around :class:`argparse.Namespace`.
For more documentation, including available attributes, please see
:class:`letsencrypt.interfaces.IConfig`. However, note that
the following attributes are dynamically resolved using
:attr:`~letsencrypt.interfaces.IConfig.work_dir` and relative
paths defined in :py:mod:`letsencrypt.constants`:
- `accounts_dir`
- `csr_dir`
- `in_progress_dir`
- `key_dir`
- `renewer_config_file`
- `temp_checkpoint_dir`
:ivar namespace: Namespace typically produced by
:meth:`argparse.ArgumentParser.parse_args`.
:type namespace: :class:`argparse.Namespace`
"""
zope.interface.implements(interfaces.IConfig)
def __init__(self, namespace):
self.namespace = namespace
self.namespace.config_dir = os.path.abspath(self.namespace.config_dir)
self.namespace.work_dir = os.path.abspath(self.namespace.work_dir)
self.namespace.logs_dir = os.path.abspath(self.namespace.logs_dir)
# Check command line parameters sanity, and error out in case of problem.
check_config_sanity(self)
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def server_path(self):
"""File path based on ``server``."""
parsed = parse.urlparse(self.namespace.server)
return (parsed.netloc + parsed.path).replace('/', os.path.sep)
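# Illustrative example (endpoint value assumed): with namespace.server set to
# 'https://acme-v01.api.letsencrypt.org/directory', server_path yields
# 'acme-v01.api.letsencrypt.org/directory' (with '/' swapped for the
# platform's os.path.sep), which accounts_dir below appends to the
# configuration directory.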
@property
def accounts_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.ACCOUNTS_DIR, self.server_path)
@property
def backup_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.work_dir, constants.BACKUP_DIR)
@property
def csr_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.CSR_DIR)
@property
def in_progress_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.work_dir, constants.IN_PROGRESS_DIR)
@property
def key_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.KEY_DIR)
@property
def temp_checkpoint_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.work_dir, constants.TEMP_CHECKPOINT_DIR)
def __deepcopy__(self, _memo):
# Work around https://bugs.python.org/issue1515 for py26 tests :( :(
# https://travis-ci.org/letsencrypt/letsencrypt/jobs/106900743#L3276
new_ns = copy.deepcopy(self.namespace)
return type(self)(new_ns)
class RenewerConfiguration(object):
"""Configuration wrapper for renewer."""
def __init__(self, namespace):
self.namespace = namespace
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def archive_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.ARCHIVE_DIR)
@property
def live_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.LIVE_DIR)
@property
def renewal_configs_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.RENEWAL_CONFIGS_DIR)
@property
def renewer_config_file(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.RENEWER_CONFIG_FILENAME)
def check_config_sanity(config):
"""Validate command line options and display error message if
requirements are not met.
:param config: IConfig instance holding user configuration
:type args: :class:`letsencrypt.interfaces.IConfig`
"""
# Port check
if config.http01_port == config.tls_sni_01_port:
raise errors.ConfigurationError(
"Trying to run http-01 and tls-sni-01 "
"on the same port ({0})".format(config.tls_sni_01_port))
# Domain checks
if config.namespace.domains is not None:
for domain in config.namespace.domains:
# This may be redundant, but let's be paranoid
le_util.enforce_domain_sanity(domain)
| TheBoegl/letsencrypt | letsencrypt/configuration.py | Python | apache-2.0 | 4,669 |
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing of
executable commands and scripts (in any language, not just Python), especially
commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd module
manages and cleans up one or more temporary workspace directories, and provides
methods for creating files and directories in those workspace directories from
in-line data (here-documents), allowing tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
test = TestCmd()
The TestCmd module provides pass_test(), fail_test(), and no_result() unbound
methods that report test results for use with the Aegis change management
system. These methods terminate the test immediately, reporting PASSED, FAILED
or NO RESULT respectively and exiting with status 0 (success), 1 (failure) or
2 (no result). This allows for a distinction between an actual failed test and a
test that could not be properly evaluated because of an external condition (such
as a full file system or incorrect permissions).
"""
# Copyright 2000 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Copyright 2002-2003 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from string import join, split
__author__ = "Steven Knight <knight@baldmt.com>"
__revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software"
__version__ = "0.02"
from types import *
import os
import os.path
import popen2
import re
import shutil
import stat
import sys
import tempfile
import traceback
tempfile.template = 'testcmd.'
_Cleanup = []
def _clean():
global _Cleanup
list = _Cleanup[:]
_Cleanup = []
list.reverse()
for test in list:
test.cleanup()
sys.exitfunc = _clean
def caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name == "?":
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self=None, condition=True, function=None, skip=0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED and exits
with a status of 1. If a condition argument is supplied, the test fails only
if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + join(self.program, " ")
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at + """
in directory: """ + os.getcwd() )
sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test and exits
with a status of 2. If a condition argument is supplied, the test fails only
if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
sys.exit(2)
def pass_test(self=None, condition=True, function=None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test and exits
with a status of 0. If a condition argument is supplied, the test passes
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines=None, matches=None):
"""Returns whether the given lists or strings containing lines separated
using newline characters contain exactly the same data.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(matches) is ListType:
matches = split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines=None, res=None):
"""Given lists or strings contain lines separated using newline characters.
This function matches those lines one by one, interpreting the lines in the
res parameter as regular expressions.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(res) is ListType:
res = split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
if not re.compile("^" + res[i] + "$").search(lines[i]):
return
return 1
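# Illustrative calls for the two matchers above; both return 1 on success and
# None on any mismatch or length difference:
#   match_exact(lines='a\nb', matches=['a', 'b']) -> 1
#   match_re(lines=['foo123'], res=['foo\d+']) -> 1
#   match_re(lines=['foo'], res=['bar.*']) -> None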
class TestCmd:
"""Class TestCmd.
"""
def __init__(self, description=None, program=None, workdir=None,
subdir=None, verbose=False, match=None, inpath=None):
self._cwd = os.getcwd()
self.description_set(description)
if inpath:
self.program = program
else:
self.program_set(program)
self.verbose_set(verbose)
if not match is None:
self.match_func = match
else:
self.match_func = match_re
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
if os.environ.has_key('PRESERVE') and os.environ['PRESERVE'] != '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
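# A minimal usage sketch (program and expected output are hypothetical):
#   test = TestCmd(program=['echo'], workdir='', inpath=1)
#   test.run(arguments='hello')
#   test.fail_test(test.stdout() != 'hello\n')
#   test.pass_test()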
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
def cleanup(self, condition=None):
"""Removes any temporary working directories for the specified TestCmd
environment. If the environment variable PRESERVE was set when the
TestCmd environment was created, temporary working directories are not
removed. If any of the environment variables PRESERVE_PASS,
PRESERVE_FAIL or PRESERVE_NO_RESULT were set when the TestCmd
environment was created, then temporary working directories are not
removed if the test passed, failed or had no result, respectively.
Temporary working directories are also preserved for conditions
specified via the preserve method.
Typically, this method is not called directly, but is used when the
script exits to clean up temporary working directories as appropriate
for the exit status.
"""
if not self._dirlist:
return
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
self.workdir = None
os.chdir(self._cwd)
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
def fail_test(self, condition=True, function=None, skip=0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_func(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def no_result(self, condition=True, function=None, skip=0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition=True, function=None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the specified
TestCmd environment to be preserved for one or more conditions. If no
conditions are specified, arranges for the temporary working directories
to be preserved for all conditions.
"""
if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and program[0] and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
self.program = program
def read(self, file, mode='rb'):
"""Reads and returns the contents of the specified file name. The file
name may be a list, in which case the elements are concatenated with the
os.path.join() method. The file is assumed to be under the temporary
working directory unless it is an absolute path name. The I/O mode for
the file may be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
return open(file, mode).read()
def run(self, program=None, arguments=None, chdir=None, stdin=None):
"""Runs a test of the program or script for the test environment.
Standard output and error output are saved for future retrieval via the
stdout() and stderr() methods.
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = self.workpath(chdir)
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
cmd = []
if program and program[0]:
if program[0] != self.program[0] and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
cmd += program
else:
cmd += self.program
if arguments:
cmd += arguments.split(" ")
if self.verbose:
sys.stderr.write(join(cmd, " ") + "\n")
try:
p = popen2.Popen3(cmd, 1)
except AttributeError:
# We end up here in case the popen2.Popen3 class is not available
# (e.g. on Windows). We will be using the os.popen3() Python API
# which takes a string parameter and so needs its executable quoted
# in case its name contains spaces.
cmd[0] = '"' + cmd[0] + '"'
command_string = join(cmd, " ")
if ( os.name == 'nt' ):
# This is a workaround for a longstanding Python bug on Windows
# when using os.popen(), os.system() and similar functions to
# execute a command containing quote characters. The bug seems
# to be related to the quote stripping functionality used by the
# Windows cmd.exe interpreter when its /S is not specified.
#
# Cleaned up quote from the cmd.exe help screen as displayed on
# Windows XP SP2:
#
# 1. If all of the following conditions are met, then quote
# characters on the command line are preserved:
#
# - no /S switch
# - exactly two quote characters
# - no special characters between the two quote
# characters, where special is one of: &<>()@^|
# - there are one or more whitespace characters between
# the two quote characters
# - the string between the two quote characters is the
# name of an executable file.
#
# 2. Otherwise, old behavior is to see if the first character
# is a quote character and if so, strip the leading
# character and remove the last quote character on the
# command line, preserving any text after the last quote
# character.
#
# This causes some commands containing quotes not to be executed
# correctly. For example:
#
# "\Long folder name\aaa.exe" --name="Jurko" --no-surname
#
# would get its outermost quotes stripped and would be executed
# as:
#
# \Long folder name\aaa.exe" --name="Jurko --no-surname
#
# which would report an error about '\Long' not being a valid
# command.
#
# cmd.exe help seems to indicate it would be enough to add an
# extra space character in front of the command to avoid this
# but this does not work, most likely due to the shell first
# stripping all leading whitespace characters from the command.
#
# Solution implemented here is to quote the whole command in
# case it contains any quote characters. Note thought this will
# not work correctly should Python ever fix this bug.
# (01.05.2008.) (Jurko)
if command_string.find('"') != -1:
command_string = '"' + command_string + '"'
(tochild, fromchild, childerr) = os.popen3(command_string)
if stdin:
if type(stdin) is ListType:
for line in stdin:
tochild.write(line)
else:
tochild.write(stdin)
tochild.close()
self._stdout.append(fromchild.read())
self._stderr.append(childerr.read())
fromchild.close()
self.status = childerr.close()
if not self.status:
self.status = 0
except:
raise
else:
if stdin:
if type(stdin) is ListType:
for line in stdin:
p.tochild.write(line)
else:
p.tochild.write(stdin)
p.tochild.close()
self._stdout.append(p.fromchild.read())
self._stderr.append(p.childerr.read())
self.status = p.wait()
if self.verbose:
sys.stdout.write(self._stdout[-1])
sys.stderr.write(self._stderr[-1])
if chdir:
os.chdir(oldcwd)
def stderr(self, run=None):
"""Returns the error output from the specified run number. If there is
no specified run number, then returns the error output of the last run.
If the run number is less than zero, then returns the error output from
that many runs back from the current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
if (run < 0):
return ''
return self._stderr[run]
def stdout(self, run=None):
"""Returns the standard output from the specified run number. If there
is no specified run number, then returns the standard output of the last
run. If the run number is less than zero, then returns the standard
output from that many runs back from the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
if (run < 0):
return ''
return self._stdout[run]
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working directory, one
for each argument. An argument may be a list, in which case the list
elements are concatenated using the os.path.join() method.
Subdirectories multiple levels deep must be created using a separate
argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if type(sub) is ListType:
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except:
pass
else:
count = count + 1
return count
def unlink (self, file):
"""Unlinks the specified file name. The file name may be a list, in
which case the elements are concatenated using the os.path.join()
method. The file is assumed to be under the temporary working directory
unless it is an absolute path name.
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def workdir_set(self, path):
"""Creates a temporary working directory with the specified path name.
If the path is a null string (''), a unique directory name is created.
"""
if os.path.isabs(path):
self.workdir = path
else:
if (path != None):
if path == '':
path = tempfile.mktemp()
if path != None:
os.mkdir(path)
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
# We'd like to set self.workdir like this:
# self.workdir = path
# But symlinks in the path will report things differently from
# os.getcwd(), so chdir there and back to fetch the canonical
# path.
cwd = os.getcwd()
os.chdir(path)
self.workdir = os.getcwd()
os.chdir(cwd)
else:
self.workdir = None
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file within the
current temporary working directory. Concatenates the temporary working
directory name with the specified arguments using the os.path.join()
method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def writable(self, top, write):
"""Make the specified directory tree writable (write == 1) or not
(write == None).
"""
def _walk_chmod(arg, dirname, names):
st = os.stat(dirname)
os.chmod(dirname, arg(st[stat.ST_MODE]))
for name in names:
n = os.path.join(dirname, name)
st = os.stat(n)
os.chmod(n, arg(st[stat.ST_MODE]))
def _mode_writable(mode):
return stat.S_IMODE(mode|0200)
def _mode_non_writable(mode):
return stat.S_IMODE(mode&~0200)
if write:
f = _mode_writable
else:
f = _mode_non_writable
try:
os.path.walk(top, _walk_chmod, f)
except:
pass # Ignore any problems changing modes.
def write(self, file, content, mode='wb'):
"""Writes the specified content text (second argument) to the specified
file name (first argument). The file name may be a list, in which case
the elements are concatenated using the os.path.join() method. The file
is created under the temporary working directory. Any subdirectories in
the path must already exist. The I/O mode for the file may be specified;
it must begin with a 'w'. The default is 'wb' (binary write).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
open(file, mode).write(content)
| mxrrow/zaicoin | src/deps/boost/tools/build/v2/test/TestCmd.py | Python | mit | 23,923 |
from __future__ import absolute_import
import time
import random
from urlparse import urlparse
from redis import Redis
from . import CatalogCoordinator, LockException
################################################################################
# a slightly modified version of retools lock which depends only on redis' expire.
# (retools depends on client OS time being sync'd)
class Lock(object):
def __init__(self, key, expires=30, timeout=10, redis=None):
"""
Distributed locking using Redis SETNX and GETSET.
Usage::
with Lock('my_lock'):
print "Critical section"
:param expires: We consider any existing lock older than
``expires`` seconds to be invalid in order to
detect crashed clients. This value must be higher
than it takes the critical section to execute.
:param timeout: If another client has already obtained the lock,
sleep for a maximum of ``timeout`` seconds before
giving up. A value of 0 means we never wait.
:param redis: The redis instance to use if the default global
redis connection is not desired.
"""
self.key = key
self.timeout = timeout
self.expires = expires
if not redis:
redis = Redis()
self.redis = redis
self.token = str(time.time() * random.random())
def __enter__(self):
redis = self.redis
timeout = self.timeout
while timeout >= 0:
if redis.setnx(self.key, self.token):
# We gained the lock; enter critical section
redis.expire(self.key, int(self.expires))
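# Note: SETNX and EXPIRE are two separate round trips, so a client that
# dies between them leaves a key with no TTL; on newer Redis servers the
# atomic SET key value NX EX <seconds> form closes that window.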
return
timeout -= 1
if timeout >= 0:
time.sleep(1)
raise LockException("Timeout while waiting for lock.")
def __exit__(self, exc_type, exc_value, traceback):
# Only delete the key if it's our token
current_value = self.redis.get(self.key)
if current_value == self.token:
self.redis.delete(self.key)
else:
raise LockException("Lock expired before exit.")
class RedisCatalogCoordinator(CatalogCoordinator):
def __init__(self, redis=None, redis_password=None, **kwargs):
super(RedisCatalogCoordinator, self).__init__(**kwargs)
assert self.url or redis
if redis is not None:
self._redis = redis
else:
u = urlparse(self.url)
self._redis = Redis(host=u.hostname, port=u.port, password=redis_password)
def get_index_lock(self, domain=None, **kwargs):
name = 'index'
if domain is not None:
name = '%s.%s' % (domain, name)
return Lock(name, redis=self._redis)
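# Minimal usage sketch (URL and domain values are illustrative, and the
# base class is assumed to accept a url keyword):
#   coord = RedisCatalogCoordinator(url='redis://localhost:6379')
#   with coord.get_index_lock(domain='com.example'):
#       pass  # critical section guarded by the 'com.example.index' key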
@classmethod
def valid_url(cls, url):
return urlparse(url).scheme in ('redis',)
| mindsnacks/Zinc | src/zinc/coordinators/redis.py | Python | mit | 3,000 |
from django import forms
from .models import Residence
class ResidenceForm(forms.ModelForm):
class Meta:
model = Residence
fields = ('name', 'users')
| pgergov/belmis | belmis/residences/forms.py | Python | mit | 172 |
from django.core.urlresolvers import reverse_lazy, reverse
from django.core.exceptions import ValidationError
from django import forms
from django.forms import ModelForm, inlineformset_factory, HiddenInput, Textarea
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Reset, Button, HTML, Layout, Field, Div, Column
from crispy_forms.bootstrap import FormActions, AppendedText
from .models import Country, Office, UserProfile, Feedback, Comment, Attachment
"""
A generic helper function used for setting up shared Bootstrap properties on crispy forms.
"""
def setup_boostrap_helpers(formtag=False):
helper = FormHelper()
helper.form_class = 'form-horizontal'
#helper.label_class = 'col-sm-2'
helper.field_class = 'col-sm-12'
helper.html5_required = True
helper.form_show_labels = False
helper.error_text_inline = True
helper.help_text_inline = True
helper.render_required_fields = True
helper.form_show_errors = True
helper.form_tag = formtag
return helper
AttachmentFormSet = inlineformset_factory(
Feedback, Attachment,
extra=1,
can_delete=False,
fields=("attachment", "feedback"))
class AttachmentFormSetHelper(FormHelper):
"""
This is just a helper for the AttachmentFormSet defined above to make it crispier
"""
def __init__(self, *args, **kwargs):
super(AttachmentFormSetHelper, self).__init__(*args, **kwargs)
self.html5_required = True
self.form_class = 'form-horizontal'
self.field_class = 'col-sm-12'
self.form_tag = False
self.render_required_fields = True
self.disable_csrf = True
self.form_show_labels = False
class FeedbackForm(forms.ModelForm):
tagz = forms.CharField(label=_('Tags'), max_length=40, required=False,)
class Meta:
model = Feedback
fields = ['issue_type', 'summary', 'description', 'reference', 'tagz']
widgets = {'description': Textarea(attrs={'cols': 30, 'rows': 3}),}
def __init__(self, *args, **kwargs):
super(FeedbackForm, self).__init__(*args, **kwargs)
self.helper = setup_boostrap_helpers(formtag=False)
self.fields['issue_type'].empty_label = ""
self.fields['reference'].widget.attrs['placeholder'] = _('Reference')
self.fields['summary'].widget.attrs['placeholder'] = _('Summary')
self.fields['description'].widget.attrs['placeholder'] = _('Description')
self.helper.form_id = 'id_feedback_form'
self.helper.form_action = reverse_lazy('feedback_add')
#self.helper.add_input(Submit('submit', 'Submit', css_class='btn-sm btn-primary'))
self.helper.layout = Layout(
Div(
Column(
Field('issue_type',),
css_class="col-sm-6",
), Column(
Field('reference',),
css_class="col-sm-6",
),
css_class="row",
),
Div(
Column(
Field('summary'),
css_class="col-sm-12",
),
css_class="row",
),
Div(
Column(
Field('description'),
css_class="col-sm-12",
),
css_class="row",
),
Div(
Column(
Field('tagz'),
css_class="col-sm-12",
),
css_class="row",
),
)
class CommentForm(forms.ModelForm):
#Hidden value to get a child's parent
parent = forms.CharField(widget=forms.HiddenInput(
attrs={'class': 'parent'}), required=False)
def __init__(self, *args, **kwargs):
super(CommentForm, self).__init__(*args, **kwargs)
self.fields['feedback'].widget = forms.HiddenInput()
self.helper = setup_boostrap_helpers(formtag=True)
self.helper.form_show_labels = False
self.helper.field_class = 'col-sm-12'
self.helper.form_id = 'id_comment_form'
self.helper.form_action = reverse_lazy('comment_add')
self.helper.add_input(Submit('submit', 'Submit', css_class='btn-sm btn-primary'))
class Meta:
model = Comment
fields = ("feedback", "content", )
widgets = {'content': forms.Textarea(attrs={'rows': 3, 'placeholder': "Type your comment here"})}
| mercycorps/feedback | forms.py | Python | gpl-3.0 | 4,603 |
from flask import Flask, request, url_for, render_template, redirect
from random import randrange
app = Flask(__name__)
d = {}
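# NOTE: module-level mutable state shared across requests; adequate for a
# single-user demo, but concurrent requests would overwrite each other's answers.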
@app.route("/", methods = ["POST", "GET"])
def index():
return render_template("home.html")
@app.route("/appetizer", methods = ["POST", "GET"])
def appetizer():
global d
if request.method=="GET":
return render_template("appetizer.html")
else:
button = request.form['button']
if button=="Go Back":
return redirect(url_for('index'))
else:
if ('gender' in request.form and 'veg' in request.form and 'thirst' in request.form and 'digestion' in request.form and 'boldness' in request.form):
d = {'gender':int(request.form['gender']),
'vegetarian':int(request.form['veg']),
'thirst':int(request.form['thirst']),
'digestion':int(request.form['digestion']),
'boldness': int(request.form['boldness'])}
return process()
else:
return "Please answer all questions!<br>" + render_template("appetizer.html")
def process():
global d
calorieCount = 0
maleOptionsV = [ ["Why are you a male vegetarian???", 0] ]
femaleOptionsV = [ ["Falafel", 300], ["Mixed veggies", 150] ]
maleOptionsNV = femaleOptionsNV = [ ["Chicken", 300], ["Lamb", 350], ["Chicken-falafel combo", 350], ["Chicken-lamb combo", 450], ["Chicken-lamb-falafel combo",500], ["Fish",250], ["Philly cheese steak", 450], ["A cheeseburger", 600] ]
isVeg = [maleOptionsV, femaleOptionsV]
isNotVeg = [maleOptionsNV, femaleOptionsNV]
possibilities = [ isVeg, isNotVeg ]
yes = [ [", and a Snapple.", 190], [", and a Sprite.",140], [", and a Coke.",140], [", and a Nestea.", 120], [", and a Bottle of Water.", 0] ]
no = [ [". And if you're not thirsty yet, you will be.", 0] ]
thirst = [yes, no]
boldness = [ [" in a salad ", 30], [" in a pita ", 200], [" over rice ", 350] ]
notConcerned = [ [" with white sauce, hot sauce, and BBQ sauce", 125] ]
someConcern = [ [" with white sauce", 50], [" with white sauce and BBQ sauce", 100] ]
veryConcerned = [ [" (no sauce)", 0] ]
concern = [notConcerned, someConcern, veryConcerned]
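    # Each option above is a [label, calories] pair. The answers collected in d
    # index into these nested lists: vegetarian/gender pick the dish pool,
    # boldness the serving format, digestion the sauce, and thirst the drink;
    # calorieCount accumulates the second element of every choice made below.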
dishOptions = possibilities[d['vegetarian']][d['gender']]
dish = dishOptions[randrange(0, len(dishOptions))]
typeOfDish = dish[0]
calorieCount += int(dish[1])
typeOfFormat = boldness[d['boldness']][0]
if (typeOfDish != 'A cheeseburger' and typeOfDish != "Philly cheese steak"):
calorieCount += int(boldness[d['boldness']][1])
sauceOptions = concern[d['digestion']]
sauce = sauceOptions[randrange(0, len(sauceOptions))]
typeOfSauce = sauce[0]
if (typeOfDish != 'A cheeseburger' and typeOfDish != "Philly cheese steak"):
calorieCount += int(sauce[1])
drinkOptions = thirst[d['thirst']]
drink = drinkOptions[randrange(0, len(drinkOptions))]
typeOfDrink = drink[0]
calorieCount += int(drink[1])
if (typeOfDish == "Why are you a male vegetarian???"):
return render_template("MVresults.html",insult=typeOfDish);
elif (typeOfDish != 'A cheeseburger' and typeOfDish != "Philly cheese steak"):
return render_template("results.html", typeOfDish=str(typeOfDish), typeOfFormat=typeOfFormat, typeOfSauce=typeOfSauce, typeOfDrink=typeOfDrink,calorieCount=calorieCount)
else:
return render_template("results.html", typeOfDish=typeOfDish, typeOfFormat="", typeOfSauce="", typeOfDrink=typeOfDrink, calorieCount=calorieCount)
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0",port=5000)
| stuycs-softdev-fall-2013/proj3-7-cartwheels | halal/app.py | Python | bsd-3-clause | 3,626 |
#!/usr/bin/env python
# AGDeviceControl
# Copyright (C) 2005 The Australian National University
#
# This file is part of AGDeviceControl.
#
# AGDeviceControl is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# AGDeviceControl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AGDeviceControl; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
def defaultaction(device, command, parameters=None):
print "default action %s.%s(%s)" % (device, command, parameters)
| pwarren/AGDeviceControl | agdevicecontrol/gui/defaultaction.py | Python | gpl-2.0 | 974 |
#!/usr/bin/env python
import unicornhat as unicorn
import getch, random, time, colorsys
import numpy as np
unicorn.rotation(90)
unicorn.brightness(0.4)
screen = [[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0]]
score=0
carX=3
carY=6
def drawObstacles():
for y in range(8):
for x in range(8):
unicorn.set_pixel(x,y,0,screen[y][x],0)
unicorn.show()
def addObstacle():
    r=random.randrange(0,max(2,(10-abs(score/10))))  # clamp so the range stays valid once score/10 reaches 10
if r==1:
screen[0][random.randrange(0,8)]=64
def moveObstacles():
for y in range(7,-1,-1):
for x in range(8):
screen[y][x]=screen[y-1][x]
def drawCar(y, x):
unicorn.set_pixel((x),(y),0,0,64 )
unicorn.set_pixel((x)+1,(y),0,0,64 )
unicorn.set_pixel((x),(y)+1,0,0,64 )
unicorn.set_pixel((x)+1,(y)+1,0,0,64)
unicorn.show()
def undrawCar(y,x):
unicorn.set_pixel((x),(y),0,0,0)
unicorn.set_pixel((x)+1,(y),0,0,0)
unicorn.set_pixel((x),(y)+1,0,0,0)
unicorn.set_pixel((x)+1,(y)+1,0,0,0)
unicorn.show()
def checkHit():
if (screen[carY][carX]==64) or (screen[carY+1][carX]==64) or (screen[carY][carX+1]==64) or (screen[carY+1][carX+1]==64):
return True
else:
return False
def crashed():
for z in range(10):
rand_mat = np.random.rand(8,8)
for y in range(8):
for x in range(8):
h = 0.1 * rand_mat[x, y]
s = 0.8
v = rand_mat[x, y]
rgb = colorsys.hsv_to_rgb(h, s, v)
r = int(rgb[0]*255.0)
g = int(rgb[1]*255.0)
b = int(rgb[2]*255.0)
unicorn.set_pixel(x, y, r, g, b)
unicorn.show()
time.sleep(0.01)
while True:
moveObstacles()
addObstacle()
drawObstacles()
drawCar(carY,carX)
if (checkHit()==True):
crashed()
print "Crashed\nGame Over\nScore: ",score
break
else:
score=score+1
user_input=""
while user_input=="":
user_input = getch.getch().lower()
if (user_input!="q") and (user_input!="w") and (user_input!=" ") and (user_input!="x"):
user_input=""
if user_input!="x":
undrawCar(carY,carX)
if user_input=="q":
carX=carX-1
if carX < 0:
carX=0
elif user_input=="w":
carX=carX+1
if carX > 6:
carX=6
elif user_input==" ":
pass
else:
print "Game Over\nScore: ", score
break
| ukscone/unicornhat | avoid.py | Python | unlicense | 2,781 |
import random
import pickle
import unittest
"""
Function used to sort students using python's inbuilt sorting class
"""
def sort_by(_class, order):
if order == 1:
return sorted(_class)
elif order == 2:
return sorted(list(_class.items()), key=lambda student: max(student[1]), reverse=True)
else:
return sorted(list(_class.items()), key=lambda student: max(student[1]) / float(len(student[1])),
reverse=True)
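# Illustrative sketch (not part of the original module): `_class` is assumed to
# map student names to lists of scores. Note the asymmetry in return types:
# order 1 yields just the sorted names, while orders 2 and 3 yield
# (name, scores) pairs sorted by highest and by average score respectively.
#
# >>> grades = {'ann': [70, 90], 'bob': [80, 85]}
# >>> sort_by(grades, 1)
# ['ann', 'bob']
# >>> sort_by(grades, 2)
# [('ann', [70, 90]), ('bob', [80, 85])]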
def get_class(class_number):
if class_number == 1:
try:
_class = pickle.load(open("./class1.pkl", "rb"))
except IOError:
return None
elif class_number == 2:
try:
_class = pickle.load(open("./class2.pkl", "rb"))
except IOError:
return None
elif class_number == 3:
try:
_class = pickle.load(open("./class3.pkl", "rb"))
except IOError:
return None
else:
return None
return _class
"""
Function used to implement task 3
"""
def task3():
def display_grades(class_number, order):
# store data in the appropriate file
if class_number == 1 or class_number == 2 or class_number == 3:
_class = get_class(class_number)
else:
print("No such class")
return
if _class is None:
print("No students in class " + str(class_number))
return
if len(_class) == 0:
print("No students in class " + str(class_number))
return
sorted_names = sort_by(_class,order)
if order == 1:
for name in sorted_names:
print(name + ":" + str(max(_class[name])))
elif order == 2:
for name in sorted_names:
student_name = name[0]
scores = name[1]
print(student_name + ":" + str(max(scores)))
else:
for name in sorted_names:
student_name = name[0]
scores = name[1]
print(student_name + ":" + str(round(sum(scores) / float(len(scores)), 2)))
option = input(
"\nDisplay Class 1 Grades: 1\nDisplay Class 2 Grades: 2\nDisplay Class 3 Grades: 3\n\nEnter desired option:")
order_option = input(
"\nDisplay in alphabetical order: 1\nDisplay in descending order by highest grade: 2\nDisplay in descending order by average grade: 3\n\nEnter desired option:")
try:
display_grades(int(option), int(order_option))
except ValueError:
print("Incorrect type of input")
print("\n")
if __name__ == "__main__":
task3() | JA-VON/python-helpers-msbm | task3.py | Python | mit | 2,648 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Representation of the hudson.model.Result class
SUCCESS = {
'name': 'SUCCESS',
'ordinal': '0',
'color': 'BLUE',
'complete': True
}
UNSTABLE = {
'name': 'UNSTABLE',
'ordinal': '1',
'color': 'YELLOW',
'complete': True
}
FAILURE = {
'name': 'FAILURE',
'ordinal': '2',
'color': 'RED',
'complete': True
}
NOTBUILD = {
'name': 'NOT_BUILD',
'ordinal': '3',
'color': 'NOTBUILD',
'complete': False
}
ABORTED = {
'name': 'ABORTED',
'ordinal': '4',
'color': 'ABORTED',
'complete': False
}
THRESHOLDS = {
'SUCCESS': SUCCESS,
'UNSTABLE': UNSTABLE,
'FAILURE': FAILURE,
'NOT_BUILD': NOTBUILD,
'ABORTED': ABORTED
}
| joostvdg/jenkins-job-builder | jenkins_jobs/modules/hudson_model.py | Python | apache-2.0 | 1,248 |
import abc
import os.path
import string
import subprocess
from characteristic import Attribute, attributes
from haas.utils import abstractclassmethod
from six import add_metaclass
@add_metaclass(abc.ABCMeta)
class IAssertion(object):
@abstractclassmethod
def from_json_dict(cls, variables, data):
"""Create the assertion from a variables set and the loaded json
dict.
"""
# We call cls to ensure a subclass not implementing this class method
# cannot be created
return cls()
@abc.abstractmethod
    def uphold(self, variables, case, stdout, stderr, returncode):
        """The method to call to check the assertion."""
@attributes([
Attribute("variables", instance_of=dict),
Attribute("expected", instance_of=int)
])
class StatusAssertion(IAssertion):
@classmethod
def from_json_dict(cls, variables, data):
return cls(variables=variables, expected=data["expected"])
def uphold(self, variables, case, stdout, stderr, returncode):
case.assertEqual(self.expected, returncode)
@attributes([
Attribute("variables", instance_of=dict),
Attribute("expected", instance_of=str)
])
class OutputAssertion(IAssertion):
@classmethod
def from_json_dict(cls, variables, data):
return cls(variables=variables, expected=data["output"])
def uphold(self, variables, case, stdout, stderr, returncode):
output = "\n".join((stdout, stderr))
case.assertTrue(output.startswith(self._render()))
def _render(self):
return string.Template(self.expected).substitute(self.variables)
@attributes([
Attribute("variables", instance_of=dict),
Attribute("expected", instance_of=str)
])
class OutputStartswithAssertion(IAssertion):
@classmethod
def from_json_dict(cls, variables, data):
return cls(variables=variables, expected=data["expected"])
def render(self):
return string.Template(self.expected).substitute(self.variables)
def uphold(self, variables, case, stdout, stderr, returncode):
output = "\n".join((stdout, stderr))
case.assertTrue(output.startswith(self.render()))
@attributes([
Attribute("variables", instance_of=dict),
Attribute("expected", instance_of=str)
])
class RegexOutputAssertion(IAssertion):
@classmethod
def from_json_dict(cls, variables, data):
return cls(variables=variables, expected=data["expected"])
def uphold(self, variables, case, stdout, stderr, returncode):
output = "\n".join((stdout, stderr))
return case.assertRegexpMatches(output.rstrip(), self._render().rstrip())
def _render(self):
return string.Template(self.expected).substitute(self.variables)
@attributes([
Attribute("variables", instance_of=dict),
Attribute("path", instance_of=str),
Attribute("exists", instance_of=bool)
])
class FileExists(IAssertion):
@classmethod
def from_json_dict(cls, variables, data):
return cls(
variables=variables, path=data["path"], exists=data["exists"]
)
def uphold(self, variables, case, stdout, stderr, returncode):
path = self._render_path()
if self.exists:
msg = "File {0!r} does not exist".format(path)
case.assertTrue(os.path.exists(path), msg)
else:
msg = "File {0!r} exists".format(path)
case.assertFalse(os.path.exists(path), msg)
def _render_path(self):
return string.Template(self.path).substitute(self.variables)
@attributes([
Attribute("variables", instance_of=dict),
Attribute("status", instance_of=int),
Attribute("command", instance_of=str),
Attribute("assertions", instance_of=list)
])
class CommandAssertion(IAssertion):
@classmethod
def from_json_dict(cls, variables, data):
generic_assertions_factory = {
"regex": RegexOutputAssertion,
"file": FileExists,
}
assertions = []
for assertion_data in data.get("assertions", []):
kind = assertion_data["type"]
factory = generic_assertions_factory.get(kind)
if factory is None:
msg = "Assertion type {0!r} not supported"
raise NotImplementedError(msg.format(assertion_data["type"]))
else:
assertion = factory.from_json_dict(variables, assertion_data)
assertions.append(assertion)
return cls(
variables=variables, status=data["status"],
command=data["command"], assertions=assertions,
)
def uphold(self, variables, case, stdout, stderr, returncode):
command = self._render_command()
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True,
)
stdout, stderr = p.communicate()
for assertion in self.assertions:
assertion.uphold(variables, case, stdout, stderr, p.returncode)
def _render_command(self):
return string.Template(self.command).substitute(self.variables)
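# Illustrative sketch (not part of the original package): the JSON shape that
# CommandAssertion.from_json_dict expects, inferred from the keys read above.
# "type" selects the nested assertion class ("regex" or "file"), and $-style
# placeholders are filled in from `variables` via string.Template.
#
# example = {
#     "status": 0,
#     "command": "ls $workdir",
#     "assertions": [
#         {"type": "regex", "expected": "output"},
#         {"type": "file", "path": "$workdir/output.txt", "exists": True},
#     ],
# }
# assertion = CommandAssertion.from_json_dict({"workdir": "/tmp"}, example)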
| cournape/nousagi | nousagi/assertions.py | Python | bsd-3-clause | 5,107 |
import httplib
import base64
import string
class RESTResource(object):
def __init__(self):
self.status = None
self.reason = None
self.raw_data = None
class RESTClient(object):
"""
Simple interface to the REST web services. Supports 'GET', 'PUT', 'POST' and 'DELETE' methods.
Tailored towards JSON based services, although should be pretty straightforward to implement
different data payload methods:
- subclass from RESTClient
- implement _build_<data type>_payload method (see json example)
- pass data to get, put, etc method as 'data_<data type>' keyword argument
Examples:
c = RESTClient('api.example.com')
c.get('/api/v1/resource/')
c.put('/api/v1/resource/instance1/', data_json={'params': ['res1a', 'res1b']})
c.post('/api/v1/resource/', data_json={'name': 'instance2', 'params': ['res2a', 'res2b']})
c.delete('/api/v1/resource/instance1/')
c = RESTClient('https://secret-api.example.com', username='user1', password='secret1')
"""
def __init__(self, url, username=None, password=None):
self._method = None
self._url = url
if self._url.endswith('/'):
self._url = self._url[:-1]
self.headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if username and password:
auth_string = 'Basic ' + string.strip(base64.encodestring(username + ':' + password))
self.headers['Authorization'] = auth_string
def _build_json_payload(self, data):
try:
import json
except ImportError:
raise RuntimeError('json not installed')
return json.dumps(data)
def _rest_call(self, resource=None, **kwargs):
http_body = None
if kwargs:
for key in kwargs:
if key.startswith('data_'):
http_body = getattr(self, "_build_%s_payload" % key[5:])(kwargs[key])
if self._url.startswith('https://'):
c = httplib.HTTPSConnection(self._url[8:])
elif self._url.startswith('http://'):
c = httplib.HTTPConnection(self._url[7:])
else:
c = httplib.HTTPConnection(self._url)
c.request(self._method.upper(), resource, body=http_body, headers=self.headers)
resp = c.getresponse()
rest_obj = RESTResource()
rest_obj.status = resp.status
rest_obj.reason = resp.reason
rest_obj.raw_data = resp.read()
c.close()
return rest_obj
def __getattr__(self, item):
if item not in ('get', 'put', 'post', 'delete'):
raise AttributeError("Method '%s' not implemented" % item)
self._method = item
return self._rest_call
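# Illustrative sketch (not part of the original module): following the recipe
# in the RESTClient docstring, a subclass adding a plain-text payload builder.
# Passing data_text=... to any verb method routes through _build_text_payload
# via the 'data_' key-prefix dispatch in _rest_call; the headers would likely
# need adjusting to a matching Content-Type.
#
# class TextRESTClient(RESTClient):
#     def _build_text_payload(self, data):
#         return str(data)
#
# c = TextRESTClient('api.example.com')
# c.post('/api/v1/notes/', data_text='hello world')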
| rytis/miniREST | miniREST/client.py | Python | apache-2.0 | 2,807 |
#!/usr/bin/python2
# vim:set ts=4 sw=4 et nowrap syntax=python ff=unix:
#
# Copyright 2011-2018 Mark Crewson <mark@crewson.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, re, signal, sys, tempfile, time
from mccorelib.application import Application
from mccorelib.async import free_reactor
from mccorelib.config import Config, ConfigError
from mccorelib.log import getlog
from mccorelib.multiproc import ParentController, ParentStates
from mccorelib.string_conversion import convert_to_bool, ConversionError
from squib import metrics, oxidizer, reporter, selfstats, statistics, utility
##############################################################################
class SquibMain (Application):
app_name = 'squib'
app_version = '0.1.0'
long_cmdline_args = [ 'nodaemon', ]
def __init__ (self, **kw):
super(SquibMain, self).__init__(**kw)
self.nodaemon = False
self.daemonized = False
self.pid_file = None
def cmdline_handler (self, argument, value):
        if argument in ('--nodaemon',):  # tuple, so this is a membership test, not a substring test
self.nodaemon = True
#### SETUP AND CLEANUP ################################################
def setup (self):
super(SquibMain, self).setup()
self.log = getlog()
self.rename_process()
self.configure_metrics_recorder()
self.configure_reporter()
self.configure_oxidizers()
self.configure_extra_oxidizers()
self.configure_selfstats()
self.daemonize()
self.write_pid()
def cleanup (self):
self.remove_pid()
super(SquibMain, self).cleanup()
def rename_process (self):
utility.set_process_name(self.app_name)
def configure_metrics_recorder (self):
hostname = utility.calculate_hostname()
if '.' in hostname:
hostname = hostname.split('.', 1)[0]
save_file = self.config.get('common::metrics_save_file', None)
self.metrics_recorder = metrics.MetricsRecorder(prefix='%s.' % hostname,
save_file=save_file)
def configure_reporter (self):
try:
reporter_config = self.config.section('reporter')
except KeyError:
self.log.warning('No reporter defined. Falling back to SimpleLogReporter.')
self.reporter = reporter.SimpleLogReporter(None, self.metrics_recorder)
else:
reporter_klass = reporter_config.get('class')
if reporter_klass is None:
self.log.warning('No report class defined. Falling back to SimpleLogReporter.')
self.reporter = reporter.SimpleLogReporter(None, self.metrics_recorder)
else:
klass = utility.find_python_object(reporter_klass)
self.reporter = klass(reporter_config, self.metrics_recorder)
def configure_oxidizers (self):
self.controller = SquibController(self.reporter)
for ox in self.config.read_nonconfig_section('oxidizers'):
ox = ox.strip()
if not ox or ox.startswith('#'): continue
try:
oxconfig = self.config.section(ox)
except KeyError:
self.log.warn("No configuration for an oxidizer named \"%s\". Ignored" % (ox))
continue
try:
self.controller.add_child(oxidizer.create_oxidizer(ox, oxconfig, self.metrics_recorder))
except ConfigError, err:
self.log.warn(str(err))
self.log.warn("Invalid oxidizer named \"%s\". Ignored" % (ox))
def configure_extra_oxidizers (self):
oxconfig_dir = self.config.get('common::oxidizers_config_directory', None)
if oxconfig_dir is None:
self.log.debug('No common::oxidizers_config_directory defined. Skipping extra oxidizers.')
return
if not os.path.isdir(oxconfig_dir):
self.log.warn('No directory, skipping extra oxidizers: %s' % oxconfig_dir)
return
for oxfile in os.listdir(oxconfig_dir):
oxname = os.path.splitext(oxfile)[0]
oxfile = os.path.join(oxconfig_dir, oxfile)
if not os.path.isfile(oxfile):
self.log.warn('Not an oxidizer config file, skipping: %s' % oxfile)
continue
self.log.debug('Reading extra oxidizer config file: %s' % oxfile)
try:
try:
oxconfig = Config(oxfile).section('oxidizer')
except KeyError:
self.log.warn("Invalid configuration file %s: no [oxidizer] section" % oxfile)
continue
self.controller.add_child(oxidizer.create_oxidizer(oxname, oxconfig, self.metrics_recorder))
except ConfigError, err:
self.log.warn(str(err))
self.log.warn("Invalid oxidizer named \"%s\" (from file: %s). Ingored" % (oxname, oxfile))
def configure_selfstats (self):
try:
if convert_to_bool(self.config.get('common::selfstats', True)) == True:
self.selfstats = selfstats.SelfStatistics(self.config, self.metrics_recorder)
self.metrics_recorder.set_selfstats(self.selfstats)
except ConversionError:
raise ConfigError("noselfstats must be a boolean")
def daemonize (self):
nodaemon = self.nodaemon
if nodaemon == False:
try:
if convert_to_bool(self.config.get('common::nodaemon', False)) == False:
if self.daemonized == False:
utility.daemonize()
self.daemonized = True
except ConversionError:
raise ConfigError("nodaemon must be a boolean")
def write_pid (self):
pid_file = self.config.get('common::pid_file')
if pid_file is None:
return
self.pid_file = os.path.abspath(pid_file)
try:
self.log.debug("Writing pid file: %s" % self.pid_file)
f = open(self.pid_file, 'w')
f.write('%d\n' % os.getpid())
f.close()
except (IOError, OSError), why:
self.log.error("Cannot write pid file: %s" % str(why))
raise ConfigError("Cannot write pid file: %s" % self.pid_file)
def remove_pid (self):
if self.pid_file is not None:
try:
self.log.debug("Removing pid file: %s" % self.pid_file)
os.unlink(self.pid_file)
except OSError, why:
self.log.debug("Cannot remove pid file: %s" % str(why))
#### PROCESS CONTROL #####################################################
def start (self):
while 1:
super(SquibMain, self).start()
if self.controller.should_shutdown():
break
free_reactor()
def run (self):
self.log.info("%s %s STARTED" % (self.app_name, self.app_version))
self.controller.start()
self.metrics_recorder.save()
self.log.info("%s %s STOPPED" % (self.app_name, self.app_version))
##############################################################################
import socket
class SquibController (ParentController):
"""
The squib main loop controller. This object manages the oxidizer children and
triggers the reporter.
"""
def __init__ (self, reporter, **kw):
super(SquibController, self).__init__(**kw)
self.reporter = reporter
self.report_period = self.reporter.get_report_period()
def setup (self):
self.reactor.call_later(self.report_period, self.report)
statistics.schedule_ewma_decay()
def report (self):
try:
self.reporter.send_report()
finally:
self.reactor.call_later(self.report_period, self.report)
##############################################################################
if __name__ == "__main__":
g = SquibMain()
g.start()
##############################################################################
## THE END
| mcrewson/squib | squib/main.py | Python | apache-2.0 | 8,690 |
import bs4
import hashlib
import auxo.agent
import auxo.report
url_uk = 'https://www.worldcubeassociation.org/competitions?region=United+Kingdom'
class CubingAgent(auxo.agent.WebAgent):
'''
    An agent which checks for new Rubik's Cube competitions on the World Cube
    Association website.
'''
def __init__(self):
super().__init__('Cubing', url_uk)
def result(self):
report = super().result()
if 'comps' not in self.state:
self.state['comps'] = {}
if self.content is None:
report.addText('Failed to load the page.\n')
else:
soup = bs4.BeautifulSoup(self.content, 'html.parser')
# expecting something like:
# <li class="list-group-item not-past">
# <span class="date">Apr 23 - 24, 2016</span>
# <span class="competition-info">
# <div class="competition-link">Bosnia and Herzegovina Open 2016</p>
# <div class="location">Bosnia and Herzegovina, Banja Luka</p>
items = soup.select('li.list-group-item.not-past')
new_comps = 0
current_comps = {}
for li in items:
date = li.select('span.date')[0].text.strip()
title = li.select('div.competition-link')[0].text.strip()
location = li.select('div.location')[0].text.strip()
if (date == '') or (title == '') or (location == ''):
continue
comp = { 'date': date, 'title': title, 'location': location }
digest = hashlib.sha1()
digest.update(date.encode('utf-8'))
digest.update(title.encode('utf-8'))
digest.update(location.encode('utf-8'))
comp_id = digest.hexdigest()
current_comps[comp_id] = comp
if comp_id not in self.state['comps']:
report.addText('New competition: ' + title + '\n')
report.addText(' Location: ' + location + '\n')
report.addText(' Date: ' + date + '\n')
new_comps += 1
else:
report.addText(' Competition: ' + title + ' : ' + date + '\n')
if new_comps > 0:
report.addText('\nRegister here: ' + self.url + '\n')
# if there are no current competitions then the website layout has
# probably changed, so report that.
if len(current_comps) > 0:
self.state['comps'] = current_comps
else:
report.addText('No competitions found. New format?\n')
return report
| richard-taylor/auxo | auxo/cubing_agent.py | Python | gpl-3.0 | 2,939 |
from __future__ import print_function, absolute_import
from docopt import docopt
from .ext.fabric import *
from .ext.invoke import *
from .bootstrap import quickstart
from streamparse import __version__ as VERSION
# XXX: these are commands we're working on still
TODO_CMDS = """
sparse debug [-e <env>]
sparse restart [-e <env>]
sparse attach [-e <env>]
sparse logs [-e <env>]
"""
def main():
"""sparse: manage streamparse clusters.
sparse provides a front-end to streamparse, a framework for creating Python
projects for running, debugging, and submitting computation topologies
against real-time streams, using Apache Storm.
    It requires java and lein (the Clojure build tool) to be on your $PATH, and
uses lein and Clojure under the hood for JVM/Thrift interop.
Usage:
sparse quickstart <project_name>
sparse run [-n <topology>] [-o <option>]... [-p <par>] [-t <time>] [-dv]
sparse submit [-n <topology>] [-o <option>]... [-p <par>] [-e <env>] [-dvf]
sparse list [-e <env>] [-v]
sparse kill [-n <topology>] [-e <env>] [-v]
sparse tail [-e <env>] [--pattern <regex>]
sparse (-h | --help)
sparse --version
Arguments:
project_name The name of your new streamparse project.
Options:
-h --help Show this screen.
--version Show version.
-v --verbose Show verbose output for command.
-e --environment <env> The environment to use for the command
corresponding to an environment in your
"envs" dictionary in config.json. If you
only have one environment specified,
streamparse will automatically use this.
-n --name <topology> The name of the topology to deploy. If you
have only one topology defined in your
topologies/ directory, streamparse
will use it automatically.
-o --option <option>... Topology option to use upon submit, e.g.
"-o topology.debug=true" is equivalent to
"--debug". May be repeated for multiple options.
See "Topology Configuration" listing in Storm
UI to confirm effects.
-p --par <par> Parallelism of topology; conveniently sets
number of Storm workers and acker bolts
at once to passed value [default: 2].
-t --time <time> Time (in seconds) to keep local cluster
running [default: 5].
--pattern <regex> Apply pattern to files for "tail" subcommand.
-f --force Force a topology to submit by killing any
currently running topologies of the same
name.
-d --debug Debug the given command.
"""
args = docopt(main.__doc__, version="sparse " + VERSION)
if args["run"]:
time = int(args["--time"])
par = int(args["--par"])
options = args["--option"]
run_local_topology(args["--name"], time, par, options, args["--debug"])
elif args["list"]:
list_topologies(args["--environment"])
elif args["kill"]:
kill_topology(args["--name"], args["--environment"])
elif args["quickstart"]:
quickstart(args['<project_name>'])
elif args["submit"]:
par = int(args["--par"])
options = args["--option"]
submit_topology(args["--name"], args["--environment"], par, options,
args["--force"], args["--debug"])
elif args["tail"]:
tail_topology(args["--environment"], args["--pattern"])
if __name__ == "__main__":
main()
| thedrow/streamparse | streamparse/cmdln.py | Python | apache-2.0 | 4,125 |
"""
----------------------------------------------------------------------------
Echo State Networks
Luis F. Simoes, 2016-07-29
----------------------------------------------------------------------------
Implemented following the specifications in:
[1] Jaeger, H. (2007). Echo state network. Scholarpedia, 2(9), 2330.
http://www.scholarpedia.org/article/Echo_state_network
[2] Lukosevicius, M. (2012). A practical guide to applying echo state networks.
In Neural networks: Tricks of the trade (pp. 659-686). Springer Berlin Heidelberg.
http://minds.jacobs-university.de/sites/default/files/uploads/papers/PracticalESN.pdf
[3] Akusok, A., Bjork, K. M., Miche, Y., & Lendasse, A. (2015). High-performance extreme
learning machines: a complete toolbox for big data applications. IEEE Access, 3, 1011-1025.
http://dx.doi.org/10.1109/ACCESS.2015.2450498
"""
import numpy as np
from scipy.linalg import solve
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve.html
from tqdm import tqdm, trange
import sys
##### --------------------------------------------------------------------
class ESN(object):
# choices for activation function to be applied by reservoir neurons
activation_funcs = {
'sigmoid' :
lambda x : 1.0 / (1.0 + np.exp(-x)), # output in: ( 0.0, 1.0)
'hyp.tan' :
np.tanh, # hyperbolic tangent, output in: (-1.0, 1.0)
'LeCun tanh' :
lambda x : 1.7159 * np.tanh(2./3. * x),
# https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
'rectifier' :
lambda x : np.maximum(0., x),
'Leaky ReLU' :
lambda x : np.maximum(0.01 * x, x),
'softplus' :
lambda x : np.log(1. + np.exp(x)),
# http://hdl.handle.net/1903/5355
'Elliott' :
lambda x : x / (1. + np.abs(x)),
# No activation function / identity
'identity' :
lambda x : x,
}
def __init__(self, nr_neurons=100, prob_connect=0.1, spectral_radius=0.99,
activation='hyp.tan', leaking_rate=1.0,
output_feedback=False, y_noise=0.0,
alpha=1e-9, batch_size=5000, random_state=None):
"""
Echo State Network.
        A type of recurrent neural network. The system's inputs drive the dynamics
of a *randomly defined* recurrent reservoir of neurons. A linear readout
is trained to predict the desired outputs from the reservoir's states.
See: http://www.scholarpedia.org/article/Echo_state_network
Parameters
----------
nr_neurons : int
Number of neurons in the reservoir.
prob_connect : float
Probability of a neuron in the reservoir receiving a recurrent connection
from another reservoir neuron. Configures the sparsity of reservoir connections.
spectral_radius : float
Spectral radius of the reservoir's recurrent weights matrix.
Should be greater in tasks requiring longer memory of the input.
Should usually be < 1 to ensure the "Echo State Property" is present.
activation : string (default='hyp.tan')
Choice of activation function to be applied by reservoir neurons, to
perform the nonlinear transformation of (the weighted sum of) their inputs.
List options with: `ESN.activation_funcs.keys()`.
leaking_rate : float
Extent to which reservoir neurons' previous states vanish following a new
activation. Implemented as `(1 - leaking_rate) * r_prev + leaking_rate * r_activ`.
See: https://en.wikipedia.org/wiki/Exponential_smoothing
output_feedback : bool
Indication of whether to feed the readout's predictions back into the
reservoir. Feedbacks enable ESNs to become pattern generators, and to
achieve universal computation capabilities. This power has its price,
however, as dynamical stability issues may arise.
y_noise : float
Scaling of the noise added during training to the simulated output
feedback (actually originating from the training data). Emulates an
imperfectly learned output `y`, making the network robust to this.
Implemented as `y * N(1, y_noise)`. Only applicable if `output_feedback=True`.
alpha : float
Regularization term used in the Tikhonov regularization (also known as
Ridge regression) of the readout's weights. Effectively, tunes the compromise
between having a small training error (`alpha=0`) and small output weights.
See: https://en.wikipedia.org/wiki/Tikhonov_regularization
batch_size : int
Size of the chunks into which the training data is broken down.
Allows for trading-off memory requirements and computational overhead.
See Sec. III. D of http://dx.doi.org/10.1109/ACCESS.2015.2450498
random_state : RandomState instance or None
If RandomState instance, random_state is the random number generator;
If None, the random number generator is np.random.
"""
assert 0 <= prob_connect <= 1, "`prob_connect` should be in [0,1]"
assert 0 < leaking_rate <= 1, "`leaking_rate` should be in (0,1]"
# configure the reservoir
self.nr_neurons = nr_neurons
self.prob_connect = prob_connect
self._spectral_radius = spectral_radius
self.activation_function = self.activation_funcs[activation]
self.leaking_rate = leaking_rate
# dealing with output feedbacks
self.output_feedback = output_feedback
self.y_noise = y_noise
# parameters of the readout's training
# (weights connecting reservoir to outputs)
self.alpha = alpha
self.batch_size = batch_size
self.random = np.random if random_state is None else random_state
self.W, self.B = None, None
self.r = None
def __str__(self):
if self.W is None:
return '?-%d-? ESN (untrained)' % self.nr_neurons
return '%d-%d-%d ESN' % self.shape()
def shape(self):
"Get the number of dimensions per layer (input, reservoir, readout)."
return (self.W.shape[0] - 1,
self.nr_neurons,
self.B.shape[1] if self.B.ndim > 1 else 1)
def reservoir_rec_weights(self):
"Get the weights for the reservoir nodes' recurrent connections"
return self.W[-self.nr_neurons:, :]
def spectral_radius(self):
"Determine the spectral radius of the reservoir connection matrix"
rw = self.reservoir_rec_weights()
eigvals = np.linalg.eigvals(rw)
spec_rad = np.abs(eigvals).max()
return spec_rad
# https://en.wikipedia.org/wiki/Spectral_radius
# http://mathworld.wolfram.com/SpectralRadius.html
def fit(self, X, y, sample_weight=None, y_initial=None):
nr_samples, input_dim = X.shape
assert nr_samples > self.nr_neurons, "Implemented training equations " \
"expect nr_samples (%d) > nr_neurons (%d)" % (nr_samples, self.nr_neurons)
# ---------- Define the reservoir
#
# configure the weights of connections into reservoir neurons
# (these consist of: bias input, external inputs, feedback from
# the output (optionally), and recurrent reservoir connections).
# Initialized to random weights (which then remain constant throughout training)
self.output_dim = y.shape[1]
output_feedback_dim = self.output_dim if self.output_feedback else 0
nr_inputs = 1 + input_dim + output_feedback_dim + self.nr_neurons
self.W = self.random.standard_normal(size=(nr_inputs, self.nr_neurons))
rw = self.reservoir_rec_weights()
# enforce connectivity's sparsity.
# prob_connect (probability of a link existing) ranges in [0,1],
# going from a fully disconnected to a fully connected graph.
if self.prob_connect is not None:
i = self.random.rand(*rw.shape) > self.prob_connect
rw[i] *= 0.0
# enforce the requested spectral radius
# (should be, in most situations, < 1 to ensure the echo state property)
if self._spectral_radius is not None:
rw *= (self._spectral_radius / self.spectral_radius())
# ---------- Train readouts
#
# split the training set into multiple batches
nr_batches = np.ceil(nr_samples / float(self.batch_size))
b = np.linspace(0, nr_samples, nr_batches + 1)[1:-1].astype(np.int)
if sample_weight is None:
batches = list(zip(np.split(X, b), np.split(y, b)))
else:
# "An additional multiplication by A is avoided by applying
# weights $\sqrt{a_j}$ directly to the rows of matrices H, T"
# -- http://dx.doi.org/10.1109/ACCESS.2015.2450498
w = np.sqrt(sample_weight).reshape(-1, 1)
assert w.shape[0] == nr_samples, "%d weights provided for %d " \
"training samples" % (w.shape[0], nr_samples)
batches = list(zip(np.split(X, b), np.split(y, b), np.split(w, b)))
# process batches, aggregating results into the HH and Hy matrices
HH, Hy, r, y_initial = self._fit_step(*batches[0], r_initial=None, y_initial=y_initial)
for Xyw in batches[1:]:
HHb, Hyb, r, y_initial = self._fit_step(*Xyw, r_initial=r, y_initial=y_initial)
HH += HHb
Hy += Hyb
# add the regularization term to HH's diagonal
# See: https://en.wikipedia.org/wiki/Tikhonov_regularization
HH.ravel()[::HH.shape[1]+1] += self.alpha
# calculate the output weights matrix, beta
# (finds the regularized least squares fit to a [multivariate] linear regression problem)
#self.B = np.linalg.lstsq(HH, Hy)[0]
self.B = solve(HH, Hy, sym_pos=True, overwrite_a=True, overwrite_b=True)
# http://mathworld.wolfram.com/PositiveDefiniteMatrix.html
def _fit_step(self, X, y, sample_weight=None, y_initial=None, **kwargs):
if self.output_feedback:
if y_initial is None:
y_initial = np.zeros(self.output_dim)
# add Gaussian noise to the outputs sent as feedback
if self.y_noise > 0.0:
_y = y * self.random.normal(1, self.y_noise, size=y.shape)
else:
_y = y
# "Teacher forcing": disengages the recurrent relationship between the reservoir
# and the readout during training. Treats output learning as a feedforward task.
# Feeds the previous step's desired outputs `y` (optionally with added noise)
# through the feedback connections, as if they had indeed been the model's outputs.
_y = np.vstack([y_initial, _y[:-1]])
X = np.hstack([X, _y])
# calculate the reservoir neurons' activations matrix, given the input vectors in X
H = self.propagate_reservoir(X, **kwargs)
# get the reservoir's most recent state
r = H[-1]
# extend reservoir with the input vectors (the linear readout uses as
# input an extended system state containing the input and reservoir states)
H = np.hstack([X, H])
# calculate the auxiliary matrices from which beta will be determined
if sample_weight is None:
HH = H.T.dot(H)
Hy = H.T.dot(y)
else:
Hw = sample_weight * H
HH = Hw.T.dot(Hw)
Hy = Hw.T.dot(sample_weight * y)
return HH, Hy, r, y[-1]
def update_reservoir(self, X, r):
"""
Advance the reservoir's state by one time step, as a function of
the new inputs `X`, extended with the reservoir's current state `r`.
"""
I = np.hstack([X, r])
A = self.W[0] + np.dot(I, self.W[1:])
A = self.activation_function(A)
if self.leaking_rate == 1:
# leaky integration not being used
return A
else:
return (1. - self.leaking_rate) * r + self.leaking_rate * A
def propagate_reservoir(self, X, reset_r=True, r_initial=None):
"Advance the reservoir's state across multiple time steps."
if reset_r:
self.r = np.zeros(self.nr_neurons) if r_initial is None else r_initial
R = []
for x in X:
self.r = self.update_reservoir(x, self.r)
R.append(self.r)
return np.array(R)
def predict_with_feedback(self, X, reset_r=True, r_initial=None, y_initial=None, **kwargs):
# a variant of `propagate_reservoir` that predicts outputs (`compute_readout`)
# and feeds them back as inputs for the next step.
if reset_r:
self.r = np.zeros(self.nr_neurons) if r_initial is None else r_initial
y = np.zeros(self.output_dim) if y_initial is None else y_initial
Y = []
for x in X:
x = np.hstack([x, y])
self.r = self.update_reservoir(x, self.r)
y = self.compute_readout(x, self.r)
Y.append(y)
return np.array(Y)
def compute_readout(self, X, R):
"""
Compute the linear readout, from an extended system state
containing the inputs `X` and reservoir state `R`.
"""
Z = np.hstack([X, R])
return np.dot(Z, self.B)
def predict(self, X, **kwargs):
assert self.B is not None, \
'Attempt to evaluate an input with an untrained ESN.'
if self.output_feedback:
return self.predict_with_feedback(X, **kwargs)
else:
R = self.propagate_reservoir(X, **kwargs)
return self.compute_readout(X, R)
##### --------------------------------------------------------------------
class ESN_ensemble(object):
def __init__(self, aggregate='mean', grad_boost=False, *model_args, **model_kwargs):
self.model_args = model_args
self.model_kwargs = model_kwargs
self.aggregate = {'mean' : np.mean, 'median' : np.median}[aggregate]
self.grad_boost = grad_boost
if self.grad_boost:
self.aggregate = None
def fit(self, X, y, nr_models=10, weighted=False, **train_args):
self.M = []
pred = None
_pred = []
_y = y
w = None
for i in trange(nr_models, leave=False, file=sys.stdout):
if weighted and pred is not None:
# weigh each time instants' predictions proportionally
# to the ensemble's current RMSE on it
w = np.mean((y - pred)**2, axis=1) ** 0.5
m = ESN(*self.model_args, **self.model_kwargs)
m.fit(X, _y, sample_weight=w, **train_args)
if self.grad_boost:
predX = m.predict(X)
pred = (0 if pred is None else pred) + predX
_y = y - pred
elif weighted:
predX = m.predict(X)
_pred.append(predX)
pred = self.aggregate(_pred, axis=0)
self.M.append(m)
def predict_with_feedback(self, X, reset_r=True, y_initial=None, **kwargs):
assert self.M[0].output_feedback, \
"Trained model doesn't use y(t-1) as input."
assert not self.grad_boost, \
"Using Gradient Boosting. Models can't all receive the same y."
Y = []
for x in X:
_y = [m.predict(x[None], reset_r=reset_r, y_initial=y_initial, **kwargs)
for m in self.M]
y = y_initial = self.aggregate(_y, axis=0)[0]
Y.append(y)
# after the first step, ensure `r` (reservoir neurons' states)
# are no longer reset
reset_r = False
return np.array(Y)
def predict(self, X, feedback_ensemble_y=False, **kwargs):
if feedback_ensemble_y:
return self.predict_with_feedback(X, **kwargs)
predX = [m.predict(X, **kwargs) for m in self.M]
if self.grad_boost:
return np.sum(predX, axis=0)
else:
return self.aggregate(predX, axis=0)
def model_selection(self, X, y, keep_top=10, **kwargs):
"""
Keep only the best `keep_top` models, as determined by the RMSEs
on some dataset `X -> y`.
"""
m_eval = []
for m in self.M:
y_pred = m.predict(X, **kwargs)
if not np.isnan(y_pred.max()):
m_rmse = RMSE(y_true=y, y_pred=y_pred)
m_eval.append((m_rmse, m))
# sort by ascending order of RMSE
m_eval.sort(key=lambda i:i[0])
evals, models = list(zip(*m_eval))
# keep only the `keep_top` models with lowest RMSE
self.M = models[:keep_top]
return evals, models
def error(self, X, y, feedback_ensemble_y=False, **kwargs):
"""
        Calculate how the RMSE of an ensemble of ESNs
        evolves as each additional model is added.
"""
if not feedback_ensemble_y:
err = []
pred = None
_pred = []
for m in tqdm(self.M, leave=False, file=sys.stdout):
pred_m = m.predict(X, **kwargs)
if self.grad_boost:
pred = (0 if pred is None else pred) + pred_m
else:
_pred.append(pred_m)
pred = self.aggregate(_pred, axis=0)
e = RMSE(y_true=y, y_pred=pred)
err.append(e)
return err
# if using aggregated y feedback, the contributions from each model
# are evaluated differently
else:
y_initial = kwargs.pop('y_initial', np.zeros(y.shape[1]))
y_shifted = np.vstack([y_initial, y[:-1]])
M_pred = []
for m in self.M:
# obtain model m's predictions over time, as if at every stage
# it had perfect feedback from the previous time step's output
m_pred = np.array([
m.predict(_x[None], reset_r=i == 0, y_initial=_y, **kwargs)
for i,(_x, _y) in enumerate(zip(X, y_shifted))
])
M_pred.append(np.vstack(m_pred))
return [
RMSE(y_true=y, y_pred=self.aggregate(M_pred[:i], axis=0))
for i in range(1, len(self.M) + 1)
]
def show_error(self, error):
import matplotlib.pylab as plt
l = 'Error after adding each model\n(final RMSE: %f)' % error[-1]
plt.plot(list(range(1, len(self.M) + 1)), error, label=l)
plt.legend()
plt.xlabel('Number of models')
plt.ylabel('RMSE')
plt.xlim(0, len(error))
model_str = '%d x %s' % (len(self.M), str(self.M[0]))
plt.title('Ensemble: ' + model_str)
def RMSE(y_true, y_pred):
"""
Root-mean-square error (RMSE).
Represents the sample standard deviation of the differences between
predicted values and observed values.
Has the same units as the quantity being estimated.
https://en.wikipedia.org/wiki/Root-mean-square_deviation
"""
return np.sqrt(np.mean((y_true - y_pred) ** 2))
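# Illustrative usage sketch (not part of the original module; the toy task,
# sizes and hyper-parameters are assumptions): train an ESN to predict the
# next sample of a sine wave, then score it with RMSE on held-out data.
#
# rng = np.random.RandomState(0)
# t = np.linspace(0, 8 * np.pi, 1000)
# X = np.sin(t).reshape(-1, 1)   # inputs: current sample
# y = np.roll(X, -1, axis=0)     # targets: next sample
# esn = ESN(nr_neurons=200, spectral_radius=0.9, random_state=rng)
# esn.fit(X[:800], y[:800])
# print(RMSE(y_true=y[800:], y_pred=esn.predict(X[800:])))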
| lfsimoes/mars_express__esn | echo_state_networks.py | Python | mit | 20,376 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scorecards', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='category',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='category',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='entry',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='entry',
name='updated',
field=models.DateTimeField(auto_now=True),
),
]
| mysociety/pombola | pombola/scorecards/migrations/0002_datetimefield_remove_default.py | Python | agpl-3.0 | 883 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
from cnfformula.transformations.expand import Expand
from ..cmdline import register_cnf_transformation_subcommand
from ..transformations import register_cnf_transformation
from ..cnf import CNF
from ..cnf import disj, xor
from ..cnf import less, greater, geq, leq, eq
from ..cnf import weighted_eq, weighted_geq
def _shuffle_literals(constraint,substitution):
"""Shuffle the literals in the low level representation of constraints."""
literals = (substitution[l] for l in constraint)
if type(constraint)==disj:
return disj(*literals)
elif type(constraint) in [xor,eq]:
return type(constraint)(*literals,value=constraint.value)
elif type(constraint) in [less, greater, geq, leq]:
return type(constraint)(*literals,threshold=constraint.threshold)
elif type(constraint) in [weighted_eq,weighted_geq]:
offset = sum(-w for (w,v) in constraint if substitution[v]<0)
terms = ((w*substitution[v]//abs(substitution[v]),abs(substitution[v])) for (w,v) in constraint)
if type(constraint) == weighted_eq:
return weighted_eq(*terms,value=constraint.value+offset)
elif type(constraint) == weighted_geq:
return weighted_geq(*terms,threshold=constraint.threshold+offset)
else:
ValueError("The constraint type is unknown: {}".format(type(constraint)))
else:
raise ValueError("The constraint type is unknown: {}".format(type(constraint)))
@register_cnf_transformation
def Shuffle(F,**kwargs):
"""Reshuffle the given cnf.
    Returns a formula logically equivalent to the input, a CNF with
    :math:`n` variables and :math:`m` constraints, with the following
    transformations applied in order: a permutation of the variables,
    polarity flips of the literals, and a permutation of the constraints.
Parameters
----------
F : CNF
formula to be shuffled
variables_permutation: list(string) or 'random', optional
the sequence of variables, in their new order. If `'random'`
then the order is picked at random. If `None` or the parameter
is not set, then there is no permutation. (default: None)
polarity_flips: list(-1,1) or 'random', optional
This is a :math:`\{-1,1\}^n` vector. If the :math:`i`-th
entry is -1, all the literals with the :math:`i`-th variable
change its sign. If `'random'` then the flips are picked at
random. If `None` or the parameter is not set, then the
literals are not flipped. (default: None)
constraints_permutation: list(int) or 'random', optional
        it is a permutation of [0..m-1]. The resulting constraints are
        reordered according to the permutation. If `'random'` then the
        permutation is picked at random. If `None` or the parameter is
        not set, then the constraints keep their original order. (default: None)
"""
variables_permutation = kwargs.pop('variables_permutation', None)
polarity_flips = kwargs.pop('polarity_flips', None)
constraints_permutation = kwargs.pop('constraints_permutation', None)
# empty cnf
out=CNF(header='')
out.header="Reshuffling of:\n\n"+F.header
# Permute variables
variables=list(F.variables())
N=len(variables)
if variables_permutation == 'random':
random.shuffle(variables)
elif variables_permutation is not None:
assert set(variables_permutation)==set(variables)
variables = variables_permutation
for v in variables:
out.add_variable(v)
# polarity flip
if polarity_flips is None:
polarity_flips=[1]*N
elif polarity_flips == 'random':
polarity_flips=[random.choice([-1,1]) for _ in xrange(N)]
else:
assert len(polarity_flips)==N
#
# substitution of variables
#
substitution=[None]*(2*N+1)
reverse_idx=dict([(v,i) for (i,v) in enumerate(out.variables(),1)])
polarity_flips = [None]+polarity_flips
for i,v in enumerate(F.variables(),1):
substitution[i]= polarity_flips[i]*reverse_idx[v]
substitution[-i]= -substitution[i]
# permutation of constraints
#
M=len(F._constraints)
if constraints_permutation is None:
constraints_permutation = range(M)
elif constraints_permutation == 'random':
constraints_permutation=range(M)
random.shuffle(constraints_permutation)
else:
assert len(constraints_permutation)==M
# load clauses
out._constraints = [None]*M
out._length = None
for (old,cnst) in enumerate(F._constraints):
out._constraints[constraints_permutation[old]]= _shuffle_literals(cnst,substitution)
# return the formula
out.mode_default()
return out
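# Illustrative sketch (not part of the original module): applying Shuffle with
# an explicit polarity-flip vector and random orderings for everything else.
# The two-variable formula here is an assumption.
#
# F = CNF()
# F.add_clause([(True, 'x'), (False, 'y')])
# F.add_clause([(True, 'y')])
# G = Shuffle(F,
#             variables_permutation='random',
#             polarity_flips=[1, -1],
#             constraints_permutation='random')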
@register_cnf_transformation_subcommand
class ShuffleCmd:
"""Shuffle
"""
name='shuffle'
description='Permute variables, constraints and polarity of literals at random'
@staticmethod
def setup_command_line(parser):
parser.add_argument('--no-polarity-flips','-p',
action='store_true',
dest='no_polarity_flips',
help="No polarity flips")
parser.add_argument('--no-variables-permutation','-v',
action='store_true',
dest='no_variables_permutation',
help="No permutation of variables")
parser.add_argument('--no-constraints-permutation','-c',
action='store_true',
dest='no_constraints_permutation',
help="No permutation of constraints")
@staticmethod
def transform_cnf(F,args):
return Shuffle(F,
variables_permutation = None if args.no_variables_permutation else 'random',
constraints_permutation = None if args.no_constraints_permutation else 'random',
polarity_flips = None if args.no_polarity_flips else 'random')
@register_cnf_transformation_subcommand
class FlipCmd:
name='flip'
description='negate all variables in the formula'
@staticmethod
def setup_command_line(parser):
pass
@staticmethod
def transform_cnf(F, args):
N=sum(1 for _ in F.variables())
return Shuffle(F, polarity_flips=[-1]*N)
| marcvinyals/cnfgen | cnfformula/transformations/shuffle.py | Python | gpl-3.0 | 6,427 |
#
# CompleteTranscription.py
#
# by Andrea Cogliati <andrea.cogliati@rochester.edu>
# University of Rochester
#
from music21 import *
import subprocess
import operator
import math
from os import system
class MidiBeat:
def __init__(self, timestamp, level, division):
self.timestamp = timestamp
self.level = level
self.division = division
pass
class MidiNote:
_PITCHCLASSES = ['f', 'c', 'g', 'd', 'a', 'e', 'b']
_ACCIDENTALS = ['', '#', '##', '--', '-']
_MIDINAMES = ['c', 'c#', 'd', 'd#', 'e', 'f', 'f#', 'g', 'g#', 'a', 'a#', 'b']
def __init__(self, onset, offset, midinote, stream):
self.onset = onset
self.offset = offset
self.midinote = midinote
self.stream = stream
self.tpc = None
self.beat = None
self.duration = None
self.octave = midinote // 12 - 1
pass
    @property
    def note_name(self):
        # `tpc` appears to index the line of fifths (f-c-g-d-a-e-b): each
        # block of seven values adds one more accidental, so the spelled
        # pitch class is recovered from tpc mod 7 plus an accidental index.
        if self.tpc != None:
            pitch_idx = self.tpc % 7 - 1
            accidentals = (self.tpc - 1) // 7
            pitchname = self._PITCHCLASSES[pitch_idx] + self._ACCIDENTALS[accidentals]
        else:
            # fall back to a plain MIDI spelling when no TPC was assigned
            pitch_idx = self.midinote % 12
            pitchname = self._MIDINAMES[pitch_idx]
        return pitchname + str(self.octave)
@property
def note_list_name(self):
return 'Note\t{}\t{}\t{}'.format(self.onset, self.offset, self.midinote)
def __str__(self):
return self.note_name
class MidiTimeSignaure:
def __init__(self, tactus_division, upper_division, phase):
self.phase = phase
self.initial_division = 1
if (tactus_division, upper_division) == (2, 2):
self.beats = 4
self.duration = 4
self.divisions = 16
self.division_per_quarter = 4
elif (tactus_division, upper_division) == (2, 3):
self.beats = 3
self.duration = 4
self.divisions = 12
self.division_per_quarter = 4
if phase == 2:
self.initial_division = 9
elif (tactus_division, upper_division) == (3, 2):
self.beats = 6
self.duration = 8
self.divisions = 12
self.division_per_quarter = 4
if phase == 1:
self.initial_division = 7
elif (tactus_division, upper_division) == (3, 3):
self.beats = 9
self.duration = 8
self.divisions = 18
self.division_per_quarter = 4
def __str__(self):
return '{0}/{1}'.format(self.beats, self.duration)
class MidiScore:
def __init__(self):
self.key = None
self.time_signature = None
self.beats = None
self.notes = None
self.num_streams = None
def set_key(self, key_string):
key_string = key_string.replace('b','-')
if key_string[-1] == 'm':
key_string = key_string[0:-1].lower()
self.key = key_string
def set_meter(self, meter_output):
meter_list = strlst_to_intlst(meter_output.split())
tactus_division, upper_division, phase = meter_list[1], meter_list[2], meter_list[4]
self.time_signature = MidiTimeSignaure(tactus_division, upper_division, phase)
def parse_quantized_notes_output(self, quantized_note_events_output):
self.beats = []
division = self.time_signature.initial_division
self.notes = []
self.num_streams = 0
for line in quantized_note_events_output.split('\n'):
tokens = line.split()
if tokens != []:
if tokens[0] == 'Beat':
self.beats.append(MidiBeat(int(tokens[1]), int(tokens[2]), division))
division += 1
elif tokens[0] == 'Note':
stream = int(tokens[4])
self.notes.append(MidiNote(int(tokens[1]), int(tokens[2]), int(tokens[3]), stream))
if stream > self.num_streams:
self.num_streams = stream
def find_note(self, onset, offset, midinote):
for idx in range(len(self.notes)):
if self.notes[idx].onset == onset and self.notes[idx].offset == offset and self.notes[idx].midinote == midinote:
return idx
return None
def parse_harmony_output(self, harmony_output):
for line in harmony_output.split('\n'):
tokens = line.split()
if tokens != []:
if tokens[0] == 'TPCNote':
idx = self.find_note(int(tokens[1]), int(tokens[2]), int(tokens[3]))
if idx != None:
tpc = int(tokens[4])
self.notes[idx].tpc = tpc
def find_notes_beat(self):
for idx in range(len(self.notes)):
self.notes[idx].beat = self.find_beat(self.notes[idx].onset)
self.notes[idx].duration = self.find_beat(self.notes[idx].offset).division - self.notes[idx].beat.division
def find_beat(self, onset):
min_idx = 0
for idx in range(1,len(self.beats)):
if abs(self.beats[idx].timestamp - onset) <= abs(self.beats[min_idx].timestamp - onset):
min_idx = idx
else:
break
return self.beats[min_idx]
def get_stream(self, key):
return [note for note in self.notes if note.stream == key]
# Helper functions
def strlst_to_intlst(strlst):
intlst = []
for num in strlst:
intlst.append(int(num))
return intlst
def find_empty_voices(a_measure):
empty_voices = []
hasNotes = False
for a_voice in a_measure.voices:
if a_voice.hasElementOfClass(note.Note):
hasNotes = True
else:
empty_voices.append(a_voice)
if hasNotes:
return empty_voices
else:
return empty_voices[1:]
def remove_extra_rests_from_score(the_score):
# Remove extra rests
emptyVoices = []
for el in the_score.recurse():
if isinstance(el, stream.Measure):
emptyVoices.extend(find_empty_voices(el))
the_score.remove(emptyVoices, recurse=True)
return the_score
def fix_empty_voice(a_voice):
a_duration = a_voice.elements[0].duration
a_voice.remove(a_voice.elements[0])
a_spacer = note.SpacerRest()
a_spacer.duration = a_duration
a_voice.insert(0, a_spacer)
def isStandardKey(a_key):
    for sharps in range(-7, 8):  # include the 7-sharp signature as well
ks = key.KeySignature(sharps)
ks.mode = 'major'
maj_ton = ks.getScale().tonic
ks.mode = 'minor'
min_ton = ks.getScale().tonic
maj_key = key.Key(maj_ton, 'major')
min_key = key.Key(min_ton, 'minor')
if a_key == maj_key or a_key == min_key:
return True
return False
def pad_part(a_part, a_time_signature):
if (a_part.duration.quarterLength * (a_time_signature.duration // 4)) % a_time_signature.beats:
a_rest = note.Rest()
a_rest.quarterLength = (a_time_signature.beats - (a_part.duration.quarterLength * (a_time_signature.duration // 4) % a_time_signature.beats)) / (a_time_signature.duration // 4)
a_part.insert(a_part.duration.quarterLength, a_rest)
def setup():
pass
def complete_transcription(midifile):
setup()
## Process MIDI file according to the cognitive model
print('Convert MIDI into note list')
note_list = subprocess.check_output(['mftext', midifile])
# fix overlapping notes
print('Fix overlapping notes')
new_note_list = []
for line in note_list.decode().split('\n'):
tokens = line.split()
if tokens != []:
if tokens[0] == 'Note':
new_note_list.append(MidiNote(int(tokens[1]), int(tokens[2]), int(tokens[3]), 0))
new_note_list.sort(key=operator.attrgetter('onset'))
OVERLAP_THR = .30
for i in range(len(new_note_list)):
for j in range(i+1,len(new_note_list)):
if new_note_list[j].onset >= new_note_list[i].offset:
break
len_note_i = new_note_list[i].offset - new_note_list[i].onset
len_note_j = new_note_list[j].offset - new_note_list[j].onset
overlap_start = max(new_note_list[i].onset, new_note_list[j].onset)
overlap_end = min(new_note_list[i].offset, new_note_list[j].offset)
overlap = overlap_end - overlap_start
overlap_note_i = overlap/len_note_i
overlap_note_j = overlap/len_note_j
if overlap_note_i + overlap_note_j < OVERLAP_THR:
new_note_list[i].offset = new_note_list[j].onset
note_list = '\n'.join([note.note_list_name for note in new_note_list]).encode()
print('Detect meter')
polyph_proc = subprocess.Popen(['polyph', '-v', '-1'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
meter_output = polyph_proc.communicate(input=note_list)[0]
# polyph_proc = subprocess.Popen(['polyph', '-v', '-2'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# chords_output = polyph_proc.communicate(input=note_list)[0]
print('Detect beats and quantized notes')
polyph_proc = subprocess.Popen(['polyph', '-v', '-4'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
quantized_note_events_output = polyph_proc.communicate(input=note_list)[0]
print('Determine note spelling')
harmony_proc = subprocess.Popen(['harmony', '-p', 'harmony_params.txt'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
harmony_output = harmony_proc.communicate(input=quantized_note_events_output)[0]
print('Estimate key')
key_proc = subprocess.Popen('key', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
key_output = key_proc.communicate(input=harmony_output)[0]
## Create intermediate representation
midi_score = MidiScore()
# Parsing the key
midi_score.set_key(key_output.decode().split()[0])
print('The key is', midi_score.key)
# Parsing the meter
midi_score.set_meter(meter_output.decode())
print('The time signature is', midi_score.time_signature)
# Parsing quantized notes output
midi_score.parse_quantized_notes_output(quantized_note_events_output.decode())
# Parsing harmony output
midi_score.parse_harmony_output(harmony_output.decode())
# Find note beats
midi_score.find_notes_beat()
## Create music21 score
print('Creating score')
trebleStaff = stream.PartStaff()
trebleStaff.insert(0, meter.TimeSignature(str(midi_score.time_signature)))
trebleStaff.insert(0, key.Key(str(midi_score.key)))
trebleStaff.insert(0, clef.TrebleClef())
bassStaff = stream.PartStaff()
bassStaff.insert(0, meter.TimeSignature(str(midi_score.time_signature)))
bassStaff.insert(0, key.Key(str(midi_score.key)))
bassStaff.insert(0, clef.BassClef())
# Create streams
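# Beat divisions are 1-based, so (division - 1) / division_per_quarter converts a
# beat position into a quarterLength offset; each detected stream becomes its own
# music21 Stream inserted at its first note's offset.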
streams = stream.Score()
for idx in range(1, midi_score.num_streams+1):
new_stream = stream.Stream()
note_stream = midi_score.get_stream(idx)
stream_offset = (note_stream[0].beat.division - 1) / midi_score.time_signature.division_per_quarter
for note_idx in range(len(note_stream)):
midinote = note_stream[note_idx]
new_note = note.Note(str(midinote))
if note_idx != len(note_stream) - 1:
new_note.quarterLength = (note_stream[note_idx+1].beat.division - midinote.beat.division) / midi_score.time_signature.division_per_quarter
else:
new_note.quarterLength = midinote.duration / midi_score.time_signature.division_per_quarter
new_stream.insert((midinote.beat.division - 1) / midi_score.time_signature.division_per_quarter - stream_offset, new_note)
streams.insert(stream_offset, new_stream)
# Assign streams to correct staff
for a_stream in streams.elements:
if isinstance(a_stream.bestClef(), clef.TrebleClef):
trebleStaff.insert(a_stream.offset, a_stream)
else:
bassStaff.insert(a_stream.offset, a_stream)
# Make notation
trebleStaff = trebleStaff.flat
bassStaff = bassStaff.flat
# Fix parts with different lengths
treble_staff_bars = math.ceil((trebleStaff.duration.quarterLength * (midi_score.time_signature.duration // 4)) / midi_score.time_signature.beats)
bass_staff_bars = math.ceil((bassStaff.duration.quarterLength * (midi_score.time_signature.duration // 4)) / midi_score.time_signature.beats)
if treble_staff_bars > bass_staff_bars:
a_rest = note.Rest()
a_rest.quarterLength = midi_score.time_signature.beats / (midi_score.time_signature.duration // 4)
bassStaff.insert((treble_staff_bars - 1) * midi_score.time_signature.beats / (midi_score.time_signature.duration // 4), a_rest)
elif bass_staff_bars > treble_staff_bars:
a_rest = note.Rest()
a_rest.quarterLength = midi_score.time_signature.beats / (midi_score.time_signature.duration // 4)
trebleStaff.insert((bass_staff_bars - 1) * midi_score.time_signature.beats / (midi_score.time_signature.duration // 4), a_rest)
# Pad last measure of parts, if necessary
pad_part(trebleStaff, midi_score.time_signature)
pad_part(bassStaff, midi_score.time_signature)
trebleStaff.makeVoices(inPlace=True)
trebleStaff.makeRests(fillGaps=True)
bassStaff.makeVoices(inPlace=True)
bassStaff.makeRests(fillGaps=True)
the_score = stream.Score()
the_score.insert(0, trebleStaff)
the_score.insert(0, bassStaff)
the_score = the_score.makeNotation()
# Fix missing key signature
for part in range(2):
if not the_score[part][0].hasElementOfClass(key.Key):
the_score[part][0].insert(0, key.Key(str(midi_score.key)))
# Fix non-standard key signatures
if not isStandardKey(key.Key(str(midi_score.key))):
print('Fixing key signature')
for el in the_score.recurse():
if isinstance(el, key.Key):
el.tonic = el.pitchAndMode[0].getEnharmonic()
el.sharps = key.Key(el.pitchAndMode[0].getEnharmonic(), el.mode).sharps
elif isinstance(el, note.Note):
el.pitch = el.pitch.getEnharmonic()
# Remove extra rests
print('Removing extra rests')
empty_voices = []
for el in the_score.recurse():
if isinstance(el, stream.Measure):
empty_voices.extend(find_empty_voices(el))
for a_voice in empty_voices:
fix_empty_voice(a_voice)
return the_score
def convert_part_to_lilypond(a_part):
lilypond_part = ''
lpc = lily.translate.LilypondConverter()
lilypond_part += lpc.lySequentialMusicFromStream(a_part.getKeySignatures()).stringOutput()
lilypond_part += lpc.lySequentialMusicFromStream(a_part.getTimeSignatures()).stringOutput()
all_voices = stream.Stream()
voice_less = not a_part[0].hasVoices()
if voice_less:
new_voice = stream.Voice()
new_voice.id = 0
all_voices.insert(0, new_voice)
for el in a_part.recurse():
if isinstance(el, note.GeneralNote):
if voice_less:
voice_in_score = all_voices.getElementById(0)
measure_offset = el.activeSite.offset
else:
voice_in_score = all_voices.getElementById(el.activeSite.id)
measure_offset = el.activeSite.activeSite.offset
voice_in_score.insert(measure_offset + el.offset, el, setActiveSite=False)
elif isinstance(el, stream.Voice) and not all_voices.getElementById(el.id):
new_voice = stream.Voice()
new_voice.id = el.id
all_voices.insert(0, new_voice)
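# Emit the collected voices as LilyPond polyphony: the voices are wrapped in
# << ... >>, separated by \\, each one a sequential { ... } music expression.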
voice_num = 0
lilypond_part += ' << '
for a_voice in all_voices:
if voice_num > 0:
lilypond_part += ' \\\\ '
lilypond_part += ' { '
for a_note in a_voice:
if a_note.quarterLength == 0:
continue
if len(a_note.duration.components) == 1:
lilypond_note = lpc.lySimpleMusicFromNoteOrRest(a_note).stringOutput().translate(str.maketrans('', '', '[]'))
else:
lilypond_note = ''
is_first = True
for a_duration in a_note.duration.components:
if is_first or isinstance(a_note, note.Rest) or isinstance(a_note, note.SpacerRest):
is_first = False
else:
lilypond_note += ' ~ '
if isinstance(a_note,note.Note):
simple_note = note.Note()
simple_note.pitch = a_note.pitch
# print(a_note,simple_note)
elif isinstance(a_note,note.Rest):
simple_note = note.Rest()
elif isinstance(a_note, note.SpacerRest):
simple_note = note.SpacerRest()
else:
print("I don't know what {} is".format(a_note))
simple_note.quarterLength = a_duration.quarterLength
lilypond_note += lpc.lySimpleMusicFromNoteOrRest(simple_note).stringOutput().translate(str.maketrans('', '', '[]'))
lilypond_part += lilypond_note
lilypond_part += ' } '
voice_num += 1
lilypond_part += ' >> \\bar "|." '
return lilypond_part
def convert_music21_score_to_lilypond(the_score):
lilypond_version = '\\version "2.18.2"'
lilypond_header = '\\header { tagline = "" }'
lilypond_score_footer = """\\score { \\new PianoStaff
<<
% \\set PianoStaff.instrumentName = #"Piano "
\\new Staff = "upper" \\upper
\\new Staff = "lower" \\lower >> }"""
lilypond_u = convert_part_to_lilypond(the_score.parts[0])
lilypond_l = convert_part_to_lilypond(the_score.parts[1])
lilypond_part_str = '{} = {{ \\clef {} {} }}'
lilypond_upper = lilypond_part_str.format('upper', 'treble', lilypond_u)
lilypond_lower = lilypond_part_str.format('lower', 'bass', lilypond_l)
lilypond_score = lilypond_version + '\n' + lilypond_header + '\n' + lilypond_upper + '\n' + lilypond_lower + '\n' + lilypond_score_footer
return lilypond_score
def main():
midifile = 'minuet.mid'
the_score = complete_transcription(midifile)
lilypond_score = convert_music21_score_to_lilypond(the_score)
with open('minuet.ly', 'w') as ly_file: ly_file.write(lilypond_score)
system('lilypond -o minuet minuet.ly')
if __name__ == '__main__':
main()
| AndreaCogliati/CompleteTranscription | CompleteTranscription.py | Python | bsd-3-clause | 18,533 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='polygamy',
version='0.1.2',
description='Handle multiple SCM repositories easily.',
long_description=readme + '\n\n' + history,
author='Chris Trotman',
author_email='chris@trotman.io',
url='https://github.com/solarnz/polygamy',
packages=[
'polygamy',
],
include_package_data=True,
install_requires=[
'blessings == 1.5.1',
'gevent >= 1.0',
'tabulate == 0.7.2',
],
license="BSD",
zip_safe=False,
keywords='polygamy',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
entry_points={
'console_scripts': [
'polygamy = polygamy:main'
]
},
)
| solarnz/polygamy | setup.py | Python | bsd-3-clause | 1,343 |
#-*- coding: utf-8 -*-
import urllib2
import re
import CommonFunctions
import base64
common = CommonFunctions
from resources.lib import utils
title=['NRJ12','Chérie 25']
img=['nrj12','cherie25']
readyForUse=True
def list_shows(channel,folder):
shows=[]
filePath=utils.downloadCatalog('http://www.nrj-play.fr/%s/replay' % channel,channel + '.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace("\n", "")
html=' '.join(html.split())
if folder=='none':
match = re.compile(r'<li class="subNav-menu-item">(.*?)<a href="(.*?)" class=(.*?)>(.*?)</a>',re.DOTALL).findall(html)
if match:
for empty,link,empty2,title in match:
if 'active' not in empty2:
shows.append( [channel,link, title , '','folder'] )
else:
print 'http://www.nrj-play.fr%s' % (folder)
filePath=utils.downloadCatalog('http://www.nrj-play.fr%s' % (folder),channel + folder +'.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace("\n", "")
html=' '.join(html.split())
print html
match = re.compile(r'<h2 class="linkProgram-title"> <a href="(.*?)">(.*?)</a> </h2>',re.DOTALL).findall(html)
if match:
for link,title in match:
title = common.replaceHTMLCodes(title)
title = title.title()
shows.append( [channel,link, title.encode("utf-8") , '','shows'] )
return shows
def list_videos(channel,link):
videos=[]
filePath=utils.downloadCatalog('http://www.nrj-play.fr' + link,channel + link.replace('/','') +'.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace("\n", "")
html=' '.join(html.split())
print html
match = re.compile(r'<h3 class="thumbnailReplay-title" itemprop="name"> <a href="(.*?)">(.*?)</a> </h3>',re.DOTALL).findall(html)
if match:
for link,title in match:
title = common.replaceHTMLCodes(title)
title = title.title()
infoLabels={ "Title": title}
videos.append( [channel, link , title , '',infoLabels,'play'] )
else:
match = re.compile(r'<meta itemprop="name" content="(.*?)" />',re.DOTALL).findall(html)
if match:
for title in match:
title = common.replaceHTMLCodes(title)
title = title.title()
infoLabels={ "Title": title}
videos.append( [channel, link , title , '',infoLabels,'play'] )
return videos
def getVideoURL(channel,link):
filePath=utils.downloadCatalog('http://www.nrj-play.fr' + link,channel + link.replace('/','') +'.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace("\n", "")
html=' '.join(html.split())
print html
match = re.compile(r'<meta itemprop="contentUrl" content="(.*?)" alt="',re.DOTALL).findall(html)
url=match[0]
return url | spmjc/plugin.video.freplay | resources/lib/channels/nrj12.py | Python | gpl-2.0 | 3,267 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from mom.codec import json
from mom.codec import text
from mom.tests import constants
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
class Test_decode(unittest2.TestCase):
def test_decode(self):
# decode accepts unicode only.
self.assertEqual(json.json_decode(constants.JSON_UFOO), constants.UFOO)
def test_raises_error_when_invalid_type(self):
self.assertRaises(TypeError, json.json_decode, constants.JSON_FOO)
class Test_encode(unittest2.TestCase):
def test_encode(self):
# json deals with strings, not bytes.
self.assertEqual(json.json_decode(json.json_encode(constants.UNICODE_VALUE)), constants.UNICODE_VALUE)
def test_raises_error_when_invalid_type(self):
self.assertRaises(TypeError, json.json_encode, text.utf8_encode(constants.UNICODE_VALUE))
self.assertRaises(TypeError, json.json_encode, constants.X_BYTE)
if __name__ == "__main__":
unittest2.main()
| gorakhargosh/mom | mom/tests/test_mom_codec_json.py | Python | apache-2.0 | 1,683 |
"""
Views for PubSite app.
"""
from django.conf import settings
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetDoneView,
PasswordResetConfirmView,
PasswordResetCompleteView,
)
from django.shortcuts import render
import requests
import logging
logger = logging.getLogger(__name__)
def _get_context(page_name):
return {
"pages": settings.PUBLIC_PAGES,
"current_page_name": page_name,
}
# Regular index
# def index(request):
# """
# View for the static index page
# """
# return render(request, 'public/home.html', _get_context('Home'))
def index(request):
"""
View for the static index page
"""
return render(request, "public/home.html", _get_context("Home"))
def about(request):
"""
View for the static chapter history page.
"""
return render(request, "public/about.html", _get_context("About"))
def activities(request):
"""
View for the static chapter service page.
"""
return render(
request,
"public/activities.html",
_get_context("Service & Activities"),
)
def rush(request):
"""
View for the static chapter service page.
"""
return render(
request,
"public/rush.html",
_get_context("Rush"),
)
def campaign(request):
"""
View for the campaign service page.
"""
# Overrride requests Session authentication handling
class NoRebuildAuthSession(requests.Session):
def rebuild_auth(self, prepared_request, response):
"""
No code here means requests will always preserve the Authorization
header when redirected.
Be careful not to leak your credentials to untrusted hosts!
"""
url = "https://api.givebutter.com/v1/transactions/"
headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"}
response = None
# Create custom requests session
session = NoRebuildAuthSession()
# Make GET request to server, timeout in seconds
try:
r = session.get(url, headers=headers, timeout=0.75)
if r.status_code == 200:
response = r.json()
else:
logger.error(f"ERROR in request: {r.status_code}")
except requests.exceptions.Timeout:
logger.warning("Connection to GiveButter API Timed out")
except requests.ConnectionError:
logger.warning("Connection to GiveButter API could not be resolved")
except requests.exceptions.RequestException:
logger.error(
"An unknown issue occurred while trying to retrieve GiveButter Donor List"
)
# Grab context object to use later
ctx = _get_context("Campaign")
# Check for successful response, if so - filter, sort, and format data
if response and "data" in response:
response = response["data"] # Pull data from GET response object
logger.debug(f"GiveButter API Response: {response}")
# Filter by only successful transactions, then sort by amount descending
successful_txs = [tx for tx in response if tx["status"] == "succeeded"]
sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True)
# Clean data to a list of dictionaries & remove unnecessary data
transactions = [
{
"name": tx["giving_space"]["name"],
"amount": tx["giving_space"]["amount"],
"message": tx["giving_space"]["message"],
}
for tx in sorted_txs[:20]
]
# Attach transaction dictionary & length to context object
ctx["transactions"] = transactions
ctx["num_txs"] = len(successful_txs)
return render(
request,
"public/campaign.html",
ctx,
)
def permission_denied(request):
"""
View for 403 (Permission Denied) error.
"""
return render(
request,
"common/403.html",
_get_context("Permission Denied"),
)
def handler404(request, exception):
""" """
return render(request, "common/404.html", _get_context("Page Not Found"))
class ResetPassword(PasswordResetView):
template_name = "password_reset/password_reset_form.html"
class ResetPasswordDone(PasswordResetDoneView):
template_name = "password_reset/password_reset_done.html"
class ResetPasswordConfirm(PasswordResetConfirmView):
template_name = "password_reset/password_reset_confirm.html"
class ResetPasswordComplete(PasswordResetCompleteView):
template_name = "password_reset/password_reset_complete.html"
| sigmapi-gammaiota/sigmapi-web | sigmapiweb/apps/PubSite/views.py | Python | mit | 4,610 |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class WlbItemQueryRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.is_sku = None
self.item_code = None
self.item_type = None
self.name = None
self.page_no = None
self.page_size = None
self.parent_id = None
self.status = None
self.title = None
def getapiname(self):
return 'taobao.wlb.item.query'
| CooperLuan/devops.notes | taobao/top/api/rest/WlbItemQueryRequest.py | Python | mit | 489 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.responses import BaseResponse
class ControlledShutdownV0Response(BaseResponse):
schema = [
{'name': 'error', 'type': 'int16'},
{'name': 'partitions_remaining',
'type': 'array',
'item_type': [
{'name': 'topic', 'type': 'string'},
{'name': 'partition', 'type': 'int32'},
]},
]
| toddpalino/kafka-tools | kafka/tools/protocol/responses/controlled_shutdown_v0.py | Python | apache-2.0 | 1,166 |
# Copyright 2011, 2013-2015 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from karaage.people.models import Person
from karaage.people.utils import validate_username_for_rename_person
from karaage.people.utils import UsernameInvalid, UsernameTaken
import sys
import django.db.transaction
import tldap.transaction
try:
input = raw_input
except NameError:
pass
class Command(BaseCommand):
help = 'Change a username for a person and all accounts for that person'
args = '<old username> <new username>'
@django.db.transaction.atomic
@tldap.transaction.commit_on_success
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError(
'Usage: change_username <old username> <new username>')
old = args[0]
new = args[1]
try:
person = Person.objects.get(username=old)
except Person.DoesNotExist:
raise CommandError('person %s does not exist' % old)
try:
validate_username_for_rename_person(new, person)
except UsernameInvalid as e:
raise CommandError(e.args[0])
except UsernameTaken as e:
raise CommandError(e.args[0])
while 1:
confirm = input(
'Change person "%s" and accounts to "%s" (yes,no): '
% (old, new))
if confirm == 'yes':
break
elif confirm == 'no':
return sys.exit(0)
else:
print("Please enter yes or no")
for account in person.account_set.filter(date_deleted__isnull=True):
account.username = new
account.save()
print("Changed username on %s account" % account.machine_category)
person.username = new
person.save()
print("Changed username on person")
print("Done")
| Karaage-Cluster/karaage-debian | karaage/management/commands/change_username.py | Python | gpl-3.0 | 2,564 |
#!/usr/bin/env python3
from pyopencga.opencga_config import ClientConfiguration
from pyopencga.opencga_client import OpencgaClient
import argparse
import getpass
from pprint import pprint
import json
def qc(families):
print('Executing qc...')
for family in families:
if len(family['members']) > 1 and 'qualityControl' not in family:
data = {'family': family['id']}
if args.relatednessMaf is not None:
data['relatednessMaf'] = args.relatednessMaf
print("Calculate QC " + family['id'])
oc.variants.run_family_qc(data, study=args.study)
def index(families):
print('Executing index...')
for family in families:
if len(family['members']) > 1:
family_list = [family['id']]
data = {'family': family_list}
print("Index " + family['id'])
oc.variant_operations.index_family_genotype(data, study=args.study)
# Define parameters
parser = argparse.ArgumentParser()
parser.add_argument('action', help="Action to execute", choices=["qc", "index"], default="")
parser.add_argument('-s', '--study', help="Study ID or fqn")
parser.add_argument('--id', help="comma separated list of family ID")
parser.add_argument('--phenotypes', help="Comma-separated list of phenotype ID, e.g. hp:12345")
parser.add_argument('--disorders', help="Comma-separated list of disorder ID, e.g. hp:12345")
parser.add_argument('--relatednessMaf', help="Populatioon MAF, e.g. cohort:ALL>0.05")
parser.add_argument('--conf', help="Load client-configuration.yml")
parser.add_argument('--url', default="https://bioinfo.hpc.cam.ac.uk/opencga-prod", help="Default https://bioinfo.hpc.cam.ac.uk/opencga-prod")
parser.add_argument('-u', '--user', help="Username to login to OpenCGA")
parser.add_argument('-p', '--password', help="Password to login to OpenCGA")
args = parser.parse_args()
# Ask for password
if args.password is None:
args.password = getpass.getpass()
# Create OpencgaClient config object from file or dictionary
if args.conf is not None:
config = ClientConfiguration(args.conf)
else:
config_dict = {
"rest": {
"host": args.url
}
}
config = ClientConfiguration(config_dict)
# Create OpenCGA client and login
oc = OpencgaClient(config)
oc.login(args.user, args.password)  # password was prompted above if not supplied on the command line
# Fetch selected families
query = {}
if args.study is not None:
query['study'] = args.study
if args.id is not None:
query['id'] = args.id
if args.phenotypes is not None:
query['phenotypes'] = args.phenotypes
if args.disorders is not None:
query['disorders'] = args.disorders
family_resp = oc.families.search(**query)
# Execute action
if args.action == "qc":
qc(family_resp.get_results())
elif args.action == "index":
index(family_resp.get_results())
| opencb/opencga | opencga-app/app/misc/scripts/family_ops.py | Python | apache-2.0 | 2,902 |
"""
WSGI config for buzzit project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "buzzit.settings")
application = get_wsgi_application()
| jmennen/group5 | Code/buzzit/buzzit/wsgi.py | Python | bsd-2-clause | 389 |
import json
import urllib2, urllib
import logging
logging.basicConfig()
logger = logging.getLogger('pbs_bullet.notifier')
class Notifier(object):
def __init__(self, name, pb_token):
self.token = pb_token
self.name = name
self.iden = self.create_listener()
def create_listener(self):
"""
Register this device to receive pushbullet notifications,
and if successful return the identifier.
"""
data = urllib.urlencode({'nickname': self.name, 'type': 'stream'})
logger.debug("Adding %s to pushbullet." % self.name)
request = urllib2.Request('https://api.pushbullet.com/v2/devices', data, headers={
'Authorization': "Bearer %s" % self.token,
'Accept': '*/*'
})
try:
resp = urllib2.urlopen(request)
return json.load(resp)['iden']
except urllib2.HTTPError as e:
logger.error("Pushbullet register error.")
logger.error(e.read())
except urllib2.URLError as e:
logger.error("Pushbullet register error.")
logger.error(e)
def delete_listener(self):
"""
Unregister this device as a listener.
"""
logger.debug("Deleting %s from pushbullet." % self.iden)
request = urllib2.Request('https://api.pushbullet.com/v2/devices/%s' % self.iden, headers={
'Authorization': "Bearer %s" % self.token,
'Accept': '*/*'
})
request.get_method = lambda: 'DELETE'
try:
resp = urllib2.urlopen(request)
return json.load(resp)
except urllib2.HTTPError as e:
logger.error("Pushbullet delete listener error.")
logger.error(e.read())
except urllib2.URLError as e:
logger.error("Pushbullet delete listener error.")
logger.error(e)
def delete_push(self, push):
"""
Delete a push.
"""
logger.debug("Deleting push id %s from pushbullet." % push['iden'])
request = urllib2.Request('https://api.pushbullet.com/v2/pushes/%s' % push['iden'], headers={
'Authorization': "Bearer %s" % self.token,
'Accept': '*/*'
})
request.get_method = lambda: 'DELETE'
try:
resp = urllib2.urlopen(request)
return json.load(resp)
except urllib2.HTTPError as e:
logger.error("Pushbullet delete error.")
logger.error(e.read())
except urllib2.URLError as e:
logger.error("Pushbullet delete error.")
logger.error(e)
def check_pushes(self):
"""
Return any undismissed pushes for this device, and dismiss
them.
"""
data = urllib.urlencode({'active': '0'})
logger.debug("Checking pushes for %s." % self.iden)
request = urllib2.Request('https://api.pushbullet.com/v2/pushes?%s' % data, headers={
'Authorization': "Bearer %s" % self.token,
'Accept': '*/*'
})
pushes = []
try:
resp = urllib2.urlopen(request)
pushes = json.load(resp)['pushes']
pushes = filter(lambda push: 'target_device_iden' in push.keys() and push['target_device_iden'] == self.iden, pushes)
logger.debug("Got %d pushes." % len(pushes))
# Delete them from the server
map(lambda push: self.delete_push(push), pushes)
pushes.reverse()
except urllib2.HTTPError as e:
logger.error("Pushbullet check error.")
logger.error(e.read())
except urllib2.URLError as e:
logger.error("Pushbullet check error.")
logger.error(e)
finally:
return pushes
def send_notification(self, title, body, target=None):
"""
Send a pushbullet notification.
"""
data = {"type":"note", "title": title, "body": body, "source_device_iden":self.iden}
if target is not None:
data['device_iden'] = target
note = json.dumps(data)
logger.debug("Sending %s to pushbullet." % note)
request = urllib2.Request('https://api.pushbullet.com/v2/pushes', note, headers={
'Authorization':"Bearer %s" % self.token,
'Content-Type':'application/json',
'Accept':'*/*'
})
try:
resp = urllib2.urlopen(request)
except urllib2.HTTPError as e:
logger.error("Pushbullet notify error.")
logger.error(e.read())
except urllib2.URLError as e:
logger.error("Pushbullet notify error.")
logger.error(e) | greenape/pbs_bullet | pbsbullet/notify.py | Python | bsd-2-clause | 4,699 |
__author__ = 'asifj'
import logging
from kafka import KafkaConsumer
import json
import traceback
from bson.json_util import dumps
from kafka import SimpleProducer, KafkaClient
from utils import Utils
logging.basicConfig(
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
level=logging.INFO
)
inputs = []
consumer = KafkaConsumer("SAPEvent", bootstrap_servers=['172.22.147.242:9092', '172.22.147.232:9092', '172.22.147.243:9092'], auto_commit_enable=False, auto_offset_reset="smallest")
message_no = 1
inputs = consumer.fetch_messages()
'''for message in consumer:
topic = message.topic
partition = message.partition
offset = message.offset
key = message.key
message = message.value
print "================================================================================================================="
if message is not None:
try:
document = json.loads(message)
collection = document.keys()[0]
if collection == "customerMaster":
print "customerMaster"
elif collection == "srAttachements":
#print dumps(document, sort_keys=True)
inputs.append(document)
except Exception, err:
print "CustomException"
print "Kafka Message: "+str(message)
print(traceback.format_exc())
print "================================================================================================================="
print "\n"
message_no += 1
'''
# To send messages synchronously
kafka = KafkaClient('172.22.147.232:9092,172.22.147.242:9092,172.22.147.243:9092')
producer = SimpleProducer(kafka)
for i in inputs:
try:
#producer.send_messages(b'SAPEvent', json.dumps(input))
document = json.loads(str(i.value))
type = document.keys()[0]
if type == "srDetails":
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
row = []
utils = Utils()
row = utils.validate_sr_details( document['srDetails'], row)
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "\n\n"
except Exception, err:
print "Kafka: " + str(i.value)
print str(err)
print(traceback.format_exc())
| asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase | KafkaCP.py | Python | apache-2.0 | 2,510 |
import wicked as w
from wicked import index
def test_sqopprod():
"""Test the SQOpProd class"""
w.reset_space()
w.add_space("o", "fermion", "occupied", ["i", "j"])
w.add_space("a", "fermion", "general", ["u", "v"])
w.add_space("v", "fermion", "occupied", ["a", "b", "c"])
opprod = w.sqopprod([], [])
assert str(opprod) == ""
opprod = w.sqopprod(["v_0"], [])
assert str(opprod) == "a+(v0)"
opprod = w.sqopprod([], ["o_0"])
assert str(opprod) == "a-(o0)"
opprod = w.sqopprod(["v_0"], ["o_0"])
assert str(opprod) == "a+(v0) a-(o0)"
assert opprod.latex() == r"\hat{a}^\dagger_{a} \hat{a}_{i}"
opprod = w.sqopprod(["v_0", "v_1"], ["o_0", "o_1"])
assert str(opprod) == "a+(v0) a+(v1) a-(o1) a-(o0)"
opprod1 = w.sqopprod(["v_0", "v_1"], ["o_0", "o_1"])
opprod2 = w.sqopprod(["v_0", "v_1"], ["o_0", "o_1"])
assert opprod1 == opprod2
opprod1 = w.sqopprod(["v_0"], [])
opprod2 = w.sqopprod(["v_0"], [])
assert not (opprod1 < opprod2)
opprod1 = w.sqopprod(["v_0"], [])
opprod2 = w.sqopprod(["v_1"], [])
assert opprod1 < opprod2
# let's test a bunch of combinations
opprod1 = w.sqopprod(["v_0"], ["o_0"])
opprod2 = w.sqopprod(["v_0"], ["o_0"])
assert opprod1 == opprod2
assert not (opprod1 < opprod2)
opprod1 = w.sqopprod(["v_0"], ["o_0"])
opprod2 = w.sqopprod(["v_1"], ["o_0"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["v_0"], ["o_1"])
opprod2 = w.sqopprod(["v_1"], ["o_0"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["v_1"], ["o_0"])
opprod2 = w.sqopprod(["v_1"], ["o_1"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["v_1"], ["o_1"])
opprod2 = w.sqopprod(["v_1"], ["o_0"])
assert not (opprod1 < opprod2)
opprod1 = w.sqopprod(["v_1"], ["o_2"])
opprod2 = w.sqopprod(["v_1", "v_2"], ["o_0"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["v_2"], ["o_2"])
opprod2 = w.sqopprod(["v_1", "v_2"], ["o_0"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["o_4"], ["o_2"])
opprod2 = w.sqopprod(["v_1", "v_2"], ["o_0"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["o_4"], ["o_2"])
opprod2 = w.sqopprod(["a_1", "o_2"], ["o_0"])
assert opprod1 < opprod2
opprod1 = w.sqopprod(["a_4"], ["o_2"])
opprod2 = w.sqopprod(["a_4"], ["a_2"])
assert opprod1 < opprod2
if __name__ == "__main__":
test_sqopprod()
| fevangelista/wicked | tests/sqopprod/test_sqopprod.py | Python | mit | 2,446 |
# -*- coding: utf-8 -*-
from code_coverage_bot import hgmo
def test_ok():
assert(hgmo)
| lundjordan/services | src/codecoverage/bot/tests/test_hgmo.py | Python | mpl-2.0 | 94 |
# -*- coding: utf-8 -*-
from chatterbot import ChatBot
from settings import GITTER
# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)
chatbot = ChatBot(
'GitterBot',
gitter_room=GITTER['ROOM'],
gitter_api_token=GITTER['API_TOKEN'],
gitter_only_respond_to_mentions=False,
input_adapter='chatterbot.input.Gitter',
output_adapter='chatterbot.output.Gitter',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
)
chatbot.train('chatterbot.corpus.english')
# The following loop will execute each time the user enters input
while True:
try:
response = chatbot.get_response(None)
# Press ctrl-c or ctrl-d on the keyboard to exit
except (KeyboardInterrupt, EOFError, SystemExit):
break
| Reinaesaya/OUIRL-ChatBot | examples/gitter_example.py | Python | bsd-3-clause | 809 |
from __future__ import absolute_import
class FileStorage(object):
def __init__(self, path=''):
self.path = path
def save(self, filename, fp, content_type=None, path=None):
raise NotImplementedError
def url_for(self, filename, expire=300):
raise NotImplementedError
def get_file(self, filename, offset=None, length=None):
raise NotImplementedError
def get_size(self, filename):
raise NotImplementedError
| dropbox/changes | changes/storage/base.py | Python | apache-2.0 | 469 |
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
print "So, you are %r old, %r tall and %r heavy." % (
age, height, weight)
| mshcruz/LearnPythonTheHardWay | ex12.py | Python | gpl-2.0 | 203 |
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
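# Illustrative examples (assuming a default port of 16662):
# parse_spec('1.2.3.4', 16662) -> (IPv4-mapped IPv6 bytes for 1.2.3.4, 16662)
# parse_spec('[2001:db8::1]:8333', 16662) -> (bytes of 2001:db8::1, 8333)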
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef SEQUENCE_CHAINPARAMSSEEDS_H\n')
g.write('#define SEQUENCE_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the Sequence network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 16662)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 116662)
g.write('#endif // SEQUENCE_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| duality-solutions/Sequence | share/seeds/generate-seeds.py | Python | mit | 4,303 |
'''
Created on Mar 3, 2016
@author: Daniel Rivas
'''
from .base import *
IS_PRODUCTION = True
ALLOWED_HOSTS = ['cogcommtl.ca', 'www.cogcommtl.ca', 'localhost:8000', 'percept.uqam.ca']
DEBUG = False
prod_only_apps = [
]
INSTALLED_APPS.extend(prod_only_apps)
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': get_secret("MYSQL_CREDS")
}
# change to live when we go live!
PAYPAL_MODE = 'live'
MEDIA_ROOT = os.path.normpath(os.path.join('/', 'var/', 'www/', 'labo/'))
| rivasd/djPsych | djPsych/settings/production.py | Python | gpl-3.0 | 541 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
"""
Defines all exceptions
"""
class AgentError(Exception):
"""
Base class of agent error.
"""
def __init__(self, msg, inner=None):
msg = u"[{0}] {1}".format(type(self).__name__, msg)
if inner is not None:
msg = u"{0}\nInner error: {1}".format(msg, inner)
super(AgentError, self).__init__(msg)
class AgentConfigError(AgentError):
"""
When configure file is not found or malformed.
"""
def __init__(self, msg=None, inner=None):
super(AgentConfigError, self).__init__(msg, inner)
class AgentNetworkError(AgentError):
"""
When network is not available.
"""
def __init__(self, msg=None, inner=None):
super(AgentNetworkError, self).__init__(msg, inner)
class ExtensionError(AgentError):
"""
When failed to execute an extension
"""
def __init__(self, msg=None, inner=None, code=-1):
super(ExtensionError, self).__init__(msg, inner)
self.code = code
class ProvisionError(AgentError):
"""
When provision failed
"""
def __init__(self, msg=None, inner=None):
super(ProvisionError, self).__init__(msg, inner)
class ResourceDiskError(AgentError):
"""
Mount resource disk failed
"""
def __init__(self, msg=None, inner=None):
super(ResourceDiskError, self).__init__(msg, inner)
class DhcpError(AgentError):
"""
Failed to handle dhcp response
"""
def __init__(self, msg=None, inner=None):
super(DhcpError, self).__init__(msg, inner)
class OSUtilError(AgentError):
"""
Failed to perform operation to OS configuration
"""
def __init__(self, msg=None, inner=None):
super(OSUtilError, self).__init__(msg, inner)
class ProtocolError(AgentError):
"""
Azure protocol error
"""
def __init__(self, msg=None, inner=None):
super(ProtocolError, self).__init__(msg, inner)
class ProtocolNotFoundError(ProtocolError):
"""
Azure protocol endpoint not found
"""
def __init__(self, msg=None, inner=None):
super(ProtocolNotFoundError, self).__init__(msg, inner)
class HttpError(AgentError):
"""
Http request failure
"""
def __init__(self, msg=None, inner=None):
super(HttpError, self).__init__(msg, inner)
class EventError(AgentError):
"""
Event reporting error
"""
def __init__(self, msg=None, inner=None):
super(EventError, self).__init__(msg, inner)
class CryptError(AgentError):
"""
Encrypt/Decrypt error
"""
def __init__(self, msg=None, inner=None):
super(CryptError, self).__init__(msg, inner)
class UpdateError(AgentError):
"""
Update Guest Agent error
"""
def __init__(self, msg=None, inner=None):
super(UpdateError, self).__init__(msg, inner)
class ResourceGoneError(HttpError):
"""
The requested resource no longer exists (i.e., status code 410)
"""
def __init__(self, msg=None, inner=None):
if msg is None:
msg = "Resource is gone"
super(ResourceGoneError, self).__init__(msg, inner)
class RemoteAccessError(AgentError):
"""
Remote Access Error
"""
def __init__(self, msg=None, inner=None):
super(RemoteAccessError, self).__init__(msg, inner)
| hglkrijger/WALinuxAgent | azurelinuxagent/common/exception.py | Python | apache-2.0 | 3,970 |
def ShellSort(A):
sublistcount = len(A) // 2
while sublistcount > 0:
for startposition in range(sublistcount):
gapInsertionSort(A, startposition, sublistcount)
print("After increments of size", sublistcount, "The list is", A)
sublistcount = sublistcount // 2
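# Shell's original gap sequence: halve the gap each pass; the final pass with
# gap 1 is a plain insertion sort over an almost-sorted list.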
def gapInsertionSort(A, start, gap):
for i in range(start + gap, len(A), gap):
currentvalue = A[i]
position = i
while position >= gap and A[position - gap] > currentvalue:
A[position] = A[position - gap]
position = position - gap
A[position] = currentvalue
A = [534, 246, 933, 127, 277, 321, 454, 565, 220]
ShellSort(A)
print(A)
| applecool/cs430assignments | Sorting/ShellSort.py | Python | mit | 698 |
"""
analysis inspired by
http://www.nature.com/mp/journal/vaop/ncurrent/full/mp2016143a.html
"""
import sys
import pandas,numpy
import statsmodels.api as sm
sys.path.append('../timeseries')
from load_myconnectome_data import *
xvar_names=['panas.positive','panas.negative']
rnaseq_data,gene_names,rnaseq_dates,rnaseq_subcodes=load_rnaseq_data(use_wgcna=False)
behavdata,behav_vars,behav_dates,behav_subcodes=load_behav_data(xvars=xvar_names)
rnaseq_joint,behavdata_joint,subcodes_joint=get_matching_datasets(rnaseq_data,behavdata,rnaseq_subcodes,behav_subcodes)
X=sm.add_constant(behavdata_joint)
geneidx=[i for i in range(len(gene_names)) if gene_names[i].find('MIR181')==0]
assert len(geneidx)==1
genedata=rnaseq_joint[:,geneidx[0]]
#print(numpy.corrcoef(genedata,behavdata_joint[:,0])[0,1])
#print(numpy.corrcoef(genedata,behavdata_joint[:,1])[0,1])
rlm_model = sm.RLM(genedata, X, M=sm.robust.norms.HuberT())
rlm_results = rlm_model.fit()
print (rlm_results.params)
| poldrack/myconnectome | myconnectome/rnaseq/MIR181_vs_affect.py | Python | mit | 979 |