code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from __future__ import absolute_import, unicode_literals
from collections import namedtuple
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models.fields import FieldDoesNotExist
from django.test.client import RequestFactory
from drf_toolbox.compat import django_pgfields_installed, models
from drf_toolbox.serializers import (fields, BaseModelSerializer,
ModelSerializer, RelatedField)
from drf_toolbox.serializers.fields import api
from drf_toolbox import viewsets
from rest_framework import serializers
from rest_framework.relations import HyperlinkedIdentityField
from tests import models as test_models, serializers as test_serializers
from tests.compat import mock
import unittest
import six
import uuid
# Skip message used by tests that require the optional django-pgfields package.
NO_DJANGOPG = 'django-pgfields is not installed.'
class SerializerSuite(unittest.TestCase):
    """Suite of test cases around custom serializers, ensuring that
    they provide expected output.
    """

    def test_api_endpoints_field_autocreated(self):
        """Establish that the `api_endpoints` key is auto-created on
        a serializer that doesn't explicitly define the field.
        """
        # Create a bogus viewset class, so the serializer can be
        # given context that is aware of it.
        class ViewSet(viewsets.ModelViewSet):
            model = test_models.NormalModel
            serializer_class = test_serializers.NormalSerializer

        # Create the serializer
        s = test_serializers.NormalSerializer()
        s.context = {
            'request': RequestFactory().get('/foo/bar/'),
            'view': ViewSet(),
        }
        # Ensure that the expected api.APIEndpointsField is present.
        df = s.get_default_fields()
        self.assertIn('api_endpoints', df)
        self.assertIsInstance(df['api_endpoints'], api.APIEndpointsField)

    def test_api_endpoints_field_default_serializer(self):
        """Establish that the `api_endpoints` key is created for a
        default serializer.
        """
        # Create a bogus viewset class, so the serializer can be
        # given context that is aware of it.
        class ViewSet(viewsets.ModelViewSet):
            model = test_models.NormalModel

        # Create the serializer.
        s = ViewSet().get_serializer_class()()
        s.context = {
            'request': RequestFactory().get('/foo/bar/'),
            'view': ViewSet(),
        }
        # Ensure that the expected api.APIEndpointField is present.
        df = s.get_default_fields()
        self.assertIn('api_endpoints', df)
        self.assertIsInstance(df['api_endpoints'], api.APIEndpointsField)

    def test_api_endpoint_field_default_serializer(self):
        """Establish that the `api_endpoint` key is created in a case
        where we cannot match to the viewset, and we're still using a
        specific serializer.
        """
        # Create a bogus viewset class, so the serializer can be
        # given context that is aware of it.
        # NOTE(review): this viewset does not declare a serializer_class,
        # so the serializer below cannot be matched to it -- presumably
        # that is why the singular `api_endpoint` is expected.
        class Viewset(viewsets.ModelViewSet):
            model = test_models.NormalModel

        # Create the serializer.
        s = test_serializers.NormalSerializer()
        s.context = {
            'request': RequestFactory().get('/foo/bar/'),
            'view': Viewset(),
        }
        # Ensure that the expected api.APIEndpointField is present.
        df = s.get_default_fields()
        self.assertIn('api_endpoint', df)
        self.assertIsInstance(df['api_endpoint'], api.APIEndpointField)

    def test_api_endpoint_key_existing(self):
        """Test that if a set of fields is provided with an `api_endpoints`
        field, that we don't barrel over it.
        """
        # Ensure I get what I expect from `get_default_fields`.
        # NOTE(review): the local name `fields` shadows the imported
        # `fields` module within this method.
        s = test_serializers.ExplicitAPIEndpointsSerializer()
        fields = s.get_default_fields()
        self.assertEqual(len(fields), 3)
        self.assertIsInstance(fields['api_endpoints'],
            serializers.IntegerField)

    def test_api_endpoints_autocovert_plural_to_singular(self):
        """Establish that explicitly specifying `api_endpoint` or
        `api_endpoints` will graciously switch between them when necessary.
        """
        # Create a serializer to use for this test.
        class Serializer(test_serializers.NormalSerializer):
            class Meta:
                model = test_serializers.NormalSerializer.Meta.model
                fields = ('id', 'api_endpoints')

        # Establish that a serializer instance with no context will
        # have an api_endpoint field.
        s = Serializer()
        self.assertIn('api_endpoint', s.opts.fields)
        self.assertNotIn('api_endpoints', s.opts.fields)

    def test_api_endpoints_autocovert_singular_to_plural(self):
        """Establish that explicitly specifying `api_endpoint` or
        `api_endpoints` will graciously switch between them when necessary.
        """
        # Create a serializer to use for this test.
        class Serializer(test_serializers.NormalSerializer):
            class Meta:
                model = test_serializers.NormalSerializer.Meta.model
                fields = ('id', 'api_endpoint')

        # Establish that a serializer instance with no context will
        # have an api_endpoints field once the viewset claims it.
        with mock.patch.object(ModelSerializer, '_viewset_uses_me') as vum:
            vum.return_value = True
            s = Serializer(context={'view': object(),})
            self.assertIn('api_endpoints', s.opts.fields)
            self.assertNotIn('api_endpoint', s.opts.fields)

    def test_direct_relationship(self):
        """Test that a direct relationship retrieval works
        as expected.
        """
        # Get the related field from a direct relationship.
        s = test_serializers.ChildSerializer()
        rel_field = s.get_related_field(
            model_field=test_models.ChildModel._meta.\
                get_field_by_name('normal')[0],
            related_model=test_models.NormalModel,
            to_many=False,
        )
        self.assertIsInstance(rel_field, RelatedField)

        # Verify the label.
        self.assertEqual(
            rel_field.label_from_instance(test_models.NormalModel()),
            'NormalModel object',
        )
        # Verify the value.
        self.assertFalse(rel_field.prepare_value(test_models.NormalModel()))

    def test_direct_relationship_with_explicit_fields(self):
        """Test that a direct relationship retrieval works as expected,
        and that our explicit field list chains down to the related field.
        """
        # Create our serializer.
        s = test_serializers.ChildSerializerII()
        rel_field = s.get_related_field(
            model_field=test_models.ChildModel._meta.\
                get_field_by_name('normal')[0],
            related_model=test_models.NormalModel,
            to_many=False,
        )
        self.assertIsInstance(rel_field, RelatedField)
        rel_field.context = {'request': RequestFactory().get('/foo/bar/')}

        # Get the serializer class.
        s = rel_field._get_serializer(test_models.NormalModel(bacon=42))
        self.assertEqual([i for i in s.get_fields().keys()], ['id', 'bacon'])

    def test_reverse_relationship(self):
        """Test that a reverse relationship retrieval works as
        expected.
        """
        # Instantiate my normal serializer and run a reverse
        # relationship against the fake child model.
        s = test_serializers.NormalSerializer()
        rel_field = s.get_related_field(None, test_models.ChildModel, False)
        self.assertIsInstance(rel_field, RelatedField)

    def test_related_field_with_no_pk(self):
        """Test that a related field receiving a model object
        with no primary key returns None.
        """
        rel_field = RelatedField(())
        answer = rel_field.to_native(test_models.ChildModel())
        self.assertEqual(answer, None)

    def test_related_field_with_pk(self):
        """Test that a related field receiving a model object
        with a primary key returns the serialized representation.
        """
        # Create a fake request.
        factory = RequestFactory()
        request = factory.get('/foo/')

        # Get the appropriate related field.
        # NOTE(review): `fake_pk` is never used below.
        fake_pk = uuid.uuid4()
        nm = test_models.NormalModel(id=42)
        cm = test_models.ChildModel(normal=nm)
        cs = test_serializers.ChildSerializer(context={'request': request})
        rel_field = cs.get_related_field(
            model_field=test_models.ChildModel._meta.\
                get_field_by_name('normal')[0],
            related_model=test_models.NormalModel,
            to_many=False,
        )
        rel_field.context = { 'request': request }

        # Get the final answer.
        answer = rel_field.to_native(nm)
        self.assertEqual({
            'api_endpoint': 'http://testserver/normal/%d/' % nm.id,
            'id': 42,
            'bacon': None,
            'bar': None,
            'baz': None,
            'foo': None,
        }, answer)

    def test_reverse_related_field_serializer(self):
        """Establish that a related field can be specified on a serializer
        without incident.
        """
        # Create a bogus request object.
        factory = RequestFactory()
        request = factory.get('/foo/')

        # Create a serializer that would otherwise show itself
        # at a related level.
        rs = test_serializers.ReverseSerializer()

        # Create an instance.
        nm = test_models.NormalModel(bar=1, baz=2, bacon=3)
        rm = test_models.RelatedModel(id=42, baz=1, normal=nm)

        # Get the fields from the serializer and determine that we get
        # what we expect.
        fields_dict = rs.get_default_fields()
        self.assertEqual(
            [i for i in fields_dict.keys()],
            [
                'id', 'api_endpoint', 'bacon', 'bar',
                'baz', 'foo', 'related_model',
            ],
        )
        # Pull out the related field.
        rel_field = fields_dict['related_model']
        rel_field.context = {'request': request}

        # Convert our related field to native, and establish that it does not
        # have a normal model.
        native = rel_field.to_native(rm)
        self.assertEqual({'id': 42, 'baz': 1}, native)

    def test_create_rel_serializer_class(self):
        """Establish that the `RelatedField._create_serializer_class`
        method works as expected.
        """
        RelatedModel = test_models.RelatedModel

        # Create a bogus request object.
        factory = RequestFactory()
        request = factory.get('/foo/')

        # Create a serializer that would otherwise show itself
        # at a related level.
        rs = test_serializers.ReverseSerializer()

        # Create an instance.
        nm = test_models.NormalModel(bar=1, baz=2, bacon=3)
        rm = RelatedModel(id=42, baz=1, normal=nm)

        # Get the fields from the serializer and determine that we get
        # what we expect.
        fields_dict = rs.fields
        self.assertEqual(
            set([i for i in fields_dict.keys()]),
            {'bacon', 'bar', 'baz', 'related_model'},
        )
        # Pull out the related field.
        rel_field = fields_dict['related_model']
        rel_field.context = {'request': request}

        # Establish that there is no serializer class on the related
        # field yet.
        self.assertFalse(hasattr(rel_field, '_serializer_class'))

        # Create a serializer class.
        ret_val = rel_field._create_serializer_class(RelatedModel)
        self.assertTrue(ret_val)
        self.assertTrue(hasattr(rel_field, '_serializer_class'))
        sc = rel_field._serializer_class

        # Establish that a followup call is a no-op.
        ret_val = rel_field._create_serializer_class(RelatedModel)
        self.assertFalse(ret_val)
        self.assertIs(rel_field._serializer_class, sc)

    def test_created_field(self):
        """Establish that explicitly asking for a `created` field
        does cause it to be included.
        """
        fc = test_serializers.CreatedSerializer()
        self.assertIn('created', fc.get_default_fields())

    def test_initial_data(self):
        """Establish that initial data is carried over to the `save_object`
        serializer method.
        """
        NormalModel = test_models.NormalModel

        # Create our child serializer.
        nm = NormalModel(id=42)
        ns = test_serializers.ChildSerializer(initial={
            'normal': nm.id,
        })
        # Establish that if we call `save_object` on a child that does not
        # yet have a normal, that the latter's presence in `initial` causes
        # it to be set on our object.
        cm = test_models.ChildModel()
        with self.assertRaises(ObjectDoesNotExist):
            cm.normal
        with mock.patch.object(BaseModelSerializer, 'save_object') as save:
            with mock.patch.object(NormalModel.objects, 'get') as get:
                get.return_value = nm

                # Actually perform the `save_object` call being tested.
                ns.save_object(cm)

                # Assert that the superclass `save_object` was called as
                # expected.
                save.assert_called_once_with(cm)

                # Assert that the `get` method was called as expected.
                get.assert_called_once_with(pk=42)
                self.assertEqual(cm.normal, nm)
class RelatedFieldTests(unittest.TestCase):
    """Tests for `RelatedField.from_native` lookup behavior."""

    def setUp(self):
        # Save my fake models to my test class.
        # NOTE(review): these attributes hold the model *classes*,
        # not instances.
        NormalModel = test_models.NormalModel
        self.nm = test_models.NormalModel
        self.cm = test_models.ChildModel

        # Set up related fields and things.
        self.rel_field = RelatedField(())
        self.rel_field.context = {}
        # Django renamed `get_query_set` to `get_queryset`; support both.
        if hasattr(test_models.NormalModel.objects, 'get_queryset'):
            self.rel_field.queryset = NormalModel.objects.get_queryset()
        else:
            self.rel_field.queryset = NormalModel.objects.get_query_set()

    def test_related_field_from_id_dict(self):
        """Test that a related field's `from_native` method, when
        sent a dictionary with an `id` key, returns that ID.
        """
        # Test the case where we get a valid value back.
        with mock.patch.object(self.rel_field.queryset, 'get') as qs:
            qs.return_value = test_models.NormalModel(id=42)
            answer = self.rel_field.from_native({'id': 42 })
            qs.assert_called_with(id=42)
            self.assertEqual(answer, qs.return_value)

    def test_related_field_from_with_no_unique(self):
        """Test that a related field's `from_native` method, when
        no unique values are sent, raises ValidationError.
        """
        # Test the case where we get a valid value back.
        with self.assertRaises(ValidationError):
            answer = self.rel_field.from_native({'foo': 3 })

    def test_related_field_from_pk_noexist(self):
        """Test that a related field's `from_native` method processes
        a plain ID correctly, and processes DoesNotExist correctly.
        """
        # Test processing when DoesNotExist is raised.
        with mock.patch.object(self.rel_field.queryset, 'get') as m:
            m.side_effect = test_models.NormalModel.DoesNotExist
            with self.assertRaises(ValidationError):
                answer = self.rel_field.from_native(42)

    def test_related_field_from_pk_valueerror(self):
        """Test that a related field's `from_native` method processes
        a plain ID correctly, and processes ValueError correctly.
        """
        # Test processing when ValueError is raised.
        with mock.patch.object(self.rel_field.queryset, 'get') as m:
            m.side_effect = ValueError
            with self.assertRaises(ValidationError):
                answer = self.rel_field.from_native(42)

    def test_related_field_from_unique_key(self):
        """Establish that we can retrieve a relation by a unique key within
        that model.
        """
        with mock.patch.object(self.rel_field.queryset, 'get') as m:
            answer = self.rel_field.from_native({'bacon': 42})
            m.assert_called_once_with(bacon=42)

    def test_related_field_from_composite_unique_keys(self):
        """Establish that we can retrieve a relation by a composite-unique
        set of keys within that model.
        """
        with mock.patch.object(self.rel_field.queryset, 'get') as m:
            answer = self.rel_field.from_native({'bar': 1, 'baz': 2})
            m.assert_called_once_with(bar=1, baz=2)

    def test_related_field_from_no_unique_keys(self):
        """Establish that if we attempt a lookup with no unique keys,
        that the system doesn't even try and raises an error.
        """
        with self.assertRaises(ValidationError):
            answer = self.rel_field.from_native({'foo': []})

    def test_related_field_from_bogus_field(self):
        """Establish that if I attempt to retrieve a related instance based on
        a field that does not exist on the related model, that ValidationError
        is raised.
        """
        with self.assertRaises(ValidationError):
            answer = self.rel_field.from_native({'bogus': None})

    def test_related_field_ignores_api_endpoint(self):
        """Establish that a `from_native` call will ignore serializer fields
        that do not correspond to model fields, such as `api_endpoint`.
        """
        with mock.patch.object(self.rel_field.queryset, 'get') as get:
            answer = self.rel_field.from_native({'api_endpoint': 1, 'baz': 2})
            get.assert_called_once_with(baz=2)

    def test_related_field_multiple_objects(self):
        """Establish that if I send criteria that don't narrow down to
        a single model instance, that ValidationError is raised.
        """
        with mock.patch.object(self.rel_field.queryset, 'get') as m:
            m.side_effect = test_models.NormalModel.MultipleObjectsReturned
            with self.assertRaises(ValidationError):
                answer = self.rel_field.from_native({'bar': 3})
@unittest.skipUnless(django_pgfields_installed, NO_DJANGOPG)
class PostgresFieldTests(unittest.TestCase):
    """Test suite to establish that the custom serializer fields that
    correlate to django_pg model fields work in the way we expect.
    """

    def test_uuid_field_no_auto_add(self):
        """Test that a UUID field without `auto_add` returns the
        correct serializer field.
        """
        # Instantiate my fake model serializer and establish that
        # we get back a UUIDField that is not read-only.
        s = test_serializers.PGFieldsSerializer()
        fields_dict = s.get_default_fields()
        self.assertIsInstance(fields_dict['uuid'], fields.UUIDField)
        self.assertEqual(fields_dict['uuid'].required, True)
        self.assertEqual(fields_dict['uuid'].read_only, False)

    def test_composite_field_without_drf_method(self):
        """Establish that we get a plain CompositeField if the model
        field does not instruct us otherwise.
        """
        s = test_serializers.PGFieldsSerializer()
        fields_dict = s.get_default_fields()
        self.assertEqual(fields_dict['coords'].__class__,
            fields.CompositeField)

    def test_json_field_from_native(self):
        """Determine that a JSON serializer sends the value
        through on the `from_native` method.
        """
        jf = fields.JSONField()
        answer = jf.from_native([1, 3, 5])
        self.assertEqual(answer, [1, 3, 5])

    def test_json_field_to_native(self):
        """Determine that a JSON serializer sends the value
        through on the `to_native` method.
        """
        jf = fields.JSONField()
        answer = jf.to_native([1, 3, 5])
        self.assertEqual(answer, [1, 3, 5])

    def test_uuid_field_from_native(self):
        """Determine that the UUID serializer converts the value
        back to a Python UUID object.
        """
        uf = fields.UUIDField()
        answer = uf.from_native('01234567-0123-0123-0123-0123456789ab')
        self.assertIsInstance(answer, uuid.UUID)
        self.assertEqual(
            answer,
            uuid.UUID('01234567-0123-0123-0123-0123456789ab'),
        )

    def test_uuid_field_to_native(self):
        """Determine that the UUID serializer converts the value
        to a string representation of the uuid.
        """
        uf = fields.UUIDField()
        answer = uf.to_native(
            uuid.UUID('01234567-0123-0123-0123-0123456789ab'),
        )
        self.assertIsInstance(answer, six.text_type)
        self.assertEqual(answer, '01234567-0123-0123-0123-0123456789ab')

    def test_array_field_from_native(self):
        """Establish that the Array serializer converts the value
        back into a Python list as expected.
        """
        # Note: string elements are coerced to ints by the inner field.
        af = fields.ArrayField(of=serializers.IntegerField())
        answer = af.from_native([1, 1, '2', 3, '5', 8])
        self.assertIsInstance(answer, list)
        self.assertEqual(answer, [1, 1, 2, 3, 5, 8])

    def test_array_field_to_native(self):
        """Establish that the Array serializer converts the value
        to a Python list as expected.
        """
        af = fields.ArrayField(of=serializers.IntegerField())
        answer = af.to_native([1, 1, 2, 3, 5, 8])
        self.assertIsInstance(answer, list)
        self.assertEqual(answer, [1, 1, 2, 3, 5, 8])

    def test_composite_field_from_native(self):
        """Establish that the composite serializer converts the value
        back into the appropriate Python instance type.
        """
        # Create an instance class and composite field.
        Point = namedtuple('Point', ['x', 'y'])
        cf = fields.CompositeField(
            fields={
                'x': serializers.IntegerField(),
                'y': serializers.IntegerField(),
            },
            instance_class=Point,
        )
        # Test the conversion from a native dictionary.
        answer = cf.from_native({ 'x': 3, 'y': 1 })
        self.assertIsInstance(answer, Point)
        self.assertEqual(answer.x, 3)
        self.assertEqual(answer.y, 1)

    def test_composite_field_to_native(self):
        """Establish that the composite serializer converts the value
        back into the appropriate Python instance type.
        """
        # Create an instance class and composite field.
        Point = namedtuple('Point', ['x', 'y'])
        cf = fields.CompositeField(
            fields={
                'x': serializers.IntegerField(),
                'y': serializers.IntegerField(),
            },
            instance_class=Point,
        )
        # Test the conversion to a native dictionary.
        answer = cf.to_native(Point(x=3, y=1))
        self.assertIsInstance(answer, dict)
        self.assertEqual(answer, { 'x': 3, 'y': 1 })
| pombredanne/drf-toolbox | tests/test_serializers.py | Python | bsd-3-clause | 23,089 |
#!/usr/bin/env python
#
# Test fff_array wrapping
#
from numpy.testing import assert_almost_equal, assert_equal
import numpy as np
import nipy.neurospin.bindings as fb
# Upper bound (inclusive) for randomly drawn array dimensions.
MAX_TEST_SIZE = 30

def random_shape(size):
    """Draw random array dimensions, each in the range [2, MAX_TEST_SIZE].

    Returns a 1-element ndarray when ``size == 1``, otherwise a tuple
    of ``size`` dimensions.
    """
    dims = np.random.randint(MAX_TEST_SIZE - 1, size=size) + 2
    if size == 1:
        return dims
    return tuple(dims)
def _test_array_get(x):
    """Check fff element access against numpy indexing at the centre voxel.

    Fix: use floor division (``//``) when computing the index.  Under
    Python 3, ``/`` on an integer array yields floats, which are not
    valid array indices; ``//`` preserves the Python 2 behavior exactly.
    """
    pos = np.asarray(x.shape) // 2
    a = fb.array_get(x, pos[0], pos[1], pos[2], pos[3])
    assert_equal(a, x[pos[0], pos[1], pos[2], pos[3]])
def test_array_get():
    """Exercise array_get on a random 4D double array."""
    d0, d1, d2, d3 = random_shape(4)
    data = np.random.rand(d0, d1, d2, d3) - .5
    _test_array_get(data)

def _test_array_get_block(x):
    """Check fff block extraction against the equivalent numpy slice."""
    extracted = fb.array_get_block(x, 1, 8, 2, 1, 8, 2, 1, 8, 2, 1, 8, 2)
    expected = x[1:8:2, 1:8:2, 1:8:2, 1:8:2]
    assert_equal(extracted, expected)

def test_array_get_block():
    """Exercise block extraction on a fixed-size random array."""
    _test_array_get_block(np.random.rand(10, 10, 10, 10) - .5)
def _test_array_add(x, y):
    """Check fff addition against numpy's."""
    result = fb.array_add(x, y)
    assert_equal(result, x + y)

def test_array_add():
    """Exercise array_add with a random float/uint8 pair."""
    d0, d1, d2, d3 = random_shape(4)
    lhs = np.random.rand(d0, d1, d2, d3) - .5
    rhs = (100 * np.random.rand(d0, d1, d2, d3)).astype('uint8')
    _test_array_add(lhs, rhs)

def _test_array_mul(x, y):
    """Check fff multiplication against numpy's."""
    result = fb.array_mul(x, y)
    assert_equal(result, x * y)

def test_array_mul():
    """Exercise array_mul with a random float/uint8 pair."""
    d0, d1, d2, d3 = random_shape(4)
    lhs = np.random.rand(d0, d1, d2, d3) - .5
    rhs = (100 * np.random.rand(d0, d1, d2, d3)).astype('uint8')
    _test_array_mul(lhs, rhs)

def _test_array_sub(x, y):
    """Check fff subtraction against numpy's."""
    result = fb.array_sub(x, y)
    assert_equal(result, x - y)

def test_array_sub():
    """Exercise array_sub with a random float/uint8 pair."""
    d0, d1, d2, d3 = random_shape(4)
    lhs = np.random.rand(d0, d1, d2, d3) - .5
    rhs = (100 * np.random.rand(d0, d1, d2, d3)).astype('uint8')
    _test_array_sub(lhs, rhs)
def _test_array_div(x, y):
    """Check fff division against numpy's (almost-equal for floats)."""
    result = fb.array_div(x, y)
    assert_almost_equal(result, x / y)

def test_array_div():
    """Exercise array_div with two random float arrays."""
    d0, d1, d2, d3 = random_shape(4)
    numerator = np.random.rand(d0, d1, d2, d3) - .5
    denominator = np.random.rand(d0, d1, d2, d3) - .5
    _test_array_div(numerator, denominator)
if __name__ == "__main__":
    # Allow running this test module directly via nose.
    import nose
    nose.run(argv=['', __file__])
| yarikoptic/NiPy-OLD | nipy/neurospin/bindings/tests/test_array.py | Python | bsd-3-clause | 2,134 |
import os
from setuptools import setup, find_packages
def read(filename):
    """Return the contents of *filename*, resolved relative to this file.

    Fix: the previous version never closed the file handle; use a
    context manager so it is always released.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path) as handle:
        return handle.read()
# zc.buildout entry points: the install hook and the matching uninstall
# hook for the symlink-collecting recipe.
entry_points = """
[zc.buildout]
default = svetlyak40wt.recipe.symlinks:Symlinks
[zc.buildout.uninstall]
default = svetlyak40wt.recipe.symlinks:uninstall_symlinks
"""
# Package metadata; the entry_points string above wires the recipe into
# zc.buildout.
setup(
    version = '0.2.1',
    name = 'svetlyak40wt.recipe.symlinks',
    description = 'Simple recipe to collect symbolic links in one directory.',
    long_description = read('README.md'),
    classifiers = [
        'License :: OSI Approved :: BSD License',
        'Framework :: Buildout',
        'Programming Language :: Python',
    ],
    keywords = 'buildout recipe',
    author = 'Alexander Artemenko',
    author_email = 'svetlyak.40wt@gmail.com',
    url = 'http://github.com/svetlyak40wt/svetlyak40wt.recipe.symlinks',
    license = 'New BSD License',
    packages = find_packages(),
    namespace_packages = ['svetlyak40wt', 'svetlyak40wt.recipe'],
    include_package_data = True,
    install_requires = [
        'zc.buildout',
        'zc.recipe.egg',
        'setuptools',
    ],
    zip_safe = False,
    entry_points = entry_points,
)
| svetlyak40wt/svetlyak40wt.recipe.symlinks | setup.py | Python | bsd-3-clause | 1,194 |
def function1():
    """Return the integer constant 1."""
    result = 1
    return result
def function2():
    """Return the integer constant 2."""
    result = 2
    return result
| has2k1/travis_doc | travis_doc/travis_doc.py | Python | bsd-3-clause | 120 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
# Version string for the grpcio_tests package; edit the template, not this file.
VERSION='1.0.1'
| arkmaxim/grpc | src/python/grpcio_tests/grpc_version.py | Python | bsd-3-clause | 1,643 |
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
    """Rewrite ckernel-backed kernel ops in *func* (``env`` is unused)."""
    visitor = CKernelImplementations()
    transform(visitor, func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
    """
    For kernels that are implemented via ckernels, this
    grabs the ckernel_deferred and turns it into a ckernel
    op.
    """

    def op_kernel(self, op):
        # Metadata attached by earlier passes: the kernel function and
        # the overload resolved for this call site.
        function = op.metadata['kernel']
        overload = op.metadata['overload']

        # NOTE(review): `func` and `polysig` are assigned but unused.
        func = overload.func
        polysig = overload.sig
        monosig = overload.resolved_sig
        argtypes = monosig.argtypes

        if function.matches('ckernel', argtypes):
            overload = function.best_match('ckernel', argtypes)
            impl = overload.func
            # The ckernel overload must resolve to the same monomorphic
            # signature that was recorded in the metadata.
            assert monosig == overload.resolved_sig, (monosig,
                                                      overload.resolved_sig)
            # Replace the kernel op with a ckernel op carrying the
            # concrete implementation; first arg slot is dropped.
            new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
            new_op.add_metadata({'rank': 0,
                                 'parallel': True})
            return new_op
        # No ckernel implementation available: leave the op unchanged.
        return op
| zeeshanali/blaze | blaze/compute/air/passes/ckernel_impls.py | Python | bsd-3-clause | 1,537 |
#!/usr/bin/python -OOOO
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
"""
Base codec functions for bson.
"""
import struct
import cStringIO
import calendar, pytz
from datetime import datetime
import warnings
try:
    from abc import ABCMeta, abstractmethod
except ImportError:
    # If abc is not present (older versions of python), just define the ABCMeta
    # class as a dummy class, we don't really need it anyway.
    class ABCMeta(type):
        pass

    def abstractmethod(funcobj):
        """A decorator indicating abstract methods.

        Requires that the metaclass is ABCMeta or derived from it. A
        class that has a metaclass derived from ABCMeta cannot be
        instantiated unless all of its abstract methods are overridden.
        The abstract methods can be called using any of the normal
        'super' call mechanisms.

        Usage:
            class C:
                __metaclass__ = ABCMeta
                @abstractmethod
                def my_abstract_method(self, ...):
                    ...
        """
        funcobj.__isabstractmethod__ = True
        return funcobj
# {{{ Error Classes
class MissingClassDefinition(ValueError):
    """Raised when decoding references a class that was never registered."""

    def __init__(self, class_name):
        message = "No class definition for class %s" % (class_name,)
        super(MissingClassDefinition, self).__init__(message)
# }}}
# {{{ Warning Classes
class MissingTimezoneWarning(RuntimeWarning):
    """Issued when a naive datetime is encoded; UTC is assumed."""

    def __init__(self, *args):
        arguments = list(args)
        if not arguments:
            # Default message when the caller supplies none.
            arguments.append("Input datetime object has no tzinfo, assuming UTC.")
        super(MissingTimezoneWarning, self).__init__(*arguments)
# }}}
# {{{ Traversal Step
class TraversalStep(object):
    """One step of the encoder's traversal: the container and the key."""

    def __init__(self, parent, key):
        self.parent, self.key = parent, key
# }}}
# {{{ Custom Object Codec
class BSONCoding(object):
    """Abstract interface for objects that can round-trip through BSON."""
    # Python 2 metaclass declaration; under Python 3 `__metaclass__` is
    # ignored -- TODO confirm intended interpreter.
    __metaclass__ = ABCMeta

    @abstractmethod
    def bson_encode(self):
        # Return a dict of values representing this object.
        pass

    @abstractmethod
    def bson_init(self, raw_values):
        # Initialize from decoded values; may return a replacement
        # object (see decode_object).
        pass
# Registry of BSONCoding subclasses by class name, consulted when decoding
# documents tagged with the $$__CLASS_NAME__$$ marker.
classes = {}

def import_class(cls):
    """Register *cls* for decoding; non-BSONCoding classes are ignored."""
    if not issubclass(cls, BSONCoding):
        return
    global classes
    classes[cls.__name__] = cls
def import_classes(*args):
    """Register every class passed as an argument."""
    for candidate in args:
        import_class(candidate)
def import_classes_from_modules(*args):
    """Register every BSONCoding subclass found in the given modules.

    Fix: the original iterated over ``module.__dict__`` directly, which
    yields attribute *names* (strings); the subsequent attribute checks
    therefore never matched, so no class was ever registered.  Iterate
    the values instead, and guard with ``isinstance(item, type)`` so
    that functions and other module members cannot make ``issubclass``
    (inside import_class) raise TypeError.
    """
    for module in args:
        for item in module.__dict__.values():
            if isinstance(item, type) and issubclass(item, BSONCoding):
                import_class(item)
def encode_object(obj, traversal_stack, generator_func):
    """Encode a BSONCoding object as a document tagged with its class name."""
    values = obj.bson_encode()
    class_name = obj.__class__.__name__
    # Marker key used by decode_object() to rebuild the right class.
    values["$$__CLASS_NAME__$$"] = class_name
    return encode_document(values, traversal_stack, obj, generator_func)

def encode_object_element(name, value, traversal_stack, generator_func):
    # 0x03 is the BSON "embedded document" type tag.
    return "\x03" + encode_cstring(name) + \
        encode_object(value, traversal_stack,
            generator_func = generator_func)
class _EmptyClass(object):
    # Shell instance used by decode_object(); its __class__ is swapped
    # to the registered target class before bson_init() is called.
    pass

def decode_object(raw_values):
    """Rebuild a registered BSONCoding instance from decoded values.

    Raises MissingClassDefinition if the embedded class name was never
    registered via import_class().
    """
    global classes
    class_name = raw_values["$$__CLASS_NAME__$$"]
    cls = None
    try:
        cls = classes[class_name]
    except KeyError, e:  # Python 2 except syntax
        raise MissingClassDefinition(class_name)

    retval = _EmptyClass()
    retval.__class__ = cls
    # bson_init() may return a replacement object; fall back to retval.
    alt_retval = retval.bson_init(raw_values)
    return alt_retval or retval
# }}}
# {{{ Codec Logic
def encode_string(value):
    """Encode text as a BSON string: int32 byte length (including the
    terminator), the UTF-8 bytes, then a NUL terminator."""
    utf8 = value.encode("utf8")
    byte_count = len(utf8)
    return struct.pack("<i%dsb" % (byte_count,), byte_count + 1, utf8, 0)
def decode_string(data, base):
    """Decode a BSON string at *base*; return (next_offset, text)."""
    (length,) = struct.unpack("<i", data[base:base + 4])
    raw = data[base + 4:base + 4 + length - 1]  # drop the trailing NUL
    return (base + 4 + length, raw.decode("utf8"))
def encode_cstring(value):
    """Encode *value* as a BSON cstring: UTF-8 bytes plus a NUL terminator.

    NOTE(review): an embedded NUL in *value* would corrupt the stream;
    callers are trusted not to pass one.
    """
    if isinstance(value, unicode):  # Python 2 text type
        value = value.encode("utf8")
    return value + "\x00"

def decode_cstring(data, base):
    """Decode a NUL-terminated string at *base*; return (next_offset, text).

    NOTE(review): relies on Python 2 str indexing yielding 1-char
    strings; under Python 3 bytes indexing yields ints and the NUL
    comparison would never match.
    """
    length = 0
    max_length = len(data) - base
    while length < max_length:
        character = data[base + length]
        length += 1
        if character == "\x00":
            break
    return (base + length, data[base:base + length - 1].decode("utf8"))
def encode_binary(value):
    """Encode a byte string as BSON binary, subtype 0 (generic)."""
    header = struct.pack("<ib", len(value), 0)
    return header + value
def decode_binary(data, base):
    """Decode BSON binary at *base*; the subtype byte is read and ignored."""
    length, binary_type = struct.unpack("<ib", data[base:base + 5])
    start = base + 5
    return (start + length, data[start:start + length])
def encode_double(value):
    """Pack *value* as a little-endian IEEE-754 double (8 bytes)."""
    packed = struct.pack("<d", value)
    return packed
def decode_double(data, base):
    """Unpack a little-endian double at *base*; return (next_offset, value)."""
    (value,) = struct.unpack("<d", data[base:base + 8])
    return (base + 8, value)
# BSON element type tags mapped to short names; decode_element() uses
# these names to look up the matching decode_<name>_element function
# via globals().
ELEMENT_TYPES = {
    0x01 : "double",
    0x02 : "string",
    0x03 : "document",
    0x04 : "array",
    0x05 : "binary",
    0x08 : "boolean",
    0x09 : "UTCdatetime",
    0x0A : "none",
    0x10 : "int32",
    0x12 : "int64"
}

def encode_double_element(name, value):
    # 0x01: 64-bit floating point element.
    return "\x01" + encode_cstring(name) + encode_double(value)

def decode_double_element(data, base):
    base, name = decode_cstring(data, base + 1)
    base, value = decode_double(data, base)
    return (base, name, value)

def encode_string_element(name, value):
    # 0x02: UTF-8 string element.
    return "\x02" + encode_cstring(name) + encode_string(value)

def decode_string_element(data, base):
    base, name = decode_cstring(data, base + 1)
    base, value = decode_string(data, base)
    return (base, name, value)
def encode_value(name, value, buf, traversal_stack, generator_func):
    """Write one named value to *buf*, dispatching on the value's type.

    NOTE: the isinstance checks are order-sensitive -- bool is tested
    before int because bool is a subclass of int.
    """
    if isinstance(value, BSONCoding):
        buf.write(encode_object_element(name, value, traversal_stack,
            generator_func))
    elif isinstance(value, float):
        buf.write(encode_double_element(name, value))
    elif isinstance(value, unicode):
        buf.write(encode_string_element(name, value))
    elif isinstance(value, dict):
        buf.write(encode_document_element(name, value,
            traversal_stack, generator_func))
    elif isinstance(value, list) or isinstance(value, tuple):
        buf.write(encode_array_element(name, value,
            traversal_stack, generator_func))
    elif isinstance(value, str):
        # Python 2 str (bytes) is stored as BSON binary.
        buf.write(encode_binary_element(name, value))
    elif isinstance(value, bool):
        buf.write(encode_boolean_element(name, value))
    elif isinstance(value, datetime):
        buf.write(encode_UTCdatetime_element(name, value))
    elif value is None:
        buf.write(encode_none_element(name, value))
    elif isinstance(value, int):
        # Choose int32 vs int64 by whether the value fits in 32 bits.
        if value < -0x80000000 or value > 0x7fffffff:
            buf.write(encode_int64_element(name, value))
        else:
            buf.write(encode_int32_element(name, value))
    elif isinstance(value, long):
        buf.write(encode_int64_element(name, value))
def encode_document(obj, traversal_stack,
traversal_parent = None,
generator_func = None):
buf = cStringIO.StringIO()
key_iter = obj.iterkeys()
if generator_func is not None:
key_iter = generator_func(obj, traversal_stack)
for name in key_iter:
value = obj[name]
traversal_stack.append(TraversalStep(traversal_parent or obj, name))
encode_value(name, value, buf, traversal_stack, generator_func)
traversal_stack.pop()
e_list = buf.getvalue()
e_list_length = len(e_list)
return struct.pack("<i%dsb" % (e_list_length,), e_list_length + 4 + 1,
e_list, 0)
def encode_array(array, traversal_stack,
        traversal_parent = None,
        generator_func = None):
    """Encode a list/tuple as a BSON document whose keys are "0", "1", ..."""
    out = cStringIO.StringIO()
    for pos, item in enumerate(array):
        traversal_stack.append(TraversalStep(traversal_parent or array, pos))
        encode_value(unicode(pos), item, out, traversal_stack, generator_func)
        traversal_stack.pop()
    payload = out.getvalue()
    payload_len = len(payload)
    # length field counts itself (4 bytes) plus the trailing NUL byte
    return struct.pack("<i%dsb" % (payload_len,), payload_len + 4 + 1,
            payload, 0)
def decode_element(data, base):
    """Dispatch to the decode_<type>_element handler named by the type byte."""
    type_code = struct.unpack("<b", data[base:base + 1])[0]
    handler = globals()["decode_" + ELEMENT_TYPES[type_code] + "_element"]
    return handler(data, base)
def decode_document(data, base):
    """Decode a BSON document starting at offset *base*.

    Returns (end_offset, dict).  If the decoded dict carries the
    "$$__CLASS_NAME__$$" marker, it is rehydrated into a Python object
    via decode_object().
    """
    length = struct.unpack("<i", data[base:base + 4])[0]
    end_point = base + length
    base += 4
    retval = {}
    # stop 1 byte early: the document ends with a NUL terminator
    while base < end_point - 1:
        base, name, value = decode_element(data, base)
        retval[name] = value
    if "$$__CLASS_NAME__$$" in retval:
        retval = decode_object(retval)
    return (end_point, retval)
def encode_document_element(name, value, traversal_stack, generator_func):
    """Emit a 0x03 embedded-document element."""
    body = encode_document(value, traversal_stack,
            generator_func = generator_func)
    return "\x03" + encode_cstring(name) + body
def decode_document_element(data, base):
    """Decode a 0x03 embedded-document element; return (new_base, name, dict)."""
    offset, name = decode_cstring(data, base + 1)
    offset, value = decode_document(data, offset)
    return (offset, name, value)
def encode_array_element(name, value, traversal_stack, generator_func):
    """Emit a 0x04 array element."""
    body = encode_array(value, traversal_stack, generator_func = generator_func)
    return "\x04" + encode_cstring(name) + body
def decode_array_element(data, base):
    """Decode a 0x04 array element.

    BSON stores arrays as documents keyed "0", "1", ...; read the document
    and convert consecutive stringified indices back into a list.
    """
    offset, name = decode_cstring(data, base + 1)
    offset, as_doc = decode_document(data, offset)
    items = []
    idx = 0
    while unicode(idx) in as_doc:
        items.append(as_doc[unicode(idx)])
        idx += 1
    return (offset, name, items)
def encode_binary_element(name, value):
    """Emit a 0x05 binary element."""
    return "".join(["\x05", encode_cstring(name), encode_binary(value)])
def decode_binary_element(data, base):
    """Decode a 0x05 binary element; return (new_base, name, bytes)."""
    offset, name = decode_cstring(data, base + 1)
    offset, value = decode_binary(data, offset)
    return (offset, name, value)
def encode_boolean_element(name, value):
    """Emit a 0x08 boolean element (single byte, 0 or 1)."""
    flag = 1 if value else 0
    return "\x08" + encode_cstring(name) + struct.pack("<b", flag)
def decode_boolean_element(data, base):
    """Decode a 0x08 boolean element; return (new_base, name, bool)."""
    offset, name = decode_cstring(data, base + 1)
    raw = struct.unpack("<b", data[offset:offset + 1])[0]
    return (offset + 1, name, bool(raw))
def encode_UTCdatetime_element(name, value):
    """Emit a 0x09 UTC-datetime element (int64 milliseconds since the epoch).

    Naive datetimes are accepted but trigger MissingTimezoneWarning
    (stacklevel 4 so the warning points at the external caller).
    """
    if value.tzinfo is None:
        warnings.warn(MissingTimezoneWarning(), None, 4)
    # whole seconds (UTC) * 1000 plus fractional milliseconds, rounded
    value = int(round(calendar.timegm(value.utctimetuple()) * 1000 +
        (value.microsecond / 1000.0)))
    return "\x09" + encode_cstring(name) + struct.pack("<q", value)
def decode_UTCdatetime_element(data, base):
    """Decode a 0x09 element: int64 ms since the epoch -> aware UTC datetime."""
    offset, name = decode_cstring(data, base + 1)
    millis = struct.unpack("<q", data[offset:offset + 8])[0]
    value = datetime.fromtimestamp(millis / 1000.0, pytz.utc)
    return (offset + 8, name, value)
def encode_none_element(name, value):
    """Emit a 0x0a null element; *value* is ignored (kept for dispatch symmetry)."""
    return "".join(["\x0a", encode_cstring(name)])
def decode_none_element(data, base):
    """Decode a 0x0a null element; return (new_base, name, None)."""
    offset, name = decode_cstring(data, base + 1)
    return (offset, name, None)
def encode_int32_element(name, value):
    """Emit a 0x10 int32 element."""
    packed = struct.pack("<i", value)
    return "\x10" + encode_cstring(name) + packed
def decode_int32_element(data, base):
    """Decode a 0x10 int32 element; return (new_base, name, int)."""
    offset, name = decode_cstring(data, base + 1)
    number = struct.unpack("<i", data[offset:offset + 4])[0]
    return (offset + 4, name, number)
def encode_int64_element(name, value):
    """Emit a 0x12 int64 element."""
    packed = struct.pack("<q", value)
    return "\x12" + encode_cstring(name) + packed
def decode_int64_element(data, base):
    """Decode a 0x12 int64 element; return (new_base, name, int)."""
    offset, name = decode_cstring(data, base + 1)
    number = struct.unpack("<q", data[offset:offset + 8])[0]
    return (offset + 8, name, number)
# }}}
| cupcicm/bson | bson/codec.py | Python | bsd-3-clause | 10,379 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# MSVC (Windows) compiler flags: OpenMP, full optimization, Intel64 tuning.
args_linear = ['/openmp', '/O2', '/favor:INTEL64']
# Same flags plus '/fp:fast' (relaxed IEEE float semantics, for speed).
# NOTE(review): args_nonlinear is not referenced below -- presumably kept in
# sync with sibling setup scripts that build nonlinear extensions; verify.
args_nonlinear = ['/openmp', '/O2', '/favor:INTEL64', '/fp:fast']
ext_modules = [
    Extension('clpt_commons_bc1', ['clpt_commons_bc1.pyx'],
              extra_compile_args=args_linear,
              )]
setup(
    name = 'clpt_bc1',
    cmdclass = {'build_ext': build_ext},
    ext_modules = ext_modules
    )
| albertoferna/compmech | compmech/conecyl/clpt/setup_clpt_bc1.py | Python | bsd-3-clause | 472 |
import sys
import numpy as np
from copy import copy, deepcopy
import multiprocessing as mp
from numpy.random import shuffle, random, normal
from math import log, sqrt, exp, pi
import itertools as it
from scipy.stats import gaussian_kde, pearsonr
from scipy.stats import ttest_1samp
from itertools import product
try:
    # Silence PyCrypto's PowmInsecureWarning if PyCrypto is installed;
    # best effort only -- the package is optional, so any failure is ignored.
    from Crypto.pct_warnings import PowmInsecureWarning
    import warnings
    warnings.simplefilter("ignore", PowmInsecureWarning)
except:
    pass
# In this work, I am computing transfer entropies
# by, first, discretizing expression values into a given
# number of bins. Using those bins, the probability of a given
# interval is computed, and the joint probability over time
# can also be computed (given two time series).
# Want P(X_t+1, X_k2, Y_k1) * log (P(X_t+1,Y_k1,X_k2)*P(X_t+1)) / (P(X_t+1, X_k2)*P(X_k2,Y_K1))
# just get the joint, then get the others by marginalization
# parameters:
# yk: the markov order for Y = let it be 1
# xk: the markov order for x = let it be 1
# yl: the time delay for y
# xl: the time delay for x
# b : the number of bins
# autoTE is for TE (Y -> X)
def autoshuff(xy):
    """Pearson correlation of x against a shuffled copy of y.

    Takes a single ``(x, y)`` tuple so it can be mapped over a
    multiprocessing pool.  Replaces the Python-2-only tuple-parameter
    syntax ``def autoshuff((x, y))`` (removed in Python 3, PEP 3113);
    callers still pass a tuple, so behavior is unchanged.
    Returns the permuted correlation coefficient (a float in [-1, 1]).
    """
    x, y = xy
    permutedY = deepcopy(y)  # never mutate the caller's series
    shuffle(permutedY)
    return pearsonr(x, permutedY)[0]
def autoCorr(x,y,reps1, cpus):
    """Observed Pearson correlation of (x, y) plus *reps1* permutation
    correlations (the null distribution), computed on a pool of *cpus*
    workers.

    Returns [observed, perm_1, ..., perm_reps1].
    """
    pool = mp.Pool(cpus)
    observed = pearsonr(x,y)[0]
    # every worker receives the same (x, y) pair and shuffles y independently
    permutedList = it.repeat( (x,y), reps1)
    permutedCor = pool.map(autoshuff, permutedList)
    pool.close()  # NOTE(review): no pool.join(); workers are not reaped -- verify
    return([observed] + permutedCor)
def geneindex(gene, genes):
    """Return the index of the first entry of *genes* containing *gene* as a
    substring, or -1 when no entry matches."""
    for pos, label in enumerate(genes):
        if gene in label:
            return pos
    return -1
def prepGeneDataGG(dats, genes, g1, g2):
    """Extract and normalize the expression vectors for a (from, to) gene pair.

    *dats* is a list of rows of string expression values parallel to *genes*.
    Each selected row is converted to floats, mean-centered, and scaled by
    the maximum of the centered values (clamped to at least 1 to avoid
    blowing up near-constant series).  Returns ``(x, y)`` arrays, or
    ``([], [])`` when either gene is not found.
    """
    i = geneindex(g1, genes)  # from
    j = geneindex(g2, genes)  # to
    if (i > -1 and j > -1):
        # list comprehensions instead of Python-2 map() so the same code
        # also works under Python 3 (where map() returns a lazy iterator
        # that np.array would wrap as a 0-d object array)
        x = np.array([float(v) for v in dats[i]])  # from
        y = np.array([float(v) for v in dats[j]])  # to
        x = (x - x.mean()) / max(1, (x - x.mean()).max())
        y = (y - y.mean()) / max(1, (y - y.mean()).max())
        return((x,y))
    else:
        return( ([],[]) )
def corEdges(exprfile, genefile, fileout, reps, cpus, g1, g2):
    """Write one tab-separated line of correlation results for the pair
    (g1, g2) to *fileout*: g1, g2, observed correlation, then *reps*
    permutation correlations."""
    genes = open(genefile,'r').read().strip().split("\n")
    dat = open(exprfile,'r').read().strip().split("\n")
    dats = map(lambda x: x.split("\t"), dat)
    fout = open(fileout,'w')
    (fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
    res0 = autoCorr(fromx,toy,reps, cpus)
    fout.write(g1 +"\t"+ g2 +"\t"+ "\t".join(map(str,res0)) +"\n")
    fout.close()
def maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, ylmax, g1, g2):
    """Scan lags 0..ylmax for the pair (g1, g2) and write the best result.

    BUG FIX: the original assigned ``maxTE = res0`` (an undefined name used
    nowhere else) instead of updating ``maxCorr``, so the maximum lag and
    correlation were never recorded and ``res0`` could be unbound in the
    final write if every iteration raised.  Now the best (lag, correlation,
    stats) triple is tracked and written.

    NOTE(review): the loop body does not actually apply the lag ``yl`` to
    the series before correlating -- presumably a known limitation of this
    script; verify against the companion TE implementation.
    """
    genes = open(genefile,'r').read().strip().split("\n")
    dat = open(exprfile,'r').read().strip().split("\n")
    dats = [row.split("\t") for row in dat]
    fout = open(fileout,'w')
    (fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
    maxCorr = 0.0
    maxLag = 0.0
    maxRes = []  # full [observed, perms...] row for the best lag
    for yl in range(0,(ylmax+1)):
        try:
            res0 = autoCorr(fromx,toy,reps, cpus)
            if (res0[0] > maxCorr):
                maxCorr = res0[0]
                maxLag = yl
                maxRes = res0
        except Exception:
            e = sys.exc_info()
            sys.stderr.write(str(e)+"\n")
    fout.write(g1 +"\t"+ g2 +"\t"+ str(maxLag) +"\t"+ str(maxCorr) +"\t"+ "\t".join(map(str,maxRes)) +"\n")
    fout.close()
def main(argv):
    """CLI entry point: parse positional arguments and run the lagged
    correlation scan (maximum lag fixed at 6)."""
    #for i in range(1,len(argv)):
    #    print(str(i) +" "+ argv[i])
    exprfile, genefile, fileout = argv[1], argv[2], argv[3]
    reps, cpus = int(argv[4]), int(argv[5])
    g1, g2 = argv[6], argv[7]
    maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, 6, g1, g2)
# Script entry point: forwards the raw argv to main().
if __name__ == "__main__":
    main(sys.argv)
#pref="/Users/davidlgibbs/Dropbox/Research/Projects/Influence_Maximization_Problem/EserData/"
#pref = "/users/dgibbs/EserData/"
#genes = pref +"yeast_array_genesymbols.csv"
#gexpr = pref +"Eser_Averaged_Expression.txt"
#tout = "/Users/davidlgibbs/Desktop/x.txt"
#corEdges(gexpr, genes, tout, 20, 2, "YOX1", "MBP1")
| Gibbsdavidl/miergolf | src/corEdges.py | Python | bsd-3-clause | 3,914 |
import gc
import numpy as np
from pandas import (
DatetimeIndex,
Float64Index,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
date_range,
)
from .pandas_vb_common import tm
class SetOperations:
    """Benchmark set-like operations across several Index dtypes."""

    params = (
        ["datetime", "date_string", "int", "strings"],
        ["intersection", "union", "symmetric_difference"],
    )
    param_names = ["dtype", "method"]

    def setup(self, dtype, method):
        size = 10 ** 5
        dt_idx = date_range("1/1/2000", periods=size, freq="T")
        str_idx = Index(dt_idx.strftime("%Y-%m-%d %H:%M:%S"))
        int_idx = Index(np.arange(size))
        string_idx = tm.makeStringIndex(size)
        by_dtype = {
            "datetime": dt_idx,
            "date_string": str_idx,
            "int": int_idx,
            "strings": string_idx,
        }
        chosen = by_dtype[dtype]
        self.left = chosen
        self.right = chosen[:-1]

    def time_operation(self, dtype, method):
        getattr(self.left, method)(self.right)
class SetDisjoint:
    """Benchmark difference() between two non-overlapping DatetimeIndexes."""

    def setup(self):
        size = 10 ** 5
        extra = 20000
        self.datetime_left = DatetimeIndex(range(size))
        self.datetime_right = DatetimeIndex(range(size, size + extra))

    def time_datetime_difference_disjoint(self):
        self.datetime_left.difference(self.datetime_right)
class Range:
    """Benchmark common RangeIndex operations on increasing and
    decreasing ranges."""

    def setup(self):
        self.idx_inc = RangeIndex(0, 10 ** 6, 3)
        self.idx_dec = RangeIndex(10 ** 6, -1, -3)

    def time_max(self):
        self.idx_inc.max()

    def time_max_trivial(self):
        # max of a decreasing range is its first element
        self.idx_dec.max()

    def time_min(self):
        self.idx_dec.min()

    def time_min_trivial(self):
        # min of an increasing range is its first element
        self.idx_inc.min()

    def time_get_loc_inc(self):
        self.idx_inc.get_loc(900_000)

    def time_get_loc_dec(self):
        self.idx_dec.get_loc(100_000)

    def time_iter_inc(self):
        for _ in self.idx_inc:
            pass

    def time_iter_dec(self):
        for _ in self.idx_dec:
            pass

    def time_sort_values_asc(self):
        self.idx_inc.sort_values()

    def time_sort_values_des(self):
        self.idx_inc.sort_values(ascending=False)
class IndexEquals:
    """Benchmark equals() between a plain RangeIndex and a large MultiIndex."""

    def setup(self):
        fast_range = RangeIndex(100_000)
        slow_dates = date_range(start="1/1/2012", periods=1)
        self.mi_large_slow = MultiIndex.from_product([fast_range, slow_dates])
        self.idx_non_object = RangeIndex(1)

    def time_non_object_equals_multiindex(self):
        self.idx_non_object.equals(self.mi_large_slow)
class IndexAppend:
    """Benchmark Index.append() with long lists of range/int/object indexes."""

    def setup(self):
        total = 10_000
        self.range_idx = RangeIndex(0, 100)
        self.int_idx = self.range_idx.astype(int)
        self.obj_idx = self.int_idx.astype(str)
        self.range_idxs = []
        self.int_idxs = []
        self.object_idxs = []
        for chunk in range(1, total):
            r_idx = RangeIndex(chunk * 100, (chunk + 1) * 100)
            i_idx = r_idx.astype(int)
            self.range_idxs.append(r_idx)
            self.int_idxs.append(i_idx)
            self.object_idxs.append(i_idx.astype(str))

    def time_append_range_list(self):
        self.range_idx.append(self.range_idxs)

    def time_append_int_list(self):
        self.int_idx.append(self.int_idxs)

    def time_append_obj_list(self):
        self.obj_idx.append(self.object_idxs)
class Indexing:
    """Benchmark boolean masking, slicing, and get_loc across index dtypes."""

    params = ["String", "Float", "Int"]
    param_names = ["dtype"]

    def setup(self, dtype):
        size = 10 ** 6
        self.idx = getattr(tm, f"make{dtype}Index")(size)
        self.array_mask = (np.arange(size) % 3) == 0
        self.series_mask = Series(self.array_mask)
        self.sorted = self.idx.sort_values()
        half = size // 2
        self.non_unique = self.idx[:half].append(self.idx[:half])
        self.non_unique_sorted = (
            self.sorted[:half].append(self.sorted[:half]).sort_values()
        )
        self.key = self.sorted[size // 4]

    def time_boolean_array(self, dtype):
        self.idx[self.array_mask]

    def time_boolean_series(self, dtype):
        self.idx[self.series_mask]

    def time_get(self, dtype):
        self.idx[1]

    def time_slice(self, dtype):
        self.idx[:-1]

    def time_slice_step(self, dtype):
        self.idx[::2]

    def time_get_loc(self, dtype):
        self.idx.get_loc(self.key)

    def time_get_loc_sorted(self, dtype):
        self.sorted.get_loc(self.key)

    def time_get_loc_non_unique(self, dtype):
        self.non_unique.get_loc(self.key)

    def time_get_loc_non_unique_sorted(self, dtype):
        self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod:
    # GH 13166: get_loc on a float index built from tiny evenly spaced values.
    def setup(self):
        size = 100_000
        spacing = 4.8000000418824129e-08
        self.ind = Float64Index(np.arange(size) * spacing)

    def time_get_loc(self):
        self.ind.get_loc(0)
class IntervalIndexMethod:
    # GH 24813: IntervalIndex ops with one duplicated interval appended.
    params = [10 ** 3, 10 ** 5]

    def setup(self, N):
        left = np.append(np.arange(N), 0)
        right = np.append(np.arange(1, N + 1), 1)
        self.intv = IntervalIndex.from_arrays(left, right)
        self.intv._engine  # warm the engine cache
        self.intv2 = IntervalIndex.from_arrays(left + 1, right + 1)
        self.intv2._engine
        self.left = IntervalIndex.from_breaks(np.arange(N))
        self.right = IntervalIndex.from_breaks(np.arange(N - 3, 2 * N - 3))

    def time_monotonic_inc(self, N):
        self.intv.is_monotonic_increasing

    def time_is_unique(self, N):
        self.intv.is_unique

    def time_intersection(self, N):
        self.left.intersection(self.right)

    def time_intersection_one_duplicate(self, N):
        self.intv.intersection(self.right)

    def time_intersection_both_duplicate(self, N):
        self.intv.intersection(self.intv2)
class GC:
    """Peak-memory benchmark: churn large Index objects with the garbage
    collector disabled."""

    params = [1, 2, 5]

    def create_use_drop(self):
        # build a large Index, touch its engine, then let it go out of scope
        scratch = Index(list(range(1_000_000)))
        scratch._engine

    def peakmem_gc_instances(self, N):
        gc.disable()
        try:
            for _ in range(N):
                self.create_use_drop()
        finally:
            # always restore collection, even if index creation raises
            gc.enable()
from .pandas_vb_common import setup # noqa: F401 isort:skip
| jorisvandenbossche/pandas | asv_bench/benchmarks/index_object.py | Python | bsd-3-clause | 6,355 |
#!/usr/bin/env python
import sys
import os
import pandas as pd
import numpy as np
from hmmlearn.hmm import *
from sklearn.externals import joblib
import ipdb
import time
from math import (
log,
exp
)
from matplotlib import pyplot as plt
from sklearn.preprocessing import (
scale,
normalize
)
def matplot_list(list_data,
                 figure_index,
                 title,
                 label_string,
                 save_path,
                 save=False,
                 linewidth='3.0',
                 fontsize= 50,
                 xaxis_interval=0.005,
                 xlabel= 'time',
                 ylabel = 'log likelihood'):
    """Plot every series in *list_data* against time on one large figure.

    *label_string* holds one legend label per series; a label of
    'threshold' is drawn dashed and grey.  When *save* is True the figure
    is written to <save_path>/<title>.eps, so *title* is required then.
    """
    # if you want to save, title is necessary as a save name.
    global n_state
    global covariance_type_string
    plt.figure(figure_index, figsize=(40,30), dpi=80)
    ax = plt.subplot(111)
    # hide the top/right spines and pin the x axis to y=0
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.spines['bottom'].set_position(('data',0))
    plt.grid(True)
    i = 0
    plt.xlabel(xlabel,fontsize=fontsize)
    plt.ylabel(ylabel,fontsize=fontsize)
    plt.xticks( fontsize = 50)
    plt.yticks( fontsize = 50)
    plt.ylim(-200000,200000)
    for data in list_data:
        i = i + 1
        index = np.asarray(data).shape
        # x axis in seconds: sample index * sampling interval
        O = (np.arange(index[0])*xaxis_interval).tolist()
        if label_string[i-1] == 'threshold':
            plt.plot(O, data, label=label_string[i-1],linewidth=3, linestyle = '--', mfc ="grey")
        else:
            plt.plot(O, data, label=label_string[i-1],linewidth=linewidth)
    plt.legend(loc='best', frameon=True, fontsize=fontsize)
    plt.title(title, fontsize=fontsize)
    #plt.annotate('State=4 Sub_State='+str(n_state)+' GaussianHMM_cov='+covariance_type_string,
    #           xy=(0, -5000), xycoords='data',
    #           xytext=(+10, +30), textcoords='offset points', fontsize=fontsize,
    #           arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-.25"))
    if save:
        plt.savefig(save_path+'/'+title+".eps", format="eps")
def scaling(X):
    """Locally standardize each row of X with a +/-10-sample sliding window.

    For every interior sample, scale() is applied to the 21-row window
    centered on it and the centered row is kept.  Edge samples that lack a
    full window are dropped, so the result has fewer rows than X.
    """
    n_rows, _ = X.shape
    window = 10
    rows = []
    for mid in range(window, n_rows - window - 2):
        block = scale(X[mid - window:mid + window + 1, :])
        rows.append(block[window, :])
    return np.asarray(rows)
def load_data(path, path_index=1, preprocessing_scaling=False, preprocessing_normalize=False, norm='l2'):
    """Load one trial's torque data, label samples by task state, and return
    per-state feature matrices.

    Reads R_Torques.dat (6 wrench channels) and R_State.dat (state switch
    times) from *path*, splits the samples into the 4 task states, appends
    first-difference features (doubling the feature count to 12), and
    optionally normalizes and/or standardizes each state's matrix.

    Returns (Data, index): Data is a list of 4 arrays (one per state) and
    index the list of their row counts.
    """
    # df1 = pd.read_csv(path+'/R_Angles.dat', sep='\s+', header=None, skiprows=1)
    # df2 = pd.read_csv(path+'/R_CartPos.dat', sep='\s+', header=None, skiprows=1)
    df3 = pd.read_csv(path+'/R_Torques.dat', sep='\s+', header=None, skiprows=1)
    #df4 = pd.read_csv(path+'/worldforce-'+str(path_index)+".dat", sep='\s+', header=None, skiprows=1)
    df5 = pd.read_csv(path+'/R_State.dat', sep='\s+', header=None, skiprows=1)
    # df1.columns = ['time','s0','s1','s2','s3','s4','s5']
    #df2.columns = ['time','x','y','z','R','P','Y']
    df3.columns = ['time','Fx','Fy','Fz','Mx','My','Mz']
    #df4.columns = ['time','Fx','Fy','Fz','FR','FP','FY']
    df = df3
    #df = pd.merge(df1,df2, how='outer', on='time')
    #df = pd.merge(df,df3, how='outer', on='time')
    #df = pd.merge(df,df4, how='outer',on='time')
    df = df.fillna(method='ffill')
    df5.columns=['time']
    # the three switch times mark transitions into states 1..3; a synthetic
    # row (t=0.005, state 0) anchors the initial state
    df5['state']=[1,2,3]
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- this line only
    # works on the old pandas this script was written against; verify
    df5.ix[3] = [0.005,0]
    df = pd.merge(df,df5, how='outer', on='time')
    # forward-fill so every sample carries the label of the last switch
    df = df.fillna(method='ffill')
    X_1 = df.values[df.values[:,-1] ==0]
    index_1,column_1 = X_1.shape
    X_2 = df.values[df.values[:,-1] ==1]
    index_2,column_2 = X_2.shape
    X_3 = df.values[df.values[:,-1] ==2]
    index_3,column_3 = X_3.shape
    X_4 = df.values[df.values[:,-1] ==3]
    index_4,column_4 = X_4.shape
    index = [index_1,index_2,index_3,index_4]
    # drop the time (first) and state (last) columns
    X_1_ = X_1[:,1:-1]
    X_2_ = X_2[:,1:-1]
    X_3_ = X_3[:,1:-1]
    X_4_ = X_4[:,1:-1]
    # build first-difference features by shifting each matrix by one sample
    # against itself (zero row prepended), then concatenating column-wise
    X_tempt = np.array([[0,0,0,0,0,0]])
    X_1_tempt = np.concatenate((X_tempt,X_1_),axis=0)
    X_1_tempt_1 = np.concatenate((X_1_,X_tempt),axis=0)
    X_1_d = X_1_tempt_1 - X_1_tempt
    X_1_d = X_1_d[1:-1]
    X_1_d = np.concatenate((X_tempt,X_1_d),axis=0)
    X_1_ = np.concatenate((X_1_,X_1_d),axis=1)
    X_2_tempt = np.concatenate((X_tempt,X_2_),axis=0)
    X_2_tempt_2 = np.concatenate((X_2_,X_tempt),axis=0)
    X_2_d = X_2_tempt_2 - X_2_tempt
    X_2_d = X_2_d[1:-1]
    X_2_d = np.concatenate((X_tempt,X_2_d),axis=0)
    X_2_ = np.concatenate((X_2_,X_2_d),axis=1)
    X_3_tempt = np.concatenate((X_tempt,X_3_),axis=0)
    X_3_tempt_3 = np.concatenate((X_3_,X_tempt),axis=0)
    X_3_d = X_3_tempt_3 - X_3_tempt
    X_3_d = X_3_d[1:-1]
    X_3_d = np.concatenate((X_tempt,X_3_d),axis=0)
    X_3_ = np.concatenate((X_3_,X_3_d),axis=1)
    X_4_tempt = np.concatenate((X_tempt,X_4_),axis=0)
    X_4_tempt_4 = np.concatenate((X_4_,X_tempt),axis=0)
    X_4_d = X_4_tempt_4 - X_4_tempt
    X_4_d = X_4_d[1:-1]
    X_4_d = np.concatenate((X_tempt,X_4_d),axis=0)
    X_4_ = np.concatenate((X_4_,X_4_d),axis=1)
    Data = [X_1_,X_2_,X_3_,X_4_]
    if preprocessing_normalize:
        normalize_X_1 = normalize(Data[0], norm=norm)
        normalize_X_2 = normalize(Data[1], norm=norm)
        normalize_X_3 = normalize(Data[2], norm=norm)
        normalize_X_4 = normalize(Data[3], norm=norm)
        index_1, column_1 = normalize_X_1.shape
        index_2, column_2 = normalize_X_1.shape
        index_3, column_3 = normalize_X_1.shape
        index_4, column_4 = normalize_X_1.shape
        Data = []
        Data = [normalize_X_1, normalize_X_2, normalize_X_3, normalize_X_4]
        index = [index_1, index_2, index_3, index_4]
    if preprocessing_scaling:
        scaled_X_1 = scale(Data[0])
        scaled_X_2 = scale(Data[1])
        scaled_X_3 = scale(Data[2])
        scaled_X_4 = scale(Data[3])
        index_1, column_1 = scaled_X_1.shape
        index_2, column_2 = scaled_X_1.shape
        index_3, column_3 = scaled_X_1.shape
        index_4, column_4 = scaled_X_1.shape
        Data = []
        Data = [scaled_X_1, scaled_X_2, scaled_X_3, scaled_X_4]
        index = [index_1,index_2,index_3,index_4]
    return Data, index
def array_list_mean(list_data, c, offset):
    """Row-wise mean/std across a list of equal-length column arrays.

    eg: argument list_data[X1,X2,...] X1,X2 numpy column arrays
        mean(X1,X2,..)
        threshold = mean - c * std - offset

    Returns (mean_array, std_array, threshold_array).

    Cleanup: removed the unused ``tempt_list``, ``df_full_mean`` and
    ``df_full_std`` locals from the original; the computation is unchanged.
    """
    df = pd.DataFrame()
    for data in list_data:
        # stack each trial as a new column; mean/std are then per row
        df = pd.concat([df, pd.DataFrame(data=data)], axis=1)
    mean_series = df.mean(axis=1)
    std_series = df.std(axis=1)
    return mean_series.values, std_series.values, (mean_series.values-std_series.values*c-offset)
def array_log_mean(list_data):
    """Row-wise mean/std of a list of log-likelihood column arrays.

    Returns (mean_list, std_list, threshold_array) where the threshold is
    mean - 3 * std (the 3-sigma constant is hard-coded here, unlike
    array_list_mean which takes it as a parameter).

    Cleanup: removed the unused ``tempt_list`` local from the original;
    the computation is unchanged.
    """
    df = pd.DataFrame()
    for data in list_data:
        df = pd.concat([df, pd.DataFrame(data=data)], axis=1)
    mean_series = df.mean(axis=1)
    std_series = df.std(axis=1)
    mean = mean_series.values.T.tolist()
    std = std_series.values.T.tolist()
    threshold = mean_series.values - 3*std_series.values
    return mean, std, threshold
def main():
    """Leave-one-out evaluation of per-subtask HMM classifiers on the HIRO
    snap-assembly success trials.

    For each held-out trial *o* of the 44 trials: load the other 43 trials,
    score every training trial under every trial's per-subtask model to pick
    representative models, then classify the held-out trial online (growing
    prefix) and accumulate a 4x4 confusion matrix plus detection-time
    statistics, which are written out as .dat files at the end of each fold.
    """
    #ipdb.set_trace()
    global n_state
    global covariance_type_string
    n_iteraton = 100
    covariance_type_string = 'full'
    preprocessing_scaling = False
    preprocessing_normalize = False
    data_feature = 6
    norm_style = 'l2'
    success_path = "/home/ben/ML_data/REAL_HIRO_ONE_SA_SUCCESS"
    model_save_path = "/home/ben/ML_data/REAL_HIRO_ONE_SA_SUCCESS/train_model"
    success_trail_num_train = 43
    success_trail_num_test = 1
    threshold_constant = 3
    threshold_offset = 100
    n_state = joblib.load(model_save_path+'/model_decision/n_state.pkl')
    CV_index = [1,2]
    # confusion_matrix[true_state][predicted_state], accumulated over folds
    confusion_matrix = [[0, 0, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0]]
    # per-state list of detection times (percent of the state's duration)
    detection_time = [[],[],[],[]]
    #########--- load the success Data Index And Label String----###################
    path_index_name = []
    for i in range(1,45):
        if i+1 <= 9:
            post_str = '0'+str(i+1)
        else:
            post_str = str(i+1)
        path_index_name.append('20121127-HIROSA-S-'+post_str)
    # leave-one-out over all 44 trials: trial o is held out for testing
    for o in range(44):
        Success_Data_Train = []
        Success_Index_Train = []
        Success_Label_String_Train = []
        Success_Data_Test = []
        Success_Index_Test = []
        Success_Label_String_Test = []
        # NOTE(review): CV_Fold/CV_Train/CV_Test are assigned but never used
        # below -- presumably left over from an earlier 2-fold scheme; verify
        CV_Fold_1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        CV_Fold_2 = [10,11,12,13,14,15,16,17,18,19]
        if o == 0:
            CV_Train = CV_Fold_1
            CV_Test = CV_Fold_2
        else:
            CV_Train = CV_Fold_2
            CV_Test = CV_Fold_1
        ##########-----loading the Sucess trails data-----------############################
        ## Success_Data_Train[trails][subtask][time_index,feature]
        ## Success_Index_Train[trails][subtask]
        ## Success_Label_String[trails]
        print "Choosing CV index"
        for i in range(44):
            if not i==o:
                data_tempt, index_tempt = load_data(path=success_path+"/"+path_index_name[i],
                                                    preprocessing_scaling=preprocessing_scaling,
                                                    preprocessing_normalize=preprocessing_normalize,
                                                    norm=norm_style)
                Success_Data_Train.append(data_tempt)
                Success_Index_Train.append(index_tempt)
                Success_Label_String_Train.append("Success Trail"+ path_index_name[i])
                print"%d " %i
            else:
                data_tempt, index_tempt = load_data(path=success_path+"/"+path_index_name[i])
                Success_Data_Test.append(data_tempt)
                Success_Index_Test.append(index_tempt)
                Success_Label_String_Test.append("Success Trail"+ path_index_name[i])
        ######################################################################################
        ######-------loading the HMM Models to list[] ------#################################
        ## Subtasks i hmm model list
        ## model_i[trails]
        model_1_list = []
        model_2_list = []
        model_3_list = []
        model_4_list = []
        # loading the models
        for i in range(success_trail_num_train):
            if not os.path.isdir(success_path+'/'+path_index_name[i]):
                print success_path+'/'+path_index_name[i]+" do not exit"
                print "Please train the hmm model first, and check your model folder"
            model_1_list.append(joblib.load(model_save_path+'/'+path_index_name[i]+"/model_s1.pkl"))
            model_2_list.append(joblib.load(model_save_path+'/'+path_index_name[i]+"/model_s2.pkl"))
            model_3_list.append(joblib.load(model_save_path+'/'+path_index_name[i]+"/model_s3.pkl"))
            model_4_list.append(joblib.load(model_save_path+'/'+path_index_name[i]+"/model_s4.pkl"))
        ###############################################################################
        ###---------------get the minimun index in every subtask from trails-------#########
        Success_min_index_train = []
        index_tempt = []
        for j in range(4):
            index_tempt = []
            for i in range(success_trail_num_train):
                index_tempt.append(Success_Index_Train[i][j])
            Success_min_index_train.append(min(index_tempt))
        ######-----------training and getting the threshold ----################################
        # Training with left n-1 out
        # score every trial's models against all other trials; the mean
        # log-likelihood per model is used below to rank models
        model_1_end_mean = []
        model_2_end_mean = []
        model_3_end_mean = []
        model_4_end_mean = []
        for n in range(success_trail_num_train):
            model_1_log = []
            model_2_log = []
            model_3_log = []
            model_4_log = []
            for i in range(success_trail_num_train):
                #Do no train with its own model data
                if not i==n:
                    model_1_log_tempt = []
                    model_2_log_tempt = []
                    model_3_log_tempt = []
                    model_4_log_tempt = []
                    for j in range(4):
                        if j == 0:
                            try:
                                model_1_log_tempt = model_1_list[n].score(Success_Data_Train[i][j][:,:])
                                model_1_log.append(model_1_log_tempt)
                            except:
                                print "error in trail %d model 1"%(n+1)
                                return 0
                        elif j == 1:
                            try:
                                model_2_log_tempt = model_2_list[n].score(Success_Data_Train[i][j][:,:])
                                model_2_log.append(model_2_log_tempt)
                            except:
                                print "error in trail %d model 2"%(n+1)
                                return 0
                        elif j==2:
                            try:
                                model_3_log_tempt = model_3_list[n].score(Success_Data_Train[i][j][:,:])
                                model_3_log.append(model_3_log_tempt)
                            except:
                                print "error in trail %d model 3"%(n+1)
                                return 0
                        elif j==3:
                            try:
                                model_4_log_tempt = model_4_list[n].score(Success_Data_Train[i][j][:,:])
                                model_4_log.append(model_4_log_tempt)
                            except:
                                print "error in trail %d model 4"%(n+1)
                                return 0
            mean_tempt = np.mean(np.asarray(model_1_log))
            model_1_end_mean.append(mean_tempt)
            mean_tempt = np.mean(np.asarray(model_2_log))
            model_2_end_mean.append(mean_tempt)
            mean_tempt = np.mean(np.asarray(model_3_log))
            model_3_end_mean.append(mean_tempt)
            mean_tempt = np.mean(np.asarray(model_4_log))
            model_4_end_mean.append(mean_tempt)
        # get the best model according to the highest log-likelihood mean value
        argsort_model_1_end_mean = np.argsort(np.asarray(model_1_end_mean))
        argsort_model_2_end_mean = np.argsort(np.asarray(model_2_end_mean))
        argsort_model_3_end_mean = np.argsort(np.asarray(model_3_end_mean))
        argsort_model_4_end_mean = np.argsort(np.asarray(model_4_end_mean))
        # NOTE(review): the prints below report the "mid" model at rank [-9]
        # while best_model selects rank [-1]; the mixture looks deliberate
        # (this file's name says "mid") but verify against the experiment log
        best_model_trail_list = [argsort_model_1_end_mean[-9],
                                 argsort_model_2_end_mean[-9],
                                 argsort_model_3_end_mean[-9],
                                 argsort_model_4_end_mean[-9]]
        print "the model_1 is %d"%(argsort_model_1_end_mean[-1]+1)
        print "the mid model_1_mean is %d"%model_1_end_mean[argsort_model_1_end_mean[-9]]
        print "the mid model_2 is %d"%(argsort_model_2_end_mean[-1]+1)
        print "the mid model_2_mean is %d"%model_2_end_mean[argsort_model_2_end_mean[-9]]
        print "the mid model_3 is %d"%(argsort_model_3_end_mean[-1]+1)
        print "the mid model_3_mean is %d"%model_3_end_mean[argsort_model_3_end_mean[-9]]
        print "the best model_4 is %d"%(argsort_model_4_end_mean[-1]+1)
        print "the best model_4_mean is %d"%model_4_end_mean[argsort_model_4_end_mean[-9]]
        best_model = [model_1_list[argsort_model_1_end_mean[-1]],
                      model_2_list[argsort_model_2_end_mean[-1]],
                      model_3_list[argsort_model_3_end_mean[-1]],
                      model_4_list[argsort_model_4_end_mean[-1]]]
        best_model_arg = [argsort_model_1_end_mean[-1],
                          argsort_model_2_end_mean[-1],
                          argsort_model_3_end_mean[-1],
                          argsort_model_4_end_mean[-1]]
        ######-----------training and getting the threshold ----################################
        model_1_log = []
        model_2_log = []
        model_3_log = []
        model_4_log = []
        model_1_log_T = []
        model_2_log_T = []
        model_3_log_T = []
        model_4_log_T = []
        model_1_end_mean = []
        model_2_end_mean = []
        model_3_end_mean = []
        model_4_end_mean = []
        model_1_log_full = []
        model_2_log_full = []
        model_3_log_full = []
        model_4_log_full = []
        model_log_full = []
        # Training with left one out
        if not os.path.isdir(model_save_path+"/figure/state_classifcation"):
            os.makedirs(model_save_path+"/figure/state_classifcation")
        for i in range(success_trail_num_test):
            #Do no train with its own model data
            model_1_log = np.array([0])
            model_2_log = np.array([0])
            model_3_log = np.array([0])
            model_4_log = np.array([0])
            cul_time = 0
            Classification_Full_Flag = True
            for j in range(4):
                model_1_log_tempt = []
                model_2_log_tempt = []
                model_3_log_tempt = []
                model_4_log_tempt = []
                classification_flag = False
                # online classification: score the growing prefix of state j's
                # data under all four models and pick the argmax at each step
                for k in range(Success_Index_Test[i][j]):
                    data_1 = best_model[0].score(Success_Data_Test[i][j][:k+1,:])
                    data_2 = best_model[1].score(Success_Data_Test[i][j][:k+1,:])
                    data_3 = best_model[2].score(Success_Data_Test[i][j][:k+1,:])
                    data_4 = best_model[3].score(Success_Data_Test[i][j][:k+1,:])
                    model_1_log_tempt.append(data_1)
                    model_2_log_tempt.append(data_2)
                    model_3_log_tempt.append(data_3)
                    model_4_log_tempt.append(data_4)
                    arg_list = np.argsort(np.asarray([data_1, data_2, data_3, data_4]))
                    if not arg_list[-1] == j:
                        classification_flag = False
                    elif not classification_flag:
                        classification_flag =True
                        # remember the last time the prediction crossed to correct
                        latest_cross_time = k
                if not classification_flag:
                    print"False classification at state %d" %(j+1)
                    print"Misclassifcation to state %d"%(arg_list[-1]+1)
                    Classification_Full_Flag = False
                else:
                    print "the cross time is %f" %((cul_time+latest_cross_time)*0.005)
                    print "the time of dectection is %f" %(latest_cross_time*100/Success_Index_Test[i][j]) + "%"
                confusion_matrix[j][arg_list[-1]] = confusion_matrix[j][arg_list[-1]]+1
                detection_time[j].append(latest_cross_time*100/Success_Index_Test[i][j])
                model_1_log = np.concatenate((model_1_log, np.asarray(model_1_log_tempt)))
                model_2_log = np.concatenate((model_2_log, np.asarray(model_2_log_tempt)))
                model_3_log = np.concatenate((model_3_log, np.asarray(model_3_log_tempt)))
                model_4_log = np.concatenate((model_4_log, np.asarray(model_4_log_tempt)))
                cul_time = Success_Index_Test[i][j] +cul_time
            print "confusion_matrix ="
            print "%d %d %d %d" %(confusion_matrix[0][0],confusion_matrix[0][1],confusion_matrix[0][2],confusion_matrix[0][3])
            print "%d %d %d %d" %(confusion_matrix[1][0],confusion_matrix[1][1],confusion_matrix[1][2],confusion_matrix[1][3])
            print "%d %d %d %d" %(confusion_matrix[2][0],confusion_matrix[2][1],confusion_matrix[2][2],confusion_matrix[2][3])
            print "%d %d %d %d" %(confusion_matrix[3][0],confusion_matrix[3][1],confusion_matrix[3][2],confusion_matrix[3][3])
            print "computing the classification log curves of testing (%d/%d)"%(i+1,success_trail_num_test)
            print "Detection Time: %d %d %d %d" %(detection_time[0][-1],detection_time[1][-1],detection_time[2][-1],detection_time[3][-1])
            # if Classification_Full_Flag:
            #     matplot_list([model_1_log,model_2_log,model_3_log,model_4_log],
            #                  figure_index=i,
            #                  label_string=['Approach','Rotation','Insertion','Mating'],
            #                  title='State Classification'+Success_Label_String_Test[i],
            #                  save=True,
            #                  save_path = model_save_path+"/figure/state_classifcation")
            # else:
            #     matplot_list([model_1_log,model_2_log,model_3_log,model_4_log],
            #                  figure_index=i,
            #                  label_string=['Approach','Rotation','Insertion','Mating'],
            #                  title='Wrong State Classification'+Success_Label_String_Test[i],
            #                  save=True,
            #                  save_path = model_save_path+"/figure/state_classifcation")
        np.savetxt(model_save_path+'/figure/state_classifcation/confusion_matrix_mid.dat', np.asarray(confusion_matrix), fmt='%d')
        np.savetxt(model_save_path+'/figure/state_classifcation/detection_time_mid.dat', np.asarray(detection_time), fmt='%d')
        #plt.show()
    #plt.show()
    return 0
# Script entry point: run the evaluation and propagate its status code.
if __name__ == '__main__':
    sys.exit(main())
| birlrobotics/HMM | hmm_for_hiro_snap_task/failure_detection/hmm_model_test_classification_mid_real_hiro.py | Python | bsd-3-clause | 21,847 |
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import datetime
import itertools
import operator
import os
import re
import sys
try:
from lxml import etree
except ImportError:
etree = None
from . import colorize, config, source, utils
# Issue severity kinds as they appear in infer's JSON report.
ISSUE_KIND_ERROR = 'ERROR'
ISSUE_KIND_WARNING = 'WARNING'
ISSUE_KIND_INFO = 'INFO'
ISSUE_KIND_ADVICE = 'ADVICE'
# field names in rows of json reports
JSON_INDEX_DOTTY = 'dotty'
JSON_INDEX_FILENAME = 'file'
JSON_INDEX_HASH = 'hash'
JSON_INDEX_INFER_SOURCE_LOC = 'infer_source_loc'
# sub-fields of the optional infer_source_loc record
JSON_INDEX_ISL_FILE = 'file'
JSON_INDEX_ISL_LNUM = 'lnum'
JSON_INDEX_ISL_CNUM = 'cnum'
JSON_INDEX_ISL_ENUM = 'enum'
JSON_INDEX_KIND = 'kind'
JSON_INDEX_LINE = 'line'
JSON_INDEX_PROCEDURE = 'procedure'
JSON_INDEX_PROCEDURE_ID = 'procedure_id'
JSON_INDEX_QUALIFIER = 'qualifier'
JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
JSON_INDEX_TYPE = 'bug_type'
JSON_INDEX_TRACE = 'bug_trace'
JSON_INDEX_TRACE_LEVEL = 'level'
JSON_INDEX_TRACE_FILENAME = 'filename'
JSON_INDEX_TRACE_LINE = 'line_number'
JSON_INDEX_TRACE_DESCRIPTION = 'description'
JSON_INDEX_VISIBILITY = 'visibility'
# base URL for per-issue-type documentation anchors
ISSUE_TYPES_URL = 'http://fbinfer.com/docs/infer-issue-types.html#'
def _text_of_infer_loc(loc):
    """Render an infer source-location dict as ' (file:line:col-endcol:)'."""
    parts = (
        loc[JSON_INDEX_ISL_FILE],
        loc[JSON_INDEX_ISL_LNUM],
        loc[JSON_INDEX_ISL_CNUM],
        loc[JSON_INDEX_ISL_ENUM],
    )
    return ' ({0}:{1}:{2}-{3}:)'.format(*parts)
def text_of_report(report):
    """Plain-text one-issue rendering: 'file:line: kind: type[ (loc)]\\n  msg'."""
    infer_loc = ''
    if JSON_INDEX_INFER_SOURCE_LOC in report:
        infer_loc = _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
    return '%s:%d: %s: %s%s\n  %s' % (
        report[JSON_INDEX_FILENAME],
        report[JSON_INDEX_LINE],
        report[JSON_INDEX_KIND].lower(),
        report[JSON_INDEX_TYPE],
        infer_loc,
        report[JSON_INDEX_QUALIFIER],
    )
def _text_of_report_list(project_root, reports, bugs_txt_path, limit=None,
                         formatter=colorize.TERMINAL_FORMATTER):
    """Render a list of report rows plus a per-bug-type summary.

    project_root -- root used to resolve report file paths for source context.
    reports -- list of JSON report rows.
    bugs_txt_path -- path mentioned in the "too many issues" trailer.
    limit -- max number of issues rendered in full (None means no limit).
    formatter -- a colorize formatter controlling ANSI colors.
    Returns the complete message as a single string.

    Fixed for Python 3 compatibility (the file's __future__ imports signal
    2/3 intent): the original used the py2-only unicode() builtin, a
    tuple-parameter lambda (a SyntaxError on py3), dict.items().sort()
    (items() is an unsortable view on py3), and compared None >= 0
    (a TypeError on py3 when limit is None).
    """
    n_issues = len(reports)
    if n_issues == 0:
        if formatter == colorize.TERMINAL_FORMATTER:
            out = colorize.color(' No issues found ',
                                 colorize.SUCCESS, formatter)
            return out + '\n'
        else:
            return 'No issues found'
    text_errors_list = []
    for report in reports[:limit]:
        filename = report[JSON_INDEX_FILENAME]
        line = report[JSON_INDEX_LINE]
        source_context = source.build_source_context(
            os.path.join(project_root, filename),
            formatter,
            line,
        )
        indenter = source.Indenter() \
            .indent_push() \
            .add(source_context)
        # '{}'.format() works on both py2 (unicode via unicode_literals)
        # and py3; the original called the py2-only unicode() builtin.
        source_context = '\n' + '{}'.format(indenter)
        msg = text_of_report(report)
        kind = report[JSON_INDEX_KIND]
        if kind == ISSUE_KIND_ERROR:
            msg = colorize.color(msg, colorize.ERROR, formatter)
        elif kind == ISSUE_KIND_WARNING:
            msg = colorize.color(msg, colorize.WARNING, formatter)
        elif kind == ISSUE_KIND_ADVICE:
            msg = colorize.color(msg, colorize.ADVICE, formatter)
        text_errors_list.append('%s%s' % (msg, source_context))
    error_types_count = {}
    for report in reports:
        t = report[JSON_INDEX_TYPE]
        # assert failures are not very informative without knowing
        # which assertion failed
        if t == 'Assert_failure' and JSON_INDEX_INFER_SOURCE_LOC in report:
            t += _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
        error_types_count[t] = error_types_count.get(t, 0) + 1
    max_type_length = max(map(len, error_types_count.keys())) + 2
    # Most frequent bug types first.
    sorted_error_types = sorted(error_types_count.items(),
                                key=operator.itemgetter(1), reverse=True)
    types_text_list = ['%s: %d' % (t.rjust(max_type_length), count)
                       for t, count in sorted_error_types]
    text_errors = '\n\n'.join(text_errors_list)
    # Explicit None check: on py2 `None >= 0` was quietly False, on py3 it
    # raises TypeError.
    if limit is not None and limit >= 0 and n_issues > limit:
        text_errors += colorize.color(
            ('\n\n...too many issues to display (limit=%d exceeded), please ' +
             'see %s or run `inferTraceBugs` for the remaining issues.')
            % (limit, bugs_txt_path), colorize.HEADER, formatter)
    issues_found = 'Found {n_issues}'.format(
        n_issues=utils.get_plural('issue', n_issues),
    )
    msg = '{issues_found}\n\n{issues}\n\n{header}\n\n{summary}'.format(
        issues_found=colorize.color(issues_found,
                                    colorize.HEADER,
                                    formatter),
        issues=text_errors,
        header=colorize.color('Summary of the reports',
                              colorize.HEADER, formatter),
        summary='\n'.join(types_text_list),
    )
    return msg
def _is_user_visible(project_root, report):
    """Return True if the report's severity kind should be shown to users."""
    visible_kinds = (ISSUE_KIND_ERROR, ISSUE_KIND_WARNING, ISSUE_KIND_ADVICE)
    return report[JSON_INDEX_KIND] in visible_kinds
def print_and_save_errors(infer_out, project_root, json_report, bugs_out,
                          pmd_xml):
    """Print user-visible issues to stdout and save them to report files.

    infer_out -- Infer's output directory (destination of the PMD XML file).
    project_root -- root used to resolve source paths for context snippets.
    json_report -- path of the JSON report to load.
    bugs_out -- path where the plain-text report is written.
    pmd_xml -- if truthy, additionally write a PMD-format XML report.
    """
    errors = utils.load_json_from_path(json_report)
    # Drop INFO-level and other kinds that should not be surfaced.
    errors = [e for e in errors if _is_user_visible(project_root, e)]
    # Terminal rendering is capped at 10 issues; the files get all of them.
    console_out = _text_of_report_list(project_root, errors, bugs_out,
                                       limit=10)
    utils.stdout('\n' + console_out)
    # Uncolored full report for the bugs.txt file.
    plain_out = _text_of_report_list(project_root, errors, bugs_out,
                                     formatter=colorize.PLAIN_FORMATTER)
    with codecs.open(bugs_out, 'w',
                     encoding=config.CODESET, errors='replace') as file_out:
        file_out.write(plain_out)
    if pmd_xml:
        xml_out = os.path.join(infer_out, config.PMD_XML_FILENAME)
        with codecs.open(xml_out, 'w',
                         encoding=config.CODESET,
                         errors='replace') as file_out:
            file_out.write(_pmd_xml_of_issues(errors))
def merge_reports_from_paths(report_paths):
    """Load every JSON report file, then sort and de-duplicate the rows."""
    merged = []
    for path in report_paths:
        merged += utils.load_json_from_path(path)
    return _sort_and_uniq_rows(merged)
def _pmd_xml_of_issues(issues):
    """Serialize issues to a PMD 5.x-format XML report.

    issues -- list of JSON report rows.
    Returns the XML document encoded with config.CODESET.
    Exits the process with status 1 if lxml is not installed.
    """
    if etree is None:
        print('ERROR: "etree" Python package not found.')
        print('ERROR: You need to install it to use Infer with --pmd-xml')
        sys.exit(1)
    root = etree.Element('pmd')
    root.attrib['version'] = '5.4.1'
    root.attrib['date'] = datetime.datetime.now().isoformat()
    for issue in issues:
        # Raw string: r'\(' is a regex escape; the old non-raw '\(' relied
        # on Python passing unknown escapes through (a DeprecationWarning
        # since Python 3.6).
        fully_qualified_method_name = re.search(r'(.*)\(.*',
                                                issue[JSON_INDEX_PROCEDURE_ID])
        class_name = ''
        package = ''
        if fully_qualified_method_name is not None:
            # probably Java: "package.Class.method(args)"
            info = fully_qualified_method_name.groups()[0].split('.')
            # Guard the single-component case: the old info[-2:-1][0]
            # raised IndexError when the procedure id had no dots.
            class_name = info[-2] if len(info) >= 2 else ''
            method = info[-1]
            package = '.'.join(info[0:-2])
        else:
            method = issue[JSON_INDEX_PROCEDURE]
        file_node = etree.Element('file')
        file_node.attrib['name'] = issue[JSON_INDEX_FILENAME]
        violation = etree.Element('violation')
        violation.attrib['begincolumn'] = '0'
        violation.attrib['beginline'] = str(issue[JSON_INDEX_LINE])
        violation.attrib['endcolumn'] = '0'
        violation.attrib['endline'] = str(issue[JSON_INDEX_LINE] + 1)
        violation.attrib['class'] = class_name
        violation.attrib['method'] = method
        violation.attrib['package'] = package
        violation.attrib['priority'] = '1'
        violation.attrib['rule'] = issue[JSON_INDEX_TYPE]
        violation.attrib['ruleset'] = 'Infer Rules'
        violation.attrib['externalinfourl'] = (
            ISSUE_TYPES_URL + issue[JSON_INDEX_TYPE])
        violation.text = issue[JSON_INDEX_QUALIFIER]
        file_node.append(violation)
        root.append(file_node)
    return etree.tostring(root, pretty_print=True, encoding=config.CODESET)
def _sort_and_uniq_rows(l):
    """Sort report rows in place and return one row per duplicate group.

    Rows are keyed by (file, line, hash, qualifier); rows sharing the same
    key are considered duplicates and only the first is kept.

    Fixed for Python 3: the original used a tuple-parameter lambda
    (a SyntaxError on py3) and the py2-only iterator .next() method, and
    relied on py2's map() returning a list.
    """
    key = operator.itemgetter(JSON_INDEX_FILENAME,
                              JSON_INDEX_LINE,
                              JSON_INDEX_HASH,
                              JSON_INDEX_QUALIFIER)
    l.sort(key=key)
    groups = itertools.groupby(l, key)
    # guaranteed to be at least one element in each group, so next()
    # cannot raise StopIteration here
    return [next(dups) for _keys, dups in groups]
| jsachs/infer | infer/lib/python/inferlib/issues.py | Python | bsd-3-clause | 9,001 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
# Lifecycle states for a bank-link payment transaction; the one-letter
# codes are stored in Transaction.status (new transactions default to 'P').
TRANSACTION_STATUS = (
    ('P', _('pending')),
    ('F', _('failed')),
    ('C', _('complete')),
)
class Transaction(models.Model):
    """A single bank-link payment transaction and its lifecycle state."""
    user = models.ForeignKey(User, blank = True, null = True, default = None,
        verbose_name = _("user"), help_text = _("user who started transaction"))
    description = models.CharField(_("reference description"), max_length = 255, help_text = _("reference description"))
    # NOTE(review): FloatField is lossy for monetary amounts; DecimalField
    # would be safer, but switching requires a schema migration, so it is
    # only flagged here.
    amount = models.FloatField(_("amount"))
    currency = models.CharField(_("currency"), max_length = 3)
    details = models.CharField(_("details"), max_length = 255, help_text = _("payment details"))
    created = models.DateTimeField(auto_now_add = True)
    last_modified = models.DateTimeField(auto_now = True)
    # One-letter code from TRANSACTION_STATUS; new transactions are pending.
    status = models.CharField(_("status"), max_length = 1, default = 'P')
    # Return URLs supplied by the caller; managed by code, not by admins.
    redirect_after_success = models.CharField(max_length = 255, editable = False)
    redirect_on_failure = models.CharField(max_length = 255, editable = False)

    def __unicode__(self):
        # Interpolate *after* translating so that "transaction %s " is a
        # stable msgid in the translation catalog; the original translated
        # the already-interpolated string, which can never match a catalog
        # entry (untranslated output is byte-identical either way).
        return _("transaction %s ") % self.pk

    class Meta:
        verbose_name = _("transaction")
        ordering = ['-last_modified']
| truevision/django_banklink | django_banklink/models.py | Python | bsd-3-clause | 1,291 |
from django.test import TestCase
from forums.factories import PostFactory
class CategoryModelTests(TestCase):
    """Tests for the Post model's string representation.

    NOTE(review): the class name says "Category" but the fixture and
    assertion exercise Post (file is test_model_post.py); renaming would
    change the public test identifier, so it is only flagged here.
    """

    def setUp(self):
        self.post = PostFactory()

    def test_string_method(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(str(self.post), 'Body text\nWith multiple lines!')
| byteweaver/django-forums | tests/test_model_post.py | Python | bsd-3-clause | 280 |
"""
RGB Colourspace & Transformations
=================================
Defines the following *RGB* colourspace transformations:
- :func:`colour_hdri.camera_space_to_RGB`
- :func:`colour_hdri.camera_space_to_sRGB`
"""
from __future__ import annotations
import numpy as np
from colour.algebra import matrix_dot, vector_dot
from colour.hints import ArrayLike, NDArray
from colour.models import RGB_COLOURSPACES
# Standard colour-science package metadata.
__author__ = "Colour Developers"
__copyright__ = "Copyright 2015 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"

# Public API of this module.
__all__ = [
    "camera_space_to_RGB",
    "camera_space_to_sRGB",
]
def camera_space_to_RGB(
    RGB: ArrayLike,
    M_XYZ_to_camera_space: ArrayLike,
    matrix_RGB_to_XYZ: ArrayLike,
) -> NDArray:
    """
    Convert given *RGB* array from *camera space* to given *RGB* colourspace.

    Parameters
    ----------
    RGB
        Camera space *RGB* colourspace array.
    M_XYZ_to_camera_space
        Matrix converting from *CIE XYZ* tristimulus values to *camera space*.
    matrix_RGB_to_XYZ
        Matrix converting from *RGB* colourspace to *CIE XYZ* tristimulus
        values.

    Returns
    -------
    :class:`numpy.ndarray`
        *RGB* colourspace array.

    Examples
    --------
    >>> RGB = np.array([0.80660, 0.81638, 0.65885])
    >>> M_XYZ_to_camera_space = np.array([
    ...     [0.47160000, 0.06030000, -0.08300000],
    ...     [-0.77980000, 1.54740000, 0.24800000],
    ...     [-0.14960000, 0.19370000, 0.66510000]])
    >>> matrix_RGB_to_XYZ = np.array([
    ...     [0.41238656, 0.35759149, 0.18045049],
    ...     [0.21263682, 0.71518298, 0.07218020],
    ...     [0.01933062, 0.11919716, 0.95037259]])
    >>> camera_space_to_RGB(
    ...     RGB,
    ...     M_XYZ_to_camera_space,
    ...     matrix_RGB_to_XYZ)  # doctest: +ELLIPSIS
    array([ 0.7564180...,  0.8683192...,  0.6044589...])
    """
    # Compose the RGB -> camera-space matrix, normalise each row so its
    # entries sum to one, then apply its inverse to go camera space -> RGB.
    M_RGB_to_camera = matrix_dot(M_XYZ_to_camera_space, matrix_RGB_to_XYZ)
    row_sums = np.transpose(np.sum(M_RGB_to_camera, axis=1)[np.newaxis])
    M_RGB_to_camera = M_RGB_to_camera / row_sums
    return vector_dot(np.linalg.inv(M_RGB_to_camera), RGB)
def camera_space_to_sRGB(
    RGB: ArrayLike, M_XYZ_to_camera_space: ArrayLike
) -> NDArray:
    """
    Convert given *RGB* array from *camera space* to *sRGB* colourspace.

    Parameters
    ----------
    RGB
        Camera space *RGB* colourspace array.
    M_XYZ_to_camera_space
        Matrix converting from *CIE XYZ* tristimulus values to *camera space*.

    Returns
    -------
    :class:`numpy.ndarray`
        *sRGB* colourspace array.

    Examples
    --------
    >>> RGB = np.array([0.80660, 0.81638, 0.65885])
    >>> M_XYZ_to_camera_space = np.array([
    ...     [0.47160000, 0.06030000, -0.08300000],
    ...     [-0.77980000, 1.54740000, 0.24800000],
    ...     [-0.14960000, 0.19370000, 0.66510000]])
    >>> camera_space_to_sRGB(RGB, M_XYZ_to_camera_space)  # doctest: +ELLIPSIS
    array([ 0.7564350...,  0.8683155...,  0.6044706...])
    """
    # Delegate to the generic conversion with the sRGB RGB -> XYZ matrix.
    matrix_sRGB_to_XYZ = RGB_COLOURSPACES["sRGB"].matrix_RGB_to_XYZ
    return camera_space_to_RGB(RGB, M_XYZ_to_camera_space, matrix_sRGB_to_XYZ)
| colour-science/colour-hdri | colour_hdri/models/rgb.py | Python | bsd-3-clause | 3,252 |
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2012 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
# web: http://zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: Peak.py 1 2012-01-07 22:20:43Z zunzun.com@gmail.com $
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
# Make numpy raise exceptions instead of warnings for overflow, division and
# invalid operations so the try/except blocks in the model classes below can
# trap them; underflow is harmless for these models and stays ignored.
numpy.seterr(over = 'raise', divide = 'raise', invalid = 'raise', under = 'ignore') # numpy raises warnings, convert to exceptions to trap them
import pyeq2.Model_2D_BaseClass
class Hamilton(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Hamilton peak model:
    Vb = Gb * (I/mu)^(ln(mu/I)/B^2) + (Vbmax * I) / (I + sigma_b).
    """
    _baseName = "Hamilton"
    _HTML = "Vb = Gb * (I/mu)<sup>ln(mu/I)/(B*B)</sup> + (Vb<sub>max</sub> * I)/(I + sigma_b)"
    _leftSideHTML = 'Vb'
    _coefficientDesignators = ['Gb', 'mu', 'B', 'Vbmax', 'sigma_b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    # Variant equations (offset, reciprocal, inverse, growth/decay) that the
    # framework may auto-generate from this base form.
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # x must be strictly positive: the model takes log(mu/x) and a
    # fractional power of x/mu.
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def __init__(self, inFittingTarget = 'SSQABS', inExtendedVersionName = 'Default'):
        pyeq2.Model_2D_BaseClass.Model_2D_BaseClass.__init__(self, inFittingTarget, inExtendedVersionName)
        # All five coefficients are constrained to be non-negative.
        self.lowerCoefficientBounds = [0.0, 0.0, 0.0, 0.0, 0.0]

    def GetDataCacheFunctions(self):
        # This model only needs the raw X values from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        Gb = inCoeffs[0]
        mu = inCoeffs[1]
        B = inCoeffs[2]
        Vbmax = inCoeffs[3]
        sigma_b = inCoeffs[4]

        try:
            temp = Gb * numpy.power(x_in / mu, numpy.log(mu/x_in)/(B*B)) + (Vbmax * x_in) / (x_in + sigma_b)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # numpy.seterr() (see module top) turns numeric warnings into
            # exceptions; huge residuals make the fitter reject this
            # coefficient set.  NOTE(review): bare except is the
            # project-wide pattern in this file; left as-is.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = Gb * pow(x_in / mu, log(mu/x_in)/(B*B)) + (Vbmax * x_in) / (x_in + sigma_b);\n"
        return s
class ArnoldCohenLogNormalShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Arnold Cohen log-normal peak, shifted:
    y = a * exp(-0.5 * ((ln(x-f)-b)/c)^2) / (d * (x-g)).
    """
    _baseName = "Arnold Cohen Log-Normal Peak Shifted"
    _HTML = 'y = a * (exp(-0.5 * ((ln(x-f)-b)/c)<sup>2</sup>)) / (d * (x-g))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]
        f = inCoeffs[4]
        g = inCoeffs[5]

        try:
            temp = (a * numpy.exp(-0.5 * numpy.power((numpy.log(x_in-f)-b) / c, 2.0))) / (d * (x_in-g))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = (a * exp(-0.5 * pow((log(x_in-f)-b) / c, 2.0))) / (d * (x_in-g));\n"
        return s
class ArnoldCohenTwoParameterLogNormalShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Arnold Cohen two-parameter log-normal peak, shifted:
    y = exp(-0.5 * ((ln(x-d)-b)/c)^2) / (sqrt(2*pi) * c * (x-f)).
    """
    _baseName = "Arnold Cohen Two-Parameter Log-Normal Peak Shifted"
    _HTML = 'y = exp(-0.5 * ((ln(x-d)-b)/c)<sup>2</sup>) / (sqrt(2*pi) * c * (x-f))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['b', 'c', 'd', 'f']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    # Precomputed sqrt(2*pi) normalization constant shared by all instances.
    sqrt2pi = numpy.sqrt(2.0 * numpy.pi)

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        b = inCoeffs[0]
        c = inCoeffs[1]
        d = inCoeffs[2]
        f = inCoeffs[3]

        try:
            temp = numpy.exp(-0.5 * numpy.power((numpy.log(x_in-d)-b) / c, 2.0)) / (self.sqrt2pi * c * (x_in-f))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = exp(-0.5 * pow((log(x_in-d)-b) / c, 2.0)) / (2.506628274631000502415765284811 * c * (x_in-f));\n"
        return s
class BoxLucasA(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Box Lucas A model: y = a * (1 - b^x)."""
    _baseName = "Box Lucas A"
    _HTML = 'y = a * (1.0 - b<sup>x</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def __init__(self, inFittingTarget = 'SSQABS', inExtendedVersionName = 'Default'):
        pyeq2.Model_2D_BaseClass.Model_2D_BaseClass.__init__(self, inFittingTarget, inExtendedVersionName)
        # b is kept strictly positive so b**x is defined for non-integer x;
        # a is unbounded below.
        self.lowerCoefficientBounds = [-1.0E300, 1.0E-300]

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = a * (1.0 - numpy.power(b, x_in))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * (1.0 - pow(b, x_in));\n"
        return s
class BoxLucasAShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Box Lucas A model with a shifted x axis: y = a * (1 - b^(x-c))."""
    _baseName = "Box Lucas A Shifted"
    _HTML = 'y = a * (1.0 - b<sup>x-c</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = a * (1.0 - numpy.power(b, x_in - c))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * (1.0 - pow(b, x_in - c));\n"
        return s
class BoxLucasB(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Box Lucas B model: y = a * (1 - exp(-b*x))."""
    _baseName = "Box Lucas B"
    _HTML = 'y = a * (1.0 - exp(-bx))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Uses the cached negated X values (-x), so the evaluation below
        # multiplies by b directly instead of by -b.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.NegX(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached -X values for the given coefficients."""
        x_NegX = inDataCacheDictionary['NegX'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = a * (1.0 - numpy.exp(b * x_NegX))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * (1.0 - exp(-1.0 * b * x_in));\n"
        return s
class BoxLucasBShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Box Lucas B model with a shifted x axis: y = a * (1 - exp(-b*(x-c)))."""
    _baseName = "Box Lucas B Shifted"
    _HTML = 'y = a * (1.0 - exp(-b(x-c)))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = a * (1.0 - numpy.exp(-1.0 * b * (x_in - c)))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * (1.0 - exp(-1.0 * b * (x_in - c)));\n"
        return s
class BoxLucasC(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Box Lucas C model: y = (a/(a-b)) * (exp(-b*x) - exp(-a*x))."""
    _baseName = "Box Lucas C"
    _HTML = 'y = (a / (a-b)) * (exp(-bx) - exp(-ax))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients.

        Note: a == b makes the leading term divide by zero; numpy.seterr()
        turns that into an exception handled by the except branch below.
        """
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]

        try:
            temp = (a / (a-b)) * (numpy.exp(-1.0 * b *x_in) - numpy.exp(-1.0 * a * x_in))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = (a / (a-b)) * (exp(-1.0 * b *x_in) - exp(-1.0 * a * x_in));\n"
        return s
class BoxLucasCShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Box Lucas C model with a shifted x axis:
    y = (a/(a-b)) * (exp(-b*(x-c)) - exp(-a*(x-c))).
    """
    _baseName = "Box Lucas C shifted"
    _HTML = 'y = (a / (a-b)) * (exp(-b(x-c)) - exp(-a(x-c)))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = (a / (a-b)) * (numpy.exp(-1.0 * b * (x_in-c)) - numpy.exp(-1.0 * a * (x_in-c)))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = (a / (a-b)) * (exp(-1.0 * b * (x_in-c)) - exp(-1.0 * a * (x_in-c)));\n"
        return s
class ExtremeValue(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Extreme value peak model:
    y = a * exp(-exp(-((x-b)/c)) - ((x-b)/c) + 1).
    """
    _baseName = "Extreme Value Peak"
    _HTML = 'y = a * exp(-exp(-((x-b)/c))-((x-b)/c)+1.0)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = a * numpy.exp(-1.0 * numpy.exp(-1.0 * ((x_in-b)/c))-((x_in-b)/c) + 1.0)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * exp(-1.0 * exp(-1.0 * ((x_in-b)/c))-((x_in-b)/c) + 1.0);\n"
        return s
class Gaussian(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Gaussian peak model: y = a * exp(-0.5 * ((x-b)/c)^2)."""
    _baseName = "Gaussian Peak"
    _HTML = 'y = a * exp(-0.5 * ((x-b)/c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = a * numpy.exp(-0.5 * numpy.power((x_in-b) / c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * exp(-0.5 * pow((x_in-b) / c, 2.0));\n"
        return s
class Gaussian_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Gaussian peak with a fitted exponent: y = a * exp(-0.5 * ((x-b)/c)^d)."""
    _baseName = "Gaussian Peak Modified"
    _HTML = 'y = a * exp(-0.5 * ((x-b)/c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Only the raw X values are needed from the shared data cache.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached X values for the given coefficients."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]

        try:
            temp = a * numpy.exp(-0.5 * numpy.power((x_in-b) / c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure (e.g. a fractional power of a negative base):
            # return huge residuals so the fitter rejects this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * exp(-0.5 * pow((x_in-b) / c, d));\n"
        return s
class LogNormal(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Log-normal peak model: y = a * exp(-0.5 * ((ln(x)-b)/c)^2)."""
    _baseName = "Log-Normal Peak"
    _HTML = 'y = a * exp(-0.5 * ((ln(x)-b)/c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # x must be strictly positive because the model takes ln(x).
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        # Uses the cached log(X) values rather than raw X.
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model at the cached log(X) values for the given coefficients."""
        x_LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once

        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]

        try:
            temp = a * numpy.exp(-0.5 * numpy.power((x_LogX-b) / c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:
            # Numeric failure: return huge residuals so the fitter rejects
            # this coefficient set.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment that evaluates this model."""
        s = "\ttemp = a * exp(-0.5 * pow((log(x_in)-b) / c, 2.0));\n"
        return s
class LogNormalShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Shifted Log-Normal peak: y = a * exp(-0.5 * ((ln(x-d)-b)/c)^2)."""

    _baseName = "Log-Normal Peak Shifted"
    _HTML = 'y = a * exp(-0.5 * ((ln(x-d)-b)/c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # the log argument depends on fitted coefficient d, so no data-range flags apply
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3]
        try:
            predictions = a * numpy.exp(-0.5 * numpy.power((numpy.log(xData - d) - b) / c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a * exp(-0.5 * pow((log(x_in-d)-b) / c, 2.0));\n"
class LogNormal_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified Log-Normal peak with variable exponent: y = a * exp(-0.5 * ((ln(x)-b)/c)^d)."""

    _baseName = "Log-Normal Peak Modified"
    _HTML = 'y = a * exp(-0.5 * ((ln(x)-b)/c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # ln(x) requires x > 0, so zero and negative independent data are rejected
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = True
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (the log of X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        logX = inDataCacheDictionary['LogX']  # fetch the cached series once
        a, b, c, d = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3]
        try:
            predictions = a * numpy.exp(-0.5 * numpy.power((logX - b) / c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a * exp(-0.5 * pow((log(x_in)-b) / c, d));\n"
class LogNormal_ModifiedShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified, shifted Log-Normal peak: y = a * exp(-0.5 * ((ln(x-e)-b)/c)^d)."""

    _baseName = "Log-Normal Peak Modified Shifted"
    _HTML = 'y = a * exp(-0.5 * ((ln(x-e)-b)/c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd', 'e']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # the log argument depends on fitted coefficient e, so no data-range flags apply
    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d, e = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3], inCoeffs[4]
        try:
            predictions = a * numpy.exp(-0.5 * numpy.power((numpy.log(xData - e) - b) / c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a * exp(-0.5 * pow((log(x_in-e)-b) / c, d));\n"
class Logistic(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Logistic peak: y = 4a * exp(-(x-b)/c) / (1 + exp(-(x-b)/c))."""

    _baseName = "Logistic Peak"
    _HTML = 'y = 4a * exp(-1.0 * (x-b) / c) / (1.0 + exp(-1.0 * (x-b) / c))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            expTerm = numpy.exp(-1.0 * (xData - b) / c)  # shared by numerator and denominator
            predictions = 4.0 * a * expTerm / (1.0 + expTerm)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 4.0 * a * exp(-1.0 * (x_in - b) / c) / (1.0 + exp(-1.0 * (x_in - b) / c));\n"
class LorentzianModifiedPeakA(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Modified Peak A: y = 1.0 / (1.0 + (x-a)^b).

    Bug fix: coefficient 'b' is declared in _coefficientDesignators and shown
    as the exponent in _HTML, but the previous implementation hard-coded the
    power 2.0 (making this model identical to LorentzianPeakA and leaving 'b'
    unused).  Both the numpy evaluation and the generated C++ now use 'b'.
    """
    _baseName = "Lorentzian Modified Peak A"
    _HTML = 'y = 1.0 / (1.0 + (x-a)<sup>b</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        try:
            temp = 1.0 / (1.0 + numpy.power(x_in-a, b))  # exponent b, per _HTML (was hard-coded 2.0)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation (kept in sync with the numpy form)."""
        s = "\ttemp = 1.0 / (1.0 + pow(x_in-a, b));\n"
        return s
class LorentzianModifiedPeakB(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Modified Peak B: y = 1.0 / (a + (x-b)^c)."""

    _baseName = "Lorentzian Modified Peak B"
    _HTML = 'y = 1.0 / (a + (x-b)<sup>c</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            predictions = 1.0 / (a + numpy.power(xData - b, c))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (a + pow(x_in-b, c));\n"
class LorentzianModifiedPeakC(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Modified Peak C: y = a / (b + (x-c)^d)."""

    _baseName = "Lorentzian Modified Peak C"
    _HTML = 'y = a / (b + (x-c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3]
        try:
            predictions = a / (b + numpy.power(xData - c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a/ (b + pow(x_in-c, d));\n"
class LorentzianModifiedPeakD(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Modified Peak D: y = 1.0 / (1.0 + ((x-a)/b)^c)."""

    _baseName = "Lorentzian Modified Peak D"
    _HTML = 'y = 1.0 / (1.0 + ((x-a)/b)<sup>c</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            predictions = 1.0 / (1.0 + numpy.power((xData - a) / b, c))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (1.0 + pow((x_in-a) / b, c));\n"
class LorentzianModifiedPeakE(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Modified Peak E: y = 1.0 / (a + ((x-b)/c)^d)."""

    _baseName = "Lorentzian Modified Peak E"
    _HTML = 'y = 1.0 / (a + ((x-b)/c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3]
        try:
            predictions = 1.0 / (a + numpy.power((xData - b) / c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (a + pow((x_in-b)/c, d));\n"
class LorentzianModifiedPeakF(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Modified Peak F: y = a / (b + ((x-c)/d)^f)."""

    _baseName = "Lorentzian Modified Peak F"
    _HTML = 'y = a / (b + ((x-c)/d)<sup>f</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d, f = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3], inCoeffs[4]
        try:
            predictions = a / (b + numpy.power((xData - c) / d, f))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a / (b + pow((x_in-c)/d, f));\n"
class LorentzianPeakA(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Peak A: y = 1.0 / (1.0 + (x-a)^2)."""

    _baseName = "Lorentzian Peak A"
    _HTML = 'y = 1.0 / (1.0 + (x-a)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a = inCoeffs[0]
        try:
            predictions = 1.0 / (1.0 + numpy.power(xData - a, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (1.0 + pow(x_in-a, 2.0));\n"
class LorentzianPeakB(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Peak B: y = 1.0 / (a + (x-b)^2)."""

    _baseName = "Lorentzian Peak B"
    _HTML = 'y = 1.0 / (a + (x-b)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b = inCoeffs[0], inCoeffs[1]
        try:
            predictions = 1.0 / (a + numpy.power(xData - b, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (a + pow(x_in-b, 2.0));\n"
class LorentzianPeakC(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Peak C: y = a / (b + (x-c)^2)."""

    _baseName = "Lorentzian Peak C"
    _HTML = 'y = a / (b + (x-c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            predictions = a / (b + numpy.power(xData - c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a/ (b + pow(x_in-c, 2.0));\n"
class LorentzianPeakD(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Peak D: y = 1.0 / (1.0 + ((x-a)/b)^2)."""

    _baseName = "Lorentzian Peak D"
    _HTML = 'y = 1.0 / (1.0 + ((x-a)/b)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b = inCoeffs[0], inCoeffs[1]
        try:
            predictions = 1.0 / (1.0 + numpy.power((xData - a) / b, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (1.0 + pow((x_in-a) / b, 2.0));\n"
class LorentzianPeakE(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Peak E: y = 1.0 / (a + ((x-b)/c)^2)."""

    _baseName = "Lorentzian Peak E"
    _HTML = 'y = 1.0 / (a + ((x-b)/c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            predictions = 1.0 / (a + numpy.power((xData - b) / c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 1.0 / (a + pow((x_in-b)/c, 2.0));\n"
class LorentzianPeakF(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Lorentzian Peak F: y = a / (b + ((x-c)/d)^2)."""

    _baseName = "Lorentzian Peak F"
    _HTML = 'y = a / (b + ((x-c)/d)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3]
        try:
            predictions = a / (b + numpy.power((xData - c) / d, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a/ (b + pow((x_in-c)/d, 2.0));\n"
class PseudoVoight(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Pseudo-Voigt peak: weighted sum of a Lorentzian and a Gaussian term.

    y = a * (d * (1/(1+((x-b)/c)^2)) + (1-d) * exp(-0.5 * ((x-b)/c)^2))

    Bug fix: SpecificCodeCPP previously assigned ``s = ...`` twice, so the
    first generated C++ statement (which initializes ``temp``) was discarded
    and the emitted code referenced an undefined ``temp``.  The second
    assignment is now ``s += ...``, matching the two-step numpy evaluation.
    """
    _baseName = "Pseudo-Voight Peak"
    _HTML = 'y = a * (d * (1/(1+((x-b)/c)<sup>2</sup>)) + (1-d) * exp(-0.5 * ((x-b)/c)<sup>2</sup>))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]
        try:
            # shared squared term, then the Lorentzian/Gaussian mix weighted by d
            temp = numpy.power((x_in-b) / c, 2.0)
            temp = a * (d * (1.0 / (1.0 + temp)) + (1.0-d) * numpy.exp(-0.5 * temp))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the two-step model evaluation."""
        s = "\ttemp = pow((x_in-b) / c, 2.0);\n"
        s += "\ttemp = a * (d * (1.0 / (1.0 + temp)) + (1.0-d) * exp(-0.5 * temp));\n"  # was 's =', dropping the first line
        return s
class PseudoVoight_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Modified Pseudo-Voigt peak with a fitted shared exponent f.

    y = a * (d * (1/(1+((x-b)/c)^f)) + (1-d) * exp(-0.5 * ((x-b)/c)^f))

    Bug fixes:
    - SpecificCodeCPP assigned ``s = ...`` twice, discarding the first
      generated C++ statement; the second assignment is now ``s +=``.
    - _HTML showed exponent 'e' on the Lorentzian term, but 'e' is not a
      coefficient of this model — the code applies the single exponent 'f'
      to both terms — so the displayed formula now uses 'f' for both.
    """
    _baseName = "Pseudo-Voight Peak Modified"
    # exponent corrected from 'e' to 'f' to match _coefficientDesignators and the code
    _HTML = 'y = a * (d * (1/(1+((x-b)/c)<sup>f</sup>)) + (1-d) * exp(-0.5 * ((x-b)/c)<sup>f</sup>))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]
        f = inCoeffs[4]
        try:
            # shared power term with fitted exponent f, then the weighted mix
            temp = numpy.power((x_in-b) / c, f)
            temp = a * (d * (1.0 / (1.0 + temp)) + (1.0-d) * numpy.exp(-0.5 * temp))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the two-step model evaluation."""
        s = "\ttemp = pow((x_in-b) / c, f);\n"
        s += "\ttemp = a * (d * (1.0 / (1.0 + temp)) + (1.0-d) * exp(-0.5 * temp));\n"  # was 's =', dropping the first line
        return s
class Pulse(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Pulse peak: y = 4a * exp(-(x-b)/c) * (1 - exp(-(x-b)/c))."""

    _baseName = "Pulse Peak"
    _HTML = 'y = 4a * exp(-(x-b)/c) * (1.0 - exp(-(x-b)/c))'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            expTerm = numpy.exp(-1.0 * (xData - b) / c)  # computed once, used twice
            predictions = 4.0 * a * expTerm * (1.0 - expTerm)
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = 4.0 * a * exp(-1.0 * (x_in-b) / c) * (1.0 - exp(-1.0 * (x_in-b) / c));\n"
class WeibullPeak(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Weibull peak: y = a * exp(-0.5 * (ln(x/b)/c)^2)."""

    _baseName = "Weibull Peak"
    _HTML = 'y = a * exp(-0.5 * (ln(x/b)/c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    # ln(x/b) is undefined at x == 0
    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c = inCoeffs[0], inCoeffs[1], inCoeffs[2]
        try:
            predictions = a * numpy.exp(-0.5 * numpy.power(numpy.log(xData / b) / c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a * exp(-0.5 * pow(log(x_in/b) / c, 2.0));\n"
class WeibullPeakShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Shifted Weibull peak: y = a * exp(-0.5 * (ln((x-d)/b)/c)^2)."""

    _baseName = "Weibull Peak Shifted"
    _HTML = 'y = a * exp(-0.5 * (ln((x-d)/b)/c)<sup>2</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Declare the cached series this model needs (raw X)."""
        cacheList = [[pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []]]
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, cacheList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model over the cached data; returns 1.0E300s on numerical failure."""
        xData = inDataCacheDictionary['X']  # fetch the cached series once
        a, b, c, d = inCoeffs[0], inCoeffs[1], inCoeffs[2], inCoeffs[3]
        try:
            predictions = a * numpy.exp(-0.5 * numpy.power(numpy.log((xData - d) / b) / c, 2.0))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(predictions, inCoeffs, inDataCacheDictionary, self)
        except:  # overflow/invalid math -> huge values so the fitter rejects these coefficients
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """C++ translation of the model evaluation."""
        return "\ttemp = a * exp(-0.5 * pow(log((x_in-d)/b) / c, 2.0));\n"
class WeibullPeak_Modified(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Weibull Peak model with a variable exponent: y = a * exp(-0.5 * (ln(x/b)/c)^d)."""

    _baseName = "Weibull Peak Modified"
    _HTML = 'y = a * exp(-0.5 * (ln(x/b)/c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = True
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Return the data-cache functions this model needs (raw X values only)."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the modified Weibull peak; return a 1.0E300 penalty array on failure."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]
        try:
            temp = a * numpy.exp(-0.5 * numpy.power(numpy.log(x_in / b) / c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except Exception:  # narrowed from bare 'except:' so KeyboardInterrupt/SystemExit propagate
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment implementing this model's equation."""
        s = "\ttemp = a * exp(-0.5 * pow(log(x_in/b) / c, d));\n"
        return s
class WeibullPeak_ModifiedShifted(pyeq2.Model_2D_BaseClass.Model_2D_BaseClass):
    """Weibull Peak model with shift and variable exponent: y = a * exp(-0.5 * (ln((x-e)/b)/c)^d)."""

    _baseName = "Weibull Peak Modified Shifted"
    _HTML = 'y = a * exp(-0.5 * (ln((x-e)/b)/c)<sup>d</sup>)'
    _leftSideHTML = 'y'
    _coefficientDesignators = ['a', 'b', 'c', 'd', 'e']
    _canLinearSolverBeUsedForSSQABS = False

    webReferenceURL = ''

    baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
    autoGenerateOffsetForm = True
    autoGenerateReciprocalForm = True
    autoGenerateInverseForms = True
    autoGenerateGrowthAndDecayForms = True

    independentData1CannotContainZeroFlag = False
    independentData1CannotContainPositiveFlag = False
    independentData1CannotContainNegativeFlag = False
    independentData2CannotContainZeroFlag = False
    independentData2CannotContainPositiveFlag = False
    independentData2CannotContainNegativeFlag = False

    def GetDataCacheFunctions(self):
        """Return the data-cache functions this model needs (raw X values only)."""
        functionList = []
        functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
        return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)

    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the modified, shifted Weibull peak; return a 1.0E300 penalty array on failure."""
        x_in = inDataCacheDictionary['X']  # only need to perform this dictionary look-up once
        a = inCoeffs[0]
        b = inCoeffs[1]
        c = inCoeffs[2]
        d = inCoeffs[3]
        e = inCoeffs[4]
        try:
            temp = a * numpy.exp(-0.5 * numpy.power(numpy.log((x_in - e) / b) / c, d))
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except Exception:  # narrowed from bare 'except:' so KeyboardInterrupt/SystemExit propagate
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

    def SpecificCodeCPP(self):
        """Return the C++ source fragment implementing this model's equation."""
        s = "\ttemp = a * exp(-0.5 * pow(log((x_in-e)/b) / c, d));\n"
        return s
| JMoravec/unkRadnet | fitToCurve/pyeq2/Models_2D/Peak.py | Python | bsd-3-clause | 66,908 |
# Open a sample TIFF in binary mode and inspect its first two bytes.
# NOTE(review): the handle is never closed and both expression results are
# discarded -- looks like interactive/REPL residue; confirm intent before reuse.
im = open('006993_photoA.tif', 'rb')
ord(im.read(1))
chr(ord(im.read(1))) | vincentdavis/corrupt_image_finder | test_images/RandomMutateImage.py | Python | bsd-3-clause | 73 |
"""MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
read_morph_map, get_head_surf, get_meg_helmet_surf,
dig_mri_distances)
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,
events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import Evoked, EvokedArray, read_evokeds, write_evokeds, combine_evoked
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
# deprecations
from .utils import deprecated_alias
# keep `read_selection` importable as a deprecated alias of
# `read_vectorview_selection` (warns on use)
deprecated_alias('read_selection', read_vectorview_selection)
# initialize logging
# None leaves the level untouched unless configured; no log file by default
set_log_level(None, False)
set_log_file()
| kambysese/mne-python | mne/__init__.py | Python | bsd-3-clause | 5,932 |
""" pytest fixtures for test suite """
import pytest
import sqlalchemy as sa
import sqlalchemy.orm as orm
import testing.postgresql
import temporal_sqlalchemy as temporal
from . import models
@pytest.fixture(scope='session')  # pytest.yield_fixture is deprecated (removed in pytest 6.2)
def engine():
    """Create a throw-away PostgreSQL instance; yield a SQLAlchemy engine for it."""
    db = testing.postgresql.Postgresql()
    engine_ = sa.create_engine(db.url())
    yield engine_
    # teardown: release pooled connections before stopping the server
    engine_.dispose()
    db.stop()
@pytest.fixture(scope='session')  # pytest.yield_fixture is deprecated (removed in pytest 6.2)
def connection(engine):  # pylint: disable=redefined-outer-name
    """Session-wide test database connection.

    Installs the Postgres extensions the models rely on, creates the test
    schemas, and creates all tables before yielding the open connection.
    """
    conn = engine.connect()
    for extension in ['uuid-ossp', 'btree_gist']:
        conn.execute("""\
CREATE EXTENSION IF NOT EXISTS "%s"
WITH SCHEMA pg_catalog
""" % extension)
    for schema in [models.SCHEMA, models.TEMPORAL_SCHEMA]:
        conn.execute('CREATE SCHEMA IF NOT EXISTS ' + schema)
    models.basic_metadata.create_all(conn)
    yield conn
    conn.close()
@pytest.yield_fixture(scope="session")
def sessionmaker():
""" yields a temporalized sessionmaker -- per test session """
Session = orm.sessionmaker()
yield temporal.temporal_session(Session)
Session.close_all()
@pytest.fixture()  # pytest.yield_fixture is deprecated (removed in pytest 6.2)
def session(connection: sa.engine.Connection, sessionmaker: orm.sessionmaker):  # pylint: disable=redefined-outer-name
    """Yield a temporalized session per test, wrapped in a rolled-back transaction.

    The rollback undoes everything the test wrote, so tests stay isolated
    without recreating the schema.
    """
    transaction = connection.begin()
    sess = sessionmaker(bind=connection)
    yield sess
    transaction.rollback()
    sess.close()
| CloverHealth/temporal-sqlalchemy | temporal_sqlalchemy/tests/conftest.py | Python | bsd-3-clause | 1,589 |
"""
drslib exceptions
"""
class TranslationError(Exception):
    """Error raised by drslib translation operations."""
| ESGF/esgf-drslib | drslib/exceptions.py | Python | bsd-3-clause | 73 |
import logging
import os
import unittest
from unittest.mock import Mock, patch
from flask import Flask
from flask_appbuilder import AppBuilder, SQLA
from flask_appbuilder.security.manager import AUTH_LDAP
import jinja2
import ldap
from mockldap import MockLdap
from ..const import USERNAME_ADMIN, USERNAME_READONLY
# Verbose root-logger configuration so LDAP auth interactions show up in test output.
logging.basicConfig(format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
logging.getLogger().setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
class LDAPSearchTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """Build the shared MockLdap fixture once for the whole test class."""
    cls.mockldap = MockLdap(cls.directory)
@classmethod
def tearDownClass(cls):
    """Drop the shared MockLdap fixture."""
    del cls.mockldap
def setUp(self):
    """Start MockLdap, build a fresh Flask app configured for LDAP auth, and attach the DB."""
    # start MockLdap
    self.mockldap.start()
    self.ldapobj = self.mockldap["ldap://localhost/"]
    # start Flask
    self.app = Flask(__name__)
    self.app.jinja_env.undefined = jinja2.StrictUndefined
    self.app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get(
        "SQLALCHEMY_DATABASE_URI"
    )
    self.app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    self.app.config["AUTH_TYPE"] = AUTH_LDAP
    self.app.config["AUTH_LDAP_SERVER"] = "ldap://localhost/"
    self.app.config["AUTH_LDAP_UID_FIELD"] = "uid"
    self.app.config["AUTH_LDAP_FIRSTNAME_FIELD"] = "givenName"
    self.app.config["AUTH_LDAP_LASTNAME_FIELD"] = "sn"
    self.app.config["AUTH_LDAP_EMAIL_FIELD"] = "email"
    # start Database
    self.db = SQLA(self.app)
def tearDown(self):
    """Delete users registered during the test, then stop MockLdap and tear down app/DB."""
    # Remove test user
    user_alice = self.appbuilder.sm.find_user("alice")
    if user_alice:
        self.db.session.delete(user_alice)
        self.db.session.commit()
    user_natalie = self.appbuilder.sm.find_user("natalie")
    if user_natalie:
        self.db.session.delete(user_natalie)
        self.db.session.commit()
    # stop MockLdap
    self.mockldap.stop()
    del self.ldapobj
    # stop Flask
    self.app = None
    # stop Flask-AppBuilder
    self.appbuilder = None
    # stop Database
    self.db.session.remove()
    self.db = None
def assertOnlyDefaultUsers(self):
    """Assert that only the built-in admin and read-only users exist."""
    expected = [USERNAME_ADMIN, USERNAME_READONLY]
    actual = [account.username for account in self.appbuilder.sm.get_all_users()]
    self.assertEqual(actual, expected)
# ----------------
# LDAP Directory
# ----------------
# In-memory directory served by MockLdap: base org, a users OU and a groups
# OU, a bind user (admin), two regular users (alice, natalie), and two groups
# used by the role-mapping tests.
top = ("o=test", {"o": ["test"]})
ou_users = ("ou=users,o=test", {"ou": ["users"]})
ou_groups = ("ou=groups,o=test", {"ou": ["groups"]})
user_admin = (
    "uid=admin,ou=users,o=test",
    {"uid": ["admin"], "userPassword": ["admin_password"]},
)
user_alice = (
    "uid=alice,ou=users,o=test",
    {
        "uid": ["alice"],
        "userPassword": ["alice_password"],
        "memberOf": [b"cn=staff,ou=groups,o=test"],
        "givenName": [b"Alice"],
        "sn": [b"Doe"],
        "email": [b"alice@example.com"],
    },
)
user_natalie = (
    "uid=natalie,ou=users,o=test",
    {
        "uid": ["natalie"],
        "userPassword": ["natalie_password"],
        "memberOf": [
            b"cn=staff,ou=groups,o=test",
            b"cn=admin,ou=groups,o=test",
            b"cn=exec,ou=groups,o=test",
        ],
        "givenName": [b"Natalie"],
        "sn": [b"Smith"],
        "email": [b"natalie@example.com"],
    },
)
group_admins = (
    "cn=admins,ou=groups,o=test",
    {"cn": ["admins"], "member": [user_admin[0]]},
)
group_staff = (
    "cn=staff,ou=groups,o=test",
    {"cn": ["staff"], "member": [user_alice[0]]},
)
directory = dict(
    [
        top,
        ou_users,
        ou_groups,
        user_admin,
        user_alice,
        user_natalie,
        group_admins,
        group_staff,
    ]
)
# ----------------
# LDAP Queries
# ----------------
# Expected (method, args, kwargs) triples, compared against
# ldapobj.methods_called(with_args=True) in the tests below.
call_initialize = ("initialize", tuple(["ldap://localhost/"]), {})
call_set_option = ("set_option", tuple([ldap.OPT_REFERRALS, 0]), {})
call_bind_admin = (
    "simple_bind_s",
    tuple(["uid=admin,ou=users,o=test", "admin_password"]),
    {},
)
call_bind_alice = (
    "simple_bind_s",
    tuple(["uid=alice,ou=users,o=test", "alice_password"]),
    {},
)
call_bind_natalie = (
    "simple_bind_s",
    tuple(["uid=natalie,ou=users,o=test", "natalie_password"]),
    {},
)
call_search_alice = (
    "search_s",
    tuple(["ou=users,o=test", 2, "(uid=alice)", ["givenName", "sn", "email"]]),
    {},
)
call_search_alice_memberof = (
    "search_s",
    tuple(
        [
            "ou=users,o=test",
            2,
            "(uid=alice)",
            ["givenName", "sn", "email", "memberOf"],
        ]
    ),
    {},
)
call_search_natalie_memberof = (
    "search_s",
    tuple(
        [
            "ou=users,o=test",
            2,
            "(uid=natalie)",
            ["givenName", "sn", "email", "memberOf"],
        ]
    ),
    {},
)
call_search_alice_filter = (
    "search_s",
    tuple(
        [
            "ou=users,o=test",
            2,
            "(&(memberOf=cn=staff,ou=groups,o=test)(uid=alice))",
            ["givenName", "sn", "email"],
        ]
    ),
    {},
)
# ----------------
# Unit Tests
# ----------------
def test___search_ldap(self):
    """
    LDAP: test `_search_ldap` method

    Binds indirectly as the admin user, then verifies the search returns
    alice's DN/attributes and that exactly the expected LDAP calls happened.
    """
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # prepare `con` object
    con = ldap.initialize("ldap://localhost/")
    sm._ldap_bind_indirect(ldap, con)
    # run `_search_ldap` method
    user_dn, user_attributes = sm._search_ldap(ldap, con, "alice")
    # validate - search returned expected data
    self.assertEqual(user_dn, self.user_alice[0])
    self.assertEqual(user_attributes["givenName"], self.user_alice[1]["givenName"])
    self.assertEqual(user_attributes["sn"], self.user_alice[1]["sn"])
    self.assertEqual(user_attributes["email"], self.user_alice[1]["email"])
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [self.call_initialize, self.call_bind_admin, self.call_search_alice],
    )
def test___search_ldap_filter(self):
    """
    LDAP: test `_search_ldap` method (with AUTH_LDAP_SEARCH_FILTER)
    """
    # MockLdap needs non-bytes for search filters, so we patch `memberOf`
    # to a string, only for this test
    with patch.dict(
        self.directory[self.user_alice[0]],
        {
            "memberOf": [
                i.decode() for i in self.directory[self.user_alice[0]]["memberOf"]
            ]
        },
    ):
        # a dedicated MockLdap instance serves the patched directory
        _mockldap = MockLdap(self.directory)
        _mockldap.start()
        _ldapobj = _mockldap["ldap://localhost/"]
        self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
        self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
        self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
        self.app.config[
            "AUTH_LDAP_SEARCH_FILTER"
        ] = "(memberOf=cn=staff,ou=groups,o=test)"
        self.appbuilder = AppBuilder(self.app, self.db.session)
        sm = self.appbuilder.sm
        # prepare `con` object
        con = ldap.initialize("ldap://localhost/")
        sm._ldap_bind_indirect(ldap, con)
        # run `_search_ldap` method
        user_dn, user_info = sm._search_ldap(ldap, con, "alice")
        # validate - search returned expected data
        self.assertEqual(user_dn, self.user_alice[0])
        self.assertEqual(user_info["givenName"], self.user_alice[1]["givenName"])
        self.assertEqual(user_info["sn"], self.user_alice[1]["sn"])
        self.assertEqual(user_info["email"], self.user_alice[1]["email"])
        # validate - expected LDAP methods were called
        self.assertEqual(
            _ldapobj.methods_called(with_args=True),
            [
                self.call_initialize,
                self.call_bind_admin,
                self.call_search_alice_filter,
            ],
        )
def test___search_ldap_with_search_referrals(self):
    """
    LDAP: test `_search_ldap` method w/returned search referrals

    Uses a mocked connection whose search result interleaves referral
    entries (dn=None) with the real user entry; referrals must be skipped.
    """
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # run `_search_ldap` method w/mocked ldap connection
    mock_con = Mock()
    mock_con.search_s.return_value = [
        (
            None,
            [
                "ldap://ForestDnsZones.mycompany.com/"
                "DC=ForestDnsZones,DC=mycompany,DC=com"
            ],
        ),
        self.user_alice,
        (None, ["ldap://mycompany.com/CN=Configuration,DC=mycompany,DC=com"]),
    ]
    user_dn, user_attributes = sm._search_ldap(ldap, mock_con, "alice")
    # validate - search returned expected data
    self.assertEqual(user_dn, self.user_alice[0])
    self.assertEqual(user_attributes["givenName"], self.user_alice[1]["givenName"])
    self.assertEqual(user_attributes["sn"], self.user_alice[1]["sn"])
    self.assertEqual(user_attributes["email"], self.user_alice[1]["email"])
    mock_con.search_s.assert_called()
def test__missing_credentials(self):
    """
    LDAP: test login flow for - missing credentials

    Every empty/None username-password combination must fail without any
    LDAP traffic and without creating users.
    """
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # validate - login failure (missing username)
    self.assertIsNone(sm.auth_user_ldap(None, "password"))
    self.assertIsNone(sm.auth_user_ldap("", "password"))
    # validate - login failure (missing password)
    self.assertIsNone(sm.auth_user_ldap("username", None))
    self.assertIsNone(sm.auth_user_ldap("username", ""))
    # validate - login failure (missing username/password)
    self.assertIsNone(sm.auth_user_ldap(None, None))
    self.assertIsNone(sm.auth_user_ldap("", None))
    self.assertIsNone(sm.auth_user_ldap("", ""))
    self.assertIsNone(sm.auth_user_ldap(None, ""))
    # validate - no users were created
    self.assertOnlyDefaultUsers()
    # validate - expected LDAP methods were called
    self.assertEqual(self.ldapobj.methods_called(with_args=True), [])
def test__inactive_user(self):
    """
    LDAP: test login flow for - inactive user

    A registered but deactivated user is rejected before any LDAP call.
    """
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # register a user
    new_user = sm.add_user(
        username="alice",
        first_name="Alice",
        last_name="Doe",
        email="alice@example.com",
        role=[],
    )
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # set user inactive
    new_user.active = False
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was not allowed to log in
    self.assertIsNone(user)
    # validate - expected LDAP methods were called
    self.assertEqual(self.ldapobj.methods_called(with_args=True), [])
def test__multi_group_user_mapping_to_same_role(self):
    """
    LDAP: test login flow for - user in multiple groups mapping to same role

    natalie's three groups map onto overlapping roles; the resulting role
    list must be de-duplicated (Admin, Public, User).
    """
    self.app.config["AUTH_ROLES_MAPPING"] = {
        "cn=staff,ou=groups,o=test": ["Admin"],
        "cn=admin,ou=groups,o=test": ["Admin", "User"],
        "cn=exec,ou=groups,o=test": ["Public"],
    }
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
    self.app.config["AUTH_USER_REGISTRATION"] = True
    self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # add User role
    sm.add_role("User")
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("natalie", "natalie_password")
    # validate - user was allowed to log in
    self.assertIsInstance(user, sm.user_model)
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # validate - user was given the correct roles
    self.assertListEqual(
        user.roles,
        [sm.find_role("Admin"), sm.find_role("Public"), sm.find_role("User")],
    )
    # validate - user was given the correct attributes (read from LDAP)
    self.assertEqual(user.first_name, "Natalie")
    self.assertEqual(user.last_name, "Smith")
    self.assertEqual(user.email, "natalie@example.com")
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [
            self.call_initialize,
            self.call_set_option,
            self.call_bind_natalie,
            self.call_search_natalie_memberof,
        ],
    )
def test__direct_bind__unregistered(self):
    """
    LDAP: test login flow for - direct bind - unregistered user

    Self-registration is enabled, so a successful direct bind creates the
    user with the configured registration role.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
    self.app.config["AUTH_USER_REGISTRATION"] = True
    self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was allowed to log in
    self.assertIsInstance(user, sm.user_model)
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # validate - user was given the AUTH_USER_REGISTRATION_ROLE role
    self.assertEqual(user.roles, [sm.find_role("Public")])
    # validate - user was given the correct attributes (read from LDAP)
    self.assertEqual(user.first_name, "Alice")
    self.assertEqual(user.last_name, "Doe")
    self.assertEqual(user.email, "alice@example.com")
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [
            self.call_initialize,
            self.call_set_option,
            self.call_bind_alice,
            self.call_search_alice,
        ],
    )
def test__direct_bind__unregistered__no_self_register(self):
    """
    LDAP: test login flow for - direct bind - unregistered user - no self-registration

    With registration disabled the login is rejected before any LDAP call.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
    self.app.config["AUTH_USER_REGISTRATION"] = False
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was not allowed to log in
    self.assertIsNone(user)
    # validate - no users were registered
    self.assertOnlyDefaultUsers()
    # validate - expected LDAP methods were called
    self.assertEqual(self.ldapobj.methods_called(with_args=True), [])
def test__direct_bind__unregistered__no_search(self):
    """
    LDAP: test login flow for - direct bind - unregistered user - no ldap search

    Registration requires an LDAP search to read attributes, so login must
    fail after the bind when AUTH_LDAP_SEARCH is unset.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = None
    self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
    self.app.config["AUTH_USER_REGISTRATION"] = True
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was NOT allowed to log in (because registration requires search)
    self.assertIsNone(user)
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [self.call_initialize, self.call_set_option, self.call_bind_alice],
    )
def test__direct_bind__registered(self):
    """
    LDAP: test login flow for - direct bind - registered user

    A pre-registered user logs in with a direct bind followed by the
    attribute search.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # register a user
    new_user = sm.add_user(  # noqa
        username="alice",
        first_name="Alice",
        last_name="Doe",
        email="alice@example.com",
        role=[],
    )
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was allowed to log in
    self.assertIsInstance(user, sm.user_model)
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [
            self.call_initialize,
            self.call_set_option,
            self.call_bind_alice,
            self.call_search_alice,
        ],
    )
def test__direct_bind__registered__no_search(self):
    """
    LDAP: test login flow for - direct bind - registered user - no ldap search

    An already-registered user does not need the attribute search, so the
    bind alone is enough to log in.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = None
    self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # register a user
    new_user = sm.add_user(  # noqa
        username="alice",
        first_name="Alice",
        last_name="Doe",
        email="alice@example.com",
        role=[],
    )
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was allowed to log in (because they are already registered)
    self.assertIsInstance(user, sm.user_model)
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [self.call_initialize, self.call_set_option, self.call_bind_alice],
    )
def test__indirect_bind__unregistered(self):
    """
    LDAP: test login flow for - indirect bind - unregistered user

    Admin bind + search locate the user DN, then a second bind checks the
    user's password; the user is self-registered with the default role.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.app.config["AUTH_USER_REGISTRATION"] = True
    self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was allowed to log in
    self.assertIsInstance(user, sm.user_model)
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # validate - user was given the AUTH_USER_REGISTRATION_ROLE role
    self.assertListEqual(user.roles, [sm.find_role("Public")])
    # validate - user was given the correct attributes (read from LDAP)
    self.assertEqual(user.first_name, "Alice")
    self.assertEqual(user.last_name, "Doe")
    self.assertEqual(user.email, "alice@example.com")
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [
            self.call_initialize,
            self.call_set_option,
            self.call_bind_admin,
            self.call_search_alice,
            self.call_bind_alice,
        ],
    )
def test__indirect_bind__unregistered__no_self_register(self):
    """
    LDAP: test login flow for - indirect bind - unregistered user - no self-registration

    With registration disabled, login fails before any LDAP call is made.
    """  # noqa
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.app.config["AUTH_USER_REGISTRATION"] = False
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was not allowed to log in
    self.assertIsNone(user)
    # validate - no users were registered
    self.assertOnlyDefaultUsers()
    # validate - expected LDAP methods were called
    self.assertEqual(self.ldapobj.methods_called(with_args=True), [])
def test__indirect_bind__unregistered__no_search(self):
    """
    LDAP: test login flow for - indirect bind - unregistered user - no ldap search

    Indirect bind cannot locate the user DN without a search base, so login
    fails after the admin bind.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = None
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.app.config["AUTH_USER_REGISTRATION"] = True
    self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was NOT allowed to log in
    # (because indirect bind requires search)
    self.assertIsNone(user)
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [self.call_initialize, self.call_set_option, self.call_bind_admin],
    )
def test__indirect_bind__registered(self):
    """
    LDAP: test login flow for - indirect bind - registered user

    A pre-registered user logs in via admin bind, search, then user bind.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # register a user
    new_user = sm.add_user(  # noqa
        username="alice",
        first_name="Alice",
        last_name="Doe",
        email="alice@example.com",
        role=[],
    )
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was allowed to log in
    self.assertIsInstance(user, sm.user_model)
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [
            self.call_initialize,
            self.call_set_option,
            self.call_bind_admin,
            self.call_search_alice,
            self.call_bind_alice,
        ],
    )
def test__indirect_bind__registered__no_search(self):
    """
    LDAP: test login flow for - indirect bind - registered user - no ldap search

    Even a registered user cannot log in via indirect bind without a search
    base, since the user DN cannot be located.
    """
    self.app.config["AUTH_LDAP_SEARCH"] = None
    self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
    self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
    self.appbuilder = AppBuilder(self.app, self.db.session)
    sm = self.appbuilder.sm
    # validate - no users are registered
    self.assertOnlyDefaultUsers()
    # register a user
    new_user = sm.add_user(  # noqa
        username="alice",
        first_name="Alice",
        last_name="Doe",
        email="alice@example.com",
        role=[],
    )
    # validate - user was registered
    self.assertEqual(len(sm.get_all_users()), 3)
    # attempt login
    user = sm.auth_user_ldap("alice", "alice_password")
    # validate - user was NOT allowed to log in
    # (because indirect bind requires search)
    self.assertIsNone(user)
    # validate - expected LDAP methods were called
    self.assertEqual(
        self.ldapobj.methods_called(with_args=True),
        [self.call_initialize, self.call_set_option, self.call_bind_admin],
    )
def test__direct_bind__unregistered__single_role(self):
"""
LDAP: test login flow for - direct bind - unregistered user - single role mapping
"""
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["User"],
"cn=admins,ou=groups,o=test": ["Admin"],
}
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
self.app.config["AUTH_USER_REGISTRATION"] = True
self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# validate - user was given the correct roles
self.assertListEqual(user.roles, [sm.find_role("Public"), sm.find_role("User")])
# validate - user was given the correct attributes (read from LDAP)
self.assertEqual(user.first_name, "Alice")
self.assertEqual(user.last_name, "Doe")
self.assertEqual(user.email, "alice@example.com")
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_alice,
self.call_search_alice_memberof,
],
)
def test__direct_bind__unregistered__multi_role(self):
"""
LDAP: test login flow for - direct bind - unregistered user - multi role mapping
"""
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["Admin", "User"]
}
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
self.app.config["AUTH_USER_REGISTRATION"] = True
self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# validate - user was given the correct roles
self.assertListEqual(
user.roles,
[sm.find_role("Admin"), sm.find_role("Public"), sm.find_role("User")],
)
# validate - user was given the correct attributes (read from LDAP)
self.assertEqual(user.first_name, "Alice")
self.assertEqual(user.last_name, "Doe")
self.assertEqual(user.email, "alice@example.com")
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_alice,
self.call_search_alice_memberof,
],
)
def test__direct_bind__registered__multi_role__no_role_sync(self):
"""
LDAP: test login flow for - direct bind - registered user - multi role mapping - no login role-sync
""" # noqa
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["Admin", "User"]
}
self.app.config["AUTH_ROLES_SYNC_AT_LOGIN"] = False
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# register a user
new_user = sm.add_user( # noqa
username="alice",
first_name="Alice",
last_name="Doe",
email="alice@example.com",
role=[],
)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was given no roles
self.assertListEqual(user.roles, [])
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_alice,
self.call_search_alice_memberof,
],
)
def test__direct_bind__registered__multi_role__with_role_sync(self):
"""
LDAP: test login flow for - direct bind - registered user - multi role mapping - with login role-sync
""" # noqa
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["Admin", "User"]
}
self.app.config["AUTH_ROLES_SYNC_AT_LOGIN"] = True
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_USERNAME_FORMAT"] = "uid=%s,ou=users,o=test"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# register a user
new_user = sm.add_user( # noqa
username="alice",
first_name="Alice",
last_name="Doe",
email="alice@example.com",
role=[],
)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was given the correct roles
self.assertListEqual(user.roles, [sm.find_role("Admin"), sm.find_role("User")])
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_alice,
self.call_search_alice_memberof,
],
)
def test__indirect_bind__unregistered__single_role(self):
"""
LDAP: test login flow for - indirect bind - unregistered user - single role mapping
""" # noqa
self.app.config["AUTH_ROLES_MAPPING"] = {"cn=staff,ou=groups,o=test": ["User"]}
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
self.app.config["AUTH_USER_REGISTRATION"] = True
self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# validate - user was given the correct roles
self.assertListEqual(user.roles, [sm.find_role("Public"), sm.find_role("User")])
# validate - user was given the correct attributes (read from LDAP)
self.assertEqual(user.first_name, "Alice")
self.assertEqual(user.last_name, "Doe")
self.assertEqual(user.email, "alice@example.com")
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_admin,
self.call_search_alice_memberof,
self.call_bind_alice,
],
)
def test__indirect_bind__unregistered__multi_role(self):
"""
LDAP: test login flow for - indirect bind - unregistered user - multi role mapping
"""
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["Admin", "User"]
}
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
self.app.config["AUTH_USER_REGISTRATION"] = True
self.app.config["AUTH_USER_REGISTRATION_ROLE"] = "Public"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# validate - user was given the correct roles
self.assertListEqual(
user.roles,
[sm.find_role("Admin"), sm.find_role("Public"), sm.find_role("User")],
)
# validate - user was given the correct attributes (read from LDAP)
self.assertEqual(user.first_name, "Alice")
self.assertEqual(user.last_name, "Doe")
self.assertEqual(user.email, "alice@example.com")
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_admin,
self.call_search_alice_memberof,
self.call_bind_alice,
],
)
def test__indirect_bind__registered__multi_role__no_role_sync(self):
"""
LDAP: test login flow for - indirect bind - registered user - multi role mapping - no login role-sync
""" # noqa
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["Admin", "User"]
}
self.app.config["AUTH_ROLES_SYNC_AT_LOGIN"] = False
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# register a user
new_user = sm.add_user( # noqa
username="alice",
first_name="Alice",
last_name="Doe",
email="alice@example.com",
role=[],
)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was given no roles
self.assertListEqual(user.roles, [])
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_admin,
self.call_search_alice_memberof,
self.call_bind_alice,
],
)
def test__indirect_bind__registered__multi_role__with_role_sync(self):
"""
LDAP: test login flow for - indirect bind - registered user - multi role mapping - with login role-sync
""" # noqa
self.app.config["AUTH_ROLES_MAPPING"] = {
"cn=staff,ou=groups,o=test": ["Admin", "User"]
}
self.app.config["AUTH_ROLES_SYNC_AT_LOGIN"] = True
self.app.config["AUTH_LDAP_SEARCH"] = "ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_USER"] = "uid=admin,ou=users,o=test"
self.app.config["AUTH_LDAP_BIND_PASSWORD"] = "admin_password"
self.appbuilder = AppBuilder(self.app, self.db.session)
sm = self.appbuilder.sm
# add User role
sm.add_role("User")
# validate - no users are registered
self.assertOnlyDefaultUsers()
# register a user
new_user = sm.add_user( # noqa
username="alice",
first_name="Alice",
last_name="Doe",
email="alice@example.com",
role=[],
)
# validate - user was registered
self.assertEqual(len(sm.get_all_users()), 3)
# attempt login
user = sm.auth_user_ldap("alice", "alice_password")
# validate - user was allowed to log in
self.assertIsInstance(user, sm.user_model)
# validate - user was given the correct roles
self.assertListEqual(user.roles, [sm.find_role("Admin"), sm.find_role("User")])
# validate - expected LDAP methods were called
self.assertEqual(
self.ldapobj.methods_called(with_args=True),
[
self.call_initialize,
self.call_set_option,
self.call_bind_admin,
self.call_search_alice_memberof,
self.call_bind_alice,
],
)
| dpgaspar/Flask-AppBuilder | flask_appbuilder/tests/security/test_auth_ldap.py | Python | bsd-3-clause | 41,672 |
import wx
import wx.calendar
from wx.lib.masked import TimeCtrl
from wx.lib.agw import hypertreelist as HTL
from datetime import datetime, time
from lib import Task, DATA, PRIORITIES, DEFAULT_PRIORITY
from decorators import requires_selection
# wx command identifiers for the custom toolbar actions
ID_ADD_TASK = 1000
ID_ADD_SUBTASK = 1010
ID_COLLAPSE = 1020
ID_EXPAND = 1030
# When True, tasks marked complete are hidden from the tree rather than
# shown with a checked box.
HIDE_COMPLETE = False
class TaskList(HTL.HyperTreeList):
    """
    This is the widget that houses the tasks
    """
    def __init__(self, parent):
        # parent is the enclosing frame; it is also called back directly
        # (parent.EditTask) when a task is double-clicked.
        self.parent = parent
        style = wx.SUNKEN_BORDER | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.TR_HIDE_ROOT | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_ROW_LINES | wx.TR_EDIT_LABELS #| wx.TR_COLUMN_LINES | HTL.TR_AUTO_CHECK_PARENT
        HTL.HyperTreeList.__init__(self, parent, -1, style=style)
        # Columns: completion %, priority, summary (main), due date.
        self.AddColumn('%')
        self.AddColumn('!')
        self.AddColumn('Task')
        self.AddColumn('Due')
        self.SetMainColumn(2)  # the 'Task' column carries the tree structure
        self.root = self.AddRoot('Tasks')  # invisible because of wx.TR_HIDE_ROOT
        self.GetMainWindow().Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
        self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnEndEdit)
        self.Bind(HTL.EVT_TREE_ITEM_CHECKED, self.OnItemToggled)
    def EvaluateCompleteness(self, item=None):
        """Determines how complete various task trees are"""
        # Not implemented yet; invoked after checkbox toggles so the '%'
        # column can eventually be recomputed.
        pass
    def OnEndEdit(self, evt):
        # An in-place label edit finished: persist the new summary on the
        # task object attached to the tree item (if any).
        print 'Save task?', evt.GetLabel(), evt.GetItem()
        task = evt.GetItem().GetData()
        if task:
            task.summary = evt.GetLabel()
    def OnLeftDClick(self, evt):
        # Double-clicking a task's label opens the full task editor dialog.
        pt = evt.GetPosition()
        item, flags, column = self.HitTest(pt)
        if item and (flags & wx.TREE_HITTEST_ONITEMLABEL):
            #self.EditLabel(item)
            self.parent.EditTask(item)
        evt.Skip()
    def OnItemToggled(self, evt):
        # Sync a checkbox toggle back into the task's is_complete flag.
        item = evt.GetItem()
        task = item.GetData()
        if task:
            task.is_complete = item.IsChecked()
            if HIDE_COMPLETE:
                item.Hide(task.is_complete)
        self.EvaluateCompleteness()
    def SetTasks(self, tasks):
        # Populate the tree from an iterable of root-level tasks, deferring
        # the repaint until every task has been inserted.
        for task in tasks:
            self.AddTask(task, refresh=False)
        self.Refresh()
        self.ExpandAll()
    def AddTask(self, task, parent=None, refresh=True):
        # Append `task` (and, recursively, its children) under `parent`;
        # defaults to the hidden root for top-level tasks.
        if parent is None:
            parent = self.root
        task.parent = parent
        item = self.AppendItem(parent, task.summary, ct_type=1)  # ct_type=1: checkbox item
        item.SetData(task)
        for child in task.children:
            self.AddTask(child, item, refresh=refresh)
        if refresh:
            self.Refresh()
    def Refresh(self, erase=True, rect=None, parent=None):
        """Refreshes the tree when a task has changed"""
        # NOTE(review): this overrides wx.Window.Refresh with extra
        # semantics; each recursion level also calls super().Refresh(),
        # which looks redundant but is harmless -- confirm before changing.
        if parent is None:
            parent = self.root
        for child in parent.GetChildren():
            task = child.GetData()
            if task:
                self.SetItemText(child, '0%', 0)  # completion % not computed yet
                self.SetItemText(child, str(task._priority), 1)
                self.SetItemText(child, task.summary, 2)
                child.Check(task.is_complete)
                if HIDE_COMPLETE:
                    child.Hide(task.is_complete)
                if task.due_date:
                    self.SetItemText(child, task.due_date.strftime('%H:%M %m/%d/%y'), 3)
                else:
                    self.SetItemText(child, '', 3)
            self.Refresh(parent=child)
        super(TaskList, self).Refresh()
class TaskInfoDialog(wx.Dialog):
    """Modal dialog for creating or editing a single task.

    Pass an existing task via the ``task`` keyword argument to edit it;
    omit it to create a new task on ``GetTask()``.
    """
    def __init__(self, *args, **kwds):
        # Pop our custom kwarg before handing the rest to wx.Dialog.
        self.task = kwds.pop('task', None)
        kwds['style'] = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.THICK_FRAME
        wx.Dialog.__init__(self, *args, **kwds)
        self.panel = wx.Panel(self, -1)
        self.txtSummary = wx.TextCtrl(self.panel, -1, "")
        self.lblNotes = wx.StaticText(self.panel, -1, _('Notes:'), style=wx.ALIGN_RIGHT)
        self.txtNotes = wx.TextCtrl(self.panel, -1, "", style=wx.TE_MULTILINE|wx.TE_RICH|wx.TE_WORDWRAP)
        self.lblPriority = wx.StaticText(self.panel, -1, _('Priority:'), style=wx.ALIGN_RIGHT)
        # Priority labels ordered by their numeric priority key.
        choices = [p[1] for p in sorted(PRIORITIES.items(), key=lambda p: p[0])]
        self.cmbPriority = wx.ComboBox(self.panel, -1, choices=choices, style=wx.CB_DROPDOWN)
        self.chkIsComplete = wx.CheckBox(self.panel, -1, _('Is Complete'))
        self.lblDateDue = wx.StaticText(self.panel, -1, _('Due:'), style=wx.ALIGN_RIGHT)
        self.chkIsDue = wx.CheckBox(self.panel, -1, _('Has due date'))
        self.calDueDate = wx.calendar.CalendarCtrl(self.panel, -1)
        self.txtTime = TimeCtrl(self.panel, id=-1,
                                value=datetime.now().strftime('%X'),
                                style=wx.TE_PROCESS_TAB,
                                validator=wx.DefaultValidator,
                                format='24HHMMSS',
                                fmt24hr=True,
                                displaySeconds=True,
                                )
        self.__set_properties()
        self.__do_layout()
        self.chkIsDue.Bind(wx.EVT_CHECKBOX, self.ToggleDueDate)
        self.txtSummary.SetFocus()
        if self.task is not None:
            self.SetTask(self.task)
    def __set_properties(self):
        """Apply initial widget state (title, defaults, enabled-ness)."""
        self.SetTitle(_('Task Information'))
        self.cmbPriority.SetValue(PRIORITIES[DEFAULT_PRIORITY])
        # Due-date controls stay disabled until 'Has due date' is checked.
        self.calDueDate.Enable(False)
        self.txtTime.Enable(False)
    def __do_layout(self):
        """Lay out all widgets in a two-column grid plus OK/Cancel row."""
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.FlexGridSizer(5, 2, 5, 5)
        lblSubject = wx.StaticText(self.panel, -1, _('Summary:'))
        sizer.Add(lblSubject, 0, wx.EXPAND, 0)
        sizer.Add(self.txtSummary, 0, wx.ALL|wx.EXPAND, 0)
        sizer.Add(self.lblNotes, 0, wx.EXPAND, 0)
        sizer.Add(self.txtNotes, 0, wx.EXPAND, 0)
        sizer.Add(self.lblPriority, 0, wx.EXPAND, 0)
        sizer.Add(self.cmbPriority, 0, wx.EXPAND, 0)
        sizer.Add((20, 20), 0, 0, 0)
        sizer.Add(self.chkIsComplete, 0, 0, 0)
        sizer.Add(self.lblDateDue, 0, wx.ALIGN_RIGHT, 0)
        sizer.Add(self.chkIsDue, 0, 0, 0)
        sizer.Add((20, 20), 0, 0, 0)
        sizer.Add(self.calDueDate, 0, 0, 0)
        sizer.Add((20, 20), 0, 0, 0)
        sizer.Add(self.txtTime, 0, 0, 0)
        self.panel.SetSizer(sizer)
        sizer.AddGrowableRow(1)  # notes row absorbs extra height
        sizer.AddGrowableCol(1)  # input column absorbs extra width
        mainSizer.Add(self.panel, 1, wx.ALL|wx.EXPAND, 5)
        mainSizer.AddF(self.CreateStdDialogButtonSizer(wx.OK|wx.CANCEL),
                       wx.SizerFlags(0).Expand().Border(wx.BOTTOM|wx.RIGHT, 5))
        self.SetSizer(mainSizer)
        mainSizer.Fit(self)
        self.Layout()
        self.Centre()
        size = (290, 450)
        self.SetMinSize(size)
        self.SetSize(size)
    def ToggleDueDate(self, evt):
        """Enable/disable the date+time pickers to match 'Has due date'."""
        en = self.chkIsDue.IsChecked()
        self.calDueDate.Enable(en)
        self.txtTime.Enable(en)
    def GetTask(self):
        """Return the task with the dialog's current values applied.

        Creates a new Task when the dialog was opened without one.
        """
        if self.task is None:
            self.task = Task()
        if self.chkIsDue.IsChecked():
            due = self.calDueDate.PyGetDate()
            tm = self.txtTime.GetValue()
            # The time control may or may not include seconds; try the
            # long format first.  (Was a bare `except:` -- strptime raises
            # ValueError on a format mismatch, so catch only that.)
            try:
                tm = datetime.strptime(tm, '%H:%M:%S').time()
            except ValueError:
                tm = datetime.strptime(tm, '%H:%M').time()
            due = datetime.combine(due, tm)
        else:
            due = None
        self.task.summary = self.txtSummary.GetValue()
        self.task.is_complete = self.chkIsComplete.IsChecked()
        self.task.due_date = due
        self.task.priority = self.cmbPriority.GetValue()
        self.task.notes = self.txtNotes.GetValue()
        return self.task
    def SetTask(self, task):
        """Load ``task``'s fields into the dialog widgets."""
        self.txtSummary.SetValue(task.summary)
        self.txtNotes.SetValue(task.notes)
        self.cmbPriority.SetStringSelection(task.priority)
        self.chkIsComplete.SetValue(task.is_complete)
        if task.due_date is not None:
            self.chkIsDue.SetValue(True)
            self.calDueDate.PySetDate(task.due_date)
            self.txtTime.SetValue(task.due_date.strftime('%X'))
        self.task = task
class TreeDoFrame(wx.Frame):
    """
    This is the main TreeDo window, where your tasks are laid out before you.
    """
    def __init__(self):
        # NOTE(review): assumes the gettext `_` function has been installed
        # globally by the launcher -- confirm.
        wx.Frame.__init__(self, None, -1, title=_('TreeDo'), size=(350, 500))
        self.SetMinSize((300, 300))
        self.CenterOnParent()
        # Build the toolbar with one bitmap button per action.
        self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
        self.toolbar.SetToolBitmapSize((24, 24))
        save_img = wx.Bitmap('res/save.png', wx.BITMAP_TYPE_PNG)
        add_img = wx.Bitmap('res/add.png', wx.BITMAP_TYPE_PNG)
        add_sub_img = wx.Bitmap('res/add_subtask.png', wx.BITMAP_TYPE_PNG)
        collapse_img = wx.Bitmap('res/collapse.png', wx.BITMAP_TYPE_PNG)
        expand_img = wx.Bitmap('res/expand.png', wx.BITMAP_TYPE_PNG)
        delete_img = wx.Bitmap('res/delete.png', wx.BITMAP_TYPE_PNG)
        self.toolbar.AddSimpleTool(wx.ID_SAVE, save_img, _('Save Task List'), _('Save the task list to the hard drive'))
        self.toolbar.AddSimpleTool(ID_ADD_TASK, add_img, _('Add Task'), _('Create a new task'))
        self.toolbar.AddSimpleTool(ID_ADD_SUBTASK, add_sub_img, _('Add Sub-Task'), _('Create a new subtask'))
        #self.toolbar.AddSimpleTool(ID_COLLAPSE, collapse_img, _('Collapse'), _('Collapse all tasks'))
        self.toolbar.AddSimpleTool(ID_EXPAND, expand_img, _('Expand'), _('Expand all tasks'))
        self.toolbar.AddSimpleTool(wx.ID_DELETE, delete_img, _('Delete'), _('Delete this task'))
        self.Bind(wx.EVT_TOOL, self.OnToolClick)
        self.toolbar.Realize()
        # The task tree fills the whole client area.
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.tree = TaskList(self)
        sizer.Add(self.tree, 1, wx.EXPAND)
        self.Bind(wx.EVT_SIZE, self.UpdateColumnWidths)
        self.tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.ToggleToolbarButtons)
        self.tree.SetTasks(DATA.get_list())
        self.ToggleToolbarButtons()
    def UpdateColumnWidths(self, evt=None):
        # Keep the fixed-width columns stable and let 'Task' take the rest.
        width, height = self.GetSize()
        self.tree.SetColumnWidth(0, 40)
        self.tree.SetColumnWidth(1, 20)
        self.tree.SetColumnWidth(2, width - 180)
        self.tree.SetColumnWidth(3, 100)
        evt.Skip()
    def ToggleToolbarButtons(self, evt=None):
        """Enable or disable certain toolbar buttons based on the selection"""
        # Sub-task and delete only make sense on a real (non-root) item.
        enable_sub_btns = (self.tree.GetSelection() != self.tree.root)
        self.toolbar.EnableTool(ID_ADD_SUBTASK, enable_sub_btns)
        self.toolbar.EnableTool(wx.ID_DELETE, enable_sub_btns)
        if evt:
            evt.Skip()
    def AddTask(self, parent=None):
        """Allows the user to add a new task"""
        taskDlg = TaskInfoDialog(self, -1, _('Task Info'))
        if taskDlg.ShowModal() == wx.ID_OK:
            task = taskDlg.GetTask()
            self.tree.AddTask(task, parent)
    @requires_selection
    def AddSubTask(self):
        """Allows the user to add a new task to the selected task"""
        parent = self.tree.GetSelection()
        return self.AddTask(parent)
    @requires_selection
    def EditSelectedTask(self):
        """Allows the user to edit the selected task"""
        item = self.tree.GetSelection()
        self.EditTask(item)
    def EditTask(self, item):
        """Allows the user to edit a task's information"""
        task = item.GetData()
        taskDlg = TaskInfoDialog(self, -1, _('Task Info'), task=task)
        if taskDlg.ShowModal() == wx.ID_OK:
            task = taskDlg.GetTask()
            item.SetData(task)
            self.tree.Refresh()
    @requires_selection
    def DeleteSelectedTask(self):
        """Allows the user to delete the selected task"""
        item = self.tree.GetSelection()
        self.DeleteTask(item)
    def DeleteTask(self, item):
        """Allows the user to delete a task"""
        if item.HasChildren():
            print 'Deleting item with children'
            self.tree.DeleteChildren(item)
        self.tree.Delete(item)
    def OnToolClick(self, evt):
        # Dispatch a toolbar click to the matching action.
        eid = evt.GetId()
        if eid == ID_ADD_TASK:
            self.AddTask()
        elif eid == ID_ADD_SUBTASK:
            self.AddSubTask()
        elif eid == ID_COLLAPSE:
            # NOTE(review): this branch is currently unreachable (the
            # Collapse button is commented out above) and the call pattern
            # item.Collapse(self.tree) looks inverted -- verify against the
            # HyperTreeList API before re-enabling.
            for item in self.tree.GetChildren():
                item.Collapse(self.tree)
        elif eid == ID_EXPAND:
            self.tree.ExpandAll()
        elif eid == wx.ID_SAVE:
            self.Persist()
        elif eid == wx.ID_DELETE:
            self.DeleteSelectedTask()
    def Persist(self):
        """Persists the task list to the filesystem"""
        DATA.persist(self.tree.root)
| codekoala/treedo | treedo/gui.py | Python | bsd-3-clause | 12,628 |
# -*- coding: utf-8 -*-
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Standard Library
from datetime import datetime
# Third Party
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D # Load 3d plots capabilities
# First Party
from metaopt.plugin.plugin import Plugin
NUMBER_OF_SAMPLES = 200
COLORMAP = cm.jet
REVERSED_COLORMAP = cm.jet_r
class VisualizeBestFitnessPlugin(Plugin):
    """Tracks the best fitness seen so far and plots optimization progress."""

    def __init__(self):
        self.best_fitnesses = []   # best-so-far raw fitness values, one per result
        self.timestamps = []       # seconds elapsed since setup, parallel list
        self.start_time = None
        self.current_best = None
        self.return_spec = None

    def setup(self, f, param_spec, return_spec):
        del f, param_spec  # unused, but required by the plugin interface
        self.return_spec = return_spec
        if self.start_time is None:
            self.start_time = datetime.now()

    def on_result(self, invocation):
        # Update the running minimum and record it with a timestamp.
        result = invocation.current_result
        if self.current_best is None or result < self.current_best:
            self.current_best = result
        self.best_fitnesses.append(self.current_best.raw_values)
        elapsed = datetime.now() - self.start_time
        self.timestamps.append(elapsed.total_seconds())

    def show_fitness_invocations_plot(self):
        """Show a fitness--invocations plot"""
        axes = plt.figure().add_subplot(111)
        axes.set_xlabel("Number of Invocations")
        axes.set_ylabel(self.get_y_label())
        axes.plot(self.best_fitnesses)
        plt.show()

    def show_fitness_time_plot(self):
        """Show a fitness--time plot"""
        axes = plt.figure().add_subplot(111)
        axes.set_xlabel("Time")
        axes.set_ylabel(self.get_y_label())
        axes.plot(self.timestamps, self.best_fitnesses)
        plt.show()

    def get_y_label(self):
        """Name of the first return value, used as the y-axis label."""
        return self.return_spec.return_values[0]["name"]
| cigroup-ol/metaopt | metaopt/plugin/visualization/best_fitness.py | Python | bsd-3-clause | 1,969 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import inselect
# Pinned runtime requirements.
REQUIREMENTS = [
    # TODO How to specify OpenCV? 'cv2>=3.1.0',
    'numpy>=1.11.1,<1.12',
    'Pillow>=3.4.2,<3.5',
    'python-dateutil>=2.6.0,<2.7',
    'pytz>=2016.7',
    # Was 'PyYAML>=3.12,<3.2': under PEP 440 ordering 3.2 < 3.12, so that
    # range is unsatisfiable; '<3.13' matches the pattern of the other pins.
    'PyYAML>=3.12,<3.13',
    'schematics>=1.1.1,<1.2',
    'scikit-learn>=0.18.1,<0.19',
    'scipy>=0.18.1,<0.19',
    'unicodecsv>=0.14.1,<0.15',
]

# Console-script module names under inselect/scripts/.
SCRIPTS = ('export_metadata', 'ingest', 'read_barcodes', 'save_crops', 'segment')


# Keyword arguments shared by both the setuptools and cx_Freeze setups.
setup_data = {
    'name': 'inselect',
    'version': inselect.__version__,
    'author': (u'Lawrence Hudson, Alice Heaton, Pieter Holtzhausen, '
               u'Stéfan van der Walt'),
    'author_email': 'l.hudson@nhm.ac.uk',
    'maintainer': 'Lawrence Hudson',
    'maintainer_email': 'l.hudson@nhm.ac.uk',
    'url': 'https://github.com/NaturalHistoryMuseum/inselect/',
    'license': 'Modified BSD',
    'description': inselect.__doc__,
    'long_description': inselect.__doc__,
    'packages': [
        'inselect', 'inselect.gui', 'inselect.gui.plugins',
        'inselect.gui.views', 'inselect.gui.views.boxes', 'inselect.lib',
        'inselect.lib.templates', 'inselect.scripts',
    ],
    'include_package_data': True,
    'test_suite': 'inselect.tests',
    'scripts': ['inselect/scripts/{0}.py'.format(script) for script in SCRIPTS],
    'install_requires': REQUIREMENTS,
    'extras_require': {
        'gui': [
            'ExifRead>=2.1.2', 'humanize>=0.5.1', 'psutil>=5.0.0',
            'PyQt5>=5.6.0'
        ],
        'barcodes': ['gouda>=0.1.13', 'pylibdmtx>=0.1.6', 'pyzbar>=0.1.3'],
        'windows': ['pywin32>=220'],
        'development': ['coveralls>=1.1', 'mock>=2.0.0', 'nose>=1.3.7'],
    },
    'entry_points': {
        'gui_scripts':
            ['inselect = inselect.gui.app:main'],
        'console_scripts':
            ['{0} = inselect.scripts.{0}:main'.format(script) for script in SCRIPTS],
    },
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Topic :: Utilities',
        # A missing trailing comma here used to fuse the next two entries
        # into one invalid classifier string via implicit concatenation.
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Programming Language :: Python :: 3.5',
    ],
}
def setuptools_setup():
    """setuptools setup"""
    # Plain setuptools build/install path (non-Windows-installer builds).
    from setuptools import setup
    setup(**setup_data)
def _qt_files(site_packages):
"""Returns a list of tuples (src, dest) of Qt dependencies to be installed.
Elements are instances of Path.
site_packages should be an instance of Path to the site-packages directory.
IF we leave cx_Freeze to do its thing then the entirety of PyQt5, Qt5 and
uic are included in the installer. The only way to avoid horrible bloat is
to hand-tune which files we include.
This whole system is fucked beyond belief.
"""
from pathlib import Path
return [
# Qt DLLs
(
site_packages.joinpath('PyQt5/Qt/bin').joinpath(dep),
dep
)
for dep in ('Qt5Core.dll', 'Qt5Gui.dll', 'Qt5Widgets.dll')
] + [
# Qt plugins
(
site_packages.joinpath('PyQt5/Qt/plugins/platforms').joinpath(dep),
Path('platforms').joinpath(dep)
)
for dep in ('qwindows.dll',)
] + [
# PyQt extension modules
(
site_packages.joinpath('PyQt5').joinpath(dep),
Path('PyQt5').joinpath(dep)
)
for dep in ('__init__.py', 'Qt.pyd', 'QtCore.pyd', 'QtGui.pyd', 'QtWidgets.pyd')
]
def cx_setup():
    """cx_Freeze setup. Used for building Windows installers.

    Hand-tunes the frozen file set: cx_Freeze's automatic dependency
    detection both misses binary dependencies (MKL, ctypes-loaded DLLs)
    and over-includes large packages, so include/exclude lists are
    maintained explicitly below.
    """
    import scipy
    from pathlib import Path
    from distutils.sysconfig import get_python_lib
    from cx_Freeze import setup, Executable
    from pylibdmtx import pylibdmtx
    from pyzbar import pyzbar
    # Useful paths
    environment_root = Path(sys.executable).parent
    site_packages = Path(get_python_lib())
    project_root = Path(__file__).parent
    # Files as tuples (source, dest)
    include_files = [
        # Evil, evil, evil
        # cx_Freeze breaks pywintypes and pythoncom on Python 3.5
        # https://bitbucket.org/anthony_tuininga/cx_freeze/issues/194/error-with-frozen-executable-using-35-and
        (site_packages.joinpath('win32/lib/pywintypes.py'), 'pywintypes.py'),
        (site_packages.joinpath('pythoncom.py'), 'pythoncom.py'),
        # Binary dependencies that are not detected
        (environment_root.joinpath('Library/bin/mkl_core.dll'), 'mkl_core.dll'),
        (environment_root.joinpath('Library/bin/mkl_intel_thread.dll'), 'mkl_intel_thread.dll'),
        (environment_root.joinpath('Library/bin/libiomp5md.dll'), 'libiomp5md.dll'),
        # Stylesheet
        (project_root.joinpath('inselect/gui/inselect.qss'), 'inselect.qss'),
    ] + [
        # DLLs that are not detected because they are loaded by ctypes
        (dep._name, Path(dep._name).name)
        for dep in pylibdmtx.EXTERNAL_DEPENDENCIES + pyzbar.EXTERNAL_DEPENDENCIES
    ] + _qt_files(site_packages)
    # Convert instances of Path to strs
    include_files = [(str(source), str(dest)) for source, dest in include_files]
    # Directories as strings
    include_files += [
        # Fixes scipy freeze
        # http://stackoverflow.com/a/32822431/1773758
        str(Path(scipy.__file__).parent),
    ]
    # Packages to exclude.
    exclude_packages = [
        str(p.relative_to(site_packages)).replace('\\', '.') for p in
        site_packages.rglob('*/tests')
    ]
    setup(
        name=setup_data['name'],
        version=setup_data['version'],
        options={
            'build_exe': {
                'packages':
                    setup_data.get('packages', []) + [
                        'urllib', 'sklearn.neighbors', 'win32com.gen_py',
                        'win32timezone',
                    ],
                'excludes': [
                    # '_bz2',  # Required by sklearn
                    '_decimal', '_elementtree', '_hashlib', '_lzma',
                    '_ssl', 'curses',
                    'distutils', 'email', 'http', 'lib2to3', 'mock', 'nose',
                    'PyQt5',
                    # 'pydoc',  # Required by sklearn
                    'tcl', 'Tkinter', 'ttk', 'Tkconstants',
                    # 'unittest',  # Required by numpy.core.multiarray
                    'win32com.HTML', 'win32com.test', 'win32evtlog', 'win32pdh',
                    'win32trace', 'win32ui', 'win32wnet',
                    'xml', 'xmlrpc',
                    'inselect.tests',
                ] + exclude_packages,
                'includes': [
                ],
                'include_files': include_files,
                'include_msvcr': True,
                'optimize': 2,
            },
            'bdist_msi': {
                'upgrade_code': '{fe2ed61d-cd5e-45bb-9d16-146f725e522f}'
            }
        },
        executables=[
            Executable(
                script='inselect/scripts/inselect.py',
                targetName='inselect.exe',
                icon='icons/inselect.ico',
                base='Win32GUI',
                shortcutName='Inselect',  # See http://stackoverflow.com/a/15736406
                shortcutDir='ProgramMenuFolder'
            )
        ] + [
            Executable(
                script='inselect/scripts/{0}.py'.format(script),
                targetName='{0}.exe'.format(script),
                icon='icons/inselect.ico',
                base='Console'
            )
            for script in SCRIPTS
        ],
    )
# Entry point: bail out early on unsupported interpreters, then pick the
# Windows-installer build (cx_Freeze) or the regular setuptools build.
if sys.version_info < (3, 5):
    sys.exit('Only Python >= 3.5 is supported')
elif 'bdist_msi' in sys.argv:
    cx_setup()
else:
    setuptools_setup()
| NaturalHistoryMuseum/inselect | setup.py | Python | bsd-3-clause | 7,651 |
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" periodic table data, **obsolete**
now that the C++ code exposes an interface to the internal PT stuff,
this data is mostly obsolete
"""
# Num Symb RCov RBO RVdW Max Bnd Mass nval
periodicData=\
"""
0 X 0.0 0.0 0.0 0 0.000 0
1 H 0.230 0.330 1.200 1 1.008 1
2 He 0.930 0.700 1.400 0 4.003 2
3 Li 0.680 1.230 1.820 1 6.941 1
4 Be 0.350 0.900 1.700 2 9.012 2
5 B 0.830 0.820 2.080 3 10.812 3
6 C 0.680 0.770 1.950 4 12.011 4
7 N 0.680 0.700 1.850 4 14.007 5
8 O 0.680 0.660 1.700 2 15.999 6
9 F 0.640 0.611 1.730 1 18.998 7
10 Ne 1.120 0.700 1.540 0 20.180 8
11 Na 0.970 1.540 2.270 1 22.990 1
12 Mg 1.100 1.360 1.730 2 24.305 2
13 Al 1.350 1.180 2.050 6 26.982 3
14 Si 1.200 0.937 2.100 6 28.086 4
15 P 0.750 0.890 2.080 5 30.974 5
16 S 1.020 1.040 2.000 6 32.067 6
17 Cl 0.990 0.997 1.970 1 35.453 7
18 Ar 1.570 1.740 1.880 0 39.948 8
19 K 1.330 2.030 2.750 1 39.098 1
20 Ca 0.990 1.740 1.973 2 40.078 2
21 Sc 1.440 1.440 1.700 6 44.956 3
22 Ti 1.470 1.320 1.700 6 47.867 4
23 V 1.330 1.220 1.700 6 50.942 5
24 Cr 1.350 1.180 1.700 6 51.996 6
25 Mn 1.350 1.170 1.700 8 54.938 7
26 Fe 1.340 1.170 1.700 6 55.845 8
27 Co 1.330 1.160 1.700 6 58.933 9
28 Ni 1.500 1.150 1.630 6 58.693 10
29 Cu 1.520 1.170 1.400 6 63.546 11
30 Zn 1.450 1.250 1.390 6 65.39 2
31 Ga 1.220 1.260 1.870 3 69.723 3
32 Ge 1.170 1.188 1.700 4 72.61 4
33 As 1.210 1.200 1.850 3 74.922 5
34 Se 1.220 1.170 1.900 2 78.96 6
35 Br 1.210 1.167 2.100 1 79.904 7
36 Kr 1.910 1.910 2.020 0 83.80 8
37 Rb 1.470 2.160 1.700 1 85.468 1
38 Sr 1.120 1.910 1.700 2 87.62 2
39 Y 1.780 1.620 1.700 6 88.906 3
40 Zr 1.560 1.450 1.700 6 91.224 4
41 Nb 1.480 1.340 1.700 6 92.906 5
42 Mo 1.470 1.300 1.700 6 95.94 6
43 Tc 1.350 1.270 1.700 6 98.0 7
44 Ru 1.400 1.250 1.700 6 101.07 8
45 Rh 1.450 1.250 1.700 6 102.906 9
46 Pd 1.500 1.280 1.630 6 106.42 10
47 Ag 1.590 1.340 1.720 6 107.868 11
48 Cd 1.690 1.480 1.580 6 112.412 2
49 In 1.630 1.440 1.930 3 114.818 3
50 Sn 1.460 1.385 2.170 4 118.711 4
51 Sb 1.460 1.400 2.200 3 121.760 5
52 Te 1.470 1.378 2.060 2 127.60 6
53 I 1.400 1.387 2.150 1 126.904 7
54 Xe 1.980 1.980 2.160 0 131.29 8
55 Cs 1.670 2.350 1.700 1 132.905 1
56 Ba 1.340 1.980 1.700 2 137.328 2
57 La 1.870 1.690 1.700 12 138.906 3
58 Ce 1.830 1.830 1.700 6 140.116 4
59 Pr 1.820 1.820 1.700 6 140.908 3
60 Nd 1.810 1.810 1.700 6 144.24 4
61 Pm 1.800 1.800 1.700 6 145.0 5
62 Sm 1.800 1.800 1.700 6 150.36 6
63 Eu 1.990 1.990 1.700 6 151.964 7
64 Gd 1.790 1.790 1.700 6 157.25 8
65 Tb 1.760 1.760 1.700 6 158.925 9
66 Dy 1.750 1.750 1.700 6 162.50 10
67 Ho 1.740 1.740 1.700 6 164.930 11
68 Er 1.730 1.730 1.700 6 167.26 12
69 Tm 1.720 1.720 1.700 6 168.934 13
70 Yb 1.940 1.940 1.700 6 173.04 14
71 Lu 1.720 1.720 1.700 6 174.967 15
72 Hf 1.570 1.440 1.700 6 178.49 4
73 Ta 1.430 1.340 1.700 6 180.948 5
74 W 1.370 1.300 1.700 6 183.84 6
75 Re 1.350 1.280 1.700 6 186.207 7
76 Os 1.370 1.260 1.700 6 190.23 8
77 Ir 1.320 1.270 1.700 6 192.217 9
78 Pt 1.500 1.300 1.720 6 195.078 10
79 Au 1.500 1.340 1.660 6 196.967 11
80 Hg 1.700 1.490 1.550 6 200.59 2
81 Tl 1.550 1.480 1.960 3 204.383 3
82 Pb 1.540 1.480 2.020 4 207.2 4
83 Bi 1.540 1.450 1.700 3 208.980 5
84 Po 1.680 1.460 1.700 2 209.0 6
85 At 1.700 1.450 1.700 1 210.0 7
86 Rn 2.400 2.400 1.700 0 222.0 8
87 Fr 2.000 2.000 1.700 1 223.0 1
88 Ra 1.900 1.900 1.700 2 226.0 2
89 Ac 1.880 1.880 1.700 6 227.0 3
90 Th 1.790 1.790 1.700 6 232.038 4
91 Pa 1.610 1.610 1.700 6 231.036 3
92 U 1.580 1.580 1.860 6 238.029 4
93 Np 1.550 1.550 1.700 6 237.0 5
94 Pu 1.530 1.530 1.700 6 244.0 6
95 Am 1.510 1.070 1.700 6 243.0 7
96 Cm 1.500 0.000 1.700 6 247.0 8
97 Bk 1.500 0.000 1.700 6 247.0 9
98 Cf 1.500 0.000 1.700 6 251.0 10
99 Es 1.500 0.000 1.700 6 252.0 11
100 Fm 1.500 0.000 1.700 6 257.0 12
101 Md 1.500 0.000 1.700 6 258.0 13
102 No 1.500 0.000 1.700 6 259.0 14
103 Lr 1.500 0.000 1.700 6 262.0 15
"""
# Parse the whitespace-delimited periodicData table into two lookup dicts:
#   nameTable[symbol] -> (atomic number, mass, n valence electrons,
#                         max bonds, RCov, RBO, RVdW)
#   numTable[atomic number] -> (symbol, <same six properties>)
nameTable = {}
numTable = {}
for line in periodicData.split('\n'):
  fields = line.split()
  if len(fields) > 1:
    atomicNum = int(fields[0])
    symbol = fields[1]
    props = (float(fields[6]), int(fields[7]), int(fields[5]),
             float(fields[2]), float(fields[3]), float(fields[4]))
    nameTable[symbol] = (atomicNum,) + props
    numTable[atomicNum] = (symbol,) + props
# a list of metals (transition metals, semi-metals, lanthanides and actinides),
# expressed as inclusive atomic-number ranges
metalRanges = ["13", "21-32", "39-51", "57-84", "89-103"]
metalNumList = []
for entry in metalRanges:
  t = entry.split('-')
  start = int(t[0])
  # single numbers (no '-') denote a one-element range
  end = int(t[1]) if len(t) > 1 else start
  if start > end:
    start, end = end, start
  metalNumList += range(start, end + 1)
# element symbols for the metals; a list comprehension (rather than map())
# so the result is a real list on Python 3 as well as Python 2
metalNames = [numTable[x][0] for x in metalNumList]
# these are from table 4 of Rev. Comp. Chem. vol 2, 367-422, (1991)
# the order is [alpha(SP),alpha(SP2),alpha(SP3)]
# where values are not known, None has been inserted
# Keys are element symbols; used for Hall-Kier kappa shape descriptors.
hallKierAlphas = {
  'H': [0.0, 0.0, 0.0],  # removes explicit H's from consideration in the shape
  'C': [-0.22, -0.13, 0.0],
  'N': [-0.29, -0.20, -0.04],
  'O': [None, -0.20, -0.04],
  'F': [None, None, -0.07],
  'P': [None, 0.30, 0.43],
  'S': [None, 0.22, 0.35],
  'Cl': [None, None, 0.29],
  'Br': [None, None, 0.48],
  'I': [None, None, 0.73]
}
| jandom/rdkit | rdkit/Chem/PeriodicTable.py | Python | bsd-3-clause | 5,870 |
from communities.models import Community, SendToOption
from datetime import datetime, date, time
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from ocd.formfields import HTMLArea, OCSplitDateTime, OCCheckboxSelectMultiple
from users.models import OCUser, Membership
import floppyforms as forms
from haystack.forms import SearchForm, ModelSearchForm
class EditUpcomingMeetingForm(forms.ModelForm):
    """Edit the basic details (title, location, scheduled time and
    background text) of a community's upcoming meeting."""

    class Meta:
        model = Community
        fields = (
            'upcoming_meeting_title',
            'upcoming_meeting_location',
            'upcoming_meeting_scheduled_at',
            # 'voting_ends_at',
            'upcoming_meeting_comments',
        )

        widgets = {
            'upcoming_meeting_title': forms.TextInput,
            'upcoming_meeting_scheduled_at': OCSplitDateTime,
            'upcoming_meeting_location': forms.TextInput,
            # 'voting_ends_at': OCSplitDateTime,
            'upcoming_meeting_comments': HTMLArea,
        }

    def __init__(self, *args, **kwargs):
        # Localized field labels are assigned here rather than on the model.
        super(EditUpcomingMeetingForm, self).__init__(*args, **kwargs)
        self.fields['upcoming_meeting_title'].label = _('Title')
        self.fields['upcoming_meeting_scheduled_at'].label = _('Scheduled at')
        self.fields['upcoming_meeting_location'].label = _('Location')
        self.fields['upcoming_meeting_comments'].label = _('Background')

    """
    removed this function as we don't include voting_end_time in the form any more.
    # ----------------------------------------------------------------------------
    def clean(self):
        #prevent voting end time from illegal values (past time,
        #time after meeting schedule)
        try:
            voting_ends_at = self.cleaned_data['voting_ends_at']
        except KeyError:
            voting_ends_at = None
        try:
            meeting_time = self.cleaned_data['upcoming_meeting_scheduled_at']
        except KeyError:
            meeting_time = None
        if voting_ends_at:
            if voting_ends_at <= timezone.now():
                raise forms.ValidationError(_("End voting time cannot be set to the past"))
            if meeting_time and voting_ends_at > meeting_time:
                raise forms.ValidationError(_("End voting time cannot be set to after the meeting time"))
        return self.cleaned_data
    """

    def save(self):
        # NOTE(review): voting_ends_at is pinned to a fixed far-future
        # placeholder (noon 2025-01-01) since the field was removed from the
        # form (see commented-out clean() above) — confirm this is intended.
        c = super(EditUpcomingMeetingForm, self).save()
        c.voting_ends_at = datetime.combine(date(2025, 1, 1), time(12, 0, 0))
        c.save()
        return c
class PublishUpcomingMeetingForm(forms.ModelForm):
    """Pick the recipient group when publishing the upcoming meeting;
    no Community model fields are edited directly."""

    # Recipient group, coerced to the int codes of SendToOption.choices.
    send_to = forms.TypedChoiceField(label=_("Send to"), coerce=int,
                                     choices=SendToOption.choices,
                                     widget=forms.RadioSelect)

    class Meta:
        model = Community
        fields = ()
class EditUpcomingMeetingSummaryForm(forms.ModelForm):
    """Edit only the free-text summary of the upcoming meeting, using a
    rich-text widget."""

    class Meta:
        model = Community
        fields = (
            'upcoming_meeting_summary',
        )

        widgets = {
            'upcoming_meeting_summary': HTMLArea,
        }
class UpcomingMeetingParticipantsForm(forms.ModelForm):
    """Select upcoming-meeting participants: regular members, board
    members (offered via the extra `board` field) and free-text guests."""

    # Board members are chosen separately from regular members; the choice
    # values are user ids, populated in __init__.
    board = forms.MultipleChoiceField(widget=OCCheckboxSelectMultiple, required=False)

    class Meta:
        model = Community
        fields = (
            'upcoming_meeting_participants',
            'upcoming_meeting_guests',
        )

        widgets = {
            'upcoming_meeting_participants': OCCheckboxSelectMultiple,
            'upcoming_meeting_guests': forms.Textarea,
        }

    def __init__(self, *args, **kwargs):
        super(UpcomingMeetingParticipantsForm, self).__init__(*args, **kwargs)
        # ids of users already marked as participants of this meeting
        participants = self.instance.upcoming_meeting_participants.values_list(
            'id', flat=True)
        board_in = []
        board_choices = []
        for b in self.instance.get_board_members():
            board_choices.append((b.id, b.display_name,))
            if b.id in participants:
                # pre-check board members who are already participants
                board_in.append(b.id)
        self.fields['board'].choices = board_choices
        self.initial['board'] = board_in
        # restrict the participants widget to actual community members
        self.fields['upcoming_meeting_participants'].queryset = self.instance.get_members()
        self.fields['upcoming_meeting_participants'].label = ""
class CommunitySearchForm(ModelSearchForm):
    """Haystack search form for communities; currently inherits the stock
    ModelSearchForm behavior unchanged."""
    pass

#     def search(self):
#         # First, store the SearchQuerySet received from other processing.
#         sqs = super(DateRangeSearchForm, self).search()
#
#         if not self.is_valid():
#             return self.no_query_found()
#
#         return sqs
| hasadna/OpenCommunity | src/communities/forms.py | Python | bsd-3-clause | 4,808 |
# -*- coding: utf8
from __future__ import division, print_function
from collections import defaultdict
from matplotlib import pyplot as plt
from radar import radar_factory
from scipy import stats
from scripts import initialize_matplotlib
import numpy as np
import plac
import sys
# Short display abbreviations for the video referrer categories used in
# the SVM feature files (see load_svm_file).
REFERRER_ABBRV = {
    'EXTERNAL':'EXT.',
    'FEATURED':'FEAT.',
    'INTERNAL':'INT.',
    'MOBILE':'MOBI.',
    'SEARCH':'SEAR.',
    'SOCIAL':'SOC.',
    'VIRAL':'VIR.'}

# Normalized short names for video categories; variant spellings collapse
# to a single abbreviation (e.g. 'Film', 'Movies' and 'Trailers' -> 'Film').
CATEG_ABBRV = {
    'Autos&Vehicles':'Vehi.',
    'Autos':'Vehi.',
    'Comedy':'Com.',
    'Education':'Edu.',
    'Entertainment':'Ent.',
    'Film':'Film',
    'Film&Animation':'Film',
    'Games':'Game',
    'Gaming':'Game',
    'Howto':'Howto',
    'Howto&Style':'Howto',
    'Movies':'Film',
    'Music':'Music',
    'NULL':'-',
    'News':'News',
    'News&Politics':'News',
    'Nonprofit':'Nonprof.',
    'Nonprofits&Activism':'Nonprof.',
    'People&Blogs':'People',
    'People':'People',
    'Pets&Animals':'Pets',
    'Pets':'Pets',
    'Animals':'Pets',
    'Science&Technology':'Sci.',
    'Science':'Sci.',
    'Tech':'Sci.',
    'Shows':'Show',
    'Sports':'Sport',
    'Trailers':'Film',
    'Travel&Events':'Travel',
    'Travel':'Travel'}
def load_text_file(features_fpath, col_to_use, classes):
    """Tally video categories per cluster from a whitespace-delimited file.

    Parameters
    ----------
    features_fpath : str
        Path to the text file, one video per line.
    col_to_use : int
        Index of the whitespace-separated column holding the category name.
    classes : sequence
        Cluster assignment for each video, indexed by line number.

    Returns
    -------
    (to_plot, sum_classes, labels) where ``to_plot[class][label]`` counts
    videos with that label in that cluster, ``sum_classes[class]`` is the
    number of counted videos in the cluster, and ``labels`` is the sorted
    list of category abbreviations seen.
    """
    to_plot = defaultdict(lambda: defaultdict(float))
    sum_classes = defaultdict(float)

    labels = set()
    with open(features_fpath) as features_file:
        for curr_line, line in enumerate(features_file):
            spl = line.split()
            if col_to_use >= len(spl):
                continue  # malformed / short line: skip it

            # Reuse the already-split line instead of splitting it again;
            # tokens from str.split() carry no surrounding whitespace, so
            # the original trailing .strip() was a no-op and is dropped.
            data = CATEG_ABBRV[spl[col_to_use]]
            class_num = classes[curr_line]

            labels.add(data)
            sum_classes[class_num] += 1
            to_plot[class_num][data] += 1

    return to_plot, sum_classes, sorted(labels)
def load_svm_file(features_fpath, classes):
    """Accumulate per-cluster referrer view counts from an SVM-style
    feature file.

    Referrer column positions are hard-coded in `col_dict`; the last
    column of each data row is treated as that row's total and added to
    the cluster's normalizing sum.  Returns (to_plot, sum_classes,
    labels) in the same shape as `load_text_file`.
    """
    col_dict = {
        'EXTERNAL':13,
        'FEATURED':14,
        'INTERNAL':15,
        'MOBILE':16,
        'SEARCH':17,
        'SOCIAL':18,
        'VIRAL':19
    }

    to_plot = defaultdict(lambda: defaultdict(float))
    sum_classes = defaultdict(float)

    labels = set()
    with open(features_fpath) as features_file:
        curr_line = 0
        for line in features_file:
            if '#' in line:
                # Header row: echo the column mapping for manual inspection.
                for key, id_ in col_dict.items():
                    print(id_, key, line.split()[id_])
                continue

            class_num = classes[curr_line]
            sum_classes[class_num] += float(line.split()[-1])

            for ref_name, col_id in col_dict.items():
                ref_abbrv = REFERRER_ABBRV[ref_name]
                val = float(line.split()[col_id])
                present = val > 0

                if present:
                    labels.add(ref_abbrv)
                    to_plot[class_num][ref_abbrv] += val
            curr_line += 1

    return to_plot, sum_classes, sorted(labels)
def generate_data_plot(to_plot, sum_classes, labels, classes):
    """Yield one (fractions, color, class_num) triple per cluster.

    `fractions` lists, in `labels` order, the share of the cluster's mass
    that each label holds (to_plot[class][label] / sum_classes[class]).
    The color is a fixed per-cluster plotting color.

    Changes vs. the original: `xrange` replaced with `range` (works
    identically on Python 2 and also runs on Python 3), and the dead
    `total` accumulator, which was never read, was removed.
    """
    num_classes = len(set(classes))
    colors = ['b', 'g', 'm', 'y']  # one color per cluster; assumes <= 4 clusters
    for class_num in range(num_classes):
        color = colors[class_num]
        data_plot = [to_plot[class_num][label] / sum_classes[class_num]
                     for label in labels]
        yield data_plot, color, class_num
def radar_plot(labels, data_plots, out_fpath):
    """Radar (spider) chart with one axis per label and one filled
    polygon per cluster, saved to `out_fpath`.

    `data_plots` yields (data, color, class_num) triples as produced by
    `generate_data_plot`.
    """
    theta = radar_factory(len(labels))  # evenly spaced axis angles
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='radar')
    for data_plot, color, class_num in data_plots:
        ax.plot(theta, data_plot, color=color, label='C%d'%class_num)
        ax.fill(theta, data_plot, facecolor=color, alpha=0.25)
    ax.set_varlabels(labels)
    plt.legend(frameon=False, ncol=4, bbox_to_anchor=(0.5, -0.15),
               loc='lower center')
    plt.savefig(out_fpath)
def chisq(counts, expected_prob):
    """Return the p-value of a one-way chi-square goodness-of-fit test.

    `counts` holds the observed count per category; `expected_prob` the
    expected fraction of the total for each category.  The fractions are
    rescaled to the observed total before testing.
    """
    observed = np.asarray(counts)
    expected = np.asarray(expected_prob) * observed.sum()
    _, p_value = stats.chisquare(observed, expected)
    return p_value
def allchisq(to_plot, sum_classes, labels, classes):
    """Chi-square test of each cluster's label counts against the pooled
    (all-cluster) label distribution.

    Returns the list of p-values, one per cluster.  The original version
    computed the same statistics but silently discarded them; returning
    them is backward-compatible since the previous return value (None)
    was never used.  `xrange` was also replaced with `range` (identical
    behavior on Python 2, runs on Python 3 too).
    """
    num_classes = len(set(classes))

    # Pooled count of each label across all clusters.
    totals = []
    for label in labels:
        sum_ = 0
        for class_num in range(num_classes):
            sum_ += to_plot[class_num][label]
        totals.append(sum_)

    # Expected fraction of each label under the pooled distribution.
    sum_totals = sum(totals)
    probs = [t / sum_totals for t in totals]

    p_values = []
    for class_num in range(num_classes):
        counts = [to_plot[class_num][label] for label in labels]
        p_values.append(chisq(counts, probs))
    return p_values
def stacked_bars(labels, data_plots, out_fpath, label_translation, ref=True):
    """Bar chart of the four strongest labels in each of four clusters.

    Parameters
    ----------
    labels : list of str
        All label abbreviations, aligned with each data vector.
    data_plots : iterable of (data, color, class_num)
        As produced by `generate_data_plot` (the color item is unused here).
    out_fpath : str
        Path where the figure is saved.
    label_translation : dict
        Maps raw cluster numbers to display cluster numbers.
    ref : bool
        True when plotting referrers, False for video categories; selects
        the label universe, y-limits and y-axis caption.

    Fix vs. original: the last x-tick label was the invalid mathtext
    string '$C3' (missing closing '$'); it is now '$C3$'.
    """
    # Four groups of four bars with a one-slot gap between groups.
    x_locations = [1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19]

    data_class = {}
    data_label = {}
    for data, _, class_num in data_plots:
        # Keep the four largest fractions (and their labels) per cluster.
        best_idx = np.argsort(data)[::-1][:4]
        best_cls = np.array(data)[best_idx]
        best_lbl = np.array(labels)[best_idx]

        data_class[label_translation[class_num]] = best_cls
        data_label[label_translation[class_num]] = best_lbl

    bar_data = []
    bar_labels = []
    for cls in sorted(data_class):
        bar_data.extend(data_class[cls])
        bar_labels.extend(data_label[cls])

    # Fixed color per label so a label keeps its color across bar groups.
    colors = ['b', 'g', 'm', 'r', 'y', 'c', '#A617A1', '#2B5700', 'w',
              '#FF7300', 'k'] * 3
    colored = {}
    if ref:
        to_use = set(REFERRER_ABBRV.values())
    else:
        to_use = set(CATEG_ABBRV.values())
    for i, l in enumerate(to_use):
        colored[l] = colors[i]

    for x, y, l in zip(x_locations, bar_data, bar_labels):
        c = colored[l]
        plt.bar(left=x, height=y, color=c, width=1, alpha=0.5)
        plt.text(x + .75, y, l, va='bottom', ha='center', rotation=45)

    plt.xlim(xmin=0, xmax=21)
    plt.xlabel('Cluster')
    if ref:
        plt.ylim(ymin=0, ymax=.31)
        plt.ylabel('Fraction of Views in Cluster')
    else:
        plt.ylim(ymin=0, ymax=.4)
        plt.ylabel('Fraction of Videos in Cluster')

    plt.xticks([3, 8, 13, 18], ['$C0$', '$C1$', '$C2$', '$C3$'])
    plt.savefig(out_fpath)
@plac.annotations(features_fpath=plac.Annotation('Features file', type=str),
                  classes_fpath=plac.Annotation('Video classes file', type=str),
                  out_fpath=plac.Annotation('Plot file', type=str),
                  # Fixed help-text typo: 'labe' -> 'label'.
                  trans_fpath=plac.Annotation('Translation of cluster num to label',
                                              type=str),
                  col_to_use=plac.Annotation('Column number to use', type=int,
                                             kind='option', abbrev='c'),
                  is_text_features=plac.Annotation('Indicates file type',
                                                   kind='flag', abbrev='t',
                                                   type=bool))
def main(features_fpath, classes_fpath, out_fpath,
         trans_fpath, col_to_use=2, is_text_features=False):
    """Load per-video features and cluster assignments, then save a
    stacked-bar summary of the dominant labels per cluster.

    `is_text_features` selects the parser: category names in a text
    column vs. referrer counts in fixed SVM-file columns.
    """
    initialize_matplotlib()

    classes = np.loadtxt(classes_fpath)

    if is_text_features:
        to_plot, sum_classes, labels = \
            load_text_file(features_fpath, col_to_use, classes)
        ref = False
    else:
        to_plot, sum_classes, labels = \
            load_svm_file(features_fpath, classes)
        ref = True

    # Cluster-number translation table: one "<raw> <display>" pair per line.
    trans = {}
    with open(trans_fpath) as f:
        for l in f:
            spl = l.split()
            trans[int(spl[0])] = int(spl[1])

    data = generate_data_plot(to_plot, sum_classes, labels, classes)
    stacked_bars(labels, data, out_fpath, trans, ref)
    #allchisq(to_plot, sum_classes, labels, classes)
# Script entry point: plac parses sys.argv against main's annotations.
if __name__ == '__main__':
    sys.exit(plac.call(main))
| flaviovdf/pyksc | src/scripts/col_to_cluster.py | Python | bsd-3-clause | 7,933 |
import json
import django
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.base import ModelBase
from django.utils.encoding import smart_unicode
from django.db.models.signals import post_syncdb
from django.contrib.auth.models import Permission
import sys
import datetime
import decimal
# Resolve the user model reference used by the ForeignKeys below.
# NOTE(review): only the MINOR version is inspected (django.VERSION[1]),
# so this implicitly assumes Django 1.x; presumably 1.5-1.6 take the
# get_user_model() branch while others fall back to the settings string —
# confirm before supporting any other major version.
if 4 < django.VERSION[1] < 7:
    AUTH_USER_MODEL = django.contrib.auth.get_user_model()
else:
    AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def add_view_permissions(sender, **kwargs):
    """
    This syncdb hooks takes care of adding a view permission too all our
    content types.
    """
    # Optionally skip during test runs ("test"/"jenkins" management
    # commands) when XADMIN_TEST_VIEW_PERMISSIONS is disabled.
    argv = sys.argv
    permissions_with_tests = getattr(settings, "XADMIN_TEST_VIEW_PERMISSIONS", True)
    if not permissions_with_tests and len(argv) > 1 \
            and (argv[1] == "test" or argv[1] == "jenkins"):
        return

    # for each of our content types
    for content_type in ContentType.objects.all():
        # build our permission slug
        codename = "view_%s" % content_type.model

        # if it doesn't exist..
        if not Permission.objects.filter(content_type=content_type, codename=codename):
            # add it
            Permission.objects.create(content_type=content_type,
                                      codename=codename,
                                      name="Can view %s" % content_type.name)
            #print "Added view permission for %s" % content_type.name
# check for all our view permissions after a syncdb
# (runs add_view_permissions every time syncdb finishes)
post_syncdb.connect(add_view_permissions)
class Bookmark(models.Model):
    """A saved admin view: a named url pattern plus query string,
    optionally shared with other users."""

    title = models.CharField(_(u'Title'), max_length=128)
    # owner; nullable so a bookmark can exist without a specific user
    user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"), blank=True, null=True)
    # name of the url pattern passed to reverse()
    url_name = models.CharField(_(u'Url Name'), max_length=64)
    content_type = models.ForeignKey(ContentType)
    # raw query string appended to the reversed url (may be empty)
    query = models.CharField(_(u'Query String'), max_length=1000, blank=True)
    is_share = models.BooleanField(_(u'Is Shared'), default=False)

    @property
    def url(self):
        """Reversed URL for this bookmark with its query string attached."""
        base_url = reverse(self.url_name)
        if self.query:
            base_url = base_url + '?' + self.query
        return base_url

    def __unicode__(self):
        return self.title

    class Meta:
        verbose_name = _(u'Bookmark')
        verbose_name_plural = _('Bookmarks')
class JSONEncoder(DjangoJSONEncoder):
    """JSON encoder that also serializes dates, datetimes, Decimals and
    model classes, falling back to a unicode representation for anything
    the parent encoder cannot handle."""

    def default(self, o):
        # BUGFIX: datetime.datetime is a subclass of datetime.date, so the
        # original `isinstance(o, datetime.date)` branch (checked first)
        # also captured datetimes and silently dropped their time
        # component.  Check the more specific type first.
        if isinstance(o, datetime.datetime):
            return o.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(o, datetime.date):
            return o.strftime('%Y-%m-%d')
        elif isinstance(o, decimal.Decimal):
            return str(o)
        elif isinstance(o, ModelBase):
            # model classes serialize as "app_label.model_name"
            return '%s.%s' % (o._meta.app_label, o._meta.model_name)
        else:
            try:
                return super(JSONEncoder, self).default(o)
            except Exception:
                # last resort: best-effort text representation
                return smart_unicode(o)
class UserSettings(models.Model):
    """Per-user key/value store; values are JSON-encoded text."""

    user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"))
    key = models.CharField(_('Settings Key'), max_length=256)
    value = models.TextField(_('Settings Content'))

    def json_value(self):
        """Deserialize the stored JSON value."""
        return json.loads(self.value)

    def set_json(self, obj):
        """Serialize `obj` to JSON and store it (does not save the row)."""
        self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)

    def __unicode__(self):
        return "%s %s" % (self.user, self.key)

    class Meta:
        verbose_name = _(u'User Setting')
        verbose_name_plural = _('User Settings')
class UserWidget(models.Model):
    """A dashboard widget instance owned by a user; its parameters are
    stored as JSON text in `value`."""

    user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_(u"user"))
    # identifier of the dashboard page this widget lives on
    page_id = models.CharField(_(u"Page"), max_length=256)
    widget_type = models.CharField(_(u"Widget Type"), max_length=50)
    value = models.TextField(_(u"Widget Params"))

    def get_value(self):
        """Deserialized params, augmented with this row's id and type."""
        value = json.loads(self.value)
        value['id'] = self.id
        value['type'] = self.widget_type
        return value

    def set_value(self, obj):
        """Serialize `obj` to JSON and store it (does not save the row)."""
        self.value = json.dumps(obj, cls=JSONEncoder, ensure_ascii=False)

    def save(self, *args, **kwargs):
        # On first save, prepend this widget's pk to the page's position
        # list ("dashboard:<page>:pos") so it appears on the dashboard.
        created = self.pk is None
        super(UserWidget, self).save(*args, **kwargs)
        if created:
            try:
                portal_pos = UserSettings.objects.get(
                    user=self.user, key="dashboard:%s:pos" % self.page_id)
                portal_pos.value = "%s,%s" % (self.pk, portal_pos.value) if portal_pos.value else self.pk
                portal_pos.save()
            except Exception:
                # best-effort: a missing (or duplicated) settings row is
                # deliberately ignored rather than failing the save
                pass

    def __unicode__(self):
        return "%s %s widget" % (self.user, self.widget_type)

    class Meta:
        verbose_name = _(u'User Widget')
        verbose_name_plural = _('User Widgets')
| marguslaak/django-xadmin | xadmin/models.py | Python | bsd-3-clause | 4,934 |
"""star subcommand tests"""
# (c) 2015-2021 Wibowo Arindrarto <contact@arindrarto.dev>
import json
import pytest
from click.testing import CliRunner
from crimson.cli import main
from .utils import get_test_path
@pytest.fixture(scope="module")
def star_fail():
    """CLI result of running `crimson star` on a non-STAR input file."""
    runner = CliRunner()
    in_file = get_test_path("star_nope.txt")
    result = runner.invoke(main, ["star", in_file])
    return result
@pytest.fixture(scope="module")
def star_v230_01():
    """CLI result for a STAR v2.3.0 log, with the parsed JSON payload
    attached as `result.json`."""
    runner = CliRunner()
    in_file = get_test_path("star_v230_01.txt")
    result = runner.invoke(main, ["star", in_file])
    result.json = json.loads(result.output)
    return result
@pytest.fixture(scope="module")
def star_v230_02():
    """CLI result for a second STAR v2.3.0 log, with parsed JSON attached.

    NOTE(review): not referenced by any test visible in this chunk —
    presumably used by tests defined further down the file.
    """
    runner = CliRunner()
    in_file = get_test_path("star_v230_02.txt")
    result = runner.invoke(main, ["star", in_file])
    result.json = json.loads(result.output)
    return result
def test_star_fail_exit_code(star_fail):
    # A malformed input must surface as a non-zero exit code.
    assert star_fail.exit_code != 0
def test_star_fail_output(star_fail):
    # ...and carry the parser's error message in the CLI output.
    err_msg = "Unexpected file structure. No contents parsed."
    assert err_msg in star_fail.output
# One case per attribute expected in the parsed star_v230_01.txt payload.
@pytest.mark.parametrize(
    "attr, exp",
    [
        ("avgDeletionLength", 1.36),
        ("avgInputLength", 98),
        ("avgInsertionLength", 1.21),
        ("avgMappedLength", 98.27),
        ("mappingSpeed", 403.16),
        ("nInput", 14782416),
        ("nMappedMultipleLoci", 1936775),
        ("nMappedTooManyLoci", 27644),
        ("nSplicesATAC", 2471),
        ("nSplicesAnnotated", 3780876),
        ("nSplicesGCAG", 22344),
        ("nSplicesGTAG", 3780050),
        ("nSplicesNonCanonical", 5148),
        ("nSplicesTotal", 3810013),
        ("nUniquelyMapped", 12347431),
        ("pctMappedMultipleLoci", 13.1),
        ("pctMappedTooManyLoci", 0.19),
        ("pctUniquelyMapped", 83.53),
        ("pctUnmappedForOther", 0.03),
        ("pctUnmappedForTooManyMismatches", 0.0),
        ("pctUnmappedForTooShort", 3.16),
        ("rateDeletionPerBase", 0.0),
        ("rateInsertionPerBase", 0.0),
        ("rateMismatchPerBase", 0.24),
        ("timeEnd", "Dec 11 19:01:56"),
        ("timeJobStart", "Dec 11 18:55:02"),
        ("timeMappingStart", "Dec 11 18:59:44"),
    ],
)
def test_star_v230_01(star_v230_01, attr, exp):
    # The failing attribute name is included in the assertion message.
    assert star_v230_01.json.get(attr) == exp, attr
| bow/crimson | tests/test_star.py | Python | bsd-3-clause | 2,311 |
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from collections import defaultdict, OrderedDict
import logging as _logging
from .internal_graph import InternalTorchIRNode, InternalTorchIRGraph
def generate_tensor_assignment_ops(graph):
    """
    This graph pass handles inplace tensor assignements, specifically it handles:
    `torch.Tensor.copy_` and `torch.Tensor.fill_`. There are many other inplace tensor
    assignments which are currently not handled.

    for instance:

    def forward(self, x): # x a tensor with shape [4,10]
        x[:2, 4] = [[1],[3]]
        return x

    In Pytorch, this is represented by a sequence of slice / select ops followed by a copy op:

    input -> %x
    %1 = slice(%x, dim=0, begin=0, end=2) # the slice for dimension 0
    %2 = select(%1, dim=1, index=4) # the select for dimension 1
    %3 = copy_(%2, value=[[1], [3]])
    output -> %x

    This graph pass fuses the sequences into a single InternalTorchIRNode of a new kind, which is defined as `_internal_op_tensor_inplace_copy`.

    input -> %x
    %nodes_to_fuse = [slice(%x, begin=0, end=2), select(%1, dim=1, index=4)]
    %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=[[1],[3]], nodes_to_fuse=nodes_to_fuse)
    output -> x_internal_tensor_assign_1

    The _internal_tensor_value_assign op takes an additional internal data member nodes_to_fuse,
    which is a list of select / slice InternalTorchIRNodes that need to be fused.

    Here is a more complicated example:

    def forward(self, x): # x a tensor with shape [4,10]
        x[0, 0] = 1
        x[1:2, 1:2] = [[0]]
        return x

    Input graph:

    input -> %x
    %1 = select(%x, dim=0, index=0)
    %2 = select(%1, dim=0, index=0)
    %3 = copy_(%2, value=1)
    %4 = slice(%x, dim=0, begin=1, end=2)
    %5 = slice(%4, dim=1, begin=1, end=2)
    %6 = copy_(%5, value=[[0]])
    output -> %x

    Output graph:

    input -> %x
    %nodes_to_fuse_1 = [select(%x, dim=0, index=0), select(%1, dim=0, index=0)]
    %x_internal_tensor_assign_1 = _internal_op_tensor_inplace_copy(%x, value=1, nodes_to_fuse=nodes_to_fuse_1)
    %nodes_to_fuse_2 = [slice(%x, dim=0, begin=1, end=2), slice(%4, dim=1, begin=1, end=2)]
    %x_internal_tensor_assign_2 = _internal_op_tensor_inplace_copy(%x_internal_tensor_assign_1, value=[[0]], nodes_to_fuse=nodes_to_fuse_2)
    output -> x_internal_tensor_assign_2

    torch.Tensor.fill_ works in a similar way, except the InternalTorchIRNodes is defined by `_internal_op_tensor_inplace_fill`.
    A fill_ operator is generated from the following forward pass:

    def forward(self, x): # x a tensor with shape [5, 4]
        x[2] = 9
        return x
    """
    TENSOR_ASSIGMENT_PREFIX = "_internal_tensor_assign_"

    def _get_updated_name(name, updated_tensor_count):
        # Map a tensor name to its latest SSA-style alias, e.g. "x" ->
        # "x_internal_tensor_assign_2" after two in-place updates.
        if name in updated_tensor_count:
            return name + TENSOR_ASSIGMENT_PREFIX + str(updated_tensor_count[name])
        return name

    def _construct_nodes_to_fuse_inputs(nodes_to_fuse):
        # Flatten the indexing parameters of the fused select/slice chain:
        # a select contributes (index, None); a slice (begin, end).
        inputs = []
        for node in nodes_to_fuse:
            if node.kind == "select":
                inputs += [node.inputs[2], None]
            if node.kind == "slice":
                inputs += [node.inputs[2], node.inputs[3]]
        return inputs

    # maps the output symbol of the latest select/slice in a chain to the
    # whole chain of nodes leading to it
    tensor_to_node_sequence_mapping = {}
    # number of in-place updates applied to each original tensor name
    updated_tensor_count = defaultdict(lambda : 0)

    for i in range(len(graph.nodes)):
        node = graph.nodes[i]

        # Re-point inputs at the most recent version of each tensor.
        for idx in range(len(node.inputs)):
            input_name = node.inputs[idx]
            node.inputs[idx] = _get_updated_name(input_name, updated_tensor_count)

        if node.kind in ("select", "slice"):
            # Extend (or start) the indexing chain that may end in copy_/fill_.
            node_input = node.inputs[0]
            node_output = node.outputs[0]
            node_sequence = tensor_to_node_sequence_mapping.get(node_input, [])
            if len(node_sequence) > 0:
                tensor_to_node_sequence_mapping.pop(node_input)
            node_sequence.append(node)
            tensor_to_node_sequence_mapping[node_output] = node_sequence

        if node.kind in ("copy_", "fill_"):
            node_input = node.inputs[0]
            if node_input not in tensor_to_node_sequence_mapping:
                raise ValueError("No matching select or slice.")

            if node.kind == "copy_":
                kind = "_internal_op_tensor_inplace_copy"
            else:
                kind = "_internal_op_tensor_inplace_fill"

            nodes_to_fuse = tensor_to_node_sequence_mapping[node_input]
            source_tensor = nodes_to_fuse[0].inputs[0]
            # strip any earlier alias suffix to recover the original name
            origin_name = source_tensor.split(TENSOR_ASSIGMENT_PREFIX)[0]

            updated_tensor_count[origin_name] += 1
            outputs = [_get_updated_name(origin_name, updated_tensor_count)]

            update_value = node.inputs[1]
            nodes_to_fuse_inputs = _construct_nodes_to_fuse_inputs(nodes_to_fuse)
            tensor_assign_node = InternalTorchIRNode(
                node=None,
                inputs=[source_tensor, update_value] + nodes_to_fuse_inputs,
                outputs=outputs,
                kind=kind,
                blocks=[],
            )
            # Replace the copy_/fill_ node in place with the fused op.
            graph.nodes[i] = tensor_assign_node

    # modify the graph outputs if it is effected by this graph pass
    for idx in range(len(graph.outputs)):
        output = graph.outputs[idx]
        if output in updated_tensor_count:
            graph.outputs[idx] = _get_updated_name(output, updated_tensor_count)
def remove_getattr_nodes(graph):
    """Strip every ``getattr`` node out of *graph*, recursing into the
    blocks of each remaining node.

    Raises ``RuntimeError`` if a removed ``getattr`` node feeds a graph
    output, since dropping it would leave that output dangling.
    """
    kept, dropped = [], []
    for candidate in graph.nodes:
        # Clean nested blocks first so the whole tree is processed.
        for inner_block in candidate.blocks:
            remove_getattr_nodes(inner_block)
        (dropped if candidate.kind == "getattr" else kept).append(candidate)

    # A getattr whose name is exported as a graph output cannot be removed.
    for removed in dropped:
        if removed.name in graph.outputs:
            raise RuntimeError("{} should not be in the graph outputs.".format(removed.name))

    graph.nodes = kept
def transform_inplace_ops(graph, name_remap_dict=None):
    """Rewrite in-place `append` ops into a functional form: subsequent
    uses of the appended-to symbol are renamed to the append's output,
    and appends inside loop blocks are threaded through the loop's
    inputs/outputs.  Appends inside if/else blocks raise
    NotImplementedError.  `name_remap_dict` carries pending renames into
    recursive calls on nested blocks.
    """
    # As we modify ops, we'll need to remap symbols.
    if name_remap_dict is None:
        name_remap_dict = {}

    for node in graph.nodes:
        # Apply renames accumulated from earlier nodes first.
        for k, v in name_remap_dict.items():
            node.replace_name(k, v)

        if node.kind == "append":
            if isinstance(node.parent, InternalTorchIRGraph):
                # If append appears in a graph (outer block), replace
                # subsequent uses of its input symbol with its output symbol.
                name_remap_dict[node.inputs[0]] = node.outputs[0]
            elif node.parent.parent.kind == "loop":
                # If append appears in a loop block, add its inputs to the block
                # inputs and loop inputs, and its outputs to the block outputs
                # and loop outputs.

                # This is the global input to append. We need to add it to the
                # loop's input list, and replace any uses after the node with
                # @global_output below.
                global_input = node.inputs[0]
                # This will be the name of the input to append within the
                # block. We need to add it to the block inputs.
                local_input = node.parent.parent.name + ".0"
                # This is the output of append. We need to add it to the list
                # of block outputs.
                local_output = node.outputs[0]
                # This is the name of the new output from the loop. It should
                # replace any uses of @global_input after the loop op.
                global_output = local_output + ".out"
                name_remap_dict[global_input] = global_output

                node.parent.parent.inputs.append(global_input)
                node.parent.inputs.append(local_input)
                node.replace_name(global_input, local_input)
                node.parent.outputs.append(local_output)
                node.parent.parent.outputs.append(global_output)
                # loop nodes are named after their first output symbol
                node.parent.parent.name = node.parent.parent.outputs[0]
            elif node.parent.parent.kind == "if":
                # If append appears in an if/else block, add its outputs to the
                # block outputs and loop outputs.
                # Note that we can't assume the append appears in both blocks.
                raise NotImplementedError(
                    "inplace_ops pass doesn't yet support append op inside conditional"
                )

        # Recurse into nested blocks, propagating the pending renames.
        for block in node.blocks:
            transform_inplace_ops(block, name_remap_dict)

    # Replace names in graph outputs
    for k, v in name_remap_dict.items():
        try:
            idx = graph.outputs.index(k)
        except ValueError:
            pass
        else:
            graph.outputs[idx] = v
def flatten_graph_input_values(graph):
    """ CoreML can't handle nested iterables of tensors, so we flatten the
    inputs of any graph that expects them.

    Each tuple/list input "x" becomes scalar inputs "x_0", "x_1", ... plus
    a prepended `tupleconstruct` node that rebuilds "x" for the rest of
    the graph.  The loop repeats until no nested tuples remain.
    """
    new_graph_inputs = graph.inputs
    all_new_nodes = []
    changed = True
    notified = False

    while changed:
        old_graph_inputs = new_graph_inputs
        new_graph_inputs = OrderedDict()
        new_nodes = []
        changed = False
        for _input_name, _input_val in old_graph_inputs.items():
            if isinstance(_input_val, (tuple, list)):
                changed = True
                if not notified:
                    notified = True
                    _logging.warning(
                        "Tuple detected at graph input. This will be flattened in the converted model."
                    )

                # If this input to the graph is a tuple, we want to replace it
                # with a flattened version and add an op to construct the tuple.
                node_inputs = []
                for idx, item in enumerate(_input_val):
                    name = _input_name + "_{}".format(idx)
                    new_graph_inputs[name] = item
                    node_inputs.append(name)

                new_nodes.append(
                    InternalTorchIRNode(
                        inputs=node_inputs,
                        outputs=[_input_name],
                        kind="tupleconstruct",
                    )
                )
            else:
                # This input isn't a tuple, keep it as is.
                new_graph_inputs[_input_name] = _input_val

        # Prepend this round's construct ops so that after further rounds
        # (for nested tuples) inner constructs run before outer ones.
        all_new_nodes = new_nodes + all_new_nodes
    graph.inputs = new_graph_inputs
    graph.nodes = all_new_nodes + graph.nodes
def flatten_graph_output_values(graph):
    """
    CoreML can't handle nested iterables of tensors, so we flatten the
    outputs of any graph that produces them.

    Graph outputs produced by construct ops (tupleconstruct /
    listconstruct) are replaced with those ops' inputs; the loop repeats
    until no construct op feeds an output.  The now possibly-dead
    construct ops are left for a later DCE pass.
    """
    # Nodes are named after their first output, so this maps an output
    # symbol back to its producing node where possible.
    node_names = [node.name for node in graph.nodes]
    new_graph_outputs = graph.outputs
    changed = True
    notified = False

    while changed:
        old_graph_outputs = new_graph_outputs
        new_graph_outputs = []
        changed = False
        for outp in old_graph_outputs:
            # Find the node that generates this output var.
            # It is possible to not find the output var in the list of node
            # names since nodes are named after their first output. In that
            # case, it means the output var comes from a node that returns
            # multiple outputs, which means that node cannot be a construct op.
            try:
                node_idx = node_names.index(outp)
            except ValueError:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; only ValueError (name not
                # found by list.index) is expected here.
                # @outp doesn't come from a construct op
                new_graph_outputs.append(outp)
                continue
            if graph.nodes[node_idx].kind in [
                "tupleconstruct",
                "listconstruct",
            ]:
                # Since this output came from a construct op, we can replace it
                # with the inputs to the op.
                new_graph_outputs.extend(graph.nodes[node_idx].inputs)
                changed = True
                if not notified:
                    notified = True
                    _logging.warning(
                        "Tuple detected at graph output. This will be flattened in the converted model."
                    )
            else:
                new_graph_outputs.append(outp)
    # Note: if we flattened outputs, there are likely to be construct ops
    # that are no longer needed. These will be removed in a later DCE pass.
    graph.outputs = new_graph_outputs
| apple/coremltools | coremltools/converters/mil/frontend/torch/torchir_passes.py | Python | bsd-3-clause | 12,830 |
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly (or
identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
import os
import sys
import inspect
import traceback
import pdb
from glob import glob
from timeit import default_timer as clock
def isgeneratorfunction(object):
    """
    Return true if the object is a user-defined generator function.

    Generator function objects provides same attributes as functions.

    See isfunction.__doc__ for attributes listing.

    Adapted from Python 2.6.

    Uses ``__code__`` (available since CPython 2.6, the docstring's own
    target) instead of the Python-2-only ``func_code`` alias, so the
    check also works on Python 3.  Non-function/method objects short-
    circuit to False before the attribute is touched.
    """
    CO_GENERATOR = 0x20  # code-object flag set on generator functions
    if (inspect.isfunction(object) or inspect.ismethod(object)) and \
            object.__code__.co_flags & CO_GENERATOR:
        return True
    return False
def test(*paths, **kwargs):
    """
    Runs the tests specified by paths, or all tests if paths=[].

    Note: paths are specified relative to the sympy root directory in a unix
    format (on all platforms including windows).

    Recognized keyword arguments: verbose (bool), tb (traceback style,
    default "short"), kw (substring filter on test function names),
    pdb (post-mortem debugging on failure), colors (colored output).

    Examples:

    Run all tests:
    >> import sympy
    >> sympy.test()

    Run one file:
    >> import sympy
    >> sympy.test("sympy/core/tests/test_basic.py")

    Run all tests in sympy/functions/ and some particular file:
    >> import sympy
    >> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions")
    """
    verbose = kwargs.get("verbose", False)
    tb = kwargs.get("tb", "short")
    kw = kwargs.get("kw", "")
    post_mortem = kwargs.get("pdb", False)
    colors = kwargs.get("colors", True)
    r = PyTestReporter(verbose, tb, colors)
    t = SymPyTests(r, kw, post_mortem)
    if len(paths) > 0:
        t.add_paths(paths)
    else:
        # no explicit paths: run the whole sympy tree
        t.add_paths(["sympy"])
    return t.test()
def doctest(*paths, **kwargs):
    """
    Runs the doctests specified by paths, or all tests if paths=[].

    Note: paths are specified relative to the sympy root directory in a unix
    format (on all platforms including windows).

    Recognized keyword arguments: verbose (bool), blacklist (extra paths
    to exclude from doctesting; a caller-supplied list is copied, not
    mutated).

    Examples:

    Run all tests:
    >> import sympy
    >> sympy.doctest()

    Run one file:
    >> import sympy
    >> sympy.doctest("sympy/core/tests/test_basic.py")

    Run all tests in sympy/functions/ and some particular file:
    >> import sympy
    >> sympy.doctest("sympy/core/tests/test_basic.py", "sympy/functions")
    """
    verbose = kwargs.get("verbose", False)
    # BUGFIX: copy before extending — the original extended the caller's
    # list in place, so repeated calls kept growing the same object.
    blacklist = list(kwargs.get("blacklist", []))
    blacklist.extend([
        "sympy/thirdparty/pyglet", # segfaults
        "sympy/mpmath", # needs to be fixed upstream
        "sympy/plotting", # generates live plots
        "sympy/utilities/compilef.py", # needs tcc
        "sympy/galgebra/GA.py", # needs numpy
        "sympy/galgebra/latex_ex.py", # needs numpy
        "sympy/conftest.py", # needs py.test
        "sympy/utilities/benchmarking.py", # needs py.test
        ])
    r = PyTestReporter(verbose)
    t = SymPyDocTests(r, blacklist=blacklist)
    if len(paths) > 0:
        t.add_paths(paths)
    else:
        t.add_paths(["sympy"])
    return t.test()
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = self.get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._tests = []
def add_paths(self, paths):
for path in paths:
path2 = os.path.join(self._root_dir, *path.split("/"))
if path2.endswith(".py"):
self._tests.append(path2)
else:
self._tests.extend(self.get_tests(path2))
def test(self):
"""
Runs the tests.
Returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._tests:
try:
self.test_file(f)
except KeyboardInterrupt:
print " interrupted by user"
break
return self._reporter.finish()
def test_file(self, filename):
name = "test%d" % self._count
name = os.path.splitext(os.path.basename(filename))[0]
self._count += 1
gl = {'__file__':filename}
try:
execfile(filename, gl)
except (ImportError, SyntaxError):
self._reporter.import_error(filename, sys.exc_info())
return
pytestfile = ""
if gl.has_key("XFAIL"):
pytestfile = inspect.getsourcefile(gl["XFAIL"])
disabled = gl.get("disabled", False)
if disabled:
funcs = []
else:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
# is defined the XFAIL decorator
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f])
or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i is not len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
self._reporter.entering_filename(filename, len(funcs))
for f in funcs:
self._reporter.entering_test(f)
try:
f()
except KeyboardInterrupt:
raise
except:
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip()
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_sympy_dir(self):
"""
Returns the root sympy directory.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return sympy_dir
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if self._kw == "":
return True
return x.__name__.find(self._kw) != -1
def get_paths(self, dir="", level=15):
    """Return glob patterns matching test files at increasing depths.

    Starting from ``dir`` itself, one ``test_*.py`` pattern is produced
    per directory level down to ``level`` levels of nesting, so
    ``level + 1`` patterns in total, e.g. for level 2::

        [dir/test_*.py, dir/*/test_*.py, dir/*/*/test_*.py]
    """
    patterns = []
    current = dir
    for _ in range(level + 1):
        patterns.append(os.path.join(current, "test_*.py"))
        current = os.path.join(current, "*")
    return patterns
def get_tests(self, dir):
    """Return a sorted, de-duplicated list of test files found under *dir*.

    Expands every glob pattern produced by :meth:`get_paths` and merges
    the hits into a single ordered list.
    """
    found = set()
    for pattern in self.get_paths(dir):
        found.update(glob(pattern))
    return sorted(found)
class SymPyDocTests(object):
    """Collects doctests from the sympy source tree and runs them,
    forwarding results to a reporter object."""

    def __init__(self, reporter, blacklist=[]):
        # NOTE(review): mutable default argument -- harmless only as long
        # as the list is never mutated in place; confirm callers.
        self._count = 0
        self._root_dir = self.get_sympy_dir()
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        self._tests = []
        self._blacklist = blacklist

    def add_paths(self, paths):
        # Paths are given relative to the sympy root, '/'-separated.
        # A single .py file is added directly; a directory is searched
        # recursively via get_tests().
        for path in paths:
            path2 = os.path.join(self._root_dir, *path.split("/"))
            if path2.endswith(".py"):
                self._tests.append(path2)
            else:
                self._tests.extend(self.get_tests(path2))

    def test(self):
        """
        Runs the tests.
        Returns True if all tests pass, otherwise False.
        """
        self._reporter.start()
        for f in self._tests:
            try:
                self.test_file(f)
            except KeyboardInterrupt:
                # Ctrl-C stops the run but still prints the summary below.
                print " interrupted by user"
                break
        return self._reporter.finish()

    def test_file(self, filename):
        # Run every doctest contained in `filename` and report each
        # pass/fail to the reporter.
        def setup_pprint():
            from sympy import pprint_use_unicode
            # force pprint to be in ascii mode in doctests
            pprint_use_unicode(False)
            # hook our nice, hash-stable strprinter
            from sympy.interactive import init_printing
            from sympy.printing import sstrrepr
            init_printing(sstrrepr)
        import doctest
        import unittest  # NOTE(review): imported but unused here.
        from StringIO import StringIO
        # Translate the absolute file path into a dotted module path so
        # doctest can import it.
        rel_name = filename[len(self._root_dir)+1:]
        module = rel_name.replace('/', '.')[:-3]
        setup_pprint()
        try:
            module = doctest._normalize_module(module)
            tests = doctest.DocTestFinder().find(module)
        except:
            # NOTE(review): bare except -- any failure during import (even
            # KeyboardInterrupt) is reported as an import error.
            self._reporter.import_error(filename, sys.exc_info())
            return
        tests.sort()
        # Only doctests that actually contain examples are counted.
        tests = [test for test in tests if len(test.examples) > 0]
        self._reporter.entering_filename(filename, len(tests))
        for test in tests:
            assert len(test.examples) != 0
            runner = doctest.DocTestRunner()
            # Capture stdout while the doctest runs so stray prints do not
            # corrupt the reporter's screen layout; always restore it.
            old = sys.stdout
            new = StringIO()
            sys.stdout = new
            try:
                f, t = runner.run(test, out=new.write, clear_globs=False)
            finally:
                sys.stdout = old
            if f > 0:
                # `f` is the number of failed examples reported by doctest.
                self._reporter.doctest_fail(test.name, new.getvalue())
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()

    def get_sympy_dir(self):
        """
        Returns the root sympy directory.
        """
        this_file = os.path.abspath(__file__)
        sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
        sympy_dir = os.path.normpath(sympy_dir)
        return sympy_dir

    def get_paths(self, dir="", level=15):
        """
        Generates a set of paths for testfiles searching.
        Example:
        >> get_paths(2)
        ['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
        >> get_paths(6)
        ['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
        'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
        'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py']
        """
        wildcards = [dir]
        for i in range(level):
            wildcards.append(os.path.join(wildcards[-1], "*"))
        # Unlike the unit-test runner, doctests are collected from *all*
        # .py files, not just test_*.py.
        p = [os.path.join(x, "*.py") for x in wildcards]
        return p

    def is_on_blacklist(self, x):
        """
        Returns True if "x" is on the blacklist. Otherwise False.
        """
        # Substring match: any blacklist entry occurring anywhere in the
        # path excludes the file.
        for p in self._blacklist:
            if x.find(p) != -1:
                return True
        return False

    def get_tests(self, dir):
        """
        Returns the list of tests.
        """
        def importable(x):
            """
            Checks if given pathname x is an importable module by checking for
            __init__.py file.
            Returns True/False.
            Currently we only test if the __init__.py file exists in the
            directory with the file "x" (in theory we should also test all the
            parent dirs) and if "x" is not on self._blacklist.
            """
            if self.is_on_blacklist(x):
                return False
            init_py = os.path.dirname(x) + os.path.sep + "__init__.py"
            return os.path.exists(init_py)
        g = []
        for x in self.get_paths(dir):
            g.extend(glob(x))
        # De-duplicate (patterns can overlap) and sort for stable order.
        g = list(set(g))
        g.sort()
        # skip files that are not importable (i.e. missing __init__.py)
        g = [x for x in g if importable(x)]
        return g
class Reporter(object):
    """
    Parent class for all reporters.

    Concrete reporters implement the notification callbacks invoked by the
    test runners; this base class only marks the common interface.
    """
class PyTestReporter(Reporter):
    """
    Py.test like reporter. Should produce output identical to py.test.
    """

    def __init__(self, verbose=False, tb="short", colors=True):
        self._verbose = verbose
        self._tb_style = tb
        self._colors = colors
        # Per-run counters / collections, one per possible test outcome.
        self._xfailed = 0
        self._xpassed = []
        self._failed = []
        self._failed_doctest = []
        self._passed = 0
        self._skipped = 0
        self._exceptions = []
        # this tracks the x-position of the cursor (useful for positioning
        # things on the screen), without the need for any readline library:
        self._write_pos = 0
        self._line_wrap = False

    def root_dir(self, dir):
        # Remembered so file names can be printed relative to the root.
        self._root_dir = dir

    def write(self, text, color="", align="left", width=80):
        """
        Prints a text on the screen.
        It uses sys.stdout.write(), so no readline library is necessary.
        color ... choose from the colors below, "" means default color
        align ... left/right, left is a normal print, right is aligned on the
        right hand side of the screen, filled with " " if necessary
        width ... the screen width
        """
        color_templates = (
            ("Black" , "0;30"),
            ("Red" , "0;31"),
            ("Green" , "0;32"),
            ("Brown" , "0;33"),
            ("Blue" , "0;34"),
            ("Purple" , "0;35"),
            ("Cyan" , "0;36"),
            ("LightGray" , "0;37"),
            ("DarkGray" , "1;30"),
            ("LightRed" , "1;31"),
            ("LightGreen" , "1;32"),
            ("Yellow" , "1;33"),
            ("LightBlue" , "1;34"),
            ("LightPurple" , "1;35"),
            ("LightCyan" , "1;36"),
            ("White" , "1;37"), )
        colors = {}
        for name, value in color_templates:
            colors[name] = value
        c_normal = '\033[0m'
        c_color = '\033[%sm'
        if align == "right":
            if self._write_pos+len(text) > width:
                # we don't fit on the current line, create a new line
                self.write("\n")
            # Pad with spaces so `text` ends exactly at the right margin.
            self.write(" "*(width-self._write_pos-len(text)))
        if not sys.stdout.isatty():
            # the stdout is not a terminal, this for example happens if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
            color = ""
        if self._line_wrap:
            # The previous write ended exactly at the margin; start a fresh
            # line unless the caller is already emitting one.
            if text[0] != "\n":
                sys.stdout.write("\n")
        if color == "":
            sys.stdout.write(text)
        else:
            sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal))
        sys.stdout.flush()
        # Update the tracked cursor column from the last newline (if any).
        l = text.rfind("\n")
        if l == -1:
            self._write_pos += len(text)
        else:
            self._write_pos = len(text)-l-1
        self._line_wrap = self._write_pos >= width
        self._write_pos %= width

    def write_center(self, text, delim="="):
        # Print `text` centered in an 80-column line padded with `delim`.
        width = 80
        if text != "":
            text = " %s " % text
        idx = (width-len(text)) // 2
        t = delim*idx + text + delim*(width-idx-len(text))
        self.write(t+"\n")

    def write_exception(self, e, val, tb):
        # Print a traceback, omitting the runner's own frame.
        t = traceback.extract_tb(tb)
        # remove the first item, as that is always runtests.py
        t = t[1:]
        t = traceback.format_list(t)
        self.write("".join(t))
        t = traceback.format_exception_only(e, val)
        self.write("".join(t))

    def start(self):
        # Print the run header and start the wall-clock timer.
        self.write_center("test process starts")
        executable = sys.executable
        v = sys.version_info
        python_version = "%s.%s.%s-%s-%s" % v
        self.write("executable: %s (%s)\n\n" % (executable, python_version))
        self._t_start = clock()

    def finish(self):
        """Print the final summary (counts, tracebacks for failures and
        exceptions) and return True when the whole run was clean."""
        self._t_end = clock()
        self.write("\n")
        # Build the one-line summary, mentioning only non-zero outcomes.
        text = "tests finished: %d passed" % self._passed
        if len(self._failed) > 0:
            text += ", %d failed" % len(self._failed)
        if len(self._failed_doctest) > 0:
            text += ", %d failed" % len(self._failed_doctest)
        if self._skipped > 0:
            text += ", %d skipped" % self._skipped
        if self._xfailed > 0:
            text += ", %d xfailed" % self._xfailed
        if len(self._xpassed) > 0:
            text += ", %d xpassed" % len(self._xpassed)
        if len(self._exceptions) > 0:
            text += ", %d exceptions" % len(self._exceptions)
        text += " in %.2f seconds" % (self._t_end - self._t_start)
        if len(self._xpassed) > 0:
            self.write_center("xpassed tests", "_")
            for e in self._xpassed:
                self.write("%s:%s\n" % (e[0], e[1]))
            self.write("\n")
        if self._tb_style != "no" and len(self._exceptions) > 0:
            #self.write_center("These tests raised an exception", "_")
            for e in self._exceptions:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                # f is None for import errors (no specific test function).
                if f is None:
                    s = "%s" % filename
                else:
                    s = "%s:%s" % (filename, f.__name__)
                self.write_center(s, "_")
                self.write_exception(t, val, tb)
            self.write("\n")
        if self._tb_style != "no" and len(self._failed) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                self.write_center("%s:%s" % (filename, f.__name__), "_")
                self.write_exception(t, val, tb)
            self.write("\n")
        if self._tb_style != "no" and len(self._failed_doctest) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed_doctest:
                filename, msg = e
                self.write_center("", "_")
                self.write_center("%s" % filename, "_")
                self.write(msg)
            self.write("\n")
        self.write_center(text)
        ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
            len(self._failed_doctest) == 0
        if not ok:
            self.write("DO *NOT* COMMIT!\n")
        return ok

    def entering_filename(self, filename, n):
        # `n` is the number of tests collected from this file.
        rel_name = filename[len(self._root_dir)+1:]
        self._active_file = rel_name
        self._active_file_error = False
        self.write(rel_name)
        self.write("[%d] " % n)

    def leaving_filename(self):
        # Right-align a colored [OK]/[FAIL] tag for the file just finished.
        if self._colors:
            self.write(" ")
            if self._active_file_error:
                self.write("[FAIL]", "Red", align="right")
            else:
                self.write("[OK]", "Green", align="right")
        self.write("\n")
        if self._verbose:
            self.write("\n")

    def entering_test(self, f):
        self._active_f = f
        if self._verbose:
            self.write("\n"+f.__name__+" ")

    def test_xfail(self):
        self._xfailed += 1
        self.write("f")

    def test_xpass(self, fname):
        self._xpassed.append((self._active_file, fname))
        self.write("X")

    def test_fail(self, exc_info):
        self._failed.append((self._active_file, self._active_f, exc_info))
        self.write("F")
        self._active_file_error = True

    def doctest_fail(self, name, error_msg):
        # the first line contains "******", remove it:
        error_msg = "\n".join(error_msg.split("\n")[1:])
        self._failed_doctest.append((name, error_msg))
        self.write("F")
        self._active_file_error = True

    def test_pass(self):
        self._passed += 1
        if self._verbose:
            self.write("ok")
        else:
            self.write(".")

    def test_skip(self):
        self._skipped += 1
        self.write("s")

    def test_exception(self, exc_info):
        self._exceptions.append((self._active_file, self._active_f, exc_info))
        self.write("E")
        self._active_file_error = True

    def import_error(self, filename, exc_info):
        # Import failures are stored with f=None; finish() prints them
        # with the filename alone.
        self._exceptions.append((filename, None, exc_info))
        rel_name = filename[len(self._root_dir)+1:]
        self.write(rel_name)
        self.write("[?] Failed to import")
        if self._colors:
            self.write(" ")
            self.write("[FAIL]", "Red", align="right")
        self.write("\n")
| gnulinooks/sympy | sympy/utilities/runtests.py | Python | bsd-3-clause | 21,885 |
from djangobench.utils import run_benchmark
from query_latest.models import Book
def benchmark():
    # The operation under measurement: a single ``latest()`` queryset
    # evaluation on the Book model (one database round-trip per call).
    Book.objects.latest()

# Register the callable with the djangobench harness; `meta` feeds the
# human-readable description shown in benchmark reports.
run_benchmark(
    benchmark,
    meta = {
        'description': 'A simple Model.objects.latest() call.',
    }
)
| alex/djangobench | djangobench/benchmarks/query_latest/benchmark.py | Python | bsd-3-clause | 241 |
#!/usr/bin/env python
"""Execute the tests for the razers2 program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
                                    '..', '..', 'util', 'py_lib'))
# Prepend so the bundled seqan helper package wins over any installed copy.
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
    """Main entry point of the script.

    Builds one TestConf per razers2 invocation (single-end and paired-end,
    across read lengths and option combinations), runs them all, diffs the
    produced files against the checked-in golden outputs, and returns
    ``failures != 0`` (True when at least one test failed).
    """
    print 'Executing test for razers2'
    print '==========================='
    print
    ph = app_tests.TestPathHelper(
        source_base, binary_base,
        'core/apps/razers2/tests')  # tests dir
    # ============================================================
    # Auto-detect the binary path.
    # ============================================================
    path_to_program = app_tests.autolocateBinary(
        binary_base, 'core/apps/razers2', 'razers2')
    # ============================================================
    # Built TestConf list.
    # ============================================================
    # Build list with TestConf objects, analoguely to how the output
    # was generated in generate_outputs.sh.
    conf_list = []
    # ============================================================
    # Run Adeno Single-End Tests
    # ============================================================
    # We run the following for all read lengths we have reads for.
    for rl in [36, 100]:
        # Run with default options.
        conf = app_tests.TestConf(
            program=path_to_program,
            redir_stdout=ph.outFile('se-adeno-reads%d_1.stdout' % rl),
            args=['--low-memory',
                  ph.inFile('adeno-genome.fa'),
                  ph.inFile('adeno-reads%d_1.fa' % rl),
                  '-o', ph.outFile('se-adeno-reads%d_1.razers' % rl)],
            to_diff=[(ph.inFile('se-adeno-reads%d_1.razers' % rl),
                      ph.outFile('se-adeno-reads%d_1.razers' % rl)),
                     (ph.inFile('se-adeno-reads%d_1.stdout' % rl),
                      ph.outFile('se-adeno-reads%d_1.stdout' % rl))])
        conf_list.append(conf)
        # Allow indels.
        conf = app_tests.TestConf(
            program=path_to_program,
            redir_stdout=ph.outFile('se-adeno-reads%d_1-id.stdout' % rl),
            args=['--low-memory', '-id',
                  ph.inFile('adeno-genome.fa'),
                  ph.inFile('adeno-reads%d_1.fa' % rl),
                  '-o', ph.outFile('se-adeno-reads%d_1-id.razers' % rl)],
            to_diff=[(ph.inFile('se-adeno-reads%d_1-id.razers' % rl),
                      ph.outFile('se-adeno-reads%d_1-id.razers' % rl)),
                     (ph.inFile('se-adeno-reads%d_1-id.stdout' % rl),
                      ph.outFile('se-adeno-reads%d_1-id.stdout' % rl))])
        conf_list.append(conf)
        # Compute forward/reverse matches only.
        for o in ['-r', '-f']:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1-id%s.stdout' % (rl, o)),
                args=['--low-memory', '-id', o,
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1-id%s.razers' % (rl, o))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1-id%s.razers' % (rl, o)),
                          ph.outFile('se-adeno-reads%d_1-id%s.razers' % (rl, o))),
                         (ph.inFile('se-adeno-reads%d_1-id%s.stdout' % (rl, o)),
                          ph.outFile('se-adeno-reads%d_1-id%s.stdout' % (rl, o)))])
            conf_list.append(conf)
        # Compute with different identity rates.
        for i in range(90, 101):
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1-id-i%d.stdout' % (rl, i)),
                args=['--low-memory', '-id', '-i', str(i),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1-id-i%d.razers' % (rl, i))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1-id-i%d.razers' % (rl, i)),
                          ph.outFile('se-adeno-reads%d_1-id-i%d.razers' % (rl, i))),
                         (ph.inFile('se-adeno-reads%d_1-id-i%d.stdout' % (rl, i)),
                          ph.outFile('se-adeno-reads%d_1-id-i%d.stdout' % (rl, i)))])
            conf_list.append(conf)
        # Compute with different output formats.
        for suffix in ['razers', 'fa', 'eland', 'gff', 'sam', 'afg']:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1-id.%s.stdout' % (rl, suffix)),
                args=['--low-memory', '-id',
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1-id.%s' % (rl, suffix))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1-id.%s' % (rl, suffix)),
                          ph.outFile('se-adeno-reads%d_1-id.%s' % (rl, suffix))),
                         (ph.inFile('se-adeno-reads%d_1-id.%s.stdout' % (rl, suffix)),
                          ph.outFile('se-adeno-reads%d_1-id.%s.stdout' % (rl, suffix)))])
            conf_list.append(conf)
        # Compute with different sort orders.
        for so in [0, 1]:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('se-adeno-reads%d_1-id-so%d.stdout' % (rl, so)),
                args=['--low-memory', '-id', '-so', str(so),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      '-o', ph.outFile('se-adeno-reads%d_1-id-so%d.razers' % (rl, so))],
                to_diff=[(ph.inFile('se-adeno-reads%d_1-id-so%d.razers' % (rl, so)),
                          ph.outFile('se-adeno-reads%d_1-id-so%d.razers' % (rl, so))),
                         (ph.inFile('se-adeno-reads%d_1-id-so%d.stdout' % (rl, so)),
                          ph.outFile('se-adeno-reads%d_1-id-so%d.stdout' % (rl, so)))])
            conf_list.append(conf)
    # ============================================================
    # Run Adeno Paired-End Tests
    # ============================================================
    # We run the following for all read lengths we have reads for.
    for rl in [36, 100]:
        # Run with default options.
        conf = app_tests.TestConf(
            program=path_to_program,
            redir_stdout=ph.outFile('pe-adeno-reads%d_2.stdout' % rl),
            args=['--low-memory',
                  ph.inFile('adeno-genome.fa'),
                  ph.inFile('adeno-reads%d_1.fa' % rl),
                  ph.inFile('adeno-reads%d_2.fa' % rl),
                  '-o', ph.outFile('pe-adeno-reads%d_2.razers' % rl)],
            to_diff=[(ph.inFile('pe-adeno-reads%d_2.razers' % rl),
                      ph.outFile('pe-adeno-reads%d_2.razers' % rl)),
                     (ph.inFile('pe-adeno-reads%d_2.stdout' % rl),
                      ph.outFile('pe-adeno-reads%d_2.stdout' % rl))])
        conf_list.append(conf)
        # Allow indels.
        conf = app_tests.TestConf(
            program=path_to_program,
            redir_stdout=ph.outFile('pe-adeno-reads%d_2-id.stdout' % rl),
            args=['--low-memory', '-id',
                  ph.inFile('adeno-genome.fa'),
                  ph.inFile('adeno-reads%d_1.fa' % rl),
                  ph.inFile('adeno-reads%d_2.fa' % rl),
                  '-o', ph.outFile('pe-adeno-reads%d_2-id.razers' % rl)],
            to_diff=[(ph.inFile('pe-adeno-reads%d_2-id.razers' % rl),
                      ph.outFile('pe-adeno-reads%d_2-id.razers' % rl)),
                     (ph.inFile('pe-adeno-reads%d_2-id.stdout' % rl),
                      ph.outFile('pe-adeno-reads%d_2-id.stdout' % rl))])
        conf_list.append(conf)
        # Compute forward/reverse matches only.
        for o in ['-r', '-f']:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('pe-adeno-reads%d_2-id%s.stdout' % (rl, o)),
                args=['--low-memory', '-id', o,
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      ph.inFile('adeno-reads%d_2.fa' % rl),
                      '-o', ph.outFile('pe-adeno-reads%d_2-id%s.razers' % (rl, o))],
                to_diff=[(ph.inFile('pe-adeno-reads%d_2-id%s.razers' % (rl, o)),
                          ph.outFile('pe-adeno-reads%d_2-id%s.razers' % (rl, o))),
                         (ph.inFile('pe-adeno-reads%d_2-id%s.stdout' % (rl, o)),
                          ph.outFile('pe-adeno-reads%d_2-id%s.stdout' % (rl, o)))])
            conf_list.append(conf)
        # Compute with different identity rates.
        for i in range(90, 101):
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('pe-adeno-reads%d_2-id-i%d.stdout' % (rl, i)),
                args=['--low-memory', '-id', '-i', str(i),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      ph.inFile('adeno-reads%d_2.fa' % rl),
                      '-o', ph.outFile('pe-adeno-reads%d_2-id-i%d.razers' % (rl, i))],
                to_diff=[(ph.inFile('pe-adeno-reads%d_2-id-i%d.razers' % (rl, i)),
                          ph.outFile('pe-adeno-reads%d_2-id-i%d.razers' % (rl, i))),
                         (ph.inFile('pe-adeno-reads%d_2-id-i%d.stdout' % (rl, i)),
                          ph.outFile('pe-adeno-reads%d_2-id-i%d.stdout' % (rl, i)))])
            conf_list.append(conf)
        # Compute with different output formats.
        for suffix in ['razers', 'fa', 'eland', 'gff', 'sam', 'afg']:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('pe-adeno-reads%d_2-id.%s.stdout' % (rl, suffix)),
                args=['--low-memory', '-id',
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      ph.inFile('adeno-reads%d_2.fa' % rl),
                      '-o', ph.outFile('pe-adeno-reads%d_2-id.%s' % (rl, suffix))],
                to_diff=[(ph.inFile('pe-adeno-reads%d_2-id.%s' % (rl, suffix)),
                          ph.outFile('pe-adeno-reads%d_2-id.%s' % (rl, suffix))),
                         (ph.inFile('pe-adeno-reads%d_2-id.%s.stdout' % (rl, suffix)),
                          ph.outFile('pe-adeno-reads%d_2-id.%s.stdout' % (rl, suffix)))])
            conf_list.append(conf)
        # Compute with different sort orders.
        for so in [0, 1]:
            conf = app_tests.TestConf(
                program=path_to_program,
                redir_stdout=ph.outFile('pe-adeno-reads%d_2-id-so%d.stdout' % (rl, so)),
                args=['--low-memory', '-id', '-so', str(so),
                      ph.inFile('adeno-genome.fa'),
                      ph.inFile('adeno-reads%d_1.fa' % rl),
                      ph.inFile('adeno-reads%d_2.fa' % rl),
                      '-o', ph.outFile('pe-adeno-reads%d_2-id-so%d.razers' % (rl, so))],
                to_diff=[(ph.inFile('pe-adeno-reads%d_2-id-so%d.razers' % (rl, so)),
                          ph.outFile('pe-adeno-reads%d_2-id-so%d.razers' % (rl, so))),
                         (ph.inFile('pe-adeno-reads%d_2-id-so%d.stdout' % (rl, so)),
                          ph.outFile('pe-adeno-reads%d_2-id-so%d.stdout' % (rl, so)))])
            conf_list.append(conf)
    # Execute the tests.
    failures = 0
    for conf in conf_list:
        res = app_tests.runTest(conf)
        # Output to the user.
        print ' '.join(['razers2'] + conf.args),
        if res:
            print 'OK'
        else:
            failures += 1
            print 'FAILED'
    # Cleanup.
    ph.deleteTempDir()
    print '=============================='
    print ' total tests: %d' % len(conf_list)
    print ' failed tests: %d' % failures
    print 'successful tests: %d' % (len(conf_list) - failures)
    print '=============================='
    # Compute and return return code.
    return failures != 0
if __name__ == '__main__':
    # Delegate to the seqan test harness, which parses the command-line
    # arguments and invokes main(source_base, binary_base) for us.
    sys.exit(app_tests.main(main))
| h-2/seqan | core/apps/razers2/tests/run_tests.py | Python | bsd-3-clause | 12,963 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010, 2degrees Limited <egoddard@tech.2degreesnetwork.com>.
# All Rights Reserved.
#
# This file is part of djangoaudit <https://launchpad.net/django-audit/>,
# which is subject to the provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for djangoaudit"""
from datetime import datetime, timedelta, date
from decimal import Decimal
import os
# Have to set this here to ensure this is Django-like
os.environ['DJANGO_SETTINGS_MODULE'] = "tests.fixtures.sampledjango.settings"
from django.conf import settings
from django.db.models import Sum
from nose.tools import (eq_, ok_, assert_false, assert_not_equal, assert_raises,
raises)
from pymongo.errors import PyMongoError
from fixture.django_testcase import FixtureTestCase
#from mongofixture import MongoFixtureTestCase
from djangoaudit.models import (_coerce_data_to_model_types, _audit_model,
_coerce_to_bson_compatible, AuditedModel)
from djangoaudit.connection import MONGO_CONNECTION
from tests.fixtures.sampledjango.bsg.models import *
from tests.fixtures.sampledjango.bsg.fixtures import *
class TestEnsureBSONCompatible(object):
    """Test for :func:`_coerce_to_bson_compatible`"""

    def test_decimal_to_float(self):
        """Ensure that :class:`Decimal` is converted to :class:`float`"""
        expected = 1234.5678
        got = _coerce_to_bson_compatible(Decimal('1234.5678'))
        eq_(got, expected,
            "Expected %r, got %r for Decimal to float conversion" %
            (expected, got))

    def test_date_to_datetime(self):
        """Ensure that :class:`date` is converted to :class:`datetime`"""
        expected = datetime(2001, 9, 11)
        got = _coerce_to_bson_compatible(date(2001, 9, 11))
        eq_(got, expected,
            "Expected %r, got %r for date to datetime conversion" %
            (expected, got))
class MockModelMeta(object):
    """ Mock of :class:`django.db.options.Options` """

    def __init__(self, app_label, model_name):
        # Mirror the two Options attributes the auditing code reads.
        self.object_name = model_name
        self.app_label = app_label
class MockModel(object):
    """ Mock of :class:`django.db.models.base.Model` """

    def __init__(self, app_label, model_name, pk):
        # A model stand-in only needs a primary key plus a ``_meta``
        # carrying the app/model names for the auditing helpers.
        self.pk = pk
        self._meta = MockModelMeta(app_label, model_name)
class TestAuditModel(object):
    """ Tests for :func:`djangoaudit.models.audit_model` """

    def setup(self):
        # Use the real auditing collection so records written by
        # _audit_model can be read back and inspected.
        self.audit_collection_name = "audit_data"
        self.auditing_collection = MONGO_CONNECTION\
            .get_collection(self.audit_collection_name)
        self.profile = MockModel("profiles", "Profile", 123)

    def fetch_record_by_id(self, id):
        # Convenience lookup by MongoDB primary key.
        # NOTE(review): parameter name shadows the builtin ``id``.
        return self.auditing_collection.find_one({"_id":id})

    def test_no_changes_empty_dicts(self):
        """Check that passing two empty value dicts results in a no-op"""
        result = _audit_model(self.profile, {}, {})
        eq_(result, None, "No changes should not result in anything being "
            "written to the database")

    def test_no_changes_same_values(self):
        """Check that passing two identical dicts results in a no-op"""
        result = _audit_model(self.profile,
                              {'foo': 1, 'bar': 'wibble', 'empty': None,
                               'my_date': datetime(2001, 1, 1, 9, 12)},
                              {'foo': 1, 'bar': 'wibble', 'empty': None,
                               'my_date': datetime(2001, 1, 1, 9, 12)})
        eq_(result, None, "No changes should not result in anything being "
            "written to the database")

    def test_single_change_no_other_diff(self):
        """Check that a single changed value is correctly recorded"""
        result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        saved_record = self.fetch_record_by_id(result)
        eq_(saved_record['foo'], 'bar',
            "The saved record should contain a single difference key")

    def test_model_data_write_out(self):
        """Check the correct data is written out for the model"""
        result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        saved_record = self.fetch_record_by_id(result)
        # The audit record must identify the originating app/model/pk.
        eq_(saved_record['object_app'], self.profile._meta.app_label)
        eq_(saved_record['object_model'], self.profile._meta.object_name)
        eq_(saved_record['object_pk'], self.profile.pk)

    def test_date_stamping(self):
        """Check that a date stamp is stored in along with the record"""
        result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        saved_record = self.fetch_record_by_id(result)
        record_date_stamp = saved_record['audit_date_stamp']
        now = datetime.utcnow()
        # Allow one second of slack between the write and this check.
        ok_((now - timedelta(seconds=1)) < record_date_stamp < now,
            "Date stamp should be almost the same as now (now: %s, got: %s"
            % (now, record_date_stamp))

    def test_addition_parameter_write_out(self):
        """Check that additional parameters are correctly stored"""
        result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        # NOTE(review): the record is fetched but never asserted against --
        # this test looks unfinished; confirm what it should verify.
        saved_record = self.fetch_record_by_id(result)

    def test_single_change_others_same(self):
        """Check that a single changed value is correctly recorded when there are no other differences"""
        result = _audit_model(self.profile, dict(foo=None, wibble=0),
                              dict(foo='bar', wibble=0))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        saved_record = self.fetch_record_by_id(result)
        eq_(saved_record['foo'], 'bar',
            "The saved record should contain a single difference key")
        # Unchanged keys must not appear in the audit record at all.
        ok_('wibble' not in saved_record, "There should be no "
            "record of changes to the `wibble` key")

    def test_multi_change_no_others(self):
        """Check that multiple changed values are correctly recorded when there are no other items"""
        result = _audit_model(self.profile, dict(foo=None, wibble=0),
                              dict(foo='bar', wibble=1))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        saved_record = self.fetch_record_by_id(result)
        eq_(saved_record['foo'], 'bar',
            "The saved record should contain a difference for key `foo`")
        eq_(saved_record['wibble'], 1,
            "The saved record should contain a difference for key `wibble`")

    def test_multi_change_others_same(self):
        """Check that multiple changed values are correctly recorded when there are no other differences"""
        result = _audit_model(self.profile, dict(foo=None, wibble=0, body_count=1.00),
                              dict(foo='bar', wibble=1, body_count=1.00))
        assert_not_equal(result, None,
                         "A change should result in a database object being "
                         "created")
        saved_record = self.fetch_record_by_id(result)
        eq_(saved_record['foo'], 'bar',
            "The saved record should contain a difference for key `foo`")
        eq_(saved_record['wibble'], 1,
            "The saved record should contain a difference for key `wibble`")
        ok_('body_count' not in saved_record, "There should be no "
            "record of changes to the `body_count` key")
class TestCoerceDataToModelTypes(object):
    """Tests for :func:`_coerce_data_to_model_types`.

    Each check is a ``(field_name, raw_value, coerced_value)`` triple: the
    raw values mimic loosely-typed input and the coerced values are what
    the Pilot model's field types should produce.
    """

    def setup(self):
        checks = (
            ('age', '40', 40),
            ('last_flight', date(2010, 1, 1), datetime(2010, 1, 1)),
            ('fastest_landing', 71.10, Decimal("71.10")),
            ('is_cylon', 0, False),
        )
        self.initial_data, self.final_data = {}, {}
        for key, initial, final in checks:
            self.initial_data[key] = initial
            self.final_data[key] = final

    def test_for_instance(self):
        """Test _coerce_data_to_model_types for model instances"""
        pilot = Pilot()
        result = _coerce_data_to_model_types(pilot, self.initial_data)
        # BUGFIX: the message previously interpolated (result, final_data),
        # mislabelling "got" as "expected" in failure reports.
        eq_(result, self.final_data,
            "Expected to get: %r, got %r" % (self.final_data, result))

    def test_for_class(self):
        """Test _coerce_data_to_model_types for the model itself"""
        result = _coerce_data_to_model_types(Pilot, self.initial_data)
        # BUGFIX: same expected/got ordering fix as test_for_instance.
        eq_(result, self.final_data,
            "Expected to get: %r, got %r" % (self.final_data, result))
class TestAuditedModel(FixtureTestCase):
"""Tests for AuditedModel"""
datasets = [PilotData, VesselData]
def setUp(self):
self.audit_collection_name = "audit_data"
self.auditing_collection = MONGO_CONNECTION\
.get_collection(self.audit_collection_name)
# Now set up the records:
self.helo = Pilot.objects.filter(call_sign="Helo")[0] # wtf - no idea why fixture seems to be putting two of these in the DB
self.athena = Pilot.objects.get(call_sign="Athena")
self.starbuck = Pilot.objects.get(call_sign="Starbuck")
self.apollo = Pilot.objects.get(call_sign="Apollo")
self.longshot = Pilot.objects.get(call_sign="Longshot")
self.raptor259 = Vessel.objects.get(name=VesselData.Raptor259.name)
@raises(AttributeError)
def test_meta_class(self):
"""Check that any values specified in log_fields which are no fields on the AuditedModel class cause an AttributeError to be raised"""
class NaughtyAuditedModel(AuditedModel):
log_fields = ['foo', 'bar', 'wibble']
def test_no_changes_no_extra(self):
"""Check that when there are no changes to a AuditedModel instance, no changes are recorded"""
# Set up the operator and some notes:
self.helo.set_audit_info(operator='me',
notes='This should not be recorded')
# Save a model with no changes:
self.helo.save()
# Now read back the log to see whether anything was put in there:
num_log_items = len(list(self.helo.get_audit_log()))
eq_(num_log_items, 1, "There should be only be one log entry for this "
"object - the creation log (found %d log entries)." % num_log_items)
def test_change_non_logger_field(self):
"""Check that altering non-logged fields doesn't result in a log entry being generated"""
self.helo.craft = 0
# Set up the operator and some notes:
self.helo.set_audit_info(operator='me',
notes='This should not be recorded')
self.helo.save()
# Now read back the log to see whether anything was put in there:
num_log_items = len(list(self.helo.get_audit_log()))
eq_(num_log_items, 1, "There should be one log entry for this object - "
"the creation log (found %d log entries)." % num_log_items)
def test_create_fresh_record(self):
"""Check that creation of a record logs all the fields correctly"""
self.athena.delete()
params = dict(first_name="Sharon",
last_name="Agathon",
call_sign="Athena",
age=29,
last_flight=datetime(2000, 3, 4, 7, 18),
craft=1,
is_cylon=True,
fastest_landing=Decimal("77.90"))
new_athena = Pilot(**params)
new_athena.save()
log = list(new_athena.get_audit_log())
# Check we've only got one log entry:
eq_(len(log), 1, "There should only be one entry for this object (found"
" %d)" % len(log))
entry = log[0]
# Now verify that we've only got the correct keys in the log, once we've
# popped off the extra ones:
object_app = entry.pop('object_app')
object_model = entry.pop('object_model')
object_pk = entry.pop('object_pk')
id = entry.pop('_id')
audit_date_stamp = entry.pop('audit_date_stamp')
eq_(object_app, "bsg",
"object_app should be 'bsg', got %r" % object_app)
eq_(object_model, "Pilot",
"object_model should be 'Pilot', got %r" % object_model)
eq_(object_pk, new_athena.pk, "object_pk should be %r, got %r" %
(new_athena.pk, object_pk))
# Our resulting entry should have only the audit_changes key as there is
# only audited_data remaining:
expected_keys = set(('audit_changes',))#set(new_athena.log_fields)
found_keys = set(entry.keys())
eq_(expected_keys, found_keys, "Mismatch between expected fields in the"
" log. Expected %r, got %r" % (expected_keys, found_keys))
# Now verify that what's on the new model is what was logged:
for key, value in entry['audit_changes'].iteritems():
expected = (None, getattr(new_athena, key))
eq_(value, expected, "Expected to find %r with value: %r, got %r" %
(key, expected, value))
def test_partial_update(self):
"""Check that partial data updates are recorded correctly"""
orig_name = self.longshot.last_name
self.longshot.last_name = "New name"
orig_age = self.longshot.age
self.longshot.age = 30
orig_fastest_landing = self.longshot.fastest_landing
self.longshot.fastest_landing = Decimal("75.00")
# Ensure we've got some operator testing too:
operator, notes = "me", "This record should be updated"
self.longshot.set_audit_info(operator=operator,notes=notes)
# Now do the save:
self.longshot.save()
# Read back the log:
log = list(self.longshot.get_audit_log())
eq_(len(log), 2, "There should only be two entires for this object ("
"found %d)" % len(log))
entry = log[-1]
# Now verify that we've only got the correct keys in the log, once we've
# popped off the extra ones:
object_app = entry.pop('object_app')
object_model = entry.pop('object_model')
object_pk = entry.pop('object_pk')
id = entry.pop('_id')
audit_date_stamp = entry.pop('audit_date_stamp')
audit_operator = entry.pop('audit_operator')
audit_notes = entry.pop('audit_notes')
eq_(object_app, "bsg",
"object_app should be 'bsg', got %r" % object_app)
eq_(object_model, "Pilot",
"object_model should be 'Pilot', got %r" % object_model)
eq_(object_pk, self.longshot.pk, "object_pk should be %r, got %r" %
(self.longshot.pk, object_pk))
eq_(audit_operator, operator,
"operator should be %r, got %r" % (operator, audit_operator))
eq_(audit_notes, notes,
"notes should be %r, got %r" % (notes, audit_notes))
# Check we've only got one key left (audit_changes):
expected_keys = ['audit_changes']
found_keys = entry.keys()
eq_(expected_keys, found_keys, "Expected to find keys: %r, gor %r" %
(expected_keys, found_keys))
# Ensure that the new values were correctly recorded:
changes= entry['audit_changes']
eq_(changes['last_name'], (orig_name, self.longshot.last_name))
eq_(changes['age'], (orig_age, self.longshot.age))
eq_(changes['fastest_landing'], (orig_fastest_landing,
self.longshot.fastest_landing))
def test_dual_update(self):
"""Test that two log entries are generated for dual updates"""
self.apollo.age = 40
self.apollo.save()
self.apollo.age = 30
self.apollo.save()
log = list(self.apollo.get_audit_log())
eq_(len(log), 3, "There should be three entries in the log, got %d" %
len(log))
expected_ages = [(28, 40), (40, 30)]
for entry, age in zip(log[1:], expected_ages):
eq_(entry['audit_changes']['age'], age,
"Expected age to be %r, got %r" % (entry['audit_changes']['age'], age))
def test_delete(self):
"""Check that delete() records the final state of the model prior to deletion"""
# Define the lookup key we'll need parameters to look up the record:
pk = self.starbuck.pk
self.starbuck.delete()
# Delete another to make sure we don't get log cross-over:
apollo_pk = self.apollo.pk
self.apollo.set_audit_info(notes="Extra note")
self.apollo.delete()
# Get hold of the delete log:
log = list(Pilot.get_deleted_log(pk))
# Make sure there's only one entry:
eq_(len(log), 1,
"There should only be one deleted item for this pk (found %d)" %
len(log))
entry = log[0]
for field in Pilot.log_fields:
expected = getattr(PilotData.Starbuck, field)
found = entry[field]
eq_(expected, found,
"For field %r, expected %r, got %r" % (field, expected, found))
delete_note = "Object deleted. These are the attributes at delete time."
eq_(entry['audit_notes'], delete_note,
"Expected to find notes as: %r, got %r" %
(delete_note, entry['audit_notes']))
# Get hold of the delete log for apollo to check the delete note:
entry = list(Pilot.get_deleted_log(apollo_pk))[0]
got = entry['audit_notes']
expected = "%s\nExtra note" % delete_note
eq_(expected, got, "Expected note: %r, got %r" % (expected, got))
# Since we've deleted two items we can check that we've got the log for
# both of these:
log = list(Pilot.get_deleted_log())
eq_(len(log), 2,
"There should be two deleted log entries for this class (found %d)"
% len(log))
def test_arbitrary_audit(self):
"""Test the arbitrary auditing of data against a model"""
data = dict(hair_colour="Blond",
children=0,
kill_percentage=Decimal('98.7'))
self.starbuck.set_audit_info(**data)
self.starbuck.save()
log = list(self.starbuck.get_audit_log())
eq_(len(log), 2,
"There should only be two entries in the log (found %d)" % len(log))
entry = log[-1]
object_app = entry.pop('object_app')
object_model = entry.pop('object_model')
object_pk = entry.pop('object_pk')
id = entry.pop('_id')
audit_date_stamp = entry.pop('audit_date_stamp')
eq_(object_app, "bsg",
"object_app should be 'bsg', got %r" % object_app)
eq_(object_model, "Pilot",
"object_model should be 'Pilot', got %r" % object_model)
eq_(object_pk, self.starbuck.pk, "object_pk should be %r, got %r" %
(self.starbuck.pk, object_pk))
# Mongo stores Decimals as floats, so coerce what we expect:
data['kill_percentage'] = float(data['kill_percentage'])
eq_(entry, data, "Expecting %r, got %r" % (data, entry))
def test_foreign_keys(self):
"""Test the foreign keyed fields don't interfere with AuditedModel"""
# Due to a call in the metaclass of AuditedModel, the
# _meta.get_all_field_names does not behave correctly unless the cache
# is cleared after this call. Aggregation is one area where this
# manifests itself - here we're ensuring this doesn't fail:
field_names = Pilot._meta.get_all_field_names()
ok_("vessels" in field_names,
"The field names for the Pilot model should contain 'vessels', got "
"%s" % field_names)
# Now verify in aggregation this works:
vessel_sum = Pilot.objects.aggregate(Sum('vessels'))['vessels__sum']
eq_(vessel_sum, 1, "There should only be one vessel, got %r"
% vessel_sum)
def test_get_creation_log(self):
"""Test that the creation log can be retrieved correctly"""
# Create a new object:
hot_dog = Pilot(
first_name="Brendan",
last_name="Costanza",
call_sign="Hot Dog",
age=25,
last_flight=datetime(2000, 6, 4, 23, 01),
craft=1,
is_cylon=False,
fastest_landing=Decimal("101.67")
)
hot_dog.set_audit_info(operator="Admin",
flight_deck="Port side")
hot_dog.save()
# Retrieve the log as a check:
initial_log = hot_dog.get_creation_log()
# Make another entry:
hot_dog.fastest_landing = Decimal("99.98")
hot_dog.save()
# Check we've got two items in the log now:
found_logs = len(list(hot_dog.get_audit_log()))
eq_(2, found_logs, "Expected to find 2 logs, got %d" % found_logs)
# Now check the creation log:
creation_log = hot_dog.get_creation_log()
eq_(creation_log, initial_log, "Expecting initial log entry to be the "
"same as the creation log. Expected:\n%r,\n\ngot\n%r" %
(initial_log, creation_log))
# Test that fail gracefully when no creation log exists:
for item in hot_dog.get_audit_log():
self.auditing_collection.remove(item['_id'])
empty_log = hot_dog.get_creation_log()
eq_(empty_log, None, "The creation log should be None")
def test_get_deletion_log(self):
"""Test that deleted data can be retrieved"""
pre_delete_data = {}
for field in self.apollo.log_fields:
pre_delete_data[field] = getattr(self.apollo, field)
pk = self.apollo.pk
self.apollo.delete()
# Get the deletion log:
entry = list(Pilot.get_deleted_log(pk))[0]
object_app = entry.pop('object_app')
object_model = entry.pop('object_model')
object_pk = entry.pop('object_pk')
id = entry.pop('_id')
audit_date_stamp = entry.pop('audit_date_stamp')
audit_is_delete = entry.pop('audit_is_delete')
audit_notes = entry.pop('audit_notes')
ok_(audit_is_delete, "Should have audit_is_delete is True")
eq_(audit_notes,
'Object deleted. These are the attributes at delete time.')
eq_(pre_delete_data, entry,
"Expected to find deletion log as: %r, got %r" %
(pre_delete_data, entry)) | rdkls/django-audit-mongodb | tests/test_models.py | Python | bsd-3-clause | 25,093 |
"""
A framework for data processing and data preparation DAG (directed acyclic graph) pipelines.
The examples in the documentation assume
>>> from __future__ import print_function
if running pre Py3K, as well as
>>> from dagpype import *
"""
import types
from . import _core
from . import _src
from . import _filt
from . import _snk
from . import _subgroup_filt
try:
from ._core import *
from ._src import *
from ._filt import *
from ._snk import *
from ._subgroup_filt import *
from ._csv_utils import *
except ValueError:
from _core import *
from _src import *
from _filt import *
from _snk import *
from _subgroup_filt import *
from _csv_utils import *
from . import np
from . import plot
__all__ = []
# Re-export every public, non-module name from the implementation modules.
for m in [_core, _src, _filt, _snk, _subgroup_filt]:
    for s in dir(m):
        if s[0] == '_':
            continue
        # getattr() replaces the previous string-building + eval(), which was
        # needlessly slow and fragile for a simple attribute lookup.
        if not isinstance(getattr(m, s), types.ModuleType):
            __all__.append(s)
__all__.extend(['np', 'plot'])
__version__ = '0.1.0.3'
__author__ = 'Ami Tavory <atavory at gmail.com>'
| garywu/pypedream | pypedream/__init__.py | Python | bsd-3-clause | 1,083 |
def _draw_panel(ax, sig_vals, bkg_vals, bins, xlabel, labels,
                sig_weights, bkg_weights, linewidth):
    """Draw one normalized signal-vs-background histogram pair on *ax*.

    *labels* is a (signal_label, background_label) pair used for both the
    histograms and the proxy-artist legend.
    """
    import numpy as np
    sig_heights, _, _ = ax.hist(
        sig_vals, bins=bins,
        histtype='stepfilled', facecolor='none', edgecolor='blue', normed=1,
        linewidth=linewidth, label=labels[0], weights=sig_weights)
    bkg_heights, _, _ = ax.hist(
        bkg_vals, bins=bins,
        histtype='stepfilled', facecolor='none', edgecolor='black', normed=1,
        linestyle='dotted', linewidth=linewidth, label=labels[1],
        weights=bkg_weights)
    # Leave 30% headroom above the taller of the two histograms.
    ax.set_ylim((0, 1.3 * max(np.max(sig_heights), np.max(bkg_heights))))
    ax.set_ylabel('Normalized to Unity')
    ax.set_xlabel(xlabel, fontsize=12)
    # Proxy artists so the legend shows clean line samples.
    p1, = ax.plot([0, 0], label=labels[0], color='blue')
    p2, = ax.plot([0, 0], label=labels[1], color='black', linestyle='dotted')
    ax.legend([p1, p2], list(labels), frameon=False, handlelength=3)
    ax.set_xlim((bins[0], bins[-1]))


def plot_kinematics(signal, background, nbins=100,
                    mass_range=(50., 110.), pt_range=(200., 500.),
                    mass_pad=10, pt_pad=50,
                    linewidth=1, title=None):
    """Plot trimmed pt, trimmed mass, tau_32 and subjet delta-R for signal
    and background jets on a 2x2 canvas and return the figure.

    *signal* and *background* are paths to HDF5 files containing an
    'auxvars' table with the fields read below.  Events are restricted to
    the given trimmed-mass and trimmed-pt windows; a per-event 'weights'
    field is used when present, otherwise unit weights.

    Fixes versus the previous revision: the HDF5 file handles are now
    closed before returning (they used to leak), the dead commented-out
    selections were removed, and the fourfold-duplicated panel code was
    factored into ``_draw_panel``.
    """
    import numpy as np
    from matplotlib import pyplot as plt
    import h5py
    pt_min, pt_max = pt_range
    mass_min, mass_max = mass_range
    plt.style.use('seaborn-white')
    signal_file = h5py.File(signal, 'r')
    signal_aux = signal_file['auxvars']
    background_file = h5py.File(background, 'r')
    background_aux = background_file['auxvars']
    signal_selection = ((signal_aux['mass_trimmed'] > mass_min) &
                        (signal_aux['mass_trimmed'] < mass_max) &
                        (signal_aux['pt_trimmed'] > pt_min) &
                        (signal_aux['pt_trimmed'] < pt_max))
    background_selection = ((background_aux['mass_trimmed'] > mass_min) &
                            (background_aux['mass_trimmed'] < mass_max) &
                            (background_aux['pt_trimmed'] > pt_min) &
                            (background_aux['pt_trimmed'] < pt_max))
    if 'weights' in signal_aux.dtype.names:
        signal_weights = signal_aux['weights']
    else:
        signal_weights = np.ones(len(signal_aux))
    if 'weights' in background_aux.dtype.names:
        background_weights = background_aux['weights']
    else:
        background_weights = np.ones(len(background_aux))
    signal_weights = signal_weights[signal_selection]
    background_weights = background_weights[background_selection]
    fig, ax = plt.subplots(2, 2, figsize=(10, 10))
    if title is not None:
        fig.suptitle(title, fontsize=16)
    # Top left: trimmed pt (with scientific y-axis labels).
    _draw_panel(ax[0, 0],
                signal_aux['pt_trimmed'][signal_selection],
                background_aux['pt_trimmed'][background_selection],
                np.linspace(pt_min - pt_pad, pt_max + pt_pad, nbins),
                r'Trimmed $p_{T}$ [GeV]', ('Signal', 'QCD Background'),
                signal_weights, background_weights, linewidth)
    ax[0, 0].ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
    # Top right: trimmed mass.
    _draw_panel(ax[0, 1],
                signal_aux['mass_trimmed'][signal_selection],
                background_aux['mass_trimmed'][background_selection],
                np.linspace(mass_min - mass_pad, mass_max + mass_pad, nbins),
                r'Trimmed Mass [GeV]', ('Signal', 'QCD Background'),
                signal_weights, background_weights, linewidth)
    # Bottom left: n-subjettiness ratio tau_32.
    signal_tau32 = np.true_divide(signal_aux['tau_3'],
                                  signal_aux['tau_2'])[signal_selection]
    background_tau32 = np.true_divide(
        background_aux['tau_3'],
        background_aux['tau_2'])[background_selection]
    # Drop NaN, infinity and exact zero before histogramming.
    signal_tau32_ok = (~np.isnan(signal_tau32) & ~np.isinf(signal_tau32) &
                       (signal_tau32 != 0))
    background_tau32_ok = (~np.isnan(background_tau32) &
                           ~np.isinf(background_tau32) &
                           (background_tau32 != 0))
    _draw_panel(ax[1, 0],
                signal_tau32[signal_tau32_ok],
                background_tau32[background_tau32_ok],
                np.linspace(0, 1, nbins),
                r'$\tau_{32}$', ('W jets', 'QCD jets'),
                signal_weights[signal_tau32_ok],
                background_weights[background_tau32_ok], linewidth)
    # Bottom right: delta-R between the two leading subjets.
    _draw_panel(ax[1, 1],
                signal_aux['subjet_dr'][signal_selection],
                background_aux['subjet_dr'][background_selection],
                np.linspace(0, 1.2, nbins),
                r'Subjets $\Delta R$', ('W jets', 'QCD jets'),
                signal_weights, background_weights, linewidth)
    fig.tight_layout()
    if title is not None:
        plt.subplots_adjust(top=0.93)
    # Close the input files now that all datasets have been read.
    signal_file.close()
    background_file.close()
    return fig
| deepjets/deepjets | etc/plotting_NR.py | Python | bsd-3-clause | 6,730 |
from datetime import date
from django import forms
from django.utils.translation import ugettext_lazy as _lazy
class AnnouncementForm(forms.Form):
    """Collect the details of a single announcement.

    Deliberately a plain ``Form`` rather than a ``ModelForm``: the group and
    locale are supplied implicitly by the surrounding context and must not be
    user-controllable here.  Announcements needing a selectable group or
    locale are created through the admin interface instead.
    """

    # Body text of the announcement.
    content = forms.CharField(
        widget=forms.Textarea, max_length=10000, label=_lazy("Content"))
    # First day on which the announcement becomes visible.
    show_after = forms.DateField(
        input_formats=["%Y-%m-%d"], initial=date.today, label=_lazy("Show after"))
    # Optional last day of visibility; empty means no end date.
    show_until = forms.DateField(
        input_formats=["%Y-%m-%d"], required=False, label=_lazy("Show until"))
| mozilla/kitsune | kitsune/announcements/forms.py | Python | bsd-3-clause | 842 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# @file ostap/math/random_ext.py
# The simple extention for the standard python module random
# @author Vanya BELYAEV
# @date 2012-04-28
# =============================================================================
""" The simple extension for the standard python module random
- bifurcated gaussian
- gaussian using Ostap.Math.ValueWithError as argument
- poisson (missing in python random module)
"""
# =============================================================================
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__version__ = "$Revision$"
__date__ = "2012-04-28"
# =============================================================================
__all__ = (
'bifur' , ## bifurcated gaussian
've_gauss' , ## gaussian using ValueWithError construction
'poisson' , ## poisson (missing in python random module)
)
# =============================================================================
import sys
from builtins import range
# =============================================================================
from ostap.logger.logger import getLogger
# =============================================================================
if '__main__' == __name__ : logger = getLogger ( 'ostap.math.random_ext')
else : logger = getLogger ( __name__ )
# =============================================================================
## generate bifurcated gaussian
# @code
# value = bifur ( 0 , -1 , +2 )
# @endcode
def _bifur_ ( self , mu , sigma1 , sigma2 ) :
"""Generate the bifurcated gaussian
>>> value = bifur ( 0 , -1 , +2 )
"""
if sigma1 * sigma2 > 0.0 :
raise ValueError( 'Lower and upper errors must have opposite signs' )
_as1 = abs ( float ( sigma1 ) )
_as2 = abs ( float ( sigma2 ) )
_frac = _as1 / ( _as1 + _as2 )
_aux = self.random ()
_gau = abs ( self.gauss ( 0 , 1 ) )
if _aux <= _frac : return mu + sigma1 * _gau
else : return mu + sigma2 * _gau
# ==============================================================================
_fmin = 1000 * sys.float_info.min
# =============================================================================
## generate Cauchy random numbers
# - rely on the distribution of the ratio for two Gaussian variables
# @see https://en.wikipedia.org/wiki/Cauchy_distribution
def _cauchy_ ( self , mu , gamma ) :
"""Generate Cauchy random numbers
- rely on the distribution of the ratio for two Gaussian variables
- see https://en.wikipedia.org/wiki/Cauchy_distribution
"""
g1 = self.gauss ( 0.0 , 1.0 )
while abs ( g1 ) < _fmin : g1 = self.gauss ( 0.0 , 1.0 )
g2 = self.gauss ( 0.0 , 1.0 )
return 1.0 * mu + ( 1.0 * g2 / g1 ) * gamma
# =============================================================================
## generate bifurcated gaussian using Value
# @see Ostap::Math::ValueWithError
def _ve_gauss_ ( self , val ) :
"""Generate the gaussian according to Ostap.Math.ValueWithError
>>> ve = VE ( 1 , 2 )
>>> value = ve_gauss ( ve )
"""
mean = val.value ()
sigma = val.error ()
return self.gauss ( mean , sigma )
# =============================================================================
# Pick the fastest available Poisson generator: numpy, then scipy, then a
# pure-python fallback.
_poisson = None
if not _poisson :
    try :
        from numpy.random import poisson as _poisson
        def _poisson_ ( self , mu ) : return _poisson ( mu )
        logger.debug ('use numpy.random.poisson')
    except ImportError :
        pass
if not _poisson :
    try :
        from scipy.random import poisson as _poisson
        def _poisson_ ( self , mu ) : return _poisson ( mu )
        logger.debug ('use scipy.random.poisson')
    except ImportError :
        pass
if not _poisson :
    # Fixed: this used to call logger.dbug, raising AttributeError whenever
    # neither numpy nor scipy was available.
    logger.debug ('Use home-made replacement for poisson')
    _MAX = 30.0
    import math
    _sqrt = math.sqrt
    _exp = math.exp
    import ROOT,cppyy
    _round = cppyy.gbl.Ostap.Math.round
    ## hand-made replacement for poisson random number generator
    def _poisson_ ( self , mu ) :
        mu = float ( mu )
        if _MAX <= mu :
            # Gaussian approximation for large mu, clipped at zero.
            r = -1
            while r < 0 : r = self.gauss ( mu , _sqrt( mu ) )
            return max ( _round ( r ) , 0 )
        # Knuth-style inversion by accumulating the CDF for small mu.
        x = 0
        p = _exp ( -mu )
        s = p
        u = self.uniform ( 0 , 1 )
        while s < u :
            x += 1
            p *= mu / x
            s += p
        return x
import random

# Attach the extra generators to random.Random — and mirror them on the
# module via its hidden module-level instance — unless the names already
# exist.
for _name, _func in (('bifur', _bifur_),
                     ('ve_gauss', _ve_gauss_),
                     ('poisson', _poisson_),
                     ('cauchy', _cauchy_)):
    if not hasattr(random.Random, _name):
        setattr(random.Random, _name, _func)
    if not hasattr(random, _name):
        setattr(random, _name, getattr(random._inst, _name))

# Convenient module-level aliases.
bifur = random.bifur
ve_gauss = random.ve_gauss
poisson = random.poisson
cauchy = random.cauchy
# =============================================================================
if '__main__' == __name__ :
    # Print the module documentation summary when run as a script.
    from ostap.utils.docme import docme
    docme ( __name__ , logger = logger )
    # Quick smoke test: accumulate 10k Poisson draws in a statistics
    # counter and report the resulting mean/rms.
    from ostap.stats.counters import SE
    cnt = SE()
    mu = 0.4
    for i in range(10000) :
        # NOTE(review): the literal 0.4 duplicates ``mu`` above — consider
        # ``poisson(mu)`` so the two cannot drift apart.
        cnt += poisson(0.4)
    logger.info ( 'Poisson(mu=%s) : %s' % ( mu , cnt ) )
    logger.info ( 80*'*' )
# The END
# =============================================================================
| OstapHEP/ostap | ostap/math/random_ext.py | Python | bsd-3-clause | 6,045 |
from AutoNetkit.examples.examples import *
import AutoNetkit.examples.examples
| sk2/ank_le | AutoNetkit/examples/__init__.py | Python | bsd-3-clause | 80 |
import bleach
from django.db.models.fields import TextField
from django.utils.encoding import smart_text
from .widgets import RichTextareaWidget
class RichTextarea(TextField):
"""
"""
def to_python(self, value):
"""
"""
if value:
html = value.replace(' ', ' ')
html = smart_text(html.encode('utf-8'))
ALLOWED_TAGS = [
'p',
'br',
'i',
'strong',
'b',
'ul',
'li',
'ol',
'table',
'tr',
'th',
'td',
]
ALLOWED_ATTRIBUTES = {
}
html = bleach.clean(html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True)
return html
else:
return value
def formfield(self, **kwargs):
kwargs['widget'] = RichTextareaWidget
return super(RichTextarea, self).formfield(**kwargs) | Merino/poc-cbb | vesper/fields.py | Python | bsd-3-clause | 1,042 |
import collections.abc
import tempfile
import sys
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.compat import pickle
import pathlib
import builtins
from decimal import Decimal
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less,
runstring, temppath, suppress_warnings, break_cycles,
)
from numpy.testing._private.utils import _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
class TestFlags:
    """Tests for the ndarray ``flags`` attribute: writeable toggling across
    views/bases/buffers, deprecation behavior, flag aliases and alignment
    flags."""

    def setup(self):
        # Fresh 1-D array for every test.
        self.a = np.arange(10)

    def test_writeable(self):
        """Clearing ``writeable`` must block both item and slice writes."""
        mydict = locals()
        self.a.flags.writeable = False
        assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
        assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        self.a.flags.writeable = True
        self.a[0] = 5
        self.a[0] = 0

    def test_writeable_any_base(self):
        # Ensure that any base being writeable is sufficient to change flag;
        # this is especially interesting for arrays from an array interface.
        arr = np.arange(10)

        class subclass(np.ndarray):
            pass

        # Create subclass so base will not be collapsed, this is OK to change
        view1 = arr.view(subclass)
        view2 = view1[...]
        arr.flags.writeable = False
        view2.flags.writeable = False
        view2.flags.writeable = True  # Can be set to True again.

        arr = np.arange(10)

        class frominterface:
            def __init__(self, arr):
                self.arr = arr
                self.__array_interface__ = arr.__array_interface__

        # NOTE(review): this passes the *class*, not an instance, to
        # np.asarray — verify that is intentional.
        view1 = np.asarray(frominterface)
        view2 = view1[...]
        view2.flags.writeable = False
        view2.flags.writeable = True

        view1.flags.writeable = False
        view2.flags.writeable = False
        with assert_raises(ValueError):
            # Must assume not writeable, since only base is not:
            view2.flags.writeable = True

    def test_writeable_from_readonly(self):
        # gh-9440 - make sure fromstring, from buffer on readonly buffers
        # set writeable False
        data = b'\x00' * 100
        vals = np.frombuffer(data, 'B')
        assert_raises(ValueError, vals.setflags, write=True)
        types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
        values = np.core.records.fromstring(data, types)
        vals = values['vals']
        assert_raises(ValueError, vals.setflags, write=True)

    def test_writeable_from_buffer(self):
        """Arrays over a writable buffer may toggle writeable both ways."""
        data = bytearray(b'\x00' * 100)
        vals = np.frombuffer(data, 'B')
        assert_(vals.flags.writeable)
        vals.setflags(write=False)
        assert_(vals.flags.writeable is False)
        vals.setflags(write=True)
        assert_(vals.flags.writeable)
        types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
        values = np.core.records.fromstring(data, types)
        vals = values['vals']
        assert_(vals.flags.writeable)
        vals.setflags(write=False)
        assert_(vals.flags.writeable is False)
        vals.setflags(write=True)
        assert_(vals.flags.writeable)

    @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
    def test_writeable_pickle(self):
        import pickle
        # Small arrays will be copied without setting base.
        # See condition for using PyArray_SetBaseObject in
        # array_setstate.
        a = np.arange(1000)
        for v in range(pickle.HIGHEST_PROTOCOL):
            vals = pickle.loads(pickle.dumps(a, v))
            assert_(vals.flags.writeable)
            assert_(isinstance(vals.base, bytes))

    def test_writeable_from_c_data(self):
        # Test that the writeable flag can be changed for an array wrapping
        # low level C-data, but not owning its data.
        # Also see that this is deprecated to change from python.
        from numpy.core._multiarray_tests import get_c_wrapping_array

        arr_writeable = get_c_wrapping_array(True)
        assert not arr_writeable.flags.owndata
        assert arr_writeable.flags.writeable
        view = arr_writeable[...]

        # Toggling the writeable flag works on the view:
        view.flags.writeable = False
        assert not view.flags.writeable
        view.flags.writeable = True
        assert view.flags.writeable
        # Flag can be unset on the arr_writeable:
        arr_writeable.flags.writeable = False

        arr_readonly = get_c_wrapping_array(False)
        assert not arr_readonly.flags.owndata
        assert not arr_readonly.flags.writeable

        for arr in [arr_writeable, arr_readonly]:
            view = arr[...]
            view.flags.writeable = False  # make sure it is readonly
            arr.flags.writeable = False
            assert not arr.flags.writeable

            with assert_raises(ValueError):
                view.flags.writeable = True

            with warnings.catch_warnings():
                warnings.simplefilter("error", DeprecationWarning)
                with assert_raises(DeprecationWarning):
                    arr.flags.writeable = True

            with assert_warns(DeprecationWarning):
                arr.flags.writeable = True

    def test_warnonwrite(self):
        """The private ``_warn_on_write`` flag warns on the first write only."""
        a = np.arange(10)
        a.flags._warn_on_write = True
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always')
            a[1] = 10
            a[2] = 10
            # only warn once
            assert_(len(w) == 1)

    @pytest.mark.parametrize(["flag", "flag_value", "writeable"],
            [("writeable", True, True),
             # Delete _warn_on_write after deprecation and simplify
             # the parameterization:
             ("_warn_on_write", True, False),
             ("writeable", False, False)])
    def test_readonly_flag_protocols(self, flag, flag_value, writeable):
        """Buffer/array-interface/array-struct exports must agree with the
        writeable flag."""
        a = np.arange(10)
        setattr(a.flags, flag, flag_value)

        class MyArr():
            __array_struct__ = a.__array_struct__

        assert memoryview(a).readonly is not writeable
        assert a.__array_interface__['data'][1] is not writeable
        assert np.asarray(MyArr()).flags.writeable is writeable

    def test_otherflags(self):
        """Spot-check the remaining flag attributes and their aliases."""
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags['C'], True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        with assert_warns(DeprecationWarning):
            assert_equal(self.a.flags.updateifcopy, False)
        with assert_warns(DeprecationWarning):
            assert_equal(self.a.flags['U'], False)
            assert_equal(self.a.flags['UPDATEIFCOPY'], False)
        assert_equal(self.a.flags.writebackifcopy, False)
        assert_equal(self.a.flags['X'], False)
        assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)

    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed byte-wise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)

    def test_void_align(self):
        """Structured (void) dtypes report aligned as well."""
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)
class TestHash:
    # Regression coverage for gh-3793: hashes of numpy integer scalars must
    # match the hashes of the equivalent Python ints.
    def test_int(self):
        """Signed/unsigned scalar hashes agree with Python int hashes at
        every bit width and around the type boundaries."""
        widths = ((np.int8, np.uint8, 8),
                  (np.int16, np.uint16, 16),
                  (np.int32, np.uint32, 32),
                  (np.int64, np.uint64, 64))
        for signed, unsigned, nbits in widths:
            for i in range(1, nbits):
                assert_equal(hash(signed(-2**i)), hash(-2**i),
                             err_msg="%r: -2**%d" % (signed, i))
                assert_equal(hash(signed(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (signed, i - 1))
                assert_equal(hash(signed(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (signed, i))
                # Unsigned values stay one bit narrower to remain in range.
                i = max(i - 1, 1)
                assert_equal(hash(unsigned(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (unsigned, i - 1))
                assert_equal(hash(unsigned(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (unsigned, i))
class TestAttributes:
    """Tests for basic ndarray attributes: shape, strides, dtype,
    in-place stride assignment, and ``fill``."""

    def setup(self):
        # 1-D, 2-D and 3-D fixtures shared by the tests below.
        self.one = np.arange(10)
        self.two = np.arange(20).reshape(4, 5)
        self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)

    def test_attributes(self):
        """shape/strides/ndim/size/nbytes/itemsize/base behave as expected."""
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4, 5))
        assert_equal(self.three.shape, (2, 5, 6))
        self.three.shape = (10, 3, 2)
        assert_equal(self.three.shape, (10, 3, 2))
        self.three.shape = (2, 5, 6)
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        assert_equal(self.two.base, np.arange(20))

    def test_dtypeattr(self):
        """dtype equality plus the ``char``/``str`` dtype descriptors."""
        assert_equal(self.one.dtype, np.dtype(np.int_))
        assert_equal(self.three.dtype, np.dtype(np.float_))
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
        assert_(self.three.dtype.str[0] in '<>')
        assert_equal(self.one.dtype.str[1], 'i')
        assert_equal(self.three.dtype.str[1], 'f')

    def test_int_subclassing(self):
        # Regression test for https://github.com/numpy/numpy/pull/3526
        numpy_int = np.int_(0)

        # int_ doesn't inherit from Python int, because it's not fixed-width
        assert_(not isinstance(numpy_int, int))

    def test_stridesattr(self):
        """Stride validation when constructing an ndarray over a buffer."""
        x = self.one

        def make_array(size, offset, strides):
            # Build an int array over x's buffer with strides/offset given
            # in units of x.itemsize.
            return np.ndarray(size, buffer=x, dtype=int,
                              offset=offset*x.itemsize,
                              strides=strides*x.itemsize)

        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
        assert_raises(ValueError, make_array, 4, 4, -2)
        assert_raises(ValueError, make_array, 4, 2, -1)
        assert_raises(ValueError, make_array, 8, 3, 1)
        assert_equal(make_array(8, 3, 0), np.array([3]*8))
        # Check behavior reported in gh-2503:
        assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
        make_array(0, 0, 10)

    def test_set_stridesattr(self):
        """Assigning to ``.strides`` after construction is validated too."""
        x = self.one

        def make_array(size, offset, strides):
            try:
                r = np.ndarray([size], dtype=int, buffer=x,
                               offset=offset*x.itemsize)
            except Exception as e:
                raise RuntimeError(e)
            r.strides = strides = strides*x.itemsize
            return r

        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
        assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
        assert_raises(ValueError, make_array, 4, 4, -2)
        assert_raises(ValueError, make_array, 4, 2, -1)
        assert_raises(RuntimeError, make_array, 8, 3, 1)
        # Check that the true extent of the array is used.
        # Test relies on as_strided base not exposing a buffer.
        x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))

        def set_strides(arr, strides):
            arr.strides = strides

        assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))

        # Test for offset calculations:
        x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
                                            shape=(10,), strides=(-1,))
        assert_raises(ValueError, set_strides, x[::-1], -1)
        a = x[::-1]
        a.strides = 1
        a[::2].strides = 2

        # test 0d
        arr_0d = np.array(0)
        arr_0d.strides = ()
        assert_raises(TypeError, set_strides, arr_0d, None)

    def test_fill(self):
        """fill(1) matches broadcast assignment for all numeric type codes."""
        for t in "?bhilqpBHILQPfdgFDGO":
            x = np.empty((3, 2, 1), t)
            y = np.empty((3, 2, 1), t)
            x.fill(1)
            y[...] = 1
            assert_equal(x, y)

    def test_fill_max_uint64(self):
        # The largest uint64 must survive fill() without truncation.
        x = np.empty((3, 2, 1), dtype=np.uint64)
        y = np.empty((3, 2, 1), dtype=np.uint64)
        value = 2**64 - 1
        y[...] = value
        x.fill(value)
        assert_array_equal(x, y)

    def test_fill_struct_array(self):
        # Filling from a scalar
        x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
        x.fill(x[0])
        assert_equal(x['f1'][1], x['f1'][0])
        # Filling from a tuple that can be converted
        # to a scalar
        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
        x.fill((3.5, -2))
        assert_array_equal(x['a'], [3.5, 3.5])
        assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction:
    """Tests for np.array() construction from nested sequences of arrays."""
    def test_array(self):
        """Nesting arrays in lists stacks them along new leading axes."""
        d = np.ones(6)
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6)))
        d = np.ones(6)
        tgt = np.ones((2, 6))
        r = np.array([d, d])
        assert_equal(r, tgt)
        tgt[1] = 2
        r = np.array([d, d + 1])
        assert_equal(r, tgt)
        d = np.ones(6)
        r = np.array([[d, d]])
        assert_equal(r, np.ones((1, 2, 6)))
        d = np.ones(6)
        r = np.array([[d, d], [d, d]])
        assert_equal(r, np.ones((2, 2, 6)))
        d = np.ones((6, 6))
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6, 6)))
        d = np.ones((6, ))
        # Ragged nesting requires an explicit object dtype.
        r = np.array([[d, d + 1], d + 2], dtype=object)
        assert_equal(len(r), 2)
        assert_equal(r[0], [d, d + 1])
        assert_equal(r[1], d + 2)
        tgt = np.ones((2, 3), dtype=bool)
        tgt[0, 2] = False
        tgt[1, 0:2] = False
        r = np.array([[True, True, False], [False, False, True]])
        assert_equal(r, tgt)
        r = np.array([[True, False], [True, False], [False, True]])
        assert_equal(r, tgt.T)
    def test_array_empty(self):
        # np.array() with no arguments is an error, not an empty array.
        assert_raises(TypeError, np.array)
    def test_array_copy_false(self):
        """copy=False shares data with the source when no conversion is needed."""
        d = np.array([1, 2, 3])
        e = np.array(d, copy=False)
        d[1] = 3
        assert_array_equal(e, [1, 3, 3])
        # A 1-D array is both C- and F-contiguous, so order='F' still shares.
        e = np.array(d, copy=False, order='F')
        d[1] = 4
        assert_array_equal(e, [1, 4, 3])
        e[2] = 7
        assert_array_equal(d, [1, 4, 7])
    def test_array_copy_true(self):
        """copy=True always detaches the result from the source array."""
        d = np.array([[1,2,3], [1, 2, 3]])
        e = np.array(d, copy=True)
        d[0, 1] = 3
        e[0, 2] = -7
        assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
        assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
        e = np.array(d, copy=True, order='F')
        d[0, 1] = 5
        e[0, 2] = 7
        assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
        assert_array_equal(d, [[1, 5, 3], [1,2,3]])
    def test_array_cont(self):
        """ascontiguousarray/asfortranarray force the requested memory layout."""
        d = np.ones(10)[::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.ascontiguousarray(d).flags.f_contiguous)
        assert_(np.asfortranarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
        d = np.ones((10, 10))[::2,::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
    @pytest.mark.parametrize("func",
                             [np.array,
                              np.asarray,
                              np.asanyarray,
                              np.ascontiguousarray,
                              np.asfortranarray])
    def test_bad_arguments_error(self, func):
        """Bad dtypes and wrong arity raise TypeError for every constructor."""
        with pytest.raises(TypeError):
            func(3, dtype="bad dtype")
        with pytest.raises(TypeError):
            func()  # missing arguments
        with pytest.raises(TypeError):
            func(1, 2, 3, 4, 5, 6, 7, 8)  # too many arguments
    @pytest.mark.parametrize("func",
                             [np.array,
                              np.asarray,
                              np.asanyarray,
                              np.ascontiguousarray,
                              np.asfortranarray])
    def test_array_as_keyword(self, func):
        # This should likely be made positional only, but do not change
        # the name accidentally.
        if func is np.array:
            func(object=3)
        else:
            func(a=3)
class TestAssignment:
    """Tests for element/slice assignment semantics of ndarrays."""
    def test_assignment_broadcasting(self):
        """Input broadcasts to output; legacy output-broadcasting still works."""
        a = np.arange(6).reshape(2, 3)
        # Broadcasting the input to the output
        a[...] = np.arange(3)
        assert_equal(a, [[0, 1, 2], [0, 1, 2]])
        a[...] = np.arange(2).reshape(2, 1)
        assert_equal(a, [[0, 0, 0], [1, 1, 1]])
        # For compatibility with <= 1.5, a limited version of broadcasting
        # the output to the input.
        #
        # This behavior is inconsistent with NumPy broadcasting
        # in general, because it only uses one of the two broadcasting
        # rules (adding a new "1" dimension to the left of the shape),
        # applied to the output instead of an input. In NumPy 2.0, this kind
        # of broadcasting assignment will likely be disallowed.
        a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        # The other type of broadcasting would require a reduction operation.
        def assign(a, b):
            a[...] = b
        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
    def test_assignment_errors(self):
        # Address issue #2276
        class C:
            pass
        a = np.zeros(1)
        def assign(v):
            a[0] = v
        assert_raises((AttributeError, TypeError), assign, C())
        assert_raises(ValueError, assign, [1])
    def test_unicode_assignment(self):
        # gh-5049
        from numpy.core.numeric import set_string_function
        @contextmanager
        def inject_str(s):
            """ replace ndarray.__str__ temporarily """
            set_string_function(lambda x: s, repr=False)
            try:
                yield
            finally:
                # Restore the default __str__ even if the body raised.
                set_string_function(None, repr=False)
        a1d = np.array([u'test'])
        a0d = np.array(u'done')
        with inject_str(u'bad'):
            a1d[0] = a0d  # previously this would invoke __str__
        assert_equal(a1d[0], u'done')
        # this would crash for the same reason
        np.array([np.array(u'\xe5\xe4\xf6')])
    def test_stringlike_empty_list(self):
        # gh-8902
        u = np.array([u'done'])
        b = np.array([b'done'])
        # A sequence whose protocol methods are broken on purpose.
        class bad_sequence:
            def __getitem__(self): pass
            def __len__(self): raise RuntimeError
        assert_raises(ValueError, operator.setitem, u, 0, [])
        assert_raises(ValueError, operator.setitem, b, 0, [])
        assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
        assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
    def test_longdouble_assignment(self):
        # only relevant if longdouble is larger than float
        # we're looking for loss of precision
        for dtype in (np.longdouble, np.longcomplex):
            # gh-8902
            tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
            tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
            # construction
            tiny1d = np.array([tinya])
            assert_equal(tiny1d[0], tinya)
            # scalar = scalar
            tiny1d[0] = tinyb
            assert_equal(tiny1d[0], tinyb)
            # 0d = scalar
            tiny1d[0, ...] = tinya
            assert_equal(tiny1d[0], tinya)
            # 0d = 0d
            tiny1d[0, ...] = tinyb[...]
            assert_equal(tiny1d[0], tinyb)
            # scalar = 0d
            tiny1d[0] = tinyb[...]
            assert_equal(tiny1d[0], tinyb)
            arr = np.array([np.array(tinya)])
            assert_equal(arr[0], tinya)
    def test_cast_to_string(self):
        # cast to str should do "str(scalar)", not "str(scalar.item())"
        # Example: In python2, str(float) is truncated, so we want to avoid
        # str(np.float64(...).item()) as this would incorrectly truncate.
        a = np.zeros(1, dtype='S20')
        a[:] = np.array(['1.12345678901234567890'], dtype='f8')
        assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr:
    """Basic checks for dtype construction, byte order, and structured str/repr."""

    def test_construction(self):
        """Character codes map onto the corresponding scalar-type dtypes."""
        for code, scalar_type in (('i4', np.int32), ('f8', np.float64)):
            assert_equal(np.dtype(code), np.dtype(scalar_type))

    def test_byteorders(self):
        """Little- and big-endian variants of a dtype compare unequal."""
        assert_(np.dtype('<i4') != np.dtype('>i4'))
        # The same holds field-wise for structured dtypes.
        little = np.dtype([('a', '<i4')])
        big = np.dtype([('a', '>i4')])
        assert_(little != big)

    def test_structured_non_void(self):
        """A (base, fields) dtype keeps its non-void base visible in str()/repr()."""
        fields = [('a', '<i2'), ('b', '<i2')]
        dt_int = np.dtype(('i4', fields))
        assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
        # gh-9821: the array repr must show the same (base, fields) dtype.
        arr_int = np.zeros(4, dt_int)
        assert_equal(repr(arr_int),
            "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank:
    """Tests for indexing and assignment on 0-d (rank-zero) arrays."""
    def setup(self):
        # Fixtures: a 0-d int array and a 0-d object array.
        self.d = np.array(0), np.array('x', object)
    def test_ellipsis_subscript(self):
        """`a[...]` yields a 0-d view whose base is the original array."""
        a, b = self.d
        assert_equal(a[...], 0)
        assert_equal(b[...], 'x')
        assert_(a[...].base is a)  # `a[...] is a` in numpy <1.9.
        assert_(b[...].base is b)  # `b[...] is b` in numpy <1.9.
    def test_empty_subscript(self):
        """`a[()]` extracts the scalar element with the dtype's scalar type."""
        a, b = self.d
        assert_equal(a[()], 0)
        assert_equal(b[()], 'x')
        assert_(type(a[()]) is a.dtype.type)
        assert_(type(b[()]) is str)
    def test_invalid_subscript(self):
        """Integer and integer-array indices on 0-d arrays raise IndexError."""
        a, b = self.d
        assert_raises(IndexError, lambda x: x[0], a)
        assert_raises(IndexError, lambda x: x[0], b)
        assert_raises(IndexError, lambda x: x[np.array([], int)], a)
        assert_raises(IndexError, lambda x: x[np.array([], int)], b)
    def test_ellipsis_subscript_assignment(self):
        a, b = self.d
        a[...] = 42
        assert_equal(a, 42)
        b[...] = ''
        assert_equal(b.item(), '')
    def test_empty_subscript_assignment(self):
        a, b = self.d
        a[()] = 42
        assert_equal(a, 42)
        b[()] = ''
        assert_equal(b.item(), '')
    def test_invalid_subscript_assignment(self):
        a, b = self.d
        def assign(x, i, v):
            x[i] = v
        assert_raises(IndexError, assign, a, 0, 42)
        assert_raises(IndexError, assign, b, 0, '')
        # Assigning a non-convertible string into an int array fails.
        assert_raises(ValueError, assign, a, (), '')
    def test_newaxis(self):
        """Every np.newaxis combination adds length-1 dimensions."""
        a, b = self.d
        assert_equal(a[np.newaxis].shape, (1,))
        assert_equal(a[..., np.newaxis].shape, (1,))
        assert_equal(a[np.newaxis, ...].shape, (1,))
        assert_equal(a[..., np.newaxis].shape, (1,))
        assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a, b = self.d
        def subscript(x, i):
            x[i]
        assert_raises(IndexError, subscript, a, (np.newaxis, 0))
        assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
    def test_constructor(self):
        """np.ndarray(()) builds a writeable 0-d array; shape/strides lengths must match."""
        x = np.ndarray(())
        x[()] = 5
        assert_equal(x[()], 5)
        y = np.ndarray((), buffer=x)
        y[()] = 6
        assert_equal(x[()], 6)
        # strides and shape must be the same length
        with pytest.raises(ValueError):
            np.ndarray((2,), strides=())
        with pytest.raises(ValueError):
            np.ndarray((), strides=(2,))
    def test_output(self):
        # A 0-d output cannot receive a broadcast 1-d result.
        x = np.array(2)
        assert_raises(ValueError, np.add, x, [1], x)
    def test_real_imag(self):
        # contiguity checks are for gh-11245
        x = np.array(1j)
        xr = x.real
        xi = x.imag
        assert_equal(xr, np.array(0))
        assert_(type(xr) is np.ndarray)
        assert_equal(xr.flags.contiguous, True)
        assert_equal(xr.flags.f_contiguous, True)
        assert_equal(xi, np.array(1))
        assert_(type(xi) is np.ndarray)
        assert_equal(xi.flags.contiguous, True)
        assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing:
    """Tests for indexing array scalars, plus overlapping slice assignment."""
    def setup(self):
        # Fixture: an array scalar obtained by integer indexing.
        self.d = np.array([0, 1])[0]
    def test_ellipsis_subscript(self):
        """`scalar[...]` yields a 0-d array view of the scalar."""
        a = self.d
        assert_equal(a[...], 0)
        assert_equal(a[...].shape, ())
    def test_empty_subscript(self):
        """`scalar[()]` returns the scalar itself, shape ()."""
        a = self.d
        assert_equal(a[()], 0)
        assert_equal(a[()].shape, ())
    def test_invalid_subscript(self):
        a = self.d
        assert_raises(IndexError, lambda x: x[0], a)
        assert_raises(IndexError, lambda x: x[np.array([], int)], a)
    def test_invalid_subscript_assignment(self):
        # Array scalars are immutable: item assignment raises TypeError.
        a = self.d
        def assign(x, i, v):
            x[i] = v
        assert_raises(TypeError, assign, a, 0, 42)
    def test_newaxis(self):
        """Every np.newaxis combination adds length-1 dimensions."""
        a = self.d
        assert_equal(a[np.newaxis].shape, (1,))
        assert_equal(a[..., np.newaxis].shape, (1,))
        assert_equal(a[np.newaxis, ...].shape, (1,))
        assert_equal(a[..., np.newaxis].shape, (1,))
        assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a = self.d
        def subscript(x, i):
            x[i]
        assert_raises(IndexError, subscript, a, (np.newaxis, 0))
        assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
    def test_overlapping_assignment(self):
        """Assignment between overlapping views behaves as if done via a copy."""
        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])
        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0, 0, 1, 2])
        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3, 2, 1, 0])
        a = np.arange(6).reshape(2, 3)
        a[::-1,:] = a[:, ::-1]
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        a = np.arange(6).reshape(2, 3)
        a[::-1, ::-1] = a[:, ::-1]
        assert_equal(a, [[3, 4, 5], [0, 1, 2]])
        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0, 1, 0, 1, 2])
        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4, 3, 2, 3, 4])
        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0, 1, 2, 1, 0])
        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation:
    """
    Test the np.array constructor
    """
    def test_from_attribute(self):
        # An __array__ method that returns None must raise, not crash.
        class x:
            def __array__(self, dtype=None):
                pass
        assert_raises(ValueError, np.array, x())
    def test_from_string(self):
        """Numeric strings convert for every integer and float type code."""
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123', '123']
        result = np.array([123, 123], dtype=int)
        for type in types:
            msg = 'String conversion for %s' % type
            assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
    def test_void(self):
        """Unsized 'V' dtype infers itemsize; mixed lengths do not promote."""
        arr = np.array([], dtype='V')
        assert arr.dtype == 'V8'  # current default
        # Same length scalars (those that go to the same void) work:
        arr = np.array([b"1234", b"1234"], dtype="V")
        assert arr.dtype == "V4"
        # Promoting different lengths will fail (pre 1.20 this worked)
        # by going via S5 and casting to V5.
        with pytest.raises(TypeError):
            np.array([b"1234", b"12345"], dtype="V")
        with pytest.raises(TypeError):
            np.array([b"12345", b"1234"], dtype="V")
        # Check the same for the casting path:
        arr = np.array([b"1234", b"1234"], dtype="O").astype("V")
        assert arr.dtype == "V4"
        with pytest.raises(TypeError):
            np.array([b"1234", b"12345"], dtype="O").astype("V")
    @pytest.mark.parametrize("idx",
            [pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")])
    def test_structured_void_promotion(self, idx):
        """Structured voids with identical dtypes promote; differing ones fail."""
        arr = np.array(
            [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i')[idx]],
            dtype="V")
        assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i"))
        # The following fails to promote the two dtypes, resulting in an error
        with pytest.raises(TypeError):
            np.array(
                [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]],
                dtype="V")
    def test_too_big_error(self):
        # 45341 is the smallest integer greater than sqrt(2**31 - 1).
        # 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
        # We want to make sure that the square byte array with those dimensions
        # is too big on 32 or 64 bit systems respectively.
        if np.iinfo('intp').max == 2**31 - 1:
            shape = (46341, 46341)
        elif np.iinfo('intp').max == 2**63 - 1:
            shape = (3037000500, 3037000500)
        else:
            return
        assert_raises(ValueError, np.empty, shape, dtype=np.int8)
        assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
        assert_raises(ValueError, np.ones, shape, dtype=np.int8)
    @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8,
                        reason="malloc may not fail on 32 bit systems")
    def test_malloc_fails(self):
        # This test is guaranteed to fail due to a too large allocation
        with assert_raises(np.core._exceptions._ArrayMemoryError):
            np.empty(np.iinfo(np.intp).max, dtype=np.uint8)
    def test_zeros(self):
        """np.zeros really zero-fills scalar, subarray, and structured dtypes."""
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((13,), dtype=dt)
            assert_equal(np.count_nonzero(d), 0)
            # true for ieee floats
            assert_equal(d.sum(), 0)
            assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='4i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
    @pytest.mark.slow
    def test_zeros_big(self):
        # test big array as they might be allocated different by the system
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((30 * 1024**2,), dtype=dt)
            assert_(not d.any())
            # This test can fail on 32-bit systems due to insufficient
            # contiguous memory. Deallocating the previous array increases the
            # chance of success.
            del(d)
    def test_zeros_obj(self):
        # test initialization from PyLong(0)
        d = np.zeros((13,), dtype=object)
        assert_array_equal(d, [0] * 13)
        assert_equal(np.count_nonzero(d), 0)
    def test_zeros_obj_obj(self):
        # Object fields inside structured dtypes are zero-initialized too.
        d = np.zeros(10, dtype=[('k', object, 2)])
        assert_array_equal(d['k'], 0)
    def test_zeros_like_like_zeros(self):
        # test zeros_like returns the same as zeros
        for c in np.typecodes['All']:
            if c == 'V':
                continue
            d = np.zeros((3,3), dtype=c)
            assert_array_equal(np.zeros_like(d), d)
            assert_equal(np.zeros_like(d).dtype, d.dtype)
        # explicitly check some special cases
        d = np.zeros((3,3), dtype='S5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='U5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='<i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='<M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='f4,f4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
    def test_empty_unicode(self):
        # don't throw decode errors on garbage memory
        for i in range(5, 100, 5):
            d = np.empty(i, dtype='U')
            str(d)
    def test_sequence_non_homogeneous(self):
        """Mixed int/bigint infers object dtype; any complex promotes to complex."""
        assert_equal(np.array([4, 2**80]).dtype, object)
        assert_equal(np.array([4, 2**80, 4]).dtype, object)
        assert_equal(np.array([2**80, 4]).dtype, object)
        assert_equal(np.array([2**80] * 3).dtype, object)
        assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
        assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
        assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
    def test_non_sequence_sequence(self):
        """Should not segfault.
        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.
        """
        class Fail:
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise ValueError()
        class Map:
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise KeyError()
        a = np.array([Map()])
        assert_(a.shape == (1,))
        assert_(a.dtype == np.dtype(object))
        assert_raises(ValueError, np.array, [Fail()])
    def test_no_len_object_type(self):
        # gh-5100, want object array from iterable object without len()
        class Point2:
            def __init__(self):
                pass
            def __getitem__(self, ind):
                if ind in [0, 1]:
                    return ind
                else:
                    raise IndexError()
        d = np.array([Point2(), Point2(), Point2()])
        assert_equal(d.dtype, np.dtype(object))
    def test_false_len_sequence(self):
        # gh-7264, segfault for this example
        class C:
            def __getitem__(self, i):
                raise IndexError
            def __len__(self):
                return 42
        a = np.array(C()) # segfault?
        assert_equal(len(a), 0)
    def test_false_len_iterable(self):
        # Special case where a bad __getitem__ makes us fall back on __iter__:
        class C:
            def __getitem__(self, x):
                raise Exception
            def __iter__(self):
                return iter(())
            def __len__(self):
                return 2
        a = np.empty(2)
        with assert_raises(ValueError):
            a[:] = C()  # Segfault!
        np.array(C()) == list(C())
    def test_failed_len_sequence(self):
        # gh-7393
        class A:
            def __init__(self, data):
                self._data = data
            def __getitem__(self, item):
                return type(self)(self._data[item])
            def __len__(self):
                return len(self._data)
        # len(d) should give 3, but len(d[0]) will fail
        d = A([1,2,3])
        assert_equal(len(np.array(d)), 3)
    def test_array_too_big(self):
        # Test that array creation succeeds for arrays addressable by intp
        # on the byte level and fails for too large arrays.
        buf = np.zeros(100)
        max_bytes = np.iinfo(np.intp).max
        for dtype in ["intp", "S20", "b"]:
            dtype = np.dtype(dtype)
            itemsize = dtype.itemsize
            np.ndarray(buffer=buf, strides=(0,),
                       shape=(max_bytes//itemsize,), dtype=dtype)
            assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
                          shape=(max_bytes//itemsize + 1,), dtype=dtype)
    def _ragged_creation(self, seq):
        # Helper: building a ragged sequence must warn without dtype=object
        # and give the same result as the explicit dtype=object build.
        # without dtype=object, the ragged object should raise
        with assert_warns(np.VisibleDeprecationWarning):
            a = np.array(seq)
        b = np.array(seq, dtype=object)
        assert_equal(a, b)
        return b
    def test_ragged_ndim_object(self):
        # Lists of mismatching depths are treated as object arrays
        a = self._ragged_creation([[1], 2, 3])
        assert_equal(a.shape, (3,))
        assert_equal(a.dtype, object)
        a = self._ragged_creation([1, [2], 3])
        assert_equal(a.shape, (3,))
        assert_equal(a.dtype, object)
        a = self._ragged_creation([1, 2, [3]])
        assert_equal(a.shape, (3,))
        assert_equal(a.dtype, object)
    def test_ragged_shape_object(self):
        # The ragged dimension of a list is turned into an object array
        a = self._ragged_creation([[1, 1], [2], [3]])
        assert_equal(a.shape, (3,))
        assert_equal(a.dtype, object)
        a = self._ragged_creation([[1], [2, 2], [3]])
        assert_equal(a.shape, (3,))
        assert_equal(a.dtype, object)
        a = self._ragged_creation([[1], [2], [3, 3]])
        assert a.shape == (3,)
        assert a.dtype == object
    def test_array_of_ragged_array(self):
        """Object arrays that hold arrays nest without recursing into them."""
        outer = np.array([None, None])
        outer[0] = outer[1] = np.array([1, 2, 3])
        assert np.array(outer).shape == (2,)
        assert np.array([outer]).shape == (1, 2)
        outer_ragged = np.array([None, None])
        outer_ragged[0] = np.array([1, 2, 3])
        outer_ragged[1] = np.array([1, 2, 3, 4])
        # should both of these emit deprecation warnings?
        assert np.array(outer_ragged).shape == (2,)
        assert np.array([outer_ragged]).shape == (1, 2,)
    def test_deep_nonragged_object(self):
        # None of these should raise, even though they are missing dtype=object
        a = np.array([[[Decimal(1)]]])
        a = np.array([1, Decimal(1)])
        a = np.array([[1], [Decimal(1)]])
class TestStructured:
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_scalar_assignment(self):
with assert_raises(ValueError):
arr = np.arange(25).reshape(5, 5)
arr.itemset(3)
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
def test_structured_asarray_is_view(self):
    """np.asarray on a structured scalar that is a view returns an array
    whose base is that scalar; passing an explicit dtype always copies."""
    # A scalar viewing an array preserves its view even when creating a
    # new array. This test documents behaviour, it may not be the best
    # desired behaviour.
    arr = np.array([1], dtype="i,i")
    scalar = arr[0]
    assert not scalar.flags.owndata  # view into the array
    assert np.asarray(scalar).base is scalar
    # But never when a dtype is passed in:
    assert np.asarray(scalar, dtype=scalar.dtype).base is None
    # A scalar which owns its data does not have this property.
    # It is not easy to create one, one method is to use pickle:
    scalar = pickle.loads(pickle.dumps(scalar))
    assert scalar.flags.owndata
    assert np.asarray(scalar).base is None
class TestBool:
    """Tests for np.bool_: interning of the two singletons, boolean
    summation/counting (including non-canonical truthy bytes), and casts
    from flexible (string/void) types to bool."""

    def test_test_interning(self):
        """np.bool_(0)/np.bool_(False) and np.bool_(1)/np.bool_(True) are
        the same interned objects, also when extracted from arrays."""
        a0 = np.bool_(0)
        b0 = np.bool_(False)
        assert_(a0 is b0)
        a1 = np.bool_(1)
        b1 = np.bool_(True)
        assert_(a1 is b1)
        assert_(np.array([True])[0] is a1)
        assert_(np.array(True)[()] is a1)

    def test_sum(self):
        """Boolean sum equals the element count for all-True arrays,
        including strided views and 0xff backing bytes."""
        d = np.ones(101, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)

        # 0xff bytes are truthy even though 1 is the canonical True byte
        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)

    def check_count_nonzero(self, power, length):
        """Check np.count_nonzero over all 2**power bit patterns of a
        boolean array of `length`, also after writing non-canonical
        nonzero byte values through a uint8 view."""
        powers = [2 ** i for i in range(length)]
        for i in range(2**power):
            l = [(i & x) != 0 for x in powers]
            a = np.array(l, dtype=bool)
            c = builtins.sum(l)
            assert_equal(np.count_nonzero(a), c)
            av = a.view(np.uint8)
            av *= 3
            assert_equal(np.count_nonzero(a), c)
            av *= 4
            assert_equal(np.count_nonzero(a), c)
            av[av != 0] = 0xFF
            assert_equal(np.count_nonzero(a), c)

    def test_count_nonzero(self):
        # check all 12 bit combinations in a length 17 array
        # covers most cases of the 16 byte unrolled code
        self.check_count_nonzero(12, 17)

    @pytest.mark.slow
    def test_count_nonzero_all(self):
        # check all combinations in a length 17 array
        # covers all cases of the 16 byte unrolled code
        self.check_count_nonzero(17, 17)

    def test_count_nonzero_unaligned(self):
        # prevent mistakes as e.g. gh-4060
        for o in range(7):
            # slicing off o+1 bytes makes the buffer start unaligned
            a = np.zeros((18,), dtype=bool)[o+1:]
            a[:o] = True
            assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
            a = np.ones((18,), dtype=bool)[o+1:]
            a[:o] = False
            assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))

    def _test_cast_from_flexible(self, dtype):
        """Shared body: zero-length values cast to False; any non-empty
        value (even b'0' or whitespace) casts to True."""
        # empty string -> false
        for n in range(3):
            v = np.array(b'', (dtype, n))
            assert_equal(bool(v), False)
            assert_equal(bool(v[()]), False)
            assert_equal(v.astype(bool), False)
            assert_(isinstance(v.astype(bool), np.ndarray))
            assert_(v[()].astype(bool) is np.False_)

        # anything else -> true
        for n in range(1, 4):
            for val in [b'a', b'0', b' ']:
                v = np.array(val, (dtype, n))
                assert_equal(bool(v), True)
                assert_equal(bool(v[()]), True)
                assert_equal(v.astype(bool), True)
                assert_(isinstance(v.astype(bool), np.ndarray))
                assert_(v[()].astype(bool) is np.True_)

    def test_cast_from_void(self):
        self._test_cast_from_flexible(np.void)

    @pytest.mark.xfail(reason="See gh-9847")
    def test_cast_from_unicode(self):
        self._test_cast_from_flexible(np.unicode_)

    @pytest.mark.xfail(reason="See gh-9847")
    def test_cast_from_bytes(self):
        self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible:
    """Tests for flexible dtypes (bytes/str/void) with itemsize 0."""

    @staticmethod
    def _zeros(shape, dtype=str):
        """Return a zero-itemsize array of the given flexible dtype."""
        dtype = np.dtype(dtype)
        if dtype == np.void:
            return np.zeros(shape, dtype=(dtype, 0))

        # not constructable directly
        dtype = np.dtype([('x', dtype, 0)])
        return np.zeros(shape, dtype=dtype)['x']

    def test_create(self):
        """Zero-itemsize arrays can be created for bytes, void and str."""
        zs = self._zeros(10, bytes)
        assert_equal(zs.itemsize, 0)
        zs = self._zeros(10, np.void)
        assert_equal(zs.itemsize, 0)
        zs = self._zeros(10, str)
        assert_equal(zs.itemsize, 0)

    def _test_sort_partition(self, name, kinds, **kwargs):
        """Shared driver: run method and free-function variants of a
        sort-like operation on zero-itemsize arrays for each kind."""
        # Previously, these would all hang
        for dt in [bytes, np.void, str]:
            zs = self._zeros(10, dt)
            sort_method = getattr(zs, name)
            sort_func = getattr(np, name)
            for kind in kinds:
                sort_method(kind=kind, **kwargs)
                sort_func(zs, kind=kind, **kwargs)

    def test_sort(self):
        self._test_sort_partition('sort', kinds='qhs')

    def test_argsort(self):
        self._test_sort_partition('argsort', kinds='qhs')

    def test_partition(self):
        self._test_sort_partition('partition', kinds=['introselect'], kth=2)

    def test_argpartition(self):
        self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)

    def test_resize(self):
        # previously an error
        for dt in [bytes, np.void, str]:
            zs = self._zeros(10, dt)
            zs.resize(25)
            zs.resize((10, 10))

    def test_view(self):
        """Viewing as itself works; viewing as a non-empty type is empty."""
        for dt in [bytes, np.void, str]:
            zs = self._zeros(10, dt)

            # viewing as itself should be allowed
            assert_equal(zs.view(dt).dtype, np.dtype(dt))

            # viewing as any non-empty type gives an empty result
            assert_equal(zs.view((dt, 1)).shape, (0,))

    def test_dumps(self):
        zs = self._zeros(10, int)
        assert_equal(zs, pickle.loads(zs.dumps()))

    def test_pickle(self):
        """Pickle round-trip preserves the zero-itemsize dtype."""
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            for dt in [bytes, np.void, str]:
                zs = self._zeros(10, dt)
                p = pickle.dumps(zs, protocol=proto)
                zs2 = pickle.loads(p)

                assert_equal(zs.dtype, zs2.dtype)

    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
                        reason="requires pickle protocol 5")
    def test_pickle_with_buffercallback(self):
        """Protocol-5 buffer callbacks reconstruct a view of the original
        data rather than a copy."""
        array = np.arange(10)
        buffers = []
        bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
                                    protocol=5)
        array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
        # when using pickle protocol 5 with buffer callbacks,
        # array_from_buffer is reconstructed from a buffer holding a view
        # to the initial array's data, so modifying an element in array
        # should modify it in array_from_buffer too.
        array[0] = -1
        assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods:
    """Tests for ndarray methods (sort, argsort, searchsorted, partition,
    compress, choose, repeat, reshape, round, squeeze, transpose, copy)."""

    # sorting kinds exercised by the generic sort/argsort tests below
    sort_kinds = ['quicksort', 'heapsort', 'stable']
def test_all_where(self):
a = np.array([[True, False, True],
[False, False, False],
[True, True, True]])
wh_full = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
wh_lower = np.array([[False],
[False],
[True]])
for _ax in [0, None]:
assert_equal(a.all(axis=_ax, where=wh_lower),
np.all(a[wh_lower[:,0],:], axis=_ax))
assert_equal(np.all(a, axis=_ax, where=wh_lower),
a[wh_lower[:,0],:].all(axis=_ax))
assert_equal(a.all(where=wh_full), True)
assert_equal(np.all(a, where=wh_full), True)
assert_equal(a.all(where=False), True)
assert_equal(np.all(a, where=False), True)
def test_any_where(self):
a = np.array([[True, False, True],
[False, False, False],
[True, True, True]])
wh_full = np.array([[False, True, False],
[True, True, True],
[False, False, False]])
wh_middle = np.array([[False],
[True],
[False]])
for _ax in [0, None]:
assert_equal(a.any(axis=_ax, where=wh_middle),
np.any(a[wh_middle[:,0],:], axis=_ax))
assert_equal(np.any(a, axis=_ax, where=wh_middle),
a[wh_middle[:,0],:].any(axis=_ax))
assert_equal(a.any(where=wh_full), False)
assert_equal(np.any(a, where=wh_full), False)
assert_equal(a.any(where=False), False)
assert_equal(np.any(a, where=False), False)
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
    """ndarray.choose: broadcasting of choice arrays, a bad index type
    that used to segfault (gh-12031), an explicit `out` array, and
    overlapping out memory with mode='wrap' (gh-6272)."""
    x = 2*np.ones((3,), dtype=int)
    y = 3*np.ones((3,), dtype=int)
    x2 = 2*np.ones((2, 3), dtype=int)
    y2 = 3*np.ones((2, 3), dtype=int)
    ind = np.array([0, 0, 1])

    A = ind.choose((x, y))
    assert_equal(A, [2, 2, 3])

    A = ind.choose((x2, y2))
    assert_equal(A, [[2, 2, 3], [2, 2, 3]])

    # 1-d and 2-d choices broadcast against each other
    A = ind.choose((x, y2))
    assert_equal(A, [[2, 2, 3], [2, 2, 3]])

    oned = np.ones(1)
    # gh-12031, caused SEGFAULT
    assert_raises(TypeError, oned.choose,np.void(0), [oned])

    out = np.array(0)
    ret = np.choose(np.array(1), [10, 20, 30], out=out)
    assert out is ret
    assert_equal(out[()], 20)

    # gh-6272 check overlap on out
    x = np.arange(5)
    y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
    assert_equal(y, np.array([0, 1, 2]))
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert out is res
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
    """Sort order of doubles and complex doubles containing nans: the
    inputs are constructed reverse-sorted, so np.sort must equal the
    reversed input (nans compare greatest and sort to the end)."""
    # test ordering for floats and complex containing nans. It is only
    # necessary to check the less-than comparison, so sorts that
    # only follow the insertion sort path are sufficient. We only
    # test doubles and complex doubles as the logic is the same.

    # check doubles
    msg = "Test real sort order with nans"
    a = np.array([np.nan, 1, 0])
    b = np.sort(a)
    assert_equal(b, a[::-1], msg)
    # check complex
    msg = "Test complex sort order with nans"
    a = np.zeros(9, dtype=np.complex128)
    a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
    a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
    b = np.sort(a)
    assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.

@pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64,
                                   np.float16, np.float32, np.float64,
                                   np.longdouble])
def test_sort_unsigned(self, dtype):
    """Sorting already-sorted and reversed arrays for every sort kind."""
    a = np.arange(101, dtype=dtype)
    b = a[::-1].copy()
    for kind in self.sort_kinds:
        msg = "scalar sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype',
                         [np.int8, np.int16, np.int32, np.int64, np.float16,
                          np.float32, np.float64, np.longdouble])
def test_sort_signed(self, dtype):
    """Sorting signed ranges (with negatives) for every sort kind."""
    a = np.arange(-50, 51, dtype=dtype)
    b = a[::-1].copy()
    for kind in self.sort_kinds:
        msg = "scalar sort, kind=%s" % (kind)
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
@pytest.mark.parametrize('part', ['real', 'imag'])
def test_sort_complex(self, part, dtype):
    """Complex sorts with one component held constant, so ordering is
    decided entirely by the other component."""
    # test complex sorts. These use the same code as the scalars
    # but the compare function differs.
    cdtype = {
        np.single: np.csingle,
        np.double: np.cdouble,
        np.longdouble: np.clongdouble,
    }[dtype]
    a = np.arange(-50, 51, dtype=dtype)
    b = a[::-1].copy()
    ai = (a * (1+1j)).astype(cdtype)
    bi = (b * (1+1j)).astype(cdtype)
    # pin one component to 1 so the sort key is the other component
    setattr(ai, part, 1)
    setattr(bi, part, 1)
    for kind in self.sort_kinds:
        msg = "complex sort, %s part == 1, kind=%s" % (part, kind)
        c = ai.copy()
        c.sort(kind=kind)
        assert_equal(c, ai, msg)
        c = bi.copy()
        c.sort(kind=kind)
        assert_equal(c, ai, msg)
def test_sort_complex_byte_swapping(self):
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
@pytest.mark.parametrize('dtype', [np.bytes_, np.unicode_])
def test_sort_string(self, dtype):
    """String (bytes and unicode) sorts for every sort kind."""
    # np.array will perform the encoding to bytes for us in the bytes test
    a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype)
    b = a[::-1].copy()
    for kind in self.sort_kinds:
        msg = "kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
def test_sort_object(self):
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_structured(self):
    """Structured (record) array sorting for q/h/m sort kinds."""
    # test record array sorts.
    dt = np.dtype([('f', float), ('i', int)])
    a = np.array([(i, i) for i in range(101)], dtype=dt)
    b = a[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]'])
def test_sort_time(self, dtype):
    """datetime64 and timedelta64 sorting for q/h/m sort kinds."""
    # test datetime64 and timedelta64 sorts.
    a = np.arange(0, 101, dtype=dtype)
    b = a[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
def test_sort_axis(self):
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
def test_sort_size_0(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_sort_bad_ordering(self):
    """A class whose __lt__ always returns True (an inconsistent
    ordering) must not crash any sort kind."""
    # test generic class with bogus ordering,
    # should not segfault.
    class Boom:
        def __lt__(self, other):
            return True

    a = np.array([Boom()] * 100, dtype=object)
    for kind in self.sort_kinds:
        msg = "kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
    """Comparison errors raised during object sorts must propagate as
    TypeError rather than crash."""
    #gh-9404
    arr = np.array([0, datetime.now(), 1], dtype=object)
    for kind in self.sort_kinds:
        assert_raises(TypeError, arr.sort, kind=kind)
    #gh-3879
    class Raiser:
        def raises_anything(*args, **kwargs):
            raise TypeError("SOMETHING ERRORED")
        __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
    arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
    np.random.shuffle(arr)
    for kind in self.sort_kinds:
        assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
    """A quicksort adversary ("median-of-3 killer") input must still sort
    in reasonable time (introsort falls back to heapsort)."""
    # test degraded dataset would take minutes to run with normal qsort
    d = np.arange(1000000)
    do = d.copy()
    x = d
    # create a median of 3 killer where each median is the sorted second
    # last element of the quicksort partition
    while x.size > 3:
        mid = x.size // 2
        x[mid], x[-2] = x[-2], x[mid]
        x = x[:-2]
    assert_equal(np.sort(d), do)
    assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
    """Sorting structured arrays by field via `order=`, including a
    deliberately non-native byte order for the key field."""
    # Test sorting an array with fields
    x1 = np.array([21, 32, 14])
    x2 = np.array(['my', 'first', 'name'])
    x3 = np.array([3.1, 4.5, 6.2])
    r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')

    r.sort(order=['id'])
    assert_equal(r.id, np.array([14, 21, 32]))
    assert_equal(r.word, np.array(['name', 'my', 'first']))
    assert_equal(r.number, np.array([6.2, 3.1, 4.5]))

    r.sort(order=['word'])
    assert_equal(r.id, np.array([32, 21, 14]))
    assert_equal(r.word, np.array(['first', 'my', 'name']))
    assert_equal(r.number, np.array([4.5, 3.1, 6.2]))

    r.sort(order=['number'])
    assert_equal(r.id, np.array([21, 32, 14]))
    assert_equal(r.word, np.array(['my', 'first', 'name']))
    assert_equal(r.number, np.array([3.1, 4.5, 6.2]))

    assert_raises_regex(ValueError, 'duplicate',
        lambda: r.sort(order=['id', 'id']))

    # pick the byte order OPPOSITE to the native one, so the sort must
    # go through the byte-swapped comparison path
    if sys.byteorder == 'little':
        strtype = '>i2'
    else:
        strtype = '<i2'
    mydtype = [('name', 'U5'), ('col2', strtype)]
    r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                 dtype=mydtype)
    r.sort(order='col2')
    assert_equal(r['col2'], [1, 3, 255, 258])
    assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                             dtype=mydtype))
def test_argsort(self):
    """argsort across all type-specific code paths (scalar, complex,
    byte-swapped, string, unicode, object, structured, datetime64,
    timedelta64), plus axis handling, empty arrays, and stability of
    the mergesort ('m') kind on all-equal inputs."""
    # all c scalar argsorts use the same code with different types
    # so it suffices to run a quick check with one type. The number
    # of sorted items must be greater than ~50 to check the actual
    # algorithm because quick and merge sort fall over to insertion
    # sort for small arrays.

    for dtype in [np.int32, np.uint32, np.float32]:
        a = np.arange(101, dtype=dtype)
        b = a[::-1].copy()
        for kind in self.sort_kinds:
            msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype)
            assert_equal(a.copy().argsort(kind=kind), a, msg)
            assert_equal(b.copy().argsort(kind=kind), b, msg)

    # test complex argsorts. These use the same code as the scalars
    # but the compare function differs.
    ai = a*1j + 1
    bi = b*1j + 1
    for kind in self.sort_kinds:
        msg = "complex argsort, kind=%s" % kind
        assert_equal(ai.copy().argsort(kind=kind), a, msg)
        assert_equal(bi.copy().argsort(kind=kind), b, msg)
    ai = a + 1j
    bi = b + 1j
    for kind in self.sort_kinds:
        msg = "complex argsort, kind=%s" % kind
        assert_equal(ai.copy().argsort(kind=kind), a, msg)
        assert_equal(bi.copy().argsort(kind=kind), b, msg)

    # test argsort of complex arrays requiring byte-swapping, gh-5441
    for endianness in '<>':
        for dt in np.typecodes['Complex']:
            arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
            msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
            assert_equal(arr.argsort(),
                         np.arange(len(arr), dtype=np.intp), msg)

    # test string argsorts.
    s = 'aaaaaaaa'
    a = np.array([s + chr(i) for i in range(101)])
    b = a[::-1].copy()
    r = np.arange(101)
    rr = r[::-1]
    for kind in self.sort_kinds:
        msg = "string argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test unicode argsorts.
    s = 'aaaaaaaa'
    a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in self.sort_kinds:
        msg = "unicode argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test object array argsorts.
    a = np.empty((101,), dtype=object)
    a[:] = list(range(101))
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in self.sort_kinds:
        msg = "object argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test structured array argsorts.
    dt = np.dtype([('f', float), ('i', int)])
    a = np.array([(i, i) for i in range(101)], dtype=dt)
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in self.sort_kinds:
        msg = "structured array argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test datetime64 argsorts.
    a = np.arange(0, 101, dtype='datetime64[D]')
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "datetime64 argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test timedelta64 argsorts.
    a = np.arange(0, 101, dtype='timedelta64[D]')
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "timedelta64 argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # check axis handling. This should be the same for all type
    # specific argsorts, so we only check it for one type and one kind
    a = np.array([[3, 2], [1, 0]])
    b = np.array([[1, 1], [0, 0]])
    c = np.array([[1, 0], [1, 0]])
    assert_equal(a.copy().argsort(axis=0), b)
    assert_equal(a.copy().argsort(axis=1), c)
    assert_equal(a.copy().argsort(), c)

    # check axis handling for multidimensional empty arrays
    a = np.array([])
    a.shape = (3, 2, 1, 0)
    for axis in range(-a.ndim, a.ndim):
        msg = 'test empty array argsort with axis={0}'.format(axis)
        assert_equal(np.argsort(a, axis=axis),
                     np.zeros_like(a, dtype=np.intp), msg)
    msg = 'test empty array argsort with axis=None'
    assert_equal(np.argsort(a, axis=None),
                 np.zeros_like(a.ravel(), dtype=np.intp), msg)

    # check that stable argsorts are stable
    r = np.arange(100)
    # scalars
    a = np.zeros(100)
    assert_equal(a.argsort(kind='m'), r)
    # complex
    a = np.zeros(100, dtype=complex)
    assert_equal(a.argsort(kind='m'), r)
    # string
    a = np.array(['aaaaaaaaa' for i in range(100)])
    assert_equal(a.argsort(kind='m'), r)
    # unicode
    a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
    assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
    """searchsorted: nan ordering for doubles and complex doubles,
    keyword arguments, byte order, degenerate array sizes (0/1/all-equal
    elements), unaligned data and keys, index resetting, and all
    type-specific binary search functions including empty inputs."""
    # test for floats and complex containing nans. The logic is the
    # same for all float types so only test double types for now.
    # The search sorted routines use the compare functions for the
    # array type, so this checks if that is consistent with the sort
    # order.

    # check double
    a = np.array([0, 1, np.nan])
    msg = "Test real searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='left')
    assert_equal(b, np.arange(3), msg)
    msg = "Test real searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='right')
    assert_equal(b, np.arange(1, 4), msg)
    # check keyword arguments
    a.searchsorted(v=1)
    # check double complex
    a = np.zeros(9, dtype=np.complex128)
    a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
    a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
    msg = "Test complex searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='left')
    assert_equal(b, np.arange(9), msg)
    msg = "Test complex searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='right')
    assert_equal(b, np.arange(1, 10), msg)
    msg = "Test searchsorted with little endian, side='l'"
    a = np.array([0, 128], dtype='<i4')
    b = a.searchsorted(np.array(128, dtype='<i4'))
    assert_equal(b, 1, msg)
    msg = "Test searchsorted with big endian, side='l'"
    a = np.array([0, 128], dtype='>i4')
    b = a.searchsorted(np.array(128, dtype='>i4'))
    assert_equal(b, 1, msg)
    # Check 0 elements
    a = np.ones(0)
    b = a.searchsorted([0, 1, 2], 'left')
    assert_equal(b, [0, 0, 0])
    b = a.searchsorted([0, 1, 2], 'right')
    assert_equal(b, [0, 0, 0])
    a = np.ones(1)
    # Check 1 element
    b = a.searchsorted([0, 1, 2], 'left')
    assert_equal(b, [0, 0, 1])
    b = a.searchsorted([0, 1, 2], 'right')
    assert_equal(b, [0, 1, 1])
    # Check all elements equal
    a = np.ones(2)
    b = a.searchsorted([0, 1, 2], 'left')
    assert_equal(b, [0, 0, 2])
    b = a.searchsorted([0, 1, 2], 'right')
    assert_equal(b, [0, 2, 2])
    # Test searching unaligned array
    # (view into a uint8 buffer offset by one byte)
    a = np.arange(10)
    aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
    unaligned = aligned[1:].view(a.dtype)
    unaligned[:] = a
    # Test searching unaligned array
    b = unaligned.searchsorted(a, 'left')
    assert_equal(b, a)
    b = unaligned.searchsorted(a, 'right')
    assert_equal(b, a + 1)
    # Test searching for unaligned keys
    b = a.searchsorted(unaligned, 'left')
    assert_equal(b, a)
    b = a.searchsorted(unaligned, 'right')
    assert_equal(b, a + 1)
    # Test smart resetting of binsearch indices
    a = np.arange(5)
    b = a.searchsorted([6, 5, 4], 'left')
    assert_equal(b, [5, 5, 4])
    b = a.searchsorted([6, 5, 4], 'right')
    assert_equal(b, [5, 5, 5])
    # Test all type specific binary search functions
    types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                     np.typecodes['Datetime'], '?O'))
    for dt in types:
        if dt == 'M':
            dt = 'M8[D]'
        if dt == '?':
            a = np.arange(2, dtype=dt)
            out = np.arange(2)
        else:
            a = np.arange(0, 5, dtype=dt)
            out = np.arange(5)
        b = a.searchsorted(a, 'left')
        assert_equal(b, out)
        b = a.searchsorted(a, 'right')
        assert_equal(b, out + 1)
        # Test empty array, use a fresh array to get warnings in
        # valgrind if access happens.
        e = np.ndarray(shape=0, buffer=b'', dtype=dt)
        b = e.searchsorted(a, 'left')
        assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
        b = a.searchsorted(e, 'left')
        assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
    """searchsorted on long unicode strings that differ only near the end
    (regression for a length miscalculation in UNICODE_compare)."""
    # Test searchsorted on unicode strings.

    # 1.6.1 contained a string length miscalculation in
    # arraytypes.c.src:UNICODE_compare() which manifested as
    # incorrect/inconsistent results from searchsorted.
    a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
                 dtype=np.unicode_)
    ind = np.arange(len(a))
    assert_equal([a.searchsorted(v, 'left') for v in a], ind)
    assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
    assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
    assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_invalid_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0,
sorter=np.array((1, (2, 3)), dtype=object))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
def test_searchsorted_with_sorter(self):
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='left', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='right', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'left', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'right', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'left', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'right', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'left', s)
assert_equal(b, out)
b = a.searchsorted(a, 'right', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'left', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'left', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'left', s)
assert_equal(b, out)
b = a.searchsorted(a, 'right', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'left'), A))
assert_(not isinstance(a.searchsorted(b, 'right'), A))
assert_(not isinstance(a.searchsorted(b, 'left', s), A))
assert_(not isinstance(a.searchsorted(b, 'right', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.partition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
    def test_partition(self):
        """Broad behavioral checks for np.partition / np.argpartition.

        Covers argument validation, tiny arrays, sorted and reverse-sorted
        input, median-of-3 killer sequences, NaN placement, repeated
        elements, multiple kth values, axis handling and in-place use
        across several dtypes.
        """
        d = np.arange(10)
        # kind must be the string name of a valid selection algorithm
        assert_raises(TypeError, np.partition, d, 2, kind=1)
        assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
        assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
        assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
        assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
        for k in ("introselect",):
            # empty and single-element arrays are trivially partitioned
            d = np.array([])
            assert_array_equal(np.partition(d, 0, kind=k), d)
            assert_array_equal(np.argpartition(d, 0, kind=k), d)
            d = np.ones(1)
            assert_array_equal(np.partition(d, 0, kind=k)[0], d)
            assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                               np.partition(d, 0, kind=k))
            # kth not modified
            kth = np.array([30, 15, 5])
            okth = kth.copy()
            np.partition(np.arange(40), kth)
            assert_array_equal(kth, okth)
            # all permutations (with repeats) of 2-element arrays
            for r in ([2, 1], [1, 2], [1, 1]):
                d = np.array(r)
                tgt = np.sort(d)
                assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
                assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
                assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                                   np.partition(d, 0, kind=k))
                assert_array_equal(d[np.argpartition(d, 1, kind=k)],
                                   np.partition(d, 1, kind=k))
                for i in range(d.size):
                    d[i:].partition(0, kind=k)
                assert_array_equal(d, tgt)
            # representative orderings of 3-element arrays
            for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
                      [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
                d = np.array(r)
                tgt = np.sort(d)
                assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
                assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
                assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
                assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                                   np.partition(d, 0, kind=k))
                assert_array_equal(d[np.argpartition(d, 1, kind=k)],
                                   np.partition(d, 1, kind=k))
                assert_array_equal(d[np.argpartition(d, 2, kind=k)],
                                   np.partition(d, 2, kind=k))
                for i in range(d.size):
                    d[i:].partition(0, kind=k)
                assert_array_equal(d, tgt)
            # all-equal input
            d = np.ones(50)
            assert_array_equal(np.partition(d, 0, kind=k), d)
            assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                               np.partition(d, 0, kind=k))
            # sorted
            d = np.arange(49)
            assert_equal(np.partition(d, 5, kind=k)[5], 5)
            assert_equal(np.partition(d, 15, kind=k)[15], 15)
            assert_array_equal(d[np.argpartition(d, 5, kind=k)],
                               np.partition(d, 5, kind=k))
            assert_array_equal(d[np.argpartition(d, 15, kind=k)],
                               np.partition(d, 15, kind=k))
            # rsorted
            d = np.arange(47)[::-1]
            assert_equal(np.partition(d, 6, kind=k)[6], 6)
            assert_equal(np.partition(d, 16, kind=k)[16], 16)
            assert_array_equal(d[np.argpartition(d, 6, kind=k)],
                               np.partition(d, 6, kind=k))
            assert_array_equal(d[np.argpartition(d, 16, kind=k)],
                               np.partition(d, 16, kind=k))
            # negative kth counts from the end
            assert_array_equal(np.partition(d, -6, kind=k),
                               np.partition(d, 41, kind=k))
            assert_array_equal(np.partition(d, -16, kind=k),
                               np.partition(d, 31, kind=k))
            assert_array_equal(d[np.argpartition(d, -6, kind=k)],
                               np.partition(d, 41, kind=k))
            # median of 3 killer, O(n^2) on pure median 3 pivot quickselect
            # exercises the median of median of 5 code used to keep O(n)
            d = np.arange(1000000)
            x = np.roll(d, d.size // 2)
            mid = x.size // 2 + 1
            assert_equal(np.partition(x, mid)[mid], mid)
            d = np.arange(1000001)
            x = np.roll(d, d.size // 2 + 1)
            mid = x.size // 2 + 1
            assert_equal(np.partition(x, mid)[mid], mid)
            # max
            d = np.ones(10)
            d[1] = 4
            assert_equal(np.partition(d, (2, -1))[-1], 4)
            assert_equal(np.partition(d, (2, -1))[2], 1)
            assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
            assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
            # NaN sorts to the end, so it must land in the max slot
            d[1] = np.nan
            assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
            assert_(np.isnan(np.partition(d, (2, -1))[-1]))
            # equal elements
            d = np.arange(47) % 7
            tgt = np.sort(np.arange(47) % 7)
            np.random.shuffle(d)
            for i in range(d.size):
                assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
            assert_array_equal(d[np.argpartition(d, 6, kind=k)],
                               np.partition(d, 6, kind=k))
            assert_array_equal(d[np.argpartition(d, 16, kind=k)],
                               np.partition(d, 16, kind=k))
            for i in range(d.size):
                d[i:].partition(0, kind=k)
            assert_array_equal(d, tgt)
            # long runs of equal values around the requested kth
            d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
                          7, 7, 7, 7, 7, 9])
            kth = [0, 3, 19, 20]
            assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
            assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
            # kth / axis out of range raise
            d = np.array([2, 1])
            d.partition(0, kind=k)
            assert_raises(ValueError, d.partition, 2)
            assert_raises(np.AxisError, d.partition, 3, axis=1)
            assert_raises(ValueError, np.partition, d, 2)
            assert_raises(np.AxisError, np.partition, d, 2, axis=1)
            assert_raises(ValueError, d.argpartition, 2)
            assert_raises(np.AxisError, d.argpartition, 3, axis=1)
            assert_raises(ValueError, np.argpartition, d, 2)
            assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
            # axis handling on a 2-D array; valid calls must not raise
            d = np.arange(10).reshape((2, 5))
            d.partition(1, axis=0, kind=k)
            d.partition(4, axis=1, kind=k)
            np.partition(d, 1, axis=0, kind=k)
            np.partition(d, 4, axis=1, kind=k)
            np.partition(d, 1, axis=None, kind=k)
            np.partition(d, 9, axis=None, kind=k)
            d.argpartition(1, axis=0, kind=k)
            d.argpartition(4, axis=1, kind=k)
            np.argpartition(d, 1, axis=0, kind=k)
            np.argpartition(d, 4, axis=1, kind=k)
            np.argpartition(d, 1, axis=None, kind=k)
            np.argpartition(d, 9, axis=None, kind=k)
            # kth out of range for the chosen axis
            assert_raises(ValueError, d.partition, 2, axis=0)
            assert_raises(ValueError, d.partition, 11, axis=1)
            assert_raises(TypeError, d.partition, 2, axis=None)
            assert_raises(ValueError, np.partition, d, 9, axis=1)
            assert_raises(ValueError, np.partition, d, 11, axis=None)
            assert_raises(ValueError, d.argpartition, 2, axis=0)
            assert_raises(ValueError, d.argpartition, 11, axis=1)
            assert_raises(ValueError, np.argpartition, d, 9, axis=1)
            assert_raises(ValueError, np.argpartition, d, 11, axis=None)
            # dtype/size matrix: every kth of shuffled 1-D and 2-D arrays
            td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
                  for s in (9, 16)]
            for dt, s in td:
                aae = assert_array_equal
                at = assert_
                d = np.arange(s, dtype=dt)
                np.random.shuffle(d)
                d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                # NOTE(review): map() is lazy on Python 3, so this shuffle
                # never executes and the rows of d1 stay sorted — TODO fix
                # by iterating, e.g. ``for row in d1: np.random.shuffle(row)``.
                map(np.random.shuffle, d1)
                d0 = np.transpose(d1)
                for i in range(d.size):
                    p = np.partition(d, i, kind=k)
                    assert_equal(p[i], i)
                    # all before are smaller
                    assert_array_less(p[:i], p[i])
                    # all after are larger
                    assert_array_less(p[i], p[i + 1:])
                    aae(p, d[np.argpartition(d, i, kind=k)])
                    p = np.partition(d1, i, axis=1, kind=k)
                    aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
                    # array_less does not seem to work right
                    at((p[:, :i].T <= p[:, i]).all(),
                       msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
                    at((p[:, i + 1:].T > p[:, i]).all(),
                       msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
                    aae(p, d1[np.arange(d1.shape[0])[:, None],
                        np.argpartition(d1, i, axis=1, kind=k)])
                    p = np.partition(d0, i, axis=0, kind=k)
                    aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
                    # array_less does not seem to work right
                    at((p[:i, :] <= p[i, :]).all(),
                       msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
                    at((p[i + 1:, :] > p[i, :]).all(),
                       msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
                    aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
                        np.arange(d0.shape[1])[None, :]])
                    # check inplace
                    dc = d.copy()
                    dc.partition(i, kind=k)
                    assert_equal(dc, np.partition(d, i, kind=k))
                    dc = d0.copy()
                    dc.partition(i, axis=0, kind=k)
                    assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
                    dc = d1.copy()
                    dc.partition(i, axis=1, kind=k)
                    assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
    def test_partition_iterative(self):
        """partition/argpartition with a sequence of kth values.

        Checks that out-of-range kth sequences raise, that negative and
        unsorted kth values work, that repeated kth values work, and that
        multiple-kth partitioning is correct along both axes of a 2-D
        array.
        """
        d = np.arange(17)
        kth = (0, 1, 2, 429, 231)
        # any out-of-range entry in the kth sequence raises
        assert_raises(ValueError, d.partition, kth)
        assert_raises(ValueError, d.argpartition, kth)
        d = np.arange(10).reshape((2, 5))
        assert_raises(ValueError, d.partition, kth, axis=0)
        assert_raises(ValueError, d.partition, kth, axis=1)
        assert_raises(ValueError, np.partition, d, kth, axis=1)
        assert_raises(ValueError, np.partition, d, kth, axis=None)
        d = np.array([3, 4, 2, 1])
        p = np.partition(d, (0, 3))
        self.assert_partitioned(p, (0, 3))
        self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
        # negative kth values are equivalent to their positive counterparts
        assert_array_equal(p, np.partition(d, (-3, -1)))
        assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
        d = np.arange(17)
        np.random.shuffle(d)
        # partitioning at every index fully sorts the array
        d.partition(range(d.size))
        assert_array_equal(np.arange(17), d)
        np.random.shuffle(d)
        assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
        # test unsorted kth
        d = np.arange(17)
        np.random.shuffle(d)
        keys = np.array([1, 3, 8, -2])
        np.random.shuffle(d)
        p = np.partition(d, keys)
        self.assert_partitioned(p, keys)
        p = d[np.argpartition(d, keys)]
        self.assert_partitioned(p, keys)
        # result must not depend on the order of the kth values
        np.random.shuffle(keys)
        assert_array_equal(np.partition(d, keys), p)
        assert_array_equal(d[np.argpartition(d, keys)], p)
        # equal kth
        d = np.arange(20)[::-1]
        self.assert_partitioned(np.partition(d, [5]*4), [5])
        self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
                                [5]*4 + [6, 13])
        self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
        self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
                                [5]*4 + [6, 13])
        d = np.arange(12)
        np.random.shuffle(d)
        d1 = np.tile(np.arange(12), (4, 1))
        # NOTE(review): map() is lazy on Python 3, so this shuffle never
        # runs and the rows of d1 stay sorted — TODO fix with a loop.
        map(np.random.shuffle, d1)
        d0 = np.transpose(d1)
        kth = (1, 6, 7, -1)
        # multiple kth along axis=1, partition vs argpartition agree
        p = np.partition(d1, kth, axis=1)
        pa = d1[np.arange(d1.shape[0])[:, None],
                d1.argpartition(kth, axis=1)]
        assert_array_equal(p, pa)
        for i in range(d1.shape[0]):
            self.assert_partitioned(p[i,:], kth)
        # same along axis=0 on the transposed data
        p = np.partition(d0, kth, axis=0)
        pa = d0[np.argpartition(d0, kth, axis=0),
                np.arange(d0.shape[1])[None,:]]
        assert_array_equal(p, pa)
        for i in range(d0.shape[1]):
            self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
    @pytest.mark.parametrize('func', (np.dot, np.matmul))
    def test_arr_mult(self, func):
        """dot/matmul gemm-vs-syrk optimization checks.

        Verifies that the syrk fast path (used when an operand is
        multiplied with its own transpose) agrees with the generic gemm
        path for all float/complex dtypes, shapes, strides and views.
        """
        a = np.array([[1, 0], [0, 1]])
        b = np.array([[0, 1], [1, 0]])
        # NOTE(review): c appears to be unused in this test.
        c = np.array([[9, 1], [1, -9]])
        d = np.arange(24).reshape(4, 6)
        # precomputed d @ d.T
        ddt = np.array(
            [[ 55, 145, 235, 325],
             [ 145, 451, 757, 1063],
             [ 235, 757, 1279, 1801],
             [ 325, 1063, 1801, 2539]]
        )
        # precomputed d.T @ d
        dtd = np.array(
            [[504, 540, 576, 612, 648, 684],
             [540, 580, 620, 660, 700, 740],
             [576, 620, 664, 708, 752, 796],
             [612, 660, 708, 756, 804, 852],
             [648, 700, 752, 804, 856, 908],
             [684, 740, 796, 852, 908, 964]]
        )
        # gemm vs syrk optimizations: identity times itself in every
        # transpose combination must stay the identity
        for et in [np.float32, np.float64, np.complex64, np.complex128]:
            eaf = a.astype(et)
            assert_equal(func(eaf, eaf), eaf)
            assert_equal(func(eaf.T, eaf), eaf)
            assert_equal(func(eaf, eaf.T), eaf)
            assert_equal(func(eaf.T, eaf.T), eaf)
            assert_equal(func(eaf.T.copy(), eaf), eaf)
            assert_equal(func(eaf, eaf.T.copy()), eaf)
            assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
        # syrk validations: the swap matrix squared is the identity
        for et in [np.float32, np.float64, np.complex64, np.complex128]:
            eaf = a.astype(et)
            ebf = b.astype(et)
            assert_equal(func(ebf, ebf), eaf)
            assert_equal(func(ebf.T, ebf), eaf)
            assert_equal(func(ebf, ebf.T), eaf)
            assert_equal(func(ebf.T, ebf.T), eaf)
        # syrk - different shape, stride, and view validations
        for et in [np.float32, np.float64, np.complex64, np.complex128]:
            edf = d.astype(et)
            assert_equal(
                func(edf[::-1, :], edf.T),
                func(edf[::-1, :].copy(), edf.T.copy())
            )
            assert_equal(
                func(edf[:, ::-1], edf.T),
                func(edf[:, ::-1].copy(), edf.T.copy())
            )
            assert_equal(
                func(edf, edf[::-1, :].T),
                func(edf, edf[::-1, :].T.copy())
            )
            assert_equal(
                func(edf, edf[:, ::-1].T),
                func(edf, edf[:, ::-1].T.copy())
            )
            assert_equal(
                func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
                func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
            )
            assert_equal(
                func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
                func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
            )
        # syrk - different shape: compare against precomputed products
        for et in [np.float32, np.float64, np.complex64, np.complex128]:
            edf = d.astype(et)
            eddtf = ddt.astype(et)
            edtdf = dtd.astype(et)
            assert_equal(func(edf, edf.T), eddtf)
            assert_equal(func(edf.T, edf), edtdf)
    @pytest.mark.parametrize('func', (np.dot, np.matmul))
    @pytest.mark.parametrize('dtype', 'ifdFD')
    def test_no_dgemv(self, func, dtype):
        """dot/matmul must check the vector argument for contiguity and
        alignment before dispatching to the BLAS gemv routine (gh-12156).
        """
        # broadcast_to produces a zero-stride (non-contiguous) vector
        a = np.arange(8.0, dtype=dtype).reshape(2, 4)
        b = np.broadcast_to(1., (4, 1))
        ret1 = func(a, b)
        ret2 = func(a, b.copy())
        assert_equal(ret1, ret2)
        ret1 = func(b.T, a.T)
        ret2 = func(b.T.copy(), a.T)
        assert_equal(ret1, ret2)
        # check for unaligned data
        dt = np.dtype(dtype)
        # offset the buffer by one int16 to force misalignment
        a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
        a = a.reshape(2, 4)
        b = a[0]
        # make sure it is not aligned
        assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
        ret1 = func(a, b)
        ret2 = func(a.copy(), b.copy())
        assert_equal(ret1, ret2)
        ret1 = func(b.T, a.T)
        ret2 = func(b.T.copy(), a.T.copy())
        assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A:
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
    def test_diagonal(self):
        """diagonal() offsets and axis selection for 2-D and 3-D arrays."""
        a = np.arange(12).reshape((3, 4))
        assert_equal(a.diagonal(), [0, 5, 10])
        assert_equal(a.diagonal(0), [0, 5, 10])
        assert_equal(a.diagonal(1), [1, 6, 11])
        assert_equal(a.diagonal(-1), [4, 9])
        # invalid axes: out of range or equal
        assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
        assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
        assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
        assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
        b = np.arange(8).reshape((2, 2, 2))
        assert_equal(b.diagonal(), [[0, 6], [1, 7]])
        assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
        assert_equal(b.diagonal(1), [[2], [3]])
        assert_equal(b.diagonal(-1), [[4], [5]])
        assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
        assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
        assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
        assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
        # Order of axis argument doesn't matter:
        assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
# zero in cblasfuncs (making it is specific to floating dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
out = np.array(1)
ret = a.trace(out=out)
assert ret is out
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
    def test_ravel(self):
        """ravel() for every order, checking both values and whether the
        result is a view (owndata False) or a copy (owndata True)."""
        a = np.array([[0, 1], [2, 3]])
        assert_equal(a.ravel(), [0, 1, 2, 3])
        assert_(not a.ravel().flags.owndata)
        assert_equal(a.ravel('F'), [0, 2, 1, 3])
        assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
        assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
        assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
        assert_(not a.ravel(order='A').flags.owndata)
        assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
        assert_(not a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel(), a.reshape(-1))

        # Fortran-ordered input: 'A' and 'K' follow the memory layout
        a = np.array([[0, 1], [2, 3]], order='F')
        assert_equal(a.ravel(), [0, 1, 2, 3])
        assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
        assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
        assert_(not a.ravel(order='A').flags.owndata)
        assert_(not a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel(), a.reshape(-1))
        assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))

        # negative-stride view of a C array
        a = np.array([[0, 1], [2, 3]])[::-1, :]
        assert_equal(a.ravel(), [2, 3, 0, 1])
        assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
        assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
        assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
        # 'K' doesn't reverse the axes of negative strides
        assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
        assert_(a.ravel(order='K').flags.owndata)

        # Test simple 1-d copy behaviour:
        a = np.arange(10)[::2]
        assert_(a.ravel('K').flags.owndata)
        assert_(a.ravel('C').flags.owndata)
        assert_(a.ravel('F').flags.owndata)

        # Not contiguous and 1-sized axis with non matching stride
        a = np.arange(2**3 * 2)[::2]
        a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
        strides = list(a.strides)
        strides[1] = 123
        a.strides = strides
        assert_(a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel('K'), np.arange(0, 15, 2))

        # contiguous and 1-sized axis with non matching stride works:
        a = np.arange(2**3)
        a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
        strides = list(a.strides)
        strides[1] = 123
        a.strides = strides
        assert_(np.may_share_memory(a.ravel(order='K'), a))
        assert_equal(a.ravel(order='K'), np.arange(2**3))

        # Test negative strides (not very interesting since non-contiguous):
        a = np.arange(4)[::-1].reshape(2, 2)
        assert_(a.ravel(order='C').flags.owndata)
        assert_(a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel('C'), [3, 2, 1, 0])
        assert_equal(a.ravel('K'), [3, 2, 1, 0])

        # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
        a = np.array([[1]])
        a.strides = (123, 432)
        # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
        # them up on purpose:
        if np.ones(1).strides == (8,):
            assert_(np.may_share_memory(a.ravel('K'), a))
            assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))

        for order in ('C', 'F', 'A', 'K'):
            # 0-d corner case:
            a = np.array(0)
            assert_equal(a.ravel(order), [0])
            assert_(np.may_share_memory(a.ravel(order), a))

        # Test that certain non-inplace ravels work right (mostly) for 'K':
        b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
        a = b[..., ::2]
        assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
        assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
        assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
        assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])

        a = b[::2, ...]
        assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
        assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
        assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
        assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
    def test_swapaxes(self):
        """swapaxes for every axis pair (positive and negative) on both
        contiguous and non-contiguous inputs; verifies shape, contents,
        view semantics (gh-5260), and axis-range errors."""
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
        idx = np.indices(a.shape)
        assert_(a.flags['OWNDATA'])
        b = a.copy()
        # check exceptions
        assert_raises(np.AxisError, a.swapaxes, -5, 0)
        assert_raises(np.AxisError, a.swapaxes, 4, 0)
        assert_raises(np.AxisError, a.swapaxes, 0, -5)
        assert_raises(np.AxisError, a.swapaxes, 0, 4)
        for i in range(-4, 4):
            for j in range(-4, 4):
                # k == 0: contiguous source; k == 1: non-contiguous source
                for k, src in enumerate((a, b)):
                    c = src.swapaxes(i, j)
                    # check shape
                    shape = list(src.shape)
                    shape[i] = src.shape[j]
                    shape[j] = src.shape[i]
                    assert_equal(c.shape, shape, str((i, j, k)))
                    # check array contents
                    i0, i1, i2, i3 = [dim-1 for dim in c.shape]
                    j0, j1, j2, j3 = [dim-1 for dim in src.shape]
                    assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
                                 c[idx[i0], idx[i1], idx[i2], idx[i3]],
                                 str((i, j, k)))
                    # check a view is always returned, gh-5260
                    assert_(not c.flags['OWNDATA'], str((i, j, k)))
                    # check on non-contiguous input array
                    if k == 1:
                        b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(TypeError, lambda: a.conj())
assert_raises(TypeError, lambda: a.conjugate())
def test_conjugate_out(self):
# Minimal test for the out argument being passed on correctly
# NOTE: The ability to pass `out` is currently undocumented!
a = np.array([1-1j, 1+1j, 23+23.0j])
out = np.empty_like(a)
res = a.conjugate(out)
assert res is out
assert_array_equal(out, a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
# NOTE(review): class name looks like a typo for "TestSequenceMethods";
# kept as-is to preserve the public identifier.
class TestCequenceMethods:
    """Sequence-protocol behavior of ndarrays (``in`` / ``not in``)."""

    def test_array_contains(self):
        grid = np.arange(16.).reshape(4, 4)
        assert_(4.0 in grid)
        assert_(20.0 not in grid)
class TestBinop:
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
    def test_ufunc_binop_interaction(self):
        """Exhaustively check how ndarray binary operators interact with
        other objects depending on ``__array_priority__`` and
        ``__array_ufunc__``.

        For every binary op in ``ops``, ``check`` verifies the forward,
        reverse and (where available) in-place paths against the
        expectations encoded in its three boolean arguments.
        """
        # Python method name (without underscores)
        #   -> (numpy ufunc, has_in_place_version, preferred_dtype)
        ops = {
            'add': (np.add, True, float),
            'sub': (np.subtract, True, float),
            'mul': (np.multiply, True, float),
            'truediv': (np.true_divide, True, float),
            'floordiv': (np.floor_divide, True, float),
            'mod': (np.remainder, True, float),
            'divmod': (np.divmod, False, float),
            'pow': (np.power, True, int),
            'lshift': (np.left_shift, True, int),
            'rshift': (np.right_shift, True, int),
            'and': (np.bitwise_and, True, int),
            'xor': (np.bitwise_xor, True, int),
            'or': (np.bitwise_or, True, int),
            'matmul': (np.matmul, False, float),
            # 'ge': (np.less_equal, False),
            # 'gt': (np.less, False),
            # 'le': (np.greater_equal, False),
            # 'lt': (np.greater, False),
            # 'eq': (np.equal, False),
            # 'ne': (np.not_equal, False),
        }
        # Raised by __array__ so any silent coercion to ndarray is detected.
        class Coerced(Exception):
            pass
        def array_impl(self):
            raise Coerced
        # Placeholder dunder implementations; their sentinel return values
        # tell us which path Python/numpy actually took.
        def op_impl(self, other):
            return "forward"
        def rop_impl(self, other):
            return "reverse"
        def iop_impl(self, other):
            return "in-place"
        def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
            return ("__array_ufunc__", ufunc, method, args, kwargs)
        # Create an object with the given base, in the given module, with a
        # bunch of placeholder __op__ methods, and optionally a
        # __array_ufunc__ and __array_priority__.
        def make_obj(base, array_priority=False, array_ufunc=False,
                     alleged_module="__main__"):
            class_namespace = {"__array__": array_impl}
            if array_priority is not False:
                class_namespace["__array_priority__"] = array_priority
            for op in ops:
                class_namespace["__{0}__".format(op)] = op_impl
                class_namespace["__r{0}__".format(op)] = rop_impl
                class_namespace["__i{0}__".format(op)] = iop_impl
            if array_ufunc is not False:
                class_namespace["__array_ufunc__"] = array_ufunc
            # eval in a controlled namespace so the class's __module__ can be
            # faked (alleged_module) — some dispatch rules look at it.
            eval_namespace = {"base": base,
                              "class_namespace": class_namespace,
                              "__name__": alleged_module,
                              }
            MyType = eval("type('MyType', (base,), class_namespace)",
                          eval_namespace)
            if issubclass(MyType, np.ndarray):
                # Use this range to avoid special case weirdnesses around
                # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
                return np.arange(3, 7).reshape(2, 2).view(MyType)
            else:
                return MyType()
        def check(obj, binop_override_expected, ufunc_override_expected,
                  inplace_override_expected, check_scalar=True):
            for op, (ufunc, has_inplace, dtype) in ops.items():
                err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
                           % (op, ufunc, has_inplace, dtype))
                # Check both a 2x2 array and (optionally) an array scalar.
                check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
                if check_scalar:
                    check_objs.append(check_objs[0][0])
                for arr in check_objs:
                    arr_method = getattr(arr, "__{0}__".format(op))
                    # divmod returns a tuple; compare via its first element.
                    def first_out_arg(result):
                        if op == "divmod":
                            assert_(isinstance(result, tuple))
                            return result[0]
                        else:
                            return result
                    # arr __op__ obj
                    if binop_override_expected:
                        assert_equal(arr_method(obj), NotImplemented, err_msg)
                    elif ufunc_override_expected:
                        assert_equal(arr_method(obj)[0], "__array_ufunc__",
                                     err_msg)
                    else:
                        if (isinstance(obj, np.ndarray) and
                            (type(obj).__array_ufunc__ is
                             np.ndarray.__array_ufunc__)):
                            # __array__ gets ignored
                            res = first_out_arg(arr_method(obj))
                            assert_(res.__class__ is obj.__class__, err_msg)
                        else:
                            assert_raises((TypeError, Coerced),
                                          arr_method, obj, err_msg=err_msg)
                    # obj __op__ arr
                    arr_rmethod = getattr(arr, "__r{0}__".format(op))
                    if ufunc_override_expected:
                        res = arr_rmethod(obj)
                        assert_equal(res[0], "__array_ufunc__",
                                     err_msg=err_msg)
                        assert_equal(res[1], ufunc, err_msg=err_msg)
                    else:
                        if (isinstance(obj, np.ndarray) and
                            (type(obj).__array_ufunc__ is
                             np.ndarray.__array_ufunc__)):
                            # __array__ gets ignored
                            res = first_out_arg(arr_rmethod(obj))
                            assert_(res.__class__ is obj.__class__, err_msg)
                        else:
                            # __array_ufunc__ = "asdf" creates a TypeError
                            assert_raises((TypeError, Coerced),
                                          arr_rmethod, obj, err_msg=err_msg)
                    # arr __iop__ obj
                    # array scalars don't have in-place operators
                    if has_inplace and isinstance(arr, np.ndarray):
                        arr_imethod = getattr(arr, "__i{0}__".format(op))
                        if inplace_override_expected:
                            # NOTE(review): this calls arr_method, not
                            # arr_imethod — possibly intentional (the in-place
                            # path defers the same way), but worth confirming.
                            assert_equal(arr_method(obj), NotImplemented,
                                         err_msg=err_msg)
                        elif ufunc_override_expected:
                            res = arr_imethod(obj)
                            assert_equal(res[0], "__array_ufunc__", err_msg)
                            assert_equal(res[1], ufunc, err_msg)
                            # in-place call must pass out= as a tuple whose
                            # first entry is the array being operated on
                            assert_(type(res[-1]["out"]) is tuple, err_msg)
                            assert_(res[-1]["out"][0] is arr, err_msg)
                        else:
                            if (isinstance(obj, np.ndarray) and
                                    (type(obj).__array_ufunc__ is
                                     np.ndarray.__array_ufunc__)):
                                # __array__ gets ignored
                                assert_(arr_imethod(obj) is arr, err_msg)
                            else:
                                assert_raises((TypeError, Coerced),
                                              arr_imethod, obj,
                                              err_msg=err_msg)
                    # Also exercise the operator via the operator/builtins
                    # module (e.g. 'and' lives at operator.and_, divmod in
                    # builtins) so Python's full dispatch machinery runs.
                    op_fn = getattr(operator, op, None)
                    if op_fn is None:
                        op_fn = getattr(operator, op + "_", None)
                    if op_fn is None:
                        op_fn = getattr(builtins, op)
                    assert_equal(op_fn(obj, arr), "forward", err_msg)
                    if not isinstance(obj, np.ndarray):
                        if binop_override_expected:
                            assert_equal(op_fn(arr, obj), "reverse", err_msg)
                        elif ufunc_override_expected:
                            assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
                                         err_msg)
                    if ufunc_override_expected:
                        assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
                                     err_msg)
        # No array priority, no array_ufunc -> nothing called
        check(make_obj(object), False, False, False)
        # Negative array priority, no array_ufunc -> nothing called
        # (has to be very negative, because scalar priority is -1000000.0)
        check(make_obj(object, array_priority=-2**30), False, False, False)
        # Positive array priority, no array_ufunc -> binops and iops only
        check(make_obj(object, array_priority=1), True, False, True)
        # ndarray ignores array_priority for ndarray subclasses
        check(make_obj(np.ndarray, array_priority=1), False, False, False,
              check_scalar=False)
        # Positive array_priority and array_ufunc -> array_ufunc only
        check(make_obj(object, array_priority=1,
                       array_ufunc=array_ufunc_impl), False, True, False)
        check(make_obj(np.ndarray, array_priority=1,
                       array_ufunc=array_ufunc_impl), False, True, False)
        # array_ufunc set to None -> defer binops only
        check(make_obj(object, array_ufunc=None), True, False, False)
        check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
              check_scalar=False)
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
    def test_array_ufunc_index(self):
        """Verify the position at which an overriding object is passed to
        ``__array_ufunc__``: its index among the inputs, or a 1-tuple with
        its index among the outputs."""
        # Check that index is set appropriately, also if only an output
        # is passed on (latter is another regression tests for github bug 4753)
        # This also checks implicitly that 'out' is always a tuple.
        class CheckIndex:
            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
                # Return the input index if self is an input...
                for i, a in enumerate(inputs):
                    if a is self:
                        return i
                # calls below mean we must be in an output.
                for j, a in enumerate(kw['out']):
                    if a is self:
                        return (j,)
        a = CheckIndex()
        dummy = np.arange(2.)
        # 1 input, 1 output
        assert_equal(np.sin(a), 0)
        assert_equal(np.sin(dummy, a), (0,))
        assert_equal(np.sin(dummy, out=a), (0,))
        assert_equal(np.sin(dummy, out=(a,)), (0,))
        # when the object is both input and output, the input index wins
        assert_equal(np.sin(a, a), 0)
        assert_equal(np.sin(a, out=a), 0)
        assert_equal(np.sin(a, out=(a,)), 0)
        # 1 input, 2 outputs
        assert_equal(np.modf(dummy, a), (0,))
        assert_equal(np.modf(dummy, None, a), (1,))
        assert_equal(np.modf(dummy, dummy, a), (1,))
        assert_equal(np.modf(dummy, out=(a, None)), (0,))
        assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
        assert_equal(np.modf(dummy, out=(None, a)), (1,))
        assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
        assert_equal(np.modf(a, out=(dummy, a)), 0)
        with assert_raises(TypeError):
            # Out argument must be tuple, since there are multiple outputs
            np.modf(dummy, out=a)
        # an out tuple of the wrong length is rejected
        assert_raises(ValueError, np.modf, dummy, out=(a,))
        # 2 inputs, 1 output
        assert_equal(np.add(a, dummy), 0)
        assert_equal(np.add(dummy, a), 1)
        assert_equal(np.add(dummy, dummy, a), (0,))
        assert_equal(np.add(dummy, a, a), 1)
        assert_equal(np.add(dummy, dummy, out=a), (0,))
        assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
        assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass:
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide:
    """Tests of the temporary-elision optimization (reusing a refcount-1
    temporary as the output of the next operation).

    NOTE(review): elision is triggered by the exact shape of the
    expressions and their reference counts, so the statements here must
    not be restructured.
    """
    # elision is only triggered on relatively large arrays
    def test_extension_incref_elide(self):
        # test extension (e.g. cython) calling PyNumber_* slots without
        # increasing the reference counts
        #
        # def incref_elide(a):
        #     d = input.copy() # refcount 1
        #     return d, d + d # PyNumber_Add without increasing refcount
        from numpy.core._multiarray_tests import incref_elide
        d = np.ones(100000)
        orig, res = incref_elide(d)
        d + d
        # the return original should not be changed to an inplace operation
        assert_array_equal(orig, d)
        assert_array_equal(res, d + d)
    def test_extension_incref_elide_stack(self):
        # scanning if the refcount == 1 object is on the python stack to check
        # that we are called directly from python is flawed as object may still
        # be above the stack pointer and we have no access to the top of it
        #
        # def incref_elide_l(d):
        #     return l[4] + l[4] # PyNumber_Add without increasing refcount
        from numpy.core._multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
        l = [1, 1, 1, 1, np.ones(100000)]
        res = incref_elide_l(l)
        # the return original should not be changed to an inplace operation
        assert_array_equal(l[4], np.ones(100000))
        assert_array_equal(res, l[4] + l[4])
    def test_temporary_with_cast(self):
        # check that we don't elide into a temporary which would need casting
        d = np.ones(200000, dtype=np.int64)
        # int64 + huge python int promotes to object dtype
        assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
        r = ((d + d) / 2)
        assert_equal(r.dtype, np.dtype('f8'))
        r = np.true_divide((d + d), 2)
        assert_equal(r.dtype, np.dtype('f8'))
        r = ((d + d) / 2.)
        assert_equal(r.dtype, np.dtype('f8'))
        r = ((d + d) // 2)
        assert_equal(r.dtype, np.dtype(np.int64))
        # commutative elision into the astype result
        f = np.ones(100000, dtype=np.float32)
        assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
        # no elision into lower type
        d = f.astype(np.float64)
        assert_equal(((f + f) + d).dtype, d.dtype)
        l = np.ones(100000, dtype=np.longdouble)
        assert_equal(((d + d) + l).dtype, l.dtype)
        # test unary abs with different output dtype
        for dt in (np.complex64, np.complex128, np.clongdouble):
            c = np.ones(100000, dtype=dt)
            r = abs(c * 2.0)
            # abs of a complex temporary yields the matching real dtype
            assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
    def test_elide_broadcast(self):
        # test no elision on broadcast to higher dimension
        # only triggers elision code path in debug mode as triggering it in
        # normal mode needs 256kb large matching dimension, so a lot of memory
        d = np.ones((2000, 1), dtype=int)
        b = np.ones((2000), dtype=bool)
        r = (1 - d) + b
        assert_equal(r, 1)
        assert_equal(r.shape, (2000, 2000))
    def test_elide_scalar(self):
        # check inplace op does not create ndarray from scalars
        a = np.bool_()
        assert_(type(~(a & a)) is np.bool_)
    def test_elide_scalar_readonly(self):
        # The imaginary part of a real array is readonly. This needs to go
        # through fast_scalar_power which is only called for powers of
        # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
        # elision which can be gotten for the imaginary part of a real
        # array. Should not error.
        a = np.empty(100000, dtype=np.float64)
        a.imag ** 2
    def test_elide_readonly(self):
        # don't try to elide readonly temporaries
        r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
        assert_equal(r, 0)
    def test_elide_updateifcopy(self):
        # writeback through a.flat's buffer must not be elided away
        a = np.ones(2**20)[::2]
        b = a.flat.__array__() + 1
        del b
        assert_equal(a, 1)
class TestCAPI:
    """Checks of C-API helpers exposed through ``_multiarray_tests``."""

    def test_IsPythonScalar(self):
        from numpy.core._multiarray_tests import IsPythonScalar
        # bytes, small int, arbitrary-precision int, float, and str must
        # all register as Python scalars.
        for scalar in (b'foobar', 1, 2**80, 2., "a"):
            assert_(IsPythonScalar(scalar))
class TestSubscripting:
    """Result types of integer vs. ellipsis-augmented indexing."""

    def test_test_zero_rank(self):
        vec = np.array([1, 2, 3])
        # A plain integer index produces an array scalar...
        assert_(isinstance(vec[0], np.int_))
        # ...while adding an ellipsis keeps an ndarray.
        assert_(type(vec[0, ...]) is np.ndarray)
class TestPickling:
    """Pickle round-trips of ndarrays, including protocol-5 out-of-band
    buffers and backward compatibility with historical pickle formats."""
    # NOTE(review): the reason string below is missing a space between
    # "to" and "protocol" — cosmetic only, left unchanged here.
    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to'
                                'protocol 5 although it is not available'))
    def test_correct_protocol5_error_message(self):
        array = np.arange(10)
        if sys.version_info[:2] in ((3, 6), (3, 7)):
            # For the specific case of python3.6 and 3.7, raise a clear import
            # error about the pickle5 backport when trying to use protocol=5
            # without the pickle5 package
            with pytest.raises(ImportError):
                array.__reduce_ex__(5)
    def test_record_array_with_object_dtype(self):
        # structured dtypes must round-trip whether or not they contain
        # object fields
        my_object = object()
        arr_with_object = np.array(
                [(my_object, 1, 2.0)],
                dtype=[('a', object), ('b', int), ('c', float)])
        arr_without_object = np.array(
                [('xxx', 1, 2.0)],
                dtype=[('a', str), ('b', int), ('c', float)])
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            depickled_arr_with_object = pickle.loads(
                    pickle.dumps(arr_with_object, protocol=proto))
            depickled_arr_without_object = pickle.loads(
                    pickle.dumps(arr_without_object, protocol=proto))
            assert_equal(arr_with_object.dtype,
                         depickled_arr_with_object.dtype)
            assert_equal(arr_without_object.dtype,
                         depickled_arr_without_object.dtype)
    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
                        reason="requires pickle protocol 5")
    def test_f_contiguous_array(self):
        f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
        buffers = []
        # When using pickle protocol 5, Fortran-contiguous arrays can be
        # serialized using out-of-band buffers
        bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
                                    buffer_callback=buffers.append)
        assert len(buffers) > 0
        depickled_f_contiguous_array = pickle.loads(bytes_string,
                                                    buffers=buffers)
        assert_equal(f_contiguous_array, depickled_f_contiguous_array)
    def test_non_contiguous_array(self):
        non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
        assert not non_contiguous_array.flags.c_contiguous
        assert not non_contiguous_array.flags.f_contiguous
        # make sure non-contiguous arrays can be pickled-depickled
        # using any protocol
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            depickled_non_contiguous_array = pickle.loads(
                pickle.dumps(non_contiguous_array, protocol=proto))
            assert_equal(non_contiguous_array, depickled_non_contiguous_array)
    def test_roundtrip(self):
        # C-ordered, transposed, and structured arrays all round-trip
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            carray = np.array([[2, 9], [7, 0], [3, 8]])
            DATA = [
                carray,
                np.transpose(carray),
                np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
                                                   ('c', float)])
            ]
            refs = [weakref.ref(a) for a in DATA]
            for a in DATA:
                assert_equal(
                        a, pickle.loads(pickle.dumps(a, protocol=proto)),
                        err_msg="%r" % a)
            del a, DATA, carray
            break_cycles()
            # check for reference leaks (gh-12793)
            for ref in refs:
                assert ref() is None
    def _loads(self, obj):
        # historical pickles were produced on Python 2; decode as latin1
        return pickle.loads(obj, encoding='latin1')
    # version 0 pickles, using protocol=2 to pickle
    # version 0 doesn't have a version field
    def test_version0_int8(self):
        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = np.array([1, 2, 3, 4], dtype=np.int8)
        p = self._loads(s)
        assert_equal(a, p)
    def test_version0_float32(self):
        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(s)
        assert_equal(a, p)
    def test_version0_object(self):
        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a': 1}, {'b': 2}])
        p = self._loads(s)
        assert_equal(a, p)
    # version 1 pickles, using protocol=2 to pickle
    def test_version1_int8(self):
        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = np.array([1, 2, 3, 4], dtype=np.int8)
        p = self._loads(s)
        assert_equal(a, p)
    def test_version1_float32(self):
        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(s)
        assert_equal(a, p)
    def test_version1_object(self):
        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a': 1}, {'b': 2}])
        p = self._loads(s)
        assert_equal(a, p)
    def test_subarray_int_shape(self):
        # pickle from a numpy version that stored subarray shapes as ints
        s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(s)
        assert_equal(a, p)
    def test_datetime64_byteorder(self):
        original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
        # build a byte-swapped copy of the same data
        original_byte_reversed = original.copy(order='K')
        original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
        original_byte_reversed.byteswap(inplace=True)
        new = pickle.loads(pickle.dumps(original_byte_reversed))
        # unpickling must preserve the (native) dtype semantics
        assert_equal(original.dtype, new.dtype)
class TestFancyIndexing:
    """Fancy (list/tuple index and boolean mask) selection and assignment."""

    def test_list(self):
        # Assigning through a one-element list index broadcasts the scalar.
        arr = np.ones((1, 1))
        arr[:, [0]] = 2.0
        assert_array_equal(arr, np.array([[2.0]]))

        arr = np.ones((1, 1, 1))
        arr[:, :, [0]] = 2.0
        assert_array_equal(arr, np.array([[[2.0]]]))

    def test_tuple(self):
        # Same as test_list, but the fancy index is a tuple.
        arr = np.ones((1, 1))
        arr[:, (0,)] = 2.0
        assert_array_equal(arr, np.array([[2.0]]))

        arr = np.ones((1, 1, 1))
        arr[:, :, (0,)] = 2.0
        assert_array_equal(arr, np.array([[[2.0]]]))

    def test_mask(self):
        data = np.array([1, 2, 3, 4])
        mask = np.array([0, 1, 0, 0], bool)
        assert_array_equal(data[mask], np.array([2]))

    def test_mask2(self):
        data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = np.array([0, 1], bool)
        two_hits = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        one_hit = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        # A 1-d mask selects whole rows; full-shape masks select elements.
        assert_array_equal(data[row_mask], np.array([[5, 6, 7, 8]]))
        assert_array_equal(data[two_hits], np.array([2, 5]))
        assert_array_equal(data[one_hit], np.array([2]))

    def test_assign_mask(self):
        data = np.array([1, 2, 3, 4])
        mask = np.array([0, 1, 0, 0], bool)
        data[mask] = 5
        assert_array_equal(data, np.array([1, 5, 3, 4]))

    def test_assign_mask2(self):
        template = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = np.array([0, 1], bool)
        two_hits = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        one_hit = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)

        data = template.copy()
        data[row_mask] = 10
        assert_array_equal(data, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))

        data = template.copy()
        data[two_hits] = 10
        assert_array_equal(data, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))

        data = template.copy()
        data[one_hit] = 10
        assert_array_equal(data, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare:
    """Elementwise rich comparisons on byte-string and unicode arrays
    must agree with the corresponding pure-Python scalar comparisons."""

    def test_string(self):
        g1 = np.array(["This", "is", "example"])
        g2 = np.array(["This", "was", "example"])
        for cmp_op in (operator.eq, operator.ne, operator.le, operator.ge,
                       operator.lt, operator.gt):
            expected = [cmp_op(g1[i], g2[i]) for i in [0, 1, 2]]
            assert_array_equal(cmp_op(g1, g2), expected)

    def test_mixed(self):
        g1 = np.array(["spam", "spa", "spammer", "and eggs"])
        g2 = "spam"
        # Comparison against a Python scalar broadcasts elementwise.
        for cmp_op in (operator.eq, operator.ne, operator.lt, operator.gt,
                       operator.le, operator.ge):
            assert_array_equal(cmp_op(g1, g2), [cmp_op(x, g2) for x in g1])

    def test_unicode(self):
        g1 = np.array([u"This", u"is", u"example"])
        g2 = np.array([u"This", u"was", u"example"])
        for cmp_op in (operator.eq, operator.ne, operator.le, operator.ge,
                       operator.lt, operator.gt):
            expected = [cmp_op(g1[i], g2[i]) for i in [0, 1, 2]]
            assert_array_equal(cmp_op(g1, g2), expected)
class TestArgmaxArgminCommon:
    """Behavior shared by argmax and argmin: keepdims, out=, axis
    handling, and object arrays containing NULLs."""
    # assorted shapes from 0-d up to 4-d
    sizes = [(), (3,), (3, 2), (2, 3),
             (3, 3), (2, 3, 4), (4, 3, 2),
             (1, 2, 3, 4), (2, 3, 4, 1),
             (3, 4, 1, 2), (4, 1, 2, 3)]
    @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
        for axis in list(range(-len(size), len(size))) + [None]]
        for size in sizes]))
    @pytest.mark.parametrize('method', [np.argmax, np.argmin])
    def test_np_argmin_argmax_keepdims(self, size, axis, method):
        # keepdims=True must equal the reshaped non-keepdims result, and
        # out= must be respected (and validated) in both layouts.
        arr = np.random.normal(size=size)
        # contiguous arrays
        if axis is None:
            new_shape = [1 for _ in range(len(size))]
        else:
            new_shape = list(size)
            new_shape[axis] = 1
        new_shape = tuple(new_shape)
        _res_orig = method(arr, axis=axis)
        res_orig = _res_orig.reshape(new_shape)
        res = method(arr, axis=axis, keepdims=True)
        assert_equal(res, res_orig)
        assert_(res.shape == new_shape)
        outarray = np.empty(res.shape, dtype=res.dtype)
        res1 = method(arr, axis=axis, out=outarray,
                      keepdims=True)
        assert_(res1 is outarray)
        assert_equal(res, outarray)
        if len(size) > 0:
            # an out array of the wrong shape must be rejected
            wrong_shape = list(new_shape)
            if axis is not None:
                wrong_shape[axis] = 2
            else:
                wrong_shape[0] = 2
            wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
            with pytest.raises(ValueError):
                method(arr.T, axis=axis,
                       out=wrong_outarray, keepdims=True)
        # non-contiguous arrays
        if axis is None:
            new_shape = [1 for _ in range(len(size))]
        else:
            new_shape = list(size)[::-1]
            new_shape[axis] = 1
        new_shape = tuple(new_shape)
        _res_orig = method(arr.T, axis=axis)
        res_orig = _res_orig.reshape(new_shape)
        res = method(arr.T, axis=axis, keepdims=True)
        assert_equal(res, res_orig)
        assert_(res.shape == new_shape)
        outarray = np.empty(new_shape[::-1], dtype=res.dtype)
        outarray = outarray.T
        res1 = method(arr.T, axis=axis, out=outarray,
                      keepdims=True)
        assert_(res1 is outarray)
        assert_equal(res, outarray)
        if len(size) > 0:
            # one dimension lesser for non-zero sized
            # array should raise an error
            with pytest.raises(ValueError):
                method(arr[0], axis=axis,
                       out=outarray, keepdims=True)
        if len(size) > 0:
            wrong_shape = list(new_shape)
            if axis is not None:
                wrong_shape[axis] = 2
            else:
                wrong_shape[0] = 2
            wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
            with pytest.raises(ValueError):
                method(arr.T, axis=axis,
                       out=wrong_outarray, keepdims=True)
    @pytest.mark.parametrize('method', ['max', 'min'])
    def test_all(self, method):
        # picking values at the arg-result indices must reproduce the
        # actual max/min along every axis
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        arg_method = getattr(a, 'arg' + method)
        val_method = getattr(a, method)
        for i in range(a.ndim):
            a_maxmin = val_method(i)
            aarg_maxmin = arg_method(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(a_maxmin == aarg_maxmin.choose(
                *a.transpose(i, *axes))))
    @pytest.mark.parametrize('method', ['argmax', 'argmin'])
    def test_output_shape(self, method):
        # see also gh-616
        a = np.ones((10, 5))
        arg_method = getattr(a, method)
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, arg_method, -1, out)
        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, arg_method, -1, out)
        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, arg_method, -1, out)
        # the correctly-shaped out array is filled in place
        out = np.ones(10, dtype=np.int_)
        arg_method(-1, out=out)
        assert_equal(out, arg_method(-1))
    @pytest.mark.parametrize('ndim', [0, 1])
    @pytest.mark.parametrize('method', ['argmax', 'argmin'])
    def test_ret_is_out(self, ndim, method):
        # the returned object must be the provided out array itself
        a = np.ones((4,) + (3,)*ndim)
        arg_method = getattr(a, method)
        out = np.empty((3,)*ndim, dtype=np.intp)
        ret = arg_method(axis=0, out=out)
        assert ret is out
    @pytest.mark.parametrize('np_array, method, idx, val',
        [(np.zeros, 'argmax', 5942, "as"),
         (np.ones, 'argmin', 6001, "0")])
    def test_unicode(self, np_array, method, idx, val):
        # plant a single extreme string value and find its index
        d = np_array(6031, dtype='<U9')
        arg_method = getattr(d, method)
        d[idx] = val
        assert_equal(arg_method(), idx)
    @pytest.mark.parametrize('arr_method, np_method',
        [('argmax', np.argmax),
         ('argmin', np.argmin)])
    def test_np_vs_ndarray(self, arr_method, np_method):
        # make sure both ndarray.argmax/argmin and
        # numpy.argmax/argmin support out/axis args
        a = np.random.normal(size=(2, 3))
        arg_method = getattr(a, arr_method)
        # check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.zeros(2, dtype=int)
        assert_equal(arg_method(1, out1), np_method(a, 1, out2))
        assert_equal(out1, out2)
        # check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.zeros(3, dtype=int)
        assert_equal(arg_method(out=out1, axis=0),
                     np_method(a, out=out2, axis=0))
        assert_equal(out1, out2)
    @pytest.mark.leaks_references(reason="replaces None with NULL.")
    @pytest.mark.parametrize('method, vals',
        [('argmax', (10, 30)),
         ('argmin', (30, 10))])
    def test_object_with_NULLs(self, method, vals):
        # See gh-6032
        a = np.empty(4, dtype='O')
        arg_method = getattr(a, method)
        # zero out the object pointers so the array holds NULLs
        ctypes.memset(a.ctypes.data, 0, a.nbytes)
        assert_equal(arg_method(), 0)
        a[3] = vals[0]
        assert_equal(arg_method(), 3)
        a[1] = vals[1]
        assert_equal(arg_method(), 1)
class TestArgmax:
    """argmax over floats/complex with NaNs, datetimes with NaTs,
    timedeltas, and booleans — each (array, expected index) pair
    encodes numpy's NaN/NaT-propagation rules."""
    # (input array, expected argmax index)
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),
        # Assorted tests with NaTs
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 0),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 2),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 2),
        ([np.timedelta64('NaT', 's')] * 3, 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),
        ([False, False, False, False, True], 4),
        ([False, False, False, True, False], 3),
        ([True, False, False, False, False], 0),
        ([True, False, True, False, False], 0),
    ]
    @pytest.mark.parametrize('data', nan_arr)
    def test_combinations(self, data):
        # argmax must point at the max value (NaN-aware) for each case
        arr, pos = data
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning,
                       "invalid value encountered in reduce")
            val = np.max(arr)
        assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
        assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
    def test_maximum_signed_integers(self):
        # the signed maximum must win over values near the wraparound edge
        a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
        assert_equal(np.argmax(a), 1)
        a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16)
        assert_equal(np.argmax(a), 1)
        a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32)
        assert_equal(np.argmax(a), 1)
        a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64)
        assert_equal(np.argmax(a), 1)
class TestArgmin:
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
    def test_combinations(self):
        # For every (array, expected-argmin-index) pair in ``self.nan_arr``,
        # np.argmin must return the expected position, and the element at
        # that position must agree with np.min (NaN/NaT entries propagate
        # and therefore win).
        for arr, pos in self.nan_arr:
            with suppress_warnings() as sup:
                # Reducing over complex NaNs may emit this RuntimeWarning;
                # it is expected, so silence it for the np.min call only.
                sup.filter(RuntimeWarning,
                           "invalid value encountered in reduce")
                min_val = np.min(arr)

            assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1, 2**7 - 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1, 2**15 - 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1, 2**31 - 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1, 2**63 - 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
class TestMinMax:
    """Tests for np.amax/np.amin on scalars, axes, and datetime arrays."""

    def test_scalar(self):
        # A 0-d input only accepts axis 0 or None; anything else is an error.
        for reduction in (np.amax, np.amin):
            assert_raises(np.AxisError, reduction, 1, 1)
        for reduction in (np.amax, np.amin):
            assert_equal(reduction(1, axis=0), 1)
            assert_equal(reduction(1, axis=None), 1)

    def test_axis(self):
        # An out-of-range axis raises; a valid axis reduces along it.
        assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)

    def test_datetime(self):
        # NaT must propagate: amin/amax do not ignore it.
        for dtype in ('m8[s]', 'm8[Y]'):
            arr = np.arange(10).astype(dtype)
            assert_equal(np.amin(arr), arr[0])
            assert_equal(np.amax(arr), arr[9])
            arr[3] = 'NaT'
            assert_equal(np.amin(arr), arr[3])
            assert_equal(np.amax(arr), arr[3])
class TestNewaxis:
    def test_basic(self):
        """Scaling a newaxis-expanded vector must match scaling the vector."""
        base = np.array([0, -0.1, 0.1])
        expanded = 250 * base[:, np.newaxis]
        # Flattening the (3, 1) result recovers the plainly-scaled vector.
        assert_almost_equal(expanded.ravel(), 250 * base)
class TestClip:
    """Tests for ndarray.clip across dtype groups, byte orders, and NaNs."""

    def _check_range(self, x, cmin, cmax):
        # Every value must have been clamped into [cmin, cmax].
        assert_(np.all(x >= cmin))
        assert_(np.all(x <= cmax))

    def _clip_type(self, type_group, array_max,
                   clip_min, clip_max, inplace=False,
                   expected_min=None, expected_max=None):
        # Clip random data for every dtype in ``np.sctypes[type_group]`` and
        # both byte orders, then verify the resulting range and byte order.
        # ``expected_min``/``expected_max`` override the expected bounds when
        # the clip limits do not fit the destination dtype (e.g. uint).
        if expected_min is None:
            expected_min = clip_min
        if expected_max is None:
            expected_max = clip_max

        for T in np.sctypes[type_group]:
            # Test the native order plus the one foreign order for this host.
            if sys.byteorder == 'little':
                byte_orders = ['=', '>']
            else:
                byte_orders = ['<', '=']

            for byteorder in byte_orders:
                dtype = np.dtype(T).newbyteorder(byteorder)

                x = (np.random.random(1000) * array_max).astype(dtype)
                if inplace:
                    # The tests that call us pass clip_min and clip_max that
                    # might not fit in the destination dtype. They were written
                    # assuming the previous unsafe casting, which now must be
                    # passed explicitly to avoid a warning.
                    x.clip(clip_min, clip_max, x, casting='unsafe')
                else:
                    x = x.clip(clip_min, clip_max)
                    # Out-of-place clip returns a native-order result.
                    byteorder = '='

                if x.dtype.byteorder == '|':
                    byteorder = '|'
                assert_equal(x.dtype.byteorder, byteorder)
                self._check_range(x, expected_min, expected_max)
        return x

    def test_basic(self):
        for inplace in [False, True]:
            self._clip_type(
                'float', 1024, -12.8, 100.2, inplace=inplace)
            self._clip_type(
                'float', 1024, 0, 0, inplace=inplace)

            self._clip_type(
                'int', 1024, -120, 100, inplace=inplace)
            self._clip_type(
                'int', 1024, 0, 0, inplace=inplace)

            self._clip_type(
                'uint', 1024, 0, 0, inplace=inplace)
            # Negative lower bound clamps to 0 for unsigned dtypes.
            self._clip_type(
                'uint', 1024, -120, 100, inplace=inplace, expected_min=0)

    def test_record_array(self):
        # Clipping a single field of a structured array works like a
        # plain-array clip.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
        y = rec['x'].clip(-0.3, 0.5)
        self._check_range(y, -0.3, 0.5)

    def test_max_or_min(self):
        # clip works with only one bound given (positionally or by keyword).
        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        x = val.clip(3)
        assert_(np.all(x >= 3))
        x = val.clip(min=3)
        assert_(np.all(x >= 3))
        x = val.clip(max=4)
        assert_(np.all(x <= 4))

    def test_nan(self):
        # NaNs pass through clip unchanged.
        input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
        result = input_arr.clip(-1, 1)
        expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
        assert_array_equal(result, expected)
class TestCompress:
    """Tests for np.compress along an axis and over the flattened array."""

    def test_axis(self):
        data = np.arange(10).reshape(2, 5)
        # Selecting only the second row.
        assert_equal(np.compress([0, 1], data, axis=0), [[5, 6, 7, 8, 9]])
        # Selecting columns 1 and 3.
        assert_equal(np.compress([0, 1, 0, 1, 0], data, axis=1),
                     [[1, 3], [6, 8]])

    def test_truncate(self):
        # A condition shorter than the axis truncates the selection to its
        # length: here only column 1 survives.
        data = np.arange(10).reshape(2, 5)
        assert_equal(np.compress([0, 1], data, axis=1), [[1], [6]])

    def test_flatten(self):
        # With no axis, compress operates on the flattened array.
        data = np.arange(10).reshape(2, 5)
        assert_equal(np.compress([0, 1], data), 1)
class TestPutmask:
    """Tests for np.putmask across dtypes, byte orders, and overlap cases."""

    def tst_basic(self, x, T, mask, val):
        # Helper: after putmask, every masked slot holds ``val`` cast to T.
        np.putmask(x, mask, val)
        assert_equal(x[mask], np.array(val, T))

    def test_ip_types(self):
        # bytes/str/void cannot hold the numeric test values meaningfully.
        unchecked_types = [bytes, str, np.void]

        x = np.random.random(1000)*100
        mask = x < 40
        for val in [-100, 0, 15]:
            for types in np.sctypes.values():
                for T in types:
                    if T not in unchecked_types:
                        self.tst_basic(x.copy().astype(T), T, mask, val)

            # Also test string of a length which uses an untypical length
            dt = np.dtype("S3")
            self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])

    def test_mask_size(self):
        # A mask whose size differs from the array's is rejected.
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)

    @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
    def test_byteorder(self, dtype):
        # putmask works regardless of the array's byte order.
        x = np.array([1, 2, 3], dtype)
        np.putmask(x, [True, False, True], -1)
        assert_array_equal(x, [-1, 2, -1])

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        # Writing one field must leave the other fields untouched.
        np.putmask(rec['x'], [True, False], 10)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [2, 4])
        assert_array_equal(rec['z'], [3, 3])
        np.putmask(rec['y'], [True, False], 11)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [11, 4])
        assert_array_equal(rec['z'], [3, 3])

    def test_overlaps(self):
        # gh-6272 check overlap: source and destination views share memory.
        x = np.array([True, False, True, False])
        np.putmask(x[1:4], [True, True, True], x[:3])
        assert_equal(x, np.array([True, True, False, True]))

        x = np.array([True, False, True, False])
        np.putmask(x[1:4], x[:3], [True, False, True])
        assert_equal(x, np.array([True, True, True, True]))

    def test_writeable(self):
        # A read-only array must be rejected rather than silently written.
        a = np.arange(5)
        a.flags.writeable = False

        with pytest.raises(ValueError):
            np.putmask(a, a >= 2, 3)
class TestTake:
    """Tests for ndarray.take / np.take: dtypes, modes, and out= overlap."""

    def tst_basic(self, x):
        # Helper: taking every index along axis 0 reproduces the array.
        ind = list(range(x.shape[0]))
        assert_array_equal(x.take(ind, axis=0), x)

    def test_ip_types(self):
        # bytes/str/void are skipped: the numeric round-trip is meaningless.
        unchecked_types = [bytes, str, np.void]

        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        for types in np.sctypes.values():
            for T in types:
                if T not in unchecked_types:
                    self.tst_basic(x.copy().astype(T))

        # Also test string of a length which uses an untypical length
        self.tst_basic(x.astype("S3"))

    def test_raise(self):
        # Default mode='raise': out-of-bounds indices raise IndexError,
        # but valid negative indices still work.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, x.take, [-3], axis=0)
        assert_array_equal(x.take([-1], axis=0)[0], x[1])

    def test_clip(self):
        # mode='clip' clamps out-of-range indices to the valid end points.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])

    def test_wrap(self):
        # mode='wrap' reduces indices modulo the axis length.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])

    @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
    def test_byteorder(self, dtype):
        # take works regardless of the array's byte order.
        x = np.array([1, 2, 3], dtype)
        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])

    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        rec1 = rec.take([1])
        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)

    def test_out_overlap(self):
        # gh-6272 check overlap on out: out= may alias the source array.
        x = np.arange(5)
        y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
        assert_equal(y, np.array([1, 2, 3]))

    @pytest.mark.parametrize('shape', [(1, 2), (1,), ()])
    def test_ret_is_out(self, shape):
        # 0d arrays should not be an exception to this rule: the returned
        # object must be the very array passed as out=.
        x = np.arange(5)
        inds = np.zeros(shape, dtype=np.intp)
        out = np.zeros(shape, dtype=x.dtype)
        ret = np.take(x, inds, out=out)
        assert ret is out
class TestLexsort:
    """Tests for np.lexsort; the *last* key in the tuple is the primary key."""

    @pytest.mark.parametrize('dtype',[
        np.uint8, np.uint16, np.uint32, np.uint64,
        np.int8, np.int16, np.int32, np.int64,
        np.float16, np.float32, np.float64
    ])
    def test_basic(self, dtype):
        a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
        b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
        idx = np.lexsort((b, a))
        expected_idx = np.array([0, 4, 2, 1, 3, 5])
        assert_array_equal(idx, expected_idx)
        # Applying the permutation to the primary key yields a sorted array.
        assert_array_equal(a[idx], np.sort(a))

    def test_mixed(self):
        # Keys of different dtypes (int + datetime64) may be combined.
        a = np.array([1, 2, 1, 3, 1, 5])
        b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')

        idx = np.lexsort((b, a))
        expected_idx = np.array([0, 4, 2, 1, 3, 5])
        assert_array_equal(idx, expected_idx)

    def test_datetime(self):
        # With a constant primary key, the secondary key decides the order.
        a = np.array([0,0,0], dtype='datetime64[D]')
        b = np.array([2,1,0], dtype='datetime64[D]')
        idx = np.lexsort((b, a))
        expected_idx = np.array([2, 1, 0])
        assert_array_equal(idx, expected_idx)

        a = np.array([0,0,0], dtype='timedelta64[D]')
        b = np.array([2,1,0], dtype='timedelta64[D]')
        idx = np.lexsort((b, a))
        expected_idx = np.array([2, 1, 0])
        assert_array_equal(idx, expected_idx)

    def test_object(self):  # gh-6312
        a = np.random.choice(10, 1000)
        b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)

        for u in a, b:
            # Object-dtype lexsort must agree with a stable argsort.
            left = np.lexsort((u.astype('O'),))
            right = np.argsort(u, kind='mergesort')
            assert_array_equal(left, right)

        for u, v in (a, b), (b, a):
            # Mixed and all-object key combinations give the same permutation.
            idx = np.lexsort((u, v))
            assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
            assert_array_equal(idx, np.lexsort((u, v.astype('O'))))

            u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
            assert_array_equal(idx, np.lexsort((u, v)))

    def test_invalid_axis(self):  # gh-7528
        x = np.linspace(0., 1., 42*3).reshape(42, 3)
        assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO:
    """Test tofile, fromfile, tobytes, and fromstring"""

    @pytest.fixture()
    def x(self):
        # Complex test array with nan/inf/-inf planted to exercise
        # special-value round-tripping.
        shape = (2, 4, 3)
        rand = np.random.random
        x = rand(shape) + rand(shape).astype(complex) * 1j
        x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
        return x

    @pytest.fixture(params=["string", "path_obj"])
    def tmp_filename(self, tmp_path, request):
        # This fixture covers two cases:
        # one where the filename is a string and
        # another where it is a pathlib object
        filename = tmp_path / "file"
        if request.param == "string":
            filename = str(filename)
        yield filename

    def test_nofile(self):
        # this should probably be supported as a file
        # but for now test for proper errors
        b = io.BytesIO()
        assert_raises(OSError, np.fromfile, b, np.uint8, 80)
        d = np.ones(7)
        assert_raises(OSError, lambda x: x.tofile(b), d)

    def test_bool_fromstring(self):
        # Any nonzero token parses as True.
        v = np.array([True, False, True, False], dtype=np.bool_)
        y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
        assert_array_equal(v, y)

    def test_uint64_fromstring(self):
        d = np.fromstring("9923372036854775807 104783749223640",
                          dtype=np.uint64, sep=' ')
        e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
        assert_array_equal(d, e)

    def test_int64_fromstring(self):
        d = np.fromstring("-25041670086757 104783749223640",
                          dtype=np.int64, sep=' ')
        e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
        assert_array_equal(d, e)

    def test_fromstring_count0(self):
        # count=0 yields an empty array even when data is present.
        d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
        assert d.shape == (0,)

    def test_empty_files_text(self, tmp_filename):
        with open(tmp_filename, 'w') as f:
            pass
        y = np.fromfile(tmp_filename)
        assert_(y.size == 0, "Array not empty")

    def test_empty_files_binary(self, tmp_filename):
        with open(tmp_filename, 'wb') as f:
            pass
        y = np.fromfile(tmp_filename, sep=" ")
        assert_(y.size == 0, "Array not empty")

    def test_roundtrip_file(self, x, tmp_filename):
        with open(tmp_filename, 'wb') as f:
            x.tofile(f)
        # NB. doesn't work with flush+seek, due to use of C stdio
        with open(tmp_filename, 'rb') as f:
            y = np.fromfile(f, dtype=x.dtype)
        assert_array_equal(y, x.flat)

    def test_roundtrip(self, x, tmp_filename):
        x.tofile(tmp_filename)
        y = np.fromfile(tmp_filename, dtype=x.dtype)
        assert_array_equal(y, x.flat)

    def test_roundtrip_dump_pathlib(self, x, tmp_filename):
        # dump/load (pickle) round-trip via a pathlib.Path.
        p = pathlib.Path(tmp_filename)
        x.dump(p)
        y = np.load(p, allow_pickle=True)
        assert_array_equal(y, x)

    def test_roundtrip_binary_str(self, x):
        s = x.tobytes()
        y = np.frombuffer(s, dtype=x.dtype)
        assert_array_equal(y, x.flat)

        # 'F' order serializes column-major; compare against flatten('F').
        s = x.tobytes('F')
        y = np.frombuffer(s, dtype=x.dtype)
        assert_array_equal(y, x.flatten('F'))

    def test_roundtrip_str(self, x):
        x = x.real.ravel()
        s = "@".join(map(str, x))
        y = np.fromstring(s, sep="@")
        # NB. str imbues less precision
        nan_mask = ~np.isfinite(x)
        assert_array_equal(x[nan_mask], y[nan_mask])
        assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)

    def test_roundtrip_repr(self, x):
        # repr is exact, so the round-trip is lossless.
        x = x.real.ravel()
        s = "@".join(map(repr, x))
        y = np.fromstring(s, sep="@")
        assert_array_equal(x, y)

    def test_unseekable_fromfile(self, x, tmp_filename):
        # gh-6246
        x.tofile(tmp_filename)

        def fail(*args, **kwargs):
            raise OSError('Can not tell or seek')

        with io.open(tmp_filename, 'rb', buffering=0) as f:
            f.seek = fail
            f.tell = fail
            assert_raises(OSError, np.fromfile, f, dtype=x.dtype)

    def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
        # gh-6632
        x.tofile(tmp_filename)
        with io.open(tmp_filename, 'rb', buffering=0) as f:
            y = np.fromfile(f, dtype=x.dtype)
            assert_array_equal(y, x.flat)

    def test_largish_file(self, tmp_filename):
        # check the fallocate path on files > 16MB
        d = np.zeros(4 * 1024 ** 2)
        d.tofile(tmp_filename)
        assert_equal(os.path.getsize(tmp_filename), d.nbytes)
        assert_array_equal(d, np.fromfile(tmp_filename))
        # check offset
        with open(tmp_filename, "r+b") as f:
            f.seek(d.nbytes)
            d.tofile(f)
            assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
        # check append mode (gh-8329)
        open(tmp_filename, "w").close()  # delete file contents
        with open(tmp_filename, "ab") as f:
            d.tofile(f)
        assert_array_equal(d, np.fromfile(tmp_filename))
        with open(tmp_filename, "ab") as f:
            d.tofile(f)
        assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)

    def test_io_open_buffered_fromfile(self, x, tmp_filename):
        # gh-6632
        x.tofile(tmp_filename)
        with io.open(tmp_filename, 'rb', buffering=-1) as f:
            y = np.fromfile(f, dtype=x.dtype)
        assert_array_equal(y, x.flat)

    def test_file_position_after_fromfile(self, tmp_filename):
        # gh-4118: the file position must land right after the bytes read,
        # across buffer-size boundaries.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]

        for size in sizes:
            with open(tmp_filename, 'wb') as f:
                f.seek(size-1)
                f.write(b'\0')

            for mode in ['rb', 'r+b']:
                err_msg = "%d %s" % (size, mode)

                with open(tmp_filename, mode) as f:
                    f.read(2)
                    np.fromfile(f, dtype=np.float64, count=1)
                    pos = f.tell()
                assert_equal(pos, 10, err_msg=err_msg)

    def test_file_position_after_tofile(self, tmp_filename):
        # gh-4118: same invariant for writing.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]

        for size in sizes:
            err_msg = "%d" % (size,)

            with open(tmp_filename, 'wb') as f:
                f.seek(size-1)
                f.write(b'\0')
                f.seek(10)
                f.write(b'12')
                np.array([0], dtype=np.float64).tofile(f)
                pos = f.tell()
            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)

            with open(tmp_filename, 'r+b') as f:
                f.read(2)
                f.seek(0, 1)  # seek between read&write required by ANSI C
                np.array([0], dtype=np.float64).tofile(f)
                pos = f.tell()
            assert_equal(pos, 10, err_msg=err_msg)

    def test_load_object_array_fromfile(self, tmp_filename):
        # gh-12300
        with open(tmp_filename, 'w') as f:
            # Ensure we have a file with consistent contents
            pass

        with open(tmp_filename, 'rb') as f:
            assert_raises_regex(ValueError, "Cannot read into object array",
                                np.fromfile, f, dtype=object)

        assert_raises_regex(ValueError, "Cannot read into object array",
                            np.fromfile, tmp_filename, dtype=object)

    def test_fromfile_offset(self, x, tmp_filename):
        with open(tmp_filename, 'wb') as f:
            x.tofile(f)

        with open(tmp_filename, 'rb') as f:
            # offset=0 behaves like a plain read.
            y = np.fromfile(f, dtype=x.dtype, offset=0)
            assert_array_equal(y, x.flat)

        with open(tmp_filename, 'rb') as f:
            count_items = len(x.flat) // 8
            offset_items = len(x.flat) // 4
            offset_bytes = x.dtype.itemsize * offset_items
            y = np.fromfile(
                f, dtype=x.dtype, count=count_items, offset=offset_bytes
            )
            assert_array_equal(
                y, x.flat[offset_items:offset_items+count_items]
            )

            # subsequent seeks should stack
            offset_bytes = x.dtype.itemsize
            z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
            assert_array_equal(z, x.flat[offset_items+count_items+1:])

        with open(tmp_filename, 'wb') as f:
            x.tofile(f, sep=",")

        with open(tmp_filename, 'rb') as f:
            # offset= is only valid for binary (sep='') reads.
            assert_raises_regex(
                    TypeError,
                    "'offset' argument only permitted for binary files",
                    np.fromfile, tmp_filename, dtype=x.dtype,
                    sep=",", offset=1)

    @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
    def test_fromfile_bad_dup(self, x, tmp_filename):
        # Patch os.dup to return invalid values and check the error type.
        def dup_str(fd):
            return 'abc'

        def dup_bigint(fd):
            return 2**68

        old_dup = os.dup
        try:
            with open(tmp_filename, 'wb') as f:
                x.tofile(f)
                for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
                    os.dup = dup
                    assert_raises(exc, np.fromfile, f)
        finally:
            os.dup = old_dup

    def _check_from(self, s, value, filename, **kw):
        # Parse ``s`` both from memory (frombuffer/fromstring) and from a
        # file (fromfile) and require both to equal ``value``.
        if 'sep' not in kw:
            y = np.frombuffer(s, **kw)
        else:
            y = np.fromstring(s, **kw)
        assert_array_equal(y, value)

        with open(filename, 'wb') as f:
            f.write(s)
        y = np.fromfile(filename, **kw)
        assert_array_equal(y, value)

    @pytest.fixture(params=["period", "comma"])
    def decimal_sep_localization(self, request):
        """
        Including this fixture in a test will automatically
        execute it with both types of decimal separator.

        So::

            def test_decimal(decimal_sep_localization):
                pass

        is equivalent to the following two tests::

            def test_decimal_period_separator():
                pass

            def test_decimal_comma_separator():
                with CommaDecimalPointLocale():
                    pass
        """
        if request.param == "period":
            yield
        elif request.param == "comma":
            with CommaDecimalPointLocale():
                yield
        else:
            assert False, request.param

    def test_nan(self, tmp_filename, decimal_sep_localization):
        self._check_from(
            b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
            [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            tmp_filename,
            sep=' ')

    def test_inf(self, tmp_filename, decimal_sep_localization):
        self._check_from(
            b"inf +inf -inf infinity -Infinity iNfInItY -inF",
            [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
            tmp_filename,
            sep=' ')

    def test_numbers(self, tmp_filename, decimal_sep_localization):
        self._check_from(
            b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
            [1.234, -1.234, .3, .3e55, -123133.1231e+133],
            tmp_filename,
            sep=' ')

    def test_binary(self, tmp_filename):
        self._check_from(
            b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
            np.array([1, 2, 3, 4]),
            tmp_filename,
            dtype='<f4')

    @pytest.mark.slow  # takes > 1 minute on mechanical hard drive
    def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            pass

    def test_string(self, tmp_filename):
        self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',')

    def test_counted_string(self, tmp_filename, decimal_sep_localization):
        # count limits the read; count=-1 means "read everything".
        self._check_from(
            b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',')
        self._check_from(
            b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',')
        self._check_from(
            b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',')

    def test_string_with_ws(self, tmp_filename):
        self._check_from(
            b'1 2  3     4   ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ')

    def test_counted_string_with_ws(self, tmp_filename):
        self._check_from(
            b'1 2  3     4   ', [1, 2, 3], tmp_filename, count=3, dtype=int,
            sep=' ')

    def test_ascii(self, tmp_filename, decimal_sep_localization):
        self._check_from(
            b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',')
        self._check_from(
            b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',')

    def test_malformed(self, tmp_filename, decimal_sep_localization):
        # A partially-parseable token truncates the parse with a warning.
        with assert_warns(DeprecationWarning):
            self._check_from(
                b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ')

    def test_long_sep(self, tmp_filename):
        # Multi-character separators are supported.
        self._check_from(
            b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_')

    def test_dtype(self, tmp_filename):
        v = np.array([1, 2, 3, 4], dtype=np.int_)
        self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_)

    def test_dtype_bool(self, tmp_filename):
        # can't use _check_from because fromstring can't handle True/False
        v = np.array([True, False, True, False], dtype=np.bool_)
        s = b'1,0,-2.3,0'
        with open(tmp_filename, 'wb') as f:
            f.write(s)
        y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_)
        assert_(y.dtype == '?')
        assert_array_equal(y, v)

    def test_tofile_sep(self, tmp_filename, decimal_sep_localization):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        with open(tmp_filename, 'w') as f:
            x.tofile(f, sep=',')
        with open(tmp_filename, 'r') as f:
            s = f.read()
        #assert_equal(s, '1.51,2.0,3.51,4.0')
        y = np.array([float(p) for p in s.split(',')])
        assert_array_equal(x, y)

    def test_tofile_format(self, tmp_filename, decimal_sep_localization):
        # format= controls the text representation of each element.
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        with open(tmp_filename, 'w') as f:
            x.tofile(f, sep=',', format='%.2f')
        with open(tmp_filename, 'r') as f:
            s = f.read()
        assert_equal(s, '1.51,2.00,3.51,4.00')

    def test_tofile_cleanup(self, tmp_filename):
        # Object arrays cannot be written in text mode; the error must not
        # leak file handles.
        x = np.zeros((10), dtype=object)
        with open(tmp_filename, 'wb') as f:
            assert_raises(OSError, lambda: x.tofile(f, sep=''))
        # Dup-ed file handle should be closed or remove will fail on Windows OS
        os.remove(tmp_filename)

        # Also make sure that we close the Python handle
        assert_raises(OSError, lambda: x.tofile(tmp_filename))
        os.remove(tmp_filename)

    def test_fromfile_subarray_binary(self, tmp_filename):
        # Test subarray dtypes which are absorbed into the shape
        x = np.arange(24, dtype="i4").reshape(2, 3, 4)
        x.tofile(tmp_filename)
        res = np.fromfile(tmp_filename, dtype="(3,4)i4")
        assert_array_equal(x, res)

        x_str = x.tobytes()
        with assert_warns(DeprecationWarning):
            # binary fromstring is deprecated
            res = np.fromstring(x_str, dtype="(3,4)i4")
            assert_array_equal(x, res)

    def test_parsing_subarray_unsupported(self, tmp_filename):
        # We currently do not support parsing subarray dtypes
        data = "12,42,13," * 50
        with pytest.raises(ValueError):
            expected = np.fromstring(data, dtype="(3,)i", sep=",")

        with open(tmp_filename, "w") as f:
            f.write(data)

        with pytest.raises(ValueError):
            np.fromfile(tmp_filename, dtype="(3,)i", sep=",")

    def test_read_shorter_than_count_subarray(self, tmp_filename):
        # Test that requesting more values does not cause any problems
        # in conjunction with subarray dimensions being absorbed into the
        # array dimension.
        expected = np.arange(511 * 10, dtype="i").reshape(-1, 10)

        binary = expected.tobytes()
        with pytest.raises(ValueError):
            with pytest.warns(DeprecationWarning):
                np.fromstring(binary, dtype="(10,)i", count=10000)

        expected.tofile(tmp_filename)
        res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000)
        assert_array_equal(res, expected)
class TestFromBuffer:
    """Tests for np.frombuffer round-tripping via ``tobytes``."""

    @pytest.mark.parametrize('byteorder', ['<', '>'])
    @pytest.mark.parametrize('dtype', [float, int, complex])
    def test_basic(self, byteorder, dtype):
        # Round-trip must preserve values for either byte order.
        dt = np.dtype(dtype).newbyteorder(byteorder)
        x = (np.random.random((4, 7)) * 5).astype(dt)
        buf = x.tobytes()
        assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)

    def test_empty(self):
        # An empty buffer yields an empty float64 array.
        assert_array_equal(np.frombuffer(b''), np.array([]))
class TestFlat:
    """Tests for the ``.flat`` iterator: writeability, flags, refcounts."""

    def setup(self):
        # self.a / self.b: read-only base and a strided view of it.
        # self.a0 / self.b0: writeable counterparts.
        a0 = np.arange(20.0)
        a = a0.reshape(4, 5)
        a0.shape = (4, 5)
        a.flags.writeable = False
        self.a = a
        self.b = a[::2, ::2]
        self.a0 = a0
        self.b0 = a0[::2, ::2]

    def test_contiguous(self):
        # Writing through .flat of a read-only array must raise and leave
        # the data unchanged.
        testpassed = False
        try:
            self.a.flat[12] = 100.0
        except ValueError:
            testpassed = True
        assert_(testpassed)
        assert_(self.a.flat[12] == 12.0)

    def test_discontiguous(self):
        # Same as above, for a non-contiguous (strided) view.
        testpassed = False
        try:
            self.b.flat[4] = 100.0
        except ValueError:
            testpassed = True
        assert_(testpassed)
        assert_(self.b.flat[4] == 12.0)

    def test___array__(self):
        c = self.a.flat.__array__()
        d = self.b.flat.__array__()
        e = self.a0.flat.__array__()
        f = self.b0.flat.__array__()

        assert_(c.flags.writeable is False)
        assert_(d.flags.writeable is False)
        # for 1.14 all are set to non-writeable on the way to replacing the
        # UPDATEIFCOPY array returned for non-contiguous arrays.
        assert_(e.flags.writeable is True)
        assert_(f.flags.writeable is False)
        # Accessing the removed UPDATEIFCOPY flag warns; its value is False.
        with assert_warns(DeprecationWarning):
            assert_(c.flags.updateifcopy is False)
        with assert_warns(DeprecationWarning):
            assert_(d.flags.updateifcopy is False)
        with assert_warns(DeprecationWarning):
            assert_(e.flags.updateifcopy is False)
        with assert_warns(DeprecationWarning):
            # UPDATEIFCOPY is removed.
            assert_(f.flags.updateifcopy is False)
        assert_(c.flags.writebackifcopy is False)
        assert_(d.flags.writebackifcopy is False)
        assert_(e.flags.writebackifcopy is False)
        assert_(f.flags.writebackifcopy is False)

    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_refcount(self):
        # includes regression test for reference count error gh-13165
        inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
        indtype = np.dtype(np.intp)
        rc_indtype = sys.getrefcount(indtype)
        for ind in inds:
            rc_ind = sys.getrefcount(ind)
            for _ in range(100):
                try:
                    self.a.flat[ind]
                except IndexError:
                    pass
            # Refcounts must not grow without bound across repeated indexing.
            assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
            assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)

    def test_index_getset(self):
        it = np.arange(10).reshape(2, 1, 5).flat
        with pytest.raises(AttributeError):
            it.index = 10

        for _ in it:
            pass
        # Check the value of `.index` is updated correctly (see also gh-19153)
        # If the type was incorrect, this would show up on big-endian machines
        assert it.index == it.base.size
class TestResize:
    """Tests for in-place ndarray.resize (reference checks, shapes, fill)."""

    @_no_tracing
    def test_basic(self):
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        # PyPy has no reliable refcounts, so refcheck must be disabled there.
        if IS_PYPY:
            x.resize((5, 5), refcheck=False)
        else:
            x.resize((5, 5))
        # Old data is preserved; the new tail is zero-filled.
        assert_array_equal(x.flat[:9],
                np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x[9:].flat, 0)

    def test_check_reference(self):
        # resize must refuse while another reference to the array exists.
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        y = x
        assert_raises(ValueError, x.resize, (5, 1))
        del y  # avoid pyflakes unused variable warning.

    @_no_tracing
    def test_int_shape(self):
        x = np.eye(3)
        if IS_PYPY:
            x.resize(3, refcheck=False)
        else:
            x.resize(3)
        assert_array_equal(x, np.eye(3)[0,:])

    def test_none_shape(self):
        # resize(None) and resize() are no-ops.
        x = np.eye(3)
        x.resize(None)
        assert_array_equal(x, np.eye(3))
        x.resize()
        assert_array_equal(x, np.eye(3))

    def test_0d_shape(self):
        # do it multiple times to test it does not break alloc cache gh-9216
        for i in range(10):
            x = np.empty((1,))
            x.resize(())
            assert_equal(x.shape, ())
            assert_equal(x.size, 1)
            x = np.empty(())
            x.resize((1,))
            assert_equal(x.shape, (1,))
            assert_equal(x.size, 1)

    def test_invalid_arguments(self):
        assert_raises(TypeError, np.eye(3).resize, 'hi')
        assert_raises(ValueError, np.eye(3).resize, -1)
        assert_raises(TypeError, np.eye(3).resize, order=1)
        assert_raises(TypeError, np.eye(3).resize, refcheck='hi')

    @_no_tracing
    def test_freeform_shape(self):
        # Shape may be given as separate positional integers.
        x = np.eye(3)
        if IS_PYPY:
            x.resize(3, 2, 1, refcheck=False)
        else:
            x.resize(3, 2, 1)
        assert_(x.shape == (3, 2, 1))

    @_no_tracing
    def test_zeros_appended(self):
        x = np.eye(3)
        if IS_PYPY:
            x.resize(2, 3, 3, refcheck=False)
        else:
            x.resize(2, 3, 3)
        assert_array_equal(x[0], np.eye(3))
        assert_array_equal(x[1], np.zeros((3, 3)))

    @_no_tracing
    def test_obj_obj(self):
        # check memory is initialized on resize, gh-4857
        a = np.ones(10, dtype=[('k', object, 2)])
        if IS_PYPY:
            a.resize(15, refcheck=False)
        else:
            a.resize(15,)
        assert_equal(a.shape, (15,))
        assert_array_equal(a['k'][-5:], 0)
        assert_array_equal(a['k'][:-5], 1)

    def test_empty_view(self):
        # check that sizes containing a zero don't trigger a reallocate for
        # already empty arrays
        x = np.zeros((10, 0), int)
        x_view = x[...]
        x_view.resize((0, 10))
        x_view.resize((0, 100))

    def test_check_weakref(self):
        # A weak reference also counts as an outstanding reference.
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        xref = weakref.ref(x)
        assert_raises(ValueError, x.resize, (5, 1))
        del xref  # avoid pyflakes unused variable warning.
class TestRecord:
    """Tests for structured (record) dtypes: naming, indexing, hashing."""

    def test_field_rename(self):
        # Field names may be replaced wholesale via dtype.names.
        dt = np.dtype([('f', float), ('i', int)])
        dt.names = ['p', 'q']
        assert_equal(dt.names, ['p', 'q'])

    def test_multiple_field_name_occurrence(self):
        def test_dtype_init():
            np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])

        # Error raised when multiple fields have the same name
        assert_raises(ValueError, test_dtype_init)

    def test_bytes_fields(self):
        # Bytes are not allowed in field names and not recognized in titles
        # on Py3
        assert_raises(TypeError, np.dtype, [(b'a', int)])
        assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])

        dt = np.dtype([((b'a', 'b'), int)])
        assert_raises(TypeError, dt.__getitem__, b'a')

        x = np.array([(1,), (2,), (3,)], dtype=dt)
        assert_raises(IndexError, x.__getitem__, b'a')

        y = x[0]
        assert_raises(IndexError, y.__getitem__, b'a')

    def test_multiple_field_name_unicode(self):
        def test_dtype_unicode():
            np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])

        # Error raised when multiple fields have the same name(unicode included)
        assert_raises(ValueError, test_dtype_unicode)

    def test_fromarrays_unicode(self):
        # A single name string provided to fromarrays() is allowed to be unicode
        # on both Python 2 and 3:
        x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
        assert_equal(x['a'][0], 0)
        assert_equal(x['b'][0], 1)

    def test_unicode_order(self):
        # Test that we can sort with order as a unicode field name in both Python 2 and
        # 3:
        name = u'b'
        x = np.array([1, 3, 2], dtype=[(name, int)])
        x.sort(order=name)
        assert_equal(x[u'b'], np.array([1, 2, 3]))

    def test_field_names(self):
        # Test unicode and 8-bit / byte strings can be used
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        # byte string indexing fails gracefully
        assert_raises(IndexError, a.__setitem__, b'f1', 1)
        assert_raises(IndexError, a.__getitem__, b'f1')
        assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
        assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
        b = a.copy()
        fn1 = str('f1')
        b[fn1] = 1
        assert_equal(b[fn1], 1)
        fnn = str('not at all')
        assert_raises(ValueError, b.__setitem__, fnn, 1)
        assert_raises(ValueError, b.__getitem__, fnn)
        b[0][fn1] = 2
        assert_equal(b[fn1], 2)
        # Subfield
        assert_raises(ValueError, b[0].__setitem__, fnn, 1)
        assert_raises(ValueError, b[0].__getitem__, fnn)
        # Subfield
        fn3 = str('f3')
        sfn1 = str('sf1')
        b[fn3][sfn1] = 1
        assert_equal(b[fn3][sfn1], 1)
        assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
        assert_raises(ValueError, b[fn3].__getitem__, fnn)
        # multiple subfields
        fn2 = str('f2')
        b[fn2] = 3

        assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
        assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
        assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))

        # non-ascii unicode field indexing is well behaved
        assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
        assert_raises(ValueError, a.__getitem__, u'\u03e0')

    def test_record_hash(self):
        # Read-only record scalars hash by value, consistent with equality.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        a.flags.writeable = False
        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
        b.flags.writeable = False
        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
        c.flags.writeable = False
        assert_(hash(a[0]) == hash(a[1]))
        assert_(hash(a[0]) == hash(b[0]))
        assert_(hash(a[0]) != hash(b[1]))
        assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])

    def test_record_no_hash(self):
        # Mutable (writeable) record scalars are unhashable.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        assert_raises(TypeError, hash, a[0])

    def test_empty_structure_creation(self):
        # make sure these do not raise errors (gh-5631)
        np.array([()], dtype={'names': [], 'formats': [],
                              'offsets': [], 'itemsize': 12})
        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
                                              'offsets': [], 'itemsize': 12})

    def test_multifield_indexing_view(self):
        a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
        v = a[['a', 'c']]
        # Multi-field indexing returns a view (not a copy) with the original
        # field offsets preserved.
        assert_(v.base is a)
        assert_(v.dtype == np.dtype({'names': ['a', 'c'],
                                     'formats': ['i4', 'u4'],
                                     'offsets': [0, 8]}))
        v[:] = (4, 5)
        assert_equal(a[0].item(), (4, 1, 5))
class TestView:
    def test_basic(self):
        """Viewing four int8 fields as one little-endian int32 per record."""
        rgba = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
                        dtype=[('r', np.int8), ('g', np.int8),
                               ('b', np.int8), ('a', np.int8)])
        # We must be specific about the endianness here:
        via_keyword = rgba.view(dtype='<i4')
        # ... and again without the keyword.
        via_positional = rgba.view('<i4')
        assert_array_equal(via_keyword, via_positional)
        # 0x04030201 == 67305985, 0x08070605 == 134678021.
        assert_array_equal(via_keyword, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats:
    """Tests for mean/var/std, exercised through the module-level
    ``_mean``/``_var``/``_std`` wrappers (method dispatch, not ``np.*``).

    ``setup`` seeds the RNG so the real, complex and object (Decimal)
    matrices are reproducible across the value tests.
    """

    funcs = [_mean, _var, _std]

    def setup(self):
        np.random.seed(range(3))
        self.rmat = np.random.random((4, 5))
        self.cmat = self.rmat + 1j * self.rmat
        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
        self.omat = self.omat.reshape(4, 5)

    def test_python_type(self):
        # Single scalars of assorted Python/numpy types behave like
        # one-element sequences.
        for x in (np.float16(1.), 1, 1., 1+0j):
            assert_equal(np.mean([x]), 1.)
            assert_equal(np.std([x]), 0.)
            assert_equal(np.var([x]), 0.)

    def test_keepdims(self):
        mat = np.eye(3)
        for f in self.funcs:
            for axis in [0, 1]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.ndim == mat.ndim)
                assert_(res.shape[axis] == 1)
            for axis in [None]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.shape == (1, 1))

    def test_out(self):
        mat = np.eye(3)
        for f in self.funcs:
            out = np.zeros(3)
            tgt = f(mat, axis=1)
            res = f(mat, axis=1, out=out)
            assert_almost_equal(res, out)
            assert_almost_equal(res, tgt)
            # a mismatched ``out`` shape must raise
            out = np.empty(2)
            assert_raises(ValueError, f, mat, axis=1, out=out)
            out = np.empty((2, 2))
            assert_raises(ValueError, f, mat, axis=1, out=out)

    def test_dtype_from_input(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        # object type
        for f in self.funcs:
            mat = np.array([[Decimal(1)]*3]*3)
            tgt = mat.dtype.type
            res = f(mat, axis=1).dtype.type
            assert_(res is tgt)
            # scalar case
            res = type(f(mat, axis=None))
            assert_(res is Decimal)
        # integer types promote to float64
        for f in self.funcs:
            for c in icodes:
                mat = np.eye(3, dtype=c)
                tgt = np.float64
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)
        # mean for float types preserves the input dtype
        for f in [_mean]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                tgt = mat.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)
        # var, std for float types return the real component dtype
        for f in [_var, _std]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                # deal with complex types
                tgt = mat.real.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_dtype(self):
        mat = np.eye(3)
        # stats for integer types
        # FIXME:
        # this needs definition as there are lots places along the line
        # where type casting may take place.
        # for f in self.funcs:
        #     for c in np.typecodes['AllInteger']:
        #         tgt = np.dtype(c).type
        #         res = f(mat, axis=1, dtype=c).dtype.type
        #         assert_(res is tgt)
        #         # scalar case
        #         res = f(mat, axis=None, dtype=c).dtype.type
        #         assert_(res is tgt)
        # stats for float types
        for f in self.funcs:
            for c in np.typecodes['AllFloat']:
                tgt = np.dtype(c).type
                res = f(mat, axis=1, dtype=c).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None, dtype=c).dtype.type
                assert_(res is tgt)

    def test_ddof(self):
        # Identity under test: var(ddof) * (n - ddof) == var(0) * n, and
        # std(ddof) * sqrt(n - ddof) == std(0) * sqrt(n).
        # BUGFIX: previously the _var loop computed ``res`` and ``tgt`` but
        # never asserted them (the values were silently discarded), while
        # the _std loop asserted the same pair twice.  Each loop now checks
        # its own result exactly once.
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= n must not produce negative results, and must emit a
        # RuntimeWarning.
        dim = self.rmat.shape[1]
        for f in [_var, _std]:
            for ddof in range(dim, dim + 2):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    res = f(self.rmat, axis=1, ddof=ddof)
                    assert_(not (res < 0).any())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        A = np.zeros((0, 3))
        for f in self.funcs:
            for axis in [0, None]:
                # reducing over the empty axis yields nan plus a warning
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(A, axis=axis)).all())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(A, axis=axis), np.zeros([]))

    def test_mean_values(self):
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * mat.shape[axis]
                assert_almost_equal(res, tgt)
            for axis in [None]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * np.prod(mat.shape)
                assert_almost_equal(res, tgt)

    def test_mean_float16(self):
        # This fail if the sum inside mean is done in float16 instead
        # of float32.
        assert_(_mean(np.ones(100000, dtype='float16')) == 1)

    def test_mean_axis_error(self):
        # Ensure that AxisError is raised instead of IndexError when axis is
        # out of bounds, see gh-15817.
        with assert_raises(np.core._exceptions.AxisError):
            np.arange(10).mean(axis=2)

    def test_mean_where(self):
        a = np.arange(16).reshape((4, 4))
        wh_full = np.array([[False, True, False, True],
                            [True, False, True, False],
                            [True, True, False, False],
                            [False, False, True, True]])
        wh_partial = np.array([[False],
                               [True],
                               [True],
                               [False]])
        _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]),
                  (0, wh_full, [6., 5., 10., 9.]),
                  (1, wh_full, [2., 5., 8.5, 14.5]),
                  (0, wh_partial, [6., 7., 8., 9.])]
        for _ax, _wh, _res in _cases:
            assert_allclose(a.mean(axis=_ax, where=_wh),
                            np.array(_res))
            assert_allclose(np.mean(a, axis=_ax, where=_wh),
                            np.array(_res))
        a3d = np.arange(16).reshape((2, 2, 4))
        _wh_partial = np.array([False, True, True, False])
        _res = [[1.5, 5.5], [9.5, 13.5]]
        assert_allclose(a3d.mean(axis=2, where=_wh_partial),
                        np.array(_res))
        assert_allclose(np.mean(a3d, axis=2, where=_wh_partial),
                        np.array(_res))
        # fully-masked rows yield nan and warn
        with pytest.warns(RuntimeWarning) as w:
            assert_allclose(a.mean(axis=1, where=wh_partial),
                            np.array([np.nan, 5.5, 9.5, np.nan]))
        with pytest.warns(RuntimeWarning) as w:
            assert_equal(a.mean(where=False), np.nan)
        with pytest.warns(RuntimeWarning) as w:
            assert_equal(np.mean(a, where=False), np.nan)

    def test_var_values(self):
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                # var == E[x * conj(x)] - mean * conj(mean)
                msqr = _mean(mat * mat.conj(), axis=axis)
                mean = _mean(mat, axis=axis)
                tgt = msqr - mean * mean.conjugate()
                res = _var(mat, axis=axis)
                assert_almost_equal(res, tgt)

    @pytest.mark.parametrize(('complex_dtype', 'ndec'), (
        ('complex64', 6),
        ('complex128', 7),
        ('clongdouble', 7),
    ))
    def test_var_complex_values(self, complex_dtype, ndec):
        # Test fast-paths for every builtin complex type
        for axis in [0, 1, None]:
            mat = self.cmat.copy().astype(complex_dtype)
            msqr = _mean(mat * mat.conj(), axis=axis)
            mean = _mean(mat, axis=axis)
            tgt = msqr - mean * mean.conjugate()
            res = _var(mat, axis=axis)
            assert_almost_equal(res, tgt, decimal=ndec)

    def test_var_dimensions(self):
        # _var paths for complex number introduce additions on views that
        # increase dimensions. Ensure this generalizes to higher dims
        mat = np.stack([self.cmat]*3)
        for axis in [0, 1, 2, -1, None]:
            msqr = _mean(mat * mat.conj(), axis=axis)
            mean = _mean(mat, axis=axis)
            tgt = msqr - mean * mean.conjugate()
            res = _var(mat, axis=axis)
            assert_almost_equal(res, tgt)

    def test_var_complex_byteorder(self):
        # Test that var fast-path does not cause failures for complex arrays
        # with non-native byteorder
        cmat = self.cmat.copy().astype('complex128')
        cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
        assert_almost_equal(cmat.var(), cmat_swapped.var())

    def test_var_axis_error(self):
        # Ensure that AxisError is raised instead of IndexError when axis is
        # out of bounds, see gh-15817.
        with assert_raises(np.core._exceptions.AxisError):
            np.arange(10).var(axis=2)

    def test_var_where(self):
        a = np.arange(25).reshape((5, 5))
        wh_full = np.array([[False, True, False, True, True],
                            [True, False, True, True, False],
                            [True, True, False, False, True],
                            [False, True, True, False, True],
                            [True, False, True, True, False]])
        wh_partial = np.array([[False],
                               [True],
                               [True],
                               [False],
                               [True]])
        _cases = [(0, True, [50., 50., 50., 50., 50.]),
                  (1, True, [2., 2., 2., 2., 2.])]
        for _ax, _wh, _res in _cases:
            assert_allclose(a.var(axis=_ax, where=_wh),
                            np.array(_res))
            assert_allclose(np.var(a, axis=_ax, where=_wh),
                            np.array(_res))
        a3d = np.arange(16).reshape((2, 2, 4))
        _wh_partial = np.array([False, True, True, False])
        _res = [[0.25, 0.25], [0.25, 0.25]]
        assert_allclose(a3d.var(axis=2, where=_wh_partial),
                        np.array(_res))
        assert_allclose(np.var(a3d, axis=2, where=_wh_partial),
                        np.array(_res))
        assert_allclose(np.var(a, axis=1, where=wh_full),
                        np.var(a[wh_full].reshape((5, 3)), axis=1))
        assert_allclose(np.var(a, axis=0, where=wh_partial),
                        np.var(a[wh_partial[:,0]], axis=0))
        with pytest.warns(RuntimeWarning) as w:
            assert_equal(a.var(where=False), np.nan)
        with pytest.warns(RuntimeWarning) as w:
            assert_equal(np.var(a, where=False), np.nan)

    def test_std_values(self):
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                tgt = np.sqrt(_var(mat, axis=axis))
                res = _std(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_std_where(self):
        a = np.arange(25).reshape((5,5))[::-1]
        whf = np.array([[False, True, False, True, True],
                        [True, False, True, False, True],
                        [True, True, False, True, False],
                        [True, False, True, True, False],
                        [False, True, False, True, True]])
        whp = np.array([[False],
                        [False],
                        [True],
                        [True],
                        [False]])
        _cases = [
            (0, True, 7.07106781*np.ones((5))),
            (1, True, 1.41421356*np.ones((5))),
            (0, whf,
             np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])),
            (0, whp, 2.5*np.ones((5)))
        ]
        for _ax, _wh, _res in _cases:
            assert_allclose(a.std(axis=_ax, where=_wh), _res)
            assert_allclose(np.std(a, axis=_ax, where=_wh), _res)
        a3d = np.arange(16).reshape((2, 2, 4))
        _wh_partial = np.array([False, True, True, False])
        _res = [[0.5, 0.5], [0.5, 0.5]]
        assert_allclose(a3d.std(axis=2, where=_wh_partial),
                        np.array(_res))
        assert_allclose(np.std(a3d, axis=2, where=_wh_partial),
                        np.array(_res))
        assert_allclose(a.std(axis=1, where=whf),
                        np.std(a[whf].reshape((5,3)), axis=1))
        assert_allclose(np.std(a, axis=1, where=whf),
                        (a[whf].reshape((5,3))).std(axis=1))
        assert_allclose(a.std(axis=0, where=whp),
                        np.std(a[whp[:,0]], axis=0))
        assert_allclose(np.std(a, axis=0, where=whp),
                        (a[whp[:,0]]).std(axis=0))
        with pytest.warns(RuntimeWarning) as w:
            assert_equal(a.std(where=False), np.nan)
        with pytest.warns(RuntimeWarning) as w:
            assert_equal(np.std(a, where=False), np.nan)

    def test_subclass(self):
        # mean/std/var must propagate subclass attributes via
        # __array_finalize__.
        class TestArray(np.ndarray):
            def __new__(cls, data, info):
                result = np.array(data)
                result = result.view(cls)
                result.info = info
                return result

            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')

        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        res = dat.mean(1)
        assert_(res.info == dat.info)
        res = dat.std(1)
        assert_(res.info == dat.info)
        res = dat.var(1)
        assert_(res.info == dat.info)
class TestVdot:
    """Tests for ``np.vdot``: scalar result, conjugation, memory order."""

    def test_basic(self):
        numeric_codes = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        complex_codes = np.typecodes['Complex']

        def check(base, codes, expected):
            # vdot of an array with itself must be a true scalar equal to
            # ``expected`` for every dtype code in ``codes``.
            for code in codes:
                cast = base.astype(code)
                result = np.vdot(cast, cast)
                assert_(np.isscalar(result))
                assert_equal(np.vdot(cast, cast), expected)

        # real and object dtypes
        check(np.eye(3), numeric_codes + 'O', 3)
        # complex dtypes: vdot conjugates its first argument, so the
        # self-product of an imaginary identity is still real 3
        check(np.eye(3) * 1j, complex_codes + 'O', 3)
        # boolean
        flags = np.eye(3, dtype=bool)
        scalar = np.vdot(flags, flags)
        assert_(np.isscalar(scalar))
        assert_equal(np.vdot(flags, flags), True)

    def test_vdot_array_order(self):
        c_order = np.array([[1, 2], [3, 4]], order='C')
        f_order = np.array([[1, 2], [3, 4]], order='F')
        expected = np.vdot(c_order, c_order)
        # integer arrays are exact, so every order combination must agree
        for lhs, rhs in [(c_order, f_order), (f_order, c_order),
                         (f_order, f_order)]:
            assert_equal(np.vdot(lhs, rhs), expected)

    def test_vdot_uncontiguous(self):
        for size in [2, 1000]:
            # Different sizes exercise different branches in vdot.
            a = np.zeros((size, 2, 2))
            b = np.zeros((size, 2, 2))
            a[:, 0, 0] = np.arange(size)
            b[:, 0, 0] = np.arange(size) + 1
            # Dropping the trailing axis leaves non-contiguous views.
            a = a[..., 0]
            b = b[..., 0]
            expected = np.vdot(a.flatten(), b.flatten())
            assert_equal(np.vdot(a, b), expected)
            for lhs, rhs in [(a, b.copy()), (a.copy(), b),
                             (a.copy('F'), b), (a, b.copy('F'))]:
                assert_equal(np.vdot(lhs, rhs), expected)
class TestDot:
    """Tests for ``np.dot``.

    ``setup`` seeds the RNG, so the hard-coded ``tgt`` arrays in the value
    tests below are only valid for seed 128.
    """

    def setup(self):
        np.random.seed(128)
        self.A = np.random.rand(4, 2)
        self.b1 = np.random.rand(2, 1)
        self.b2 = np.random.rand(2)
        self.b3 = np.random.rand(1, 2)
        self.b4 = np.random.rand(4)
        # decimal places used with assert_almost_equal throughout
        self.N = 7

    def test_dotmatmat(self):
        # matrix.T @ matrix
        A = self.A
        res = np.dot(A.transpose(), A)
        tgt = np.array([[1.45046013, 0.86323640],
                        [0.86323640, 0.84934569]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotmatvec(self):
        # matrix @ column vector -> (4, 1)
        A, b1 = self.A, self.b1
        res = np.dot(A, b1)
        tgt = np.array([[0.32114320], [0.04889721],
                        [0.15696029], [0.33612621]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotmatvec2(self):
        # matrix @ 1-D vector -> (4,)
        A, b2 = self.A, self.b2
        res = np.dot(A, b2)
        tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat(self):
        # 1-D vector @ matrix -> (2,)
        A, b4 = self.A, self.b4
        res = np.dot(b4, A)
        tgt = np.array([1.23495091, 1.12222648])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat2(self):
        b3, A = self.b3, self.A
        res = np.dot(b3, A.transpose())
        tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat3(self):
        A, b4 = self.A, self.b4
        res = np.dot(A.transpose(), b4)
        tgt = np.array([1.23495091, 1.12222648])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecvecouter(self):
        # (2, 1) @ (1, 2) -> outer product, shape (2, 2)
        b1, b3 = self.b1, self.b3
        res = np.dot(b1, b3)
        tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecvecinner(self):
        # (1, 2) @ (2, 1) -> inner product kept 2-D, shape (1, 1)
        b1, b3 = self.b1, self.b3
        res = np.dot(b3, b1)
        tgt = np.array([[ 0.23129668]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotcolumnvect1(self):
        b1 = np.ones((3, 1))
        b2 = [5.3]
        res = np.dot(b1, b2)
        tgt = np.array([5.3, 5.3, 5.3])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotcolumnvect2(self):
        b1 = np.ones((3, 1)).transpose()
        b2 = [6.2]
        res = np.dot(b2, b1)
        tgt = np.array([6.2, 6.2, 6.2])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecscalar(self):
        # expected values below depend on this local reseed
        np.random.seed(100)
        b1 = np.random.rand(1, 1)
        b2 = np.random.rand(1, 4)
        res = np.dot(b1, b2)
        tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecscalar2(self):
        np.random.seed(100)
        b1 = np.random.rand(4, 1)
        b2 = np.random.rand(1, 1)
        res = np.dot(b1, b2)
        tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_all(self):
        # every pairing of 0-d, 1-d and 2-d operands; ``dout`` lists the
        # expected result shape for each pair, in ``product`` order
        dims = [(), (1,), (1, 1)]
        dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
        for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
            b1 = np.zeros(dim1)
            b2 = np.zeros(dim2)
            res = np.dot(b1, b2)
            tgt = np.zeros(dim)
            assert_(res.shape == tgt.shape)
            assert_almost_equal(res, tgt, decimal=self.N)

    def test_vecobject(self):
        # dot must work on object arrays whose elements only implement
        # +, -, and scalar *
        class Vec:
            def __init__(self, sequence=None):
                if sequence is None:
                    sequence = []
                self.array = np.array(sequence)

            def __add__(self, other):
                out = Vec()
                out.array = self.array + other.array
                return out

            def __sub__(self, other):
                out = Vec()
                out.array = self.array - other.array
                return out

            def __mul__(self, other): # with scalar
                out = Vec(self.array.copy())
                out.array *= other
                return out

            def __rmul__(self, other):
                return self*other

        U_non_cont = np.transpose([[1., 1.], [1., 2.]])
        U_cont = np.ascontiguousarray(U_non_cont)
        x = np.array([Vec([1., 0.]), Vec([0., 1.])])
        zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
        # contiguous and non-contiguous operands must agree
        zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
        assert_equal(zeros[0].array, zeros_test[0].array)
        assert_equal(zeros[1].array, zeros_test[1].array)

    def test_dot_2args(self):
        from numpy.core.multiarray import dot
        a = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([[1, 0], [1, 1]], dtype=float)
        c = np.array([[3, 2], [7, 4]], dtype=float)
        d = dot(a, b)
        assert_allclose(c, d)

    def test_dot_3args(self):
        # three-argument form writes into ``r`` in place
        from numpy.core.multiarray import dot
        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))
        r = np.empty((1024, 32))
        for i in range(12):
            dot(f, v, r)
        if HAS_REFCOUNT:
            # repeated calls must not leak references to ``r``
            assert_equal(sys.getrefcount(r), 2)
        r2 = dot(f, v, out=None)
        assert_array_equal(r2, r)
        assert_(r is dot(f, v, out=r))
        v = v[:, 0].copy() # v.shape == (16,)
        r = r[:, 0].copy() # r.shape == (1024,)
        r2 = dot(f, v)
        assert_(r is dot(f, v, r))
        assert_array_equal(r2, r)

    def test_dot_3args_errors(self):
        # any wrong shape, non-contiguous layout, or dtype for ``r`` raises
        from numpy.core.multiarray import dot
        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))
        r = np.empty((1024, 31))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((1024,))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((32,))
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((32, 1024))
        assert_raises(ValueError, dot, f, v, r)
        assert_raises(ValueError, dot, f, v, r.T)
        r = np.empty((1024, 64))
        assert_raises(ValueError, dot, f, v, r[:, ::2])
        assert_raises(ValueError, dot, f, v, r[:, :32])
        r = np.empty((1024, 32), dtype=np.float32)
        assert_raises(ValueError, dot, f, v, r)
        r = np.empty((1024, 32), dtype=int)
        assert_raises(ValueError, dot, f, v, r)

    def test_dot_array_order(self):
        a = np.array([[1, 2], [3, 4]], order='C')
        b = np.array([[1, 2], [3, 4]], order='F')
        res = np.dot(a, a)
        # integer arrays are exact
        assert_equal(np.dot(a, b), res)
        assert_equal(np.dot(b, a), res)
        assert_equal(np.dot(b, b), res)

    def test_accelerate_framework_sgemv_fix(self):
        # Regression test for an sgemv bug with strided/unaligned
        # single-precision operands (method name points at Apple's
        # Accelerate framework); the single-precision results are compared
        # against a double-precision reference.

        def aligned_array(shape, align, dtype, order='C'):
            # build an array whose data pointer is ``align``-byte aligned
            d = dtype(0)
            N = np.prod(shape)
            tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
            address = tmp.__array_interface__["data"][0]
            for offset in range(align):
                if (address + offset) % align == 0:
                    break
            tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
            return tmp.reshape(shape, order=order)

        def as_aligned(arr, align, dtype, order='C'):
            # copy ``arr`` into a freshly aligned buffer of ``dtype``
            aligned = aligned_array(arr.shape, align, dtype, order)
            aligned[:] = arr[:]
            return aligned

        def assert_dot_close(A, X, desired):
            assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)

        m = aligned_array(100, 15, np.float32)
        s = aligned_array((100, 100), 15, np.float32)
        np.dot(s, m) # this will always segfault if the bug is present
        testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
        for align, m, n, a_order in testdata:
            # Calculation in double precision
            A_d = np.random.rand(m, n)
            X_d = np.random.rand(n)
            desired = np.dot(A_d, X_d)
            # Calculation with aligned single precision
            A_f = as_aligned(A_d, align, np.float32, order=a_order)
            X_f = as_aligned(X_d, align, np.float32)
            assert_dot_close(A_f, X_f, desired)
            # Strided A rows
            A_d_2 = A_d[::2]
            desired = np.dot(A_d_2, X_d)
            A_f_2 = A_f[::2]
            assert_dot_close(A_f_2, X_f, desired)
            # Strided A columns, strided X vector
            A_d_22 = A_d_2[:, ::2]
            X_d_2 = X_d[::2]
            desired = np.dot(A_d_22, X_d_2)
            A_f_22 = A_f_2[:, ::2]
            X_f_2 = X_f[::2]
            assert_dot_close(A_f_22, X_f_2, desired)
            # Check the strides are as expected
            if a_order == 'F':
                assert_equal(A_f_22.strides, (8, 8 * m))
            else:
                assert_equal(A_f_22.strides, (8 * n, 8))
            assert_equal(X_f_2.strides, (8,))
            # Strides in A rows + cols only
            X_f_2c = as_aligned(X_f_2, align, np.float32)
            assert_dot_close(A_f_22, X_f_2c, desired)
            # Strides just in A cols
            A_d_12 = A_d[:, ::2]
            desired = np.dot(A_d_12, X_d_2)
            A_f_12 = A_f[:, ::2]
            assert_dot_close(A_f_12, X_f_2c, desired)
            # Strides in A cols and X
            assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon:
    """Common tests for '@' operator and numpy.matmul.

    Subclasses supply the ``matmul`` attribute (``np.matmul`` or
    ``operator.matmul``) that all tests dispatch through.
    """
    # Should work with these types. Will want to add
    # "O" at some point
    types = "?bhilqBHILQefdgFDGO"

    def test_exceptions(self):
        # incompatible core-dimension pairs must raise for every dtype
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1)),               # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]
        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)

    def test_shapes(self):
        # stacked-matrix broadcasting of the leading dimensions
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]
        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))
        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())

    def test_result_types(self):
        # result dtype matches the (common) operand dtype
        mat = np.ones((1,1))
        vec = np.ones((1,))
        for dt in self.types:
            m = mat.astype(dt)
            v = vec.astype(dt)
            for arg in [(m, v), (v, m), (m, m)]:
                res = self.matmul(*arg)
                assert_(res.dtype == dt)
            # vector vector returns scalars
            if dt != "O":
                res = self.matmul(v, v)
                assert_(type(res) is np.dtype(dt).type)

    def test_scalar_output(self):
        vec1 = np.array([2])
        vec2 = np.array([3, 4]).reshape(1, -1)
        tgt = np.array([6, 8])
        # self.types[1:] skips the bool code '?', handled separately below
        for dt in self.types[1:]:
            v1 = vec1.astype(dt)
            v2 = vec2.astype(dt)
            res = self.matmul(v1, v2)
            assert_equal(res, tgt)
            res = self.matmul(v2.T, v1)
            assert_equal(res, tgt)
        # boolean type
        vec = np.array([True, True], dtype='?').reshape(1, -1)
        res = self.matmul(vec[:, 0], vec)
        assert_equal(res, True)

    def test_vector_vector_values(self):
        vec1 = np.array([1, 2])
        vec2 = np.array([3, 4]).reshape(-1, 1)
        tgt1 = np.array([11])
        tgt2 = np.array([[3, 6], [4, 8]])
        for dt in self.types[1:]:
            v1 = vec1.astype(dt)
            v2 = vec2.astype(dt)
            res = self.matmul(v1, v2)
            assert_equal(res, tgt1)
            # no broadcast, we must make v1 into a 2d ndarray
            res = self.matmul(v2, v1.reshape(1, -1))
            assert_equal(res, tgt2)
        # boolean type
        vec = np.array([True, True], dtype='?')
        res = self.matmul(vec, vec)
        assert_equal(res, True)

    def test_vector_matrix_values(self):
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([7, 10])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(v, m1)
            assert_equal(res, tgt1)
            res = self.matmul(v, m2)
            assert_equal(res, tgt2)
        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)
        res = self.matmul(vec, mat1)
        assert_equal(res, tgt1)
        res = self.matmul(vec, mat2)
        assert_equal(res, tgt2)

    def test_matrix_vector_values(self):
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([5, 11])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(m1, v)
            assert_equal(res, tgt1)
            res = self.matmul(m2, v)
            assert_equal(res, tgt2)
        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)
        # NOTE(review): the boolean checks below compute vec @ mat, not
        # mat @ vec as the method name suggests; mat1 is symmetric here so
        # the expected values coincide -- confirm whether this is intended.
        res = self.matmul(vec, mat1)
        assert_equal(res, tgt1)
        res = self.matmul(vec, mat2)
        assert_equal(res, tgt2)

    def test_matrix_matrix_values(self):
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.array([[1, 0], [1, 1]])
        mat12 = np.stack([mat1, mat2], axis=0)
        mat21 = np.stack([mat2, mat1], axis=0)
        tgt11 = np.array([[7, 10], [15, 22]])
        tgt12 = np.array([[3, 2], [7, 4]])
        tgt21 = np.array([[1, 2], [4, 6]])
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        for dt in self.types[1:]:
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            m12 = mat12.astype(dt)
            m21 = mat21.astype(dt)
            # matrix @ matrix
            res = self.matmul(m1, m2)
            assert_equal(res, tgt12)
            res = self.matmul(m2, m1)
            assert_equal(res, tgt21)
            # stacked @ matrix
            res = self.matmul(m12, m1)
            assert_equal(res, tgt11_21)
            # matrix @ stacked
            res = self.matmul(m1, m12)
            assert_equal(res, tgt11_12)
            # stacked @ stacked
            res = self.matmul(m12, m21)
            assert_equal(res, tgt12_21)
        # boolean type: results saturate at True rather than summing
        m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
        m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
        m12 = np.stack([m1, m2], axis=0)
        m21 = np.stack([m2, m1], axis=0)
        tgt11 = m1
        tgt12 = m1
        tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        # matrix @ matrix
        res = self.matmul(m1, m2)
        assert_equal(res, tgt12)
        res = self.matmul(m2, m1)
        assert_equal(res, tgt21)
        # stacked @ matrix
        res = self.matmul(m12, m1)
        assert_equal(res, tgt11_21)
        # matrix @ stacked
        res = self.matmul(m1, m12)
        assert_equal(res, tgt11_12)
        # stacked @ stacked
        res = self.matmul(m12, m21)
        assert_equal(res, tgt12_21)
class TestMatmul(MatmulCommon):
    """Runs the common suite through ``np.matmul`` and adds tests specific
    to the function form (``out=``, object dtype, empty/bool inputs)."""
    matmul = np.matmul

    def test_out_arg(self):
        a = np.ones((5, 2), dtype=float)
        b = np.array([[1, 3], [5, 7]], dtype=float)
        tgt = np.dot(a, b)
        # test as positional argument
        msg = "out positional argument"
        out = np.zeros((5, 2), dtype=float)
        self.matmul(a, b, out)
        assert_array_equal(out, tgt, err_msg=msg)
        # test as keyword argument
        msg = "out keyword argument"
        out = np.zeros((5, 2), dtype=float)
        self.matmul(a, b, out=out)
        assert_array_equal(out, tgt, err_msg=msg)
        # test out with not allowed type cast (safe casting)
        msg = "Cannot cast ufunc .* output"
        out = np.zeros((5, 2), dtype=np.int32)
        assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
        # test out with type upcast to complex
        out = np.zeros((5, 2), dtype=np.complex128)
        c = self.matmul(a, b, out=out)
        assert_(c is out)
        with suppress_warnings() as sup:
            sup.filter(np.ComplexWarning, '')
            c = c.astype(tgt.dtype)
        assert_array_equal(c, tgt)

    def test_empty_out(self):
        # Check that the output cannot be broadcast, so that it cannot be
        # size zero when the outer dimensions (iterator size) has size zero.
        arr = np.ones((0, 1, 1))
        out = np.ones((1, 1, 1))
        assert self.matmul(arr, arr).shape == (0, 1, 1)
        with pytest.raises(ValueError, match=r"non-broadcastable"):
            self.matmul(arr, arr, out=out)

    def test_out_contiguous(self):
        a = np.ones((5, 2), dtype=float)
        b = np.array([[1, 3], [5, 7]], dtype=float)
        v = np.array([1, 3], dtype=float)
        tgt = np.dot(a, b)
        tgt_mv = np.dot(a, v)
        # test out non-contiguous
        out = np.ones((5, 2, 2), dtype=float)
        c = self.matmul(a, b, out=out[..., 0])
        assert c.base is out
        assert_array_equal(c, tgt)
        c = self.matmul(a, v, out=out[:, 0, 0])
        assert_array_equal(c, tgt_mv)
        c = self.matmul(v, a.T, out=out[:, 0, 0])
        assert_array_equal(c, tgt_mv)
        # test out contiguous in only last dim
        out = np.ones((10, 2), dtype=float)
        c = self.matmul(a, b, out=out[::2, :])
        assert_array_equal(c, tgt)
        # test transposes of out, args
        out = np.ones((5, 2), dtype=float)
        c = self.matmul(b.T, a.T, out=out.T)
        assert_array_equal(out, tgt)

    # class-level fixtures shared by the parametrized dot-equivalence test
    m1 = np.arange(15.).reshape(5, 3)
    m2 = np.arange(21.).reshape(3, 7)
    m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
    vc = np.arange(10.)
    vr = np.arange(6.)
    m0 = np.zeros((3, 0))

    @pytest.mark.parametrize('args', (
            # matrix-matrix
            (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
            # matrix-matrix-transpose, contiguous and non
            (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
            (m3, m3.T), (m3.T, m3),
            # matrix-matrix non-contiguous
            (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
            # vector-matrix, matrix-vector, contiguous
            (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
            # vector-matrix, matrix-vector, vector non-contiguous
            (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
            # vector-matrix, matrix-vector, matrix non-contiguous
            (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
            # vector-matrix, matrix-vector, both non-contiguous
            (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
            # size == 0
            (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
        ))
    def test_dot_equivalent(self, args):
        # matmul must agree with dot for all 2d/1d operand combinations
        r1 = np.matmul(*args)
        r2 = np.dot(*args)
        assert_equal(r1, r2)
        r3 = np.matmul(args[0].copy(), args[1].copy())
        assert_equal(r1, r3)

    def test_matmul_object(self):
        import fractions

        f = np.vectorize(fractions.Fraction)

        def random_ints():
            return np.random.randint(1, 1000, size=(10, 3, 3))

        # object (Fraction) matmul compared against its float conversion
        M1 = f(random_ints(), random_ints())
        M2 = f(random_ints(), random_ints())
        M3 = self.matmul(M1, M2)
        [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
        assert_allclose(N3, self.matmul(N1, N2))

    def test_matmul_object_type_scalar(self):
        from fractions import Fraction as F
        v = np.array([F(2,3), F(5,7)])
        # vector-vector matmul of object arrays yields the element type
        res = self.matmul(v, v)
        assert_(type(res) is F)

    def test_matmul_empty(self):
        a = np.empty((3, 0), dtype=object)
        b = np.empty((0, 3), dtype=object)
        c = np.zeros((3, 3))
        assert_array_equal(np.matmul(a, b), c)

    def test_matmul_exception_multiply(self):
        # test that matmul fails if `__mul__` is missing
        class add_not_multiply():
            def __add__(self, other):
                return self
        a = np.full((3,3), add_not_multiply())
        with assert_raises(TypeError):
            b = np.matmul(a, a)

    def test_matmul_exception_add(self):
        # test that matmul fails if `__add__` is missing
        class multiply_not_add():
            def __mul__(self, other):
                return self
        a = np.full((3,3), multiply_not_add())
        with assert_raises(TypeError):
            b = np.matmul(a, a)

    def test_matmul_bool(self):
        # gh-14439
        a = np.array([[1, 0],[1, 1]], dtype=bool)
        assert np.max(a.view(np.uint8)) == 1
        b = np.matmul(a, a)
        # matmul with boolean output should always be 0, 1
        assert np.max(b.view(np.uint8)) == 1
        rg = np.random.default_rng(np.random.PCG64(43))
        d = rg.integers(2, size=4*5, dtype=np.int8)
        d = d.reshape(4, 5) > 0
        out1 = np.matmul(d, d.reshape(5, 4))
        out2 = np.dot(d, d.reshape(5, 4))
        assert_equal(out1, out2)
        # empty boolean contraction reduces to all-False
        c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
        assert not np.any(c)
class TestMatmulOperator(MatmulCommon):
    """Run the common matmul tests through the ``@`` operator."""
    import operator
    matmul = operator.matmul

    def test_array_priority_override(self):
        # An object with high __array_priority__ must win on both sides
        # of the operator.
        class Dominant:
            __array_priority__ = 1000

            def __matmul__(self, other):
                return "A"

            def __rmatmul__(self, other):
                return "A"

        dominant = Dominant()
        ones = np.ones(2)
        assert_equal(self.matmul(dominant, ones), "A")
        assert_equal(self.matmul(ones, dominant), "A")

    def test_matmul_raises(self):
        # Scalars and generic void scalars do not support matmul.
        bad_operands = [(np.int8(5), np.int8(5)),
                        (np.void(b'abc'), np.void(b'abc')),
                        (np.arange(10), np.void(b'abc'))]
        for lhs, rhs in bad_operands:
            assert_raises(TypeError, self.matmul, lhs, rhs)
def test_matmul_inplace():
    """In-place matmul (``@=``) is unsupported and must raise TypeError."""
    # It would be nice to support in-place matmul eventually, but for now
    # we don't have a working implementation, so better just to error out
    # and nudge people to writing "a = a @ b".
    a = np.eye(3)
    b = np.eye(3)
    import operator
    for attempt in (lambda: a.__imatmul__(b),
                    lambda: operator.imatmul(a, b)):
        assert_raises(TypeError, attempt)
    # the statement form must fail too
    assert_raises(TypeError, exec, "a @= b", globals(), locals())
def test_matmul_axes():
    """``axes=`` keyword of matmul controls where the core dims land."""
    a = np.arange(3*4*5).reshape(3, 4, 5)
    # contract the last two axes, placing the output core at (1, 2) ...
    c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
    assert c.shape == (3, 4, 4)
    # ... or at (0, 1); the two results differ only by an axis swap
    d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
    assert d.shape == (4, 4, 3)
    assert_array_equal(np.swapaxes(d, 0, 2), c)
    # a 1-D operand carries a single core axis
    f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
    assert f.shape == (4, 5)
class TestInner:
    """Tests for ``np.inner`` across dtypes, contiguity, and dimensions."""

    def test_inner_type_mismatch(self):
        # a structured dtype paired with a plain scalar must raise
        c = 1.
        A = np.array((1,1), dtype='i,i')
        assert_raises(TypeError, np.inner, c, A)
        assert_raises(TypeError, np.inner, A, c)

    def test_inner_scalar_and_vector(self):
        # a 0-d scalar operand broadcasts as a plain multiplier
        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
            sca = np.array(3, dtype=dt)[()]
            vec = np.array([1, 2], dtype=dt)
            desired = np.array([3, 6], dtype=dt)
            assert_equal(np.inner(vec, sca), desired)
            assert_equal(np.inner(sca, vec), desired)

    def test_vecself(self):
        # Ticket 844.
        # Inner product of a vector with itself segfaults or give
        # meaningless result
        a = np.zeros(shape=(1, 80), dtype=np.float64)
        p = np.inner(a, a)
        assert_almost_equal(p, 0, decimal=14)

    def test_inner_product_with_various_contiguities(self):
        # github issue 6532
        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
            # check an inner product involving a matrix transpose
            A = np.array([[1, 2], [3, 4]], dtype=dt)
            B = np.array([[1, 3], [2, 4]], dtype=dt)
            C = np.array([1, 1], dtype=dt)
            desired = np.array([4, 6], dtype=dt)
            assert_equal(np.inner(A.T, C), desired)
            assert_equal(np.inner(C, A.T), desired)
            assert_equal(np.inner(B, C), desired)
            assert_equal(np.inner(C, B), desired)
            # check a matrix product
            desired = np.array([[7, 10], [15, 22]], dtype=dt)
            assert_equal(np.inner(A, B), desired)
            # check the syrk vs. gemm paths
            desired = np.array([[5, 11], [11, 25]], dtype=dt)
            assert_equal(np.inner(A, A), desired)
            assert_equal(np.inner(A, A.copy()), desired)
            # check an inner product involving an aliased and reversed view
            a = np.arange(5).astype(dt)
            b = a[::-1]
            desired = np.array(10, dtype=dt).item()
            assert_equal(np.inner(b, a), desired)

    def test_3d_tensor(self):
        # inner over the last axis of two (2, 3, 4) tensors -> (2, 3, 2, 3)
        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
            a = np.arange(24).reshape(2,3,4).astype(dt)
            b = np.arange(24, 48).reshape(2,3,4).astype(dt)
            desired = np.array(
                [[[[ 158, 182, 206],
                   [ 230, 254, 278]],
                  [[ 566, 654, 742],
                   [ 830, 918, 1006]],
                  [[ 974, 1126, 1278],
                   [1430, 1582, 1734]]],
                 [[[1382, 1598, 1814],
                   [2030, 2246, 2462]],
                  [[1790, 2070, 2350],
                   [2630, 2910, 3190]],
                  [[2198, 2542, 2886],
                   [3230, 3574, 3918]]]],
                dtype=dt
            )
            assert_equal(np.inner(a, b), desired)
            assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestAlen:
    """np.alen is deprecated: it must warn, but still report len()."""

    def test_basic(self):
        with pytest.warns(DeprecationWarning):
            # Arrays and nested lists alike report their leading length.
            for seq, expected in [
                (np.array([1, 2, 3]), 3),
                (np.array([[1, 2, 3], [4, 5, 7]]), 2),
                ([1, 2, 3], 3),
                ([[1, 2, 3], [4, 5, 7]], 2),
            ]:
                assert_equal(np.alen(seq), expected)

    def test_singleton(self):
        # A scalar is treated as length-1.
        with pytest.warns(DeprecationWarning):
            assert_equal(np.alen(5), 1)
class TestChoose:
    """Checks for np.choose broadcasting and output dtype promotion."""

    def setup(self):
        # Candidate arrays filled with 2s and 3s, in 1-d and 2-d shapes,
        # plus the per-position selection index.
        self.x = 2 * np.ones((3,), dtype=int)
        self.y = 3 * np.ones((3,), dtype=int)
        self.x2 = 2 * np.ones((2, 3), dtype=int)
        self.y2 = 3 * np.ones((2, 3), dtype=int)
        self.ind = [0, 0, 1]

    def test_basic(self):
        chosen = np.choose(self.ind, (self.x, self.y))
        assert_equal(chosen, [2, 2, 3])

    def test_broadcast1(self):
        # 2-d choices: the index broadcasts across the leading axis.
        chosen = np.choose(self.ind, (self.x2, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    def test_broadcast2(self):
        # Mixed 1-d/2-d choices broadcast against each other.
        chosen = np.choose(self.ind, (self.x, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    @pytest.mark.parametrize("ops",
        [(1000, np.array([1], dtype=np.uint8)),
         (-1, np.array([1], dtype=np.uint8)),
         (1., np.float32(3)),
         (1., np.array([3], dtype=np.float32))],)
    def test_output_dtype(self, ops):
        # The result dtype follows normal promotion over all choices.
        expected_dt = np.result_type(*ops)
        assert np.choose([0], ops).dtype == expected_dt
class TestRepeat:
    """Checks for np.repeat with scalar/sequence counts and axes."""

    def setup(self):
        self.m = np.array([1, 2, 3, 4, 5, 6])
        self.m_rect = self.m.reshape((2, 3))

    def test_basic(self):
        # Per-element repeat counts.
        repeated = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
        assert_equal(repeated, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6])

    def test_broadcast1(self):
        # A scalar count broadcasts to every element.
        repeated = np.repeat(self.m, 2)
        assert_equal(repeated, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])

    def test_axis_spec(self):
        # Per-row and per-column counts along an explicit axis.
        repeated = np.repeat(self.m_rect, [2, 1], axis=0)
        assert_equal(repeated, [[1, 2, 3],
                                [1, 2, 3],
                                [4, 5, 6]])

        repeated = np.repeat(self.m_rect, [1, 3, 2], axis=1)
        assert_equal(repeated, [[1, 2, 2, 2, 3, 3],
                                [4, 5, 5, 5, 6, 6]])

    def test_broadcast2(self):
        # Scalar count combined with an explicit axis.
        repeated = np.repeat(self.m_rect, 2, axis=0)
        assert_equal(repeated, [[1, 2, 3],
                                [1, 2, 3],
                                [4, 5, 6],
                                [4, 5, 6]])

        repeated = np.repeat(self.m_rect, 2, axis=1)
        assert_equal(repeated, [[1, 1, 2, 2, 3, 3],
                                [4, 4, 5, 5, 6, 6]])
# Padding-mode codes for the neighborhood-iterator test helpers below
# (presumably mirrors the C-side constants in _multiarray_tests — confirm).
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
class TestNeighborhoodIter:
    """Checks for the C-level neighborhood iterator (driven through the
    _multiarray_tests helper), parametrized over a native float dtype and
    an object dtype (Decimal)."""
    # Simple, 2d tests
    def test_simple2d(self, dt):
        # Test zero and one padding for simple data type
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        # Expected neighborhoods around each element, zero-padded.
        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Same neighborhoods, one-padded.
        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        # Constant padding with the fill value 4.
        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
        assert_array_equal(l, r)

        # Test with start in the middle
        r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
        assert_array_equal(l, r)

    def test_mirror2d(self, dt):
        # Mirror padding reflects the array edges into the border.
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    # Simple, 1d tests
    def test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        # Constant padding using the last element as the fill value.
        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    # Test mirror modes
    def test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        # BUG FIX: the original asserted on the (always truthy, non-empty)
        # list itself, so the dtype check could never fail.  Use all().
        assert_(all(i.dtype == dt for i in l))
        assert_array_equal(l, r)

    # Circular mode
    def test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = _multiarray_tests.test_neighborhood_iterator(
            x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter:
    # Stacked iterators: the first bounds/mode pair pads the base array,
    # the second iterates over that padded result and may itself run out
    # of bounds.  Expected neighborhoods are listed exhaustively.
    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)

    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of circular
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = _multiarray_tests.test_neighborhood_iterator_oob(
            x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
class TestWarnings:
    """Warning-related array assignment behavior."""

    def test_complex_warning(self):
        real_target = np.array([1, 2])
        complex_src = np.array([1-2j, 1+2j])
        with warnings.catch_warnings():
            # Promote ComplexWarning to an error so the lossy assignment
            # raises instead of silently discarding imaginary parts.
            warnings.simplefilter("error", np.ComplexWarning)
            assert_raises(np.ComplexWarning,
                          real_target.__setitem__, slice(None), complex_src)
            # The target must be left untouched by the failed assignment.
            assert_equal(real_target, [1, 2])
class TestMinScalarType:
    """np.min_scalar_type must pick the smallest sufficient dtype."""

    def _check(self, value, typename):
        # Helper: `value` must map exactly onto the named dtype.
        assert_equal(np.dtype(typename), np.min_scalar_type(value))

    def test_usigned_shortshort(self):
        self._check(2**8 - 1, 'uint8')

    def test_usigned_short(self):
        self._check(2**16 - 1, 'uint16')

    def test_usigned_int(self):
        self._check(2**32 - 1, 'uint32')

    def test_usigned_longlong(self):
        self._check(2**63 - 1, 'uint64')

    def test_object(self):
        # Beyond the 64-bit range, object dtype is the fallback.
        self._check(2**64, 'O')
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype:
    # Parsing checks: PEP 3118 format strings must map onto the expected
    # numpy dtypes via numpy.core._internal._dtype_from_pep3118.
    def _check(self, spec, wanted):
        # Parse `spec` and compare against the dtype built from `wanted`.
        dt = np.dtype(wanted)
        actual = _dtype_from_pep3118(spec)
        assert_equal(actual, dt,
                     err_msg="spec %r != dtype %r" % (spec, wanted))

    def test_native_padding(self):
        align = np.dtype('i').alignment
        for j in range(8):
            if j == 0:
                s = 'bi'
            else:
                s = 'b%dxi' % j
            # '@' (native alignment): the int lands on the next aligned
            # offset; '=' (native size, no alignment): packed layout.
            self._check('@'+s, {'f0': ('i1', 0),
                                'f1': ('i', align*(1 + j//align))})
            self._check('='+s, {'f0': ('i1', 0),
                                'f1': ('i', 1+j)})

    def test_native_padding_2(self):
        # Native padding should work also for structs and sub-arrays
        self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
        self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})

    def test_trailing_padding(self):
        # Trailing padding should be included, *and*, the item size
        # should match the alignment if in aligned mode
        align = np.dtype('i').alignment
        size = np.dtype('i').itemsize

        def aligned(n):
            # Round n up to the next multiple of the int alignment.
            return align*(1 + (n-1)//align)

        base = dict(formats=['i'], names=['f0'])
        self._check('ix', dict(itemsize=aligned(size + 1), **base))
        self._check('ixx', dict(itemsize=aligned(size + 2), **base))
        self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
        self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
        self._check('i7x', dict(itemsize=aligned(size + 7), **base))

        # '^' mode: no alignment, so the item size is exact.
        self._check('^ix', dict(itemsize=size + 1, **base))
        self._check('^ixx', dict(itemsize=size + 2, **base))
        self._check('^ixxx', dict(itemsize=size + 3, **base))
        self._check('^ixxxx', dict(itemsize=size + 4, **base))
        self._check('^i7x', dict(itemsize=size + 7, **base))

    def test_native_padding_3(self):
        dt = np.dtype(
            [('a', 'b'), ('b', 'i'),
             ('sub', np.dtype('b,i')), ('c', 'i')],
            align=True)
        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)

        dt = np.dtype(
            [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
             ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)

    def test_padding_with_array_inside_struct(self):
        dt = np.dtype(
            [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
             ('d', 'i')],
            align=True)
        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)

    def test_byteorder_inside_struct(self):
        # The byte order after @T{=i} should be '=', not '@'.
        # Check this by noting the absence of native alignment.
        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
                                 'f1': ('i', 5)})

    def test_intra_padding(self):
        # Natively aligned sub-arrays may require some internal padding
        align = np.dtype('i').alignment
        size = np.dtype('i').itemsize

        def aligned(n):
            # Round n up to the next multiple of the int alignment.
            return (align*(1 + (n-1)//align))

        self._check('(3)T{ix}', (dict(
            names=['f0'],
            formats=['i'],
            offsets=[0],
            itemsize=aligned(size + 1)
        ), (3,)))

    def test_char_vs_string(self):
        # 'c' parses to the single-character dtype ...
        dt = np.dtype('c')
        self._check('c', dt)

        # ... while repeated 'c' and 's' give byte-string fields.
        dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
        self._check('4c4s', dt)

    def test_field_order(self):
        # gh-9053 - previously, we relied on dictionary key order
        self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
        self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])

    def test_unnamed_fields(self):
        # Unnamed fields are auto-named f0, f1, ...; a partially named
        # spec fills the gaps without clashing with explicit names.
        self._check('ii', [('f0', 'i'), ('f1', 'i')])
        self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])

        self._check('i', 'i')
        self._check('i:f0:', [('f0', 'i')])
class TestNewBufferProtocol:
    """ Test PEP3118 buffers """

    def _check_roundtrip(self, obj):
        # Export `obj` through memoryview and re-import it; both the
        # no-copy (asarray) and copying (array) paths must preserve
        # dtype, shape and contents.
        obj = np.asarray(obj)
        x = memoryview(obj)
        y = np.asarray(x)
        y2 = np.array(x)
        assert_(not y.flags.owndata)
        assert_(y2.flags.owndata)

        assert_equal(y.dtype, obj.dtype)
        assert_equal(y.shape, obj.shape)
        assert_array_equal(obj, y)

        assert_equal(y2.dtype, obj.dtype)
        assert_equal(y2.shape, obj.shape)
        assert_array_equal(obj, y2)

    def test_roundtrip(self):
        x = np.array([1, 2, 3, 4, 5], dtype='i4')
        self._check_roundtrip(x)

        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
        self._check_roundtrip(x)

        # A discontiguous (sliced) view must also round-trip.
        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
        self._check_roundtrip(x)

        # A structured dtype covering every basic scalar kind.
        dt = [('a', 'b'),
              ('b', 'h'),
              ('c', 'i'),
              ('d', 'l'),
              ('dx', 'q'),
              ('e', 'B'),
              ('f', 'H'),
              ('g', 'I'),
              ('h', 'L'),
              ('hx', 'Q'),
              ('i', np.single),
              ('j', np.double),
              ('k', np.longdouble),
              ('ix', np.csingle),
              ('jx', np.cdouble),
              ('kx', np.clongdouble),
              ('l', 'S4'),
              ('m', 'U4'),
              ('n', 'V3'),
              ('o', '?'),
              ('p', np.half),
              ]
        x = np.array(
            [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              b'aaaa', 'bbbb', b'xxx', True, 1.0)],
            dtype=dt)
        self._check_roundtrip(x)

        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
        self._check_roundtrip(x)

        # Both byte orders of 2- and 4-byte ints must round-trip.
        x = np.array([1, 2, 3], dtype='>i2')
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='<i2')
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='>i4')
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='<i4')
        self._check_roundtrip(x)

        # check long long can be represented as non-native
        x = np.array([1, 2, 3], dtype='>q')
        self._check_roundtrip(x)

        # Native-only data types can be passed through the buffer interface
        # only in native byte order
        if sys.byteorder == 'little':
            x = np.array([1, 2, 3], dtype='>g')
            assert_raises(ValueError, self._check_roundtrip, x)
            x = np.array([1, 2, 3], dtype='<g')
            self._check_roundtrip(x)
        else:
            x = np.array([1, 2, 3], dtype='>g')
            self._check_roundtrip(x)
            x = np.array([1, 2, 3], dtype='<g')
            assert_raises(ValueError, self._check_roundtrip, x)

    def test_roundtrip_half(self):
        # Edge values of the float16 range, both byte orders.
        half_list = [
            1.0,
            -2.0,
            6.5504 * 10**4,  # (max half precision)
            2**-14,  # ~= 6.10352 * 10**-5 (minimum positive normal)
            2**-24,  # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
            0.0,
            -0.0,
            float('+inf'),
            float('-inf'),
            0.333251953125,  # ~= 1/3
        ]

        x = np.array(half_list, dtype='>e')
        self._check_roundtrip(x)
        x = np.array(half_list, dtype='<e')
        self._check_roundtrip(x)

    def test_roundtrip_single_types(self):
        for typ in np.sctypeDict.values():
            dtype = np.dtype(typ)

            if dtype.char in 'Mm':
                # datetimes cannot be used in buffers
                continue
            if dtype.char == 'V':
                # skip void
                continue

            x = np.zeros(4, dtype=dtype)
            self._check_roundtrip(x)

            if dtype.char not in 'qQgG':
                # Non-native byte orders, where supported.
                dt = dtype.newbyteorder('<')
                x = np.zeros(4, dtype=dt)
                self._check_roundtrip(x)

                dt = dtype.newbyteorder('>')
                x = np.zeros(4, dtype=dt)
                self._check_roundtrip(x)

    def test_roundtrip_scalar(self):
        # Issue #4015.
        self._check_roundtrip(0)

    def test_invalid_buffer_format(self):
        # datetime64 cannot be used fully in a buffer yet
        # Should be fixed in the next Numpy major release
        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
        a = np.empty(3, dt)
        assert_raises((ValueError, BufferError), memoryview, a)
        assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))

    def test_export_simple_1d(self):
        x = np.array([1, 2, 3, 4, 5], dtype='i')
        y = memoryview(x)
        assert_equal(y.format, 'i')
        assert_equal(y.shape, (5,))
        assert_equal(y.ndim, 1)
        assert_equal(y.strides, (4,))
        assert_equal(y.suboffsets, ())
        assert_equal(y.itemsize, 4)

    def test_export_simple_nd(self):
        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
        y = memoryview(x)
        assert_equal(y.format, 'd')
        assert_equal(y.shape, (2, 2))
        assert_equal(y.ndim, 2)
        assert_equal(y.strides, (16, 8))
        assert_equal(y.suboffsets, ())
        assert_equal(y.itemsize, 8)

    def test_export_discontiguous(self):
        # A sliced view exports with the stride of the parent array.
        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
        y = memoryview(x)
        assert_equal(y.format, 'f')
        assert_equal(y.shape, (3, 3))
        assert_equal(y.ndim, 2)
        assert_equal(y.strides, (36, 4))
        assert_equal(y.suboffsets, ())
        assert_equal(y.itemsize, 4)

    def test_export_record(self):
        # Structured dtype: the exported PEP 3118 format string must
        # encode every field (name, type, byte order) in order.
        dt = [('a', 'b'),
              ('b', 'h'),
              ('c', 'i'),
              ('d', 'l'),
              ('dx', 'q'),
              ('e', 'B'),
              ('f', 'H'),
              ('g', 'I'),
              ('h', 'L'),
              ('hx', 'Q'),
              ('i', np.single),
              ('j', np.double),
              ('k', np.longdouble),
              ('ix', np.csingle),
              ('jx', np.cdouble),
              ('kx', np.clongdouble),
              ('l', 'S4'),
              ('m', 'U4'),
              ('n', 'V3'),
              ('o', '?'),
              ('p', np.half),
              ]
        x = np.array(
            [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              b'aaaa', 'bbbb', b' ', True, 1.0)],
            dtype=dt)
        y = memoryview(x)
        assert_equal(y.shape, (1,))
        assert_equal(y.ndim, 1)
        assert_equal(y.suboffsets, ())

        sz = sum([np.dtype(b).itemsize for a, b in dt])
        # The 'l'/'L' codes depend on the platform's C long size.
        if np.dtype('l').itemsize == 4:
            assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
        else:
            assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
        # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
        if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
            assert_equal(y.strides, (sz,))
        assert_equal(y.itemsize, sz)

    def test_export_subarray(self):
        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
        y = memoryview(x)
        assert_equal(y.format, 'T{(2,2)i:a:}')
        assert_equal(y.shape, ())
        assert_equal(y.ndim, 0)
        assert_equal(y.strides, ())
        assert_equal(y.suboffsets, ())
        assert_equal(y.itemsize, 16)

    def test_export_endian(self):
        # Only non-native byte order is spelled out in the format.
        x = np.array([1, 2, 3], dtype='>i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, '>i')
        else:
            assert_equal(y.format, 'i')

        x = np.array([1, 2, 3], dtype='<i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, 'i')
        else:
            assert_equal(y.format, '<i')

    def test_export_flags(self):
        # Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
        assert_raises(ValueError,
                      _multiarray_tests.get_buffer_info,
                      np.arange(5)[::2], ('SIMPLE',))

    @pytest.mark.parametrize(["obj", "error"], [
        pytest.param(np.array([1, 2], dtype=rational), ValueError, id="array"),
        pytest.param(rational(1, 2), TypeError, id="scalar")])
    def test_export_and_pickle_user_dtype(self, obj, error):
        # User dtypes should export successfully when FORMAT was not requested.
        with pytest.raises(error):
            _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO", "FORMAT"))

        _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO",))

        # This is currently also necessary to implement pickling:
        pickle_obj = pickle.dumps(obj)
        res = pickle.loads(pickle_obj)
        assert_array_equal(res, obj)

    def test_padding(self):
        # Structured dtypes with 0..7 bytes of leading padding.
        for j in range(8):
            x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
            self._check_roundtrip(x)

    def test_reference_leak(self):
        # Exporting/importing a buffer must not change the refcount of
        # the helper module used during export.
        if HAS_REFCOUNT:
            count_1 = sys.getrefcount(np.core._internal)
        a = np.zeros(4)
        b = memoryview(a)
        c = np.asarray(b)
        if HAS_REFCOUNT:
            count_2 = sys.getrefcount(np.core._internal)
            assert_equal(count_1, count_2)
        del c  # avoid pyflakes unused variable warning.

    def test_padded_struct_array(self):
        dt1 = np.dtype(
            [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
            align=True)
        x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
        self._check_roundtrip(x1)

        dt2 = np.dtype(
            [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
            align=True)
        x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
        self._check_roundtrip(x2)

        dt3 = np.dtype(
            [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
             ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
        self._check_roundtrip(x3)

    @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
    def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')):
        # Note: c defined as parameter so that it is persistent and leak
        # checks will notice gh-16934 (buffer info cache leak).
        # Check for NPY_RELAXED_STRIDES_CHECKING:
        if np.ones((10, 1), order="C").flags.f_contiguous:
            c.strides = (-1, 80, 8)
            assert_(memoryview(c).strides == (800, 80, 8))

        # Writing C-contiguous data to a BytesIO buffer should work
        fd = io.BytesIO()
        fd.write(c.data)

        fortran = c.T
        assert_(memoryview(fortran).strides == (8, 80, 800))

        arr = np.ones((1, 10))
        if arr.flags.f_contiguous:
            shape, strides = _multiarray_tests.get_buffer_info(
                arr, ['F_CONTIGUOUS'])
            assert_(strides[0] == 8)
            arr = np.ones((10, 1), order='F')
            shape, strides = _multiarray_tests.get_buffer_info(
                arr, ['C_CONTIGUOUS'])
            assert_(strides[-1] == 8)

    @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
    @pytest.mark.skipif(not np.ones((10, 1), order="C").flags.f_contiguous,
                        reason="Test is unnecessary (but fails) without relaxed strides.")
    def test_relaxed_strides_buffer_info_leak(self, arr=np.ones((1, 10))):
        """Test that alternating export of C- and F-order buffers from
        an array which is both C- and F-order when relaxed strides is
        active works.
        This test defines array in the signature to ensure leaking more
        references every time the test is run (catching the leak with
        pytest-leaks).
        """
        for i in range(10):
            _, s = _multiarray_tests.get_buffer_info(arr, ['F_CONTIGUOUS'])
            assert s == (8, 8)
            _, s = _multiarray_tests.get_buffer_info(arr, ['C_CONTIGUOUS'])
            assert s == (80, 8)

    def test_out_of_order_fields(self):
        dt = np.dtype(dict(
            formats=['<i4', '<i4'],
            names=['one', 'two'],
            offsets=[4, 0],
            itemsize=8
        ))

        # overlapping fields cannot be represented by PEP3118
        arr = np.empty(1, dt)
        with assert_raises(ValueError):
            memoryview(arr)

    def test_max_dims(self):
        # 32 dimensions is the maximum ndarray rank; it must export.
        a = np.ones((1,) * 32)
        self._check_roundtrip(a)

    @pytest.mark.slow
    def test_error_too_many_dims(self):
        def make_ctype(shape, scalar_type):
            # Build a nested ctypes array type with the given shape.
            t = scalar_type
            for dim in shape[::-1]:
                t = dim * t
            return t

        # construct a memoryview with 33 dimensions
        c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
        m = memoryview(c_u8_33d())
        assert_equal(m.ndim, 33)

        assert_raises_regex(
            RuntimeError, "ndim",
            np.array, m)

        # The above seems to create some deep cycles, clean them up for
        # easier reference count debugging:
        del c_u8_33d, m
        for i in range(33):
            if gc.collect() == 0:
                break

    def test_error_pointer_type(self):
        # gh-6741
        m = memoryview(ctypes.pointer(ctypes.c_uint8()))
        assert_('&' in m.format)

        assert_raises_regex(
            ValueError, "format string",
            np.array, m)

    def test_error_message_unsupported(self):
        # wchar has no corresponding numpy type - if this changes in future, we
        # need a better way to construct an invalid memoryview format.
        t = ctypes.c_wchar * 4
        with assert_raises(ValueError) as cm:
            np.array(t())

        # The chained cause must explain the unsupported format code.
        exc = cm.exception
        with assert_raises_regex(
            NotImplementedError,
            r"Unrepresentable .* 'u' \(UCS-2 strings\)"
        ):
            raise exc.__cause__

    def test_ctypes_integer_via_memoryview(self):
        # gh-11150, due to bpo-10746
        for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
            value = c_integer(42)
            with warnings.catch_warnings(record=True):
                warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
                np.asarray(value)

    def test_ctypes_struct_via_memoryview(self):
        # gh-10528
        class foo(ctypes.Structure):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
        f = foo(a=1, b=2)

        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
            arr = np.asarray(f)

        assert_equal(arr['a'], 1)
        assert_equal(arr['b'], 2)
        # The imported array shares memory with the ctypes struct.
        f.a = 3
        assert_equal(arr['a'], 3)

    @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]])
    def test_error_if_stored_buffer_info_is_corrupted(self, obj):
        """
        If a user extends a NumPy array before 1.20 and then runs it
        on NumPy 1.20+. A C-subclassed array might in theory modify
        the new buffer-info field. This checks that an error is raised
        if this happens (for buffer export), an error is written on delete.
        This is a sanity check to help users transition to safe code, it
        may be deleted at any point.
        """
        # corrupt buffer info:
        _multiarray_tests.corrupt_or_fix_bufferinfo(obj)
        name = type(obj)
        with pytest.raises(RuntimeError,
                           match=f".*{name} appears to be C subclassed"):
            memoryview(obj)
        # Fix buffer info again before we delete (or we lose the memory)
        _multiarray_tests.corrupt_or_fix_bufferinfo(obj)
class TestArrayAttributeDeletion:
    """Deleting ndarray/flags attributes must raise AttributeError."""

    def _assert_undeletable(self, obj, names):
        # Helper: every named attribute must refuse deletion.
        for name in names:
            assert_raises(AttributeError, delattr, obj, name)

    def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046, should not seqfault, raise AttributeError
        a = np.ones(2)
        writable = ['shape', 'strides', 'data', 'dtype', 'real', 'imag',
                    'flat']
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
            self._assert_undeletable(a, writable)

    def test_multiarray_not_writable_attributes_deletion(self):
        a = np.ones(2)
        read_only = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
                     "ctypes", "T", "__array_interface__",
                     "__array_struct__", "__array_priority__",
                     "__array_finalize__"]
        self._assert_undeletable(a, read_only)

    def test_multiarray_flags_writable_attribute_deletion(self):
        flags = np.ones(2).flags
        writable = ['writebackifcopy', 'updateifcopy', 'aligned',
                    'writeable']
        self._assert_undeletable(flags, writable)

    def test_multiarray_flags_not_writable_attribute_deletion(self):
        flags = np.ones(2).flags
        read_only = ["contiguous", "c_contiguous", "f_contiguous",
                     "fortran", "owndata", "fnc", "forc", "behaved",
                     "carray", "farray", "num"]
        self._assert_undeletable(flags, read_only)
class TestArrayInterface():
    class Foo:
        # Minimal scalar-like object exposing __array_interface__; its
        # `iface` dict is mutated by each parametrized test case.
        def __init__(self, value):
            self.value = value
            self.iface = {'typestr': 'f8'}

        def __float__(self):
            return float(self.value)

        @property
        def __array_interface__(self):
            return self.iface

    # Shared instance reused (and re-initialized) across all cases.
    f = Foo(0.5)

    @pytest.mark.parametrize('val, iface, expected', [
        (f, {}, 0.5),
        ([f], {}, [0.5]),
        ([f, f], {}, [0.5, 0.5]),
        (f, {'shape': ()}, 0.5),
        (f, {'shape': None}, TypeError),
        (f, {'shape': (1, 1)}, [[0.5]]),
        (f, {'shape': (2,)}, ValueError),
        (f, {'strides': ()}, 0.5),
        (f, {'strides': (2,)}, ValueError),
        (f, {'strides': 16}, TypeError),
        ])
    def test_scalar_interface(self, val, iface, expected):
        # Test scalar coercion within the array interface
        self.f.iface = {'typestr': 'f8'}
        self.f.iface.update(iface)
        if HAS_REFCOUNT:
            pre_cnt = sys.getrefcount(np.dtype('f8'))
        if isinstance(expected, type):
            # An exception class in `expected` means coercion must fail.
            assert_raises(expected, np.array, val)
        else:
            result = np.array(val)
            assert_equal(np.array(val), expected)
            assert result.dtype == 'f8'
            del result
        if HAS_REFCOUNT:
            # The dtype refcount must return to its pre-call value
            # (i.e. the interface machinery must not leak references).
            post_cnt = sys.getrefcount(np.dtype('f8'))
            assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
    """A 0-d source consumed via __array_interface__ yields the scalar."""
    source = np.array(1)

    class ZeroDimProxy:
        # Keep a reference to the source so its buffer stays alive.
        array = source
        __array_interface__ = source.__array_interface__

    assert_equal(np.array(ZeroDimProxy()), 1)
def test_array_interface_itemsize():
    # See gh-6361
    # For a padded struct dtype, the 'descr' and 'typestr' views of the
    # interface must agree on the total item size.
    padded = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
                       'offsets': [0, 8], 'itemsize': 16})
    arr = np.ones(10, dtype=padded)
    iface = arr.__array_interface__
    from_descr = np.dtype(iface['descr'])
    from_typestr = np.dtype(iface['typestr'])
    assert_equal(from_descr.itemsize, from_typestr.itemsize)
def test_array_interface_empty_shape():
    # See gh-7994
    base = np.array([1, 2, 3])
    iface_view = dict(base.__array_interface__)
    iface_view['shape'] = ()

    class DummyArray1:
        __array_interface__ = iface_view

    # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
    # the interface data to bytes would invoke the bug this tests for, that
    # __array_interface__ with shape=() is not allowed if the data is an object
    # exposing the buffer interface
    iface_bytes = dict(iface_view)
    iface_bytes['data'] = base[0].tobytes()

    class DummyArray2:
        __array_interface__ = iface_bytes

    from_view = np.asarray(DummyArray1())
    from_bytes = np.asarray(DummyArray2())
    reference = base[:1].reshape(())
    assert_equal(from_view, from_bytes)
    assert_equal(from_view, reference)
def test_array_interface_offset():
    # An 'offset' entry must skip that many bytes of the underlying
    # buffer (here exactly one int32 element).
    base = np.array([1, 2, 3], dtype='int32')
    iface = dict(base.__array_interface__)
    iface['data'] = memoryview(base)
    iface['shape'] = (2,)
    iface['offset'] = 4

    class DummyArray:
        __array_interface__ = iface

    shifted = np.asarray(DummyArray())
    assert_equal(shifted, base[1:])
def test_array_interface_unicode_typestr():
    # A non-ASCII 'typestr' entry must fail cleanly.
    base = np.array([1, 2, 3], dtype='int32')
    iface = dict(base.__array_interface__)
    iface['typestr'] = '\N{check mark}'

    class DummyArray:
        __array_interface__ = iface

    # should not be UnicodeEncodeError
    assert_raises(TypeError, np.asarray, DummyArray())
def test_flat_element_deletion():
    """Deleting from a flat iterator is unsupported and may only raise TypeError."""
    flat_iter = np.ones(3).flat
    try:
        del flat_iter[1]
        del flat_iter[1:2]
    except TypeError:
        pass
    except Exception:
        # Any other exception type is a failure.
        raise AssertionError
def test_scalar_element_deletion():
    """Deleting a field from a structured (void) scalar must raise ValueError."""
    records = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
    first_record = records[0]
    assert_raises(ValueError, first_record.__delitem__, 'x')
class TestMemEventHook:
    """Exercises the PyDataMem_SetEventHook allocation hook from C."""
    def test_mem_seteventhook(self):
        # The actual tests are within the C code in
        # multiarray/_multiarray_tests.c.src
        _multiarray_tests.test_pydatamem_seteventhook_start()
        # force an allocation and free of a numpy array
        # needs to be larger then limit of small memory cacher in ctors.c
        a = np.zeros(1000)
        del a
        break_cycles()
        _multiarray_tests.test_pydatamem_seteventhook_end()
class TestMapIter:
    """Smoke test of the C-level MapIter API via test_inplace_increment."""
    def test_mapiter(self):
        # The actual tests are within the C code in
        # multiarray/_multiarray_tests.c.src
        a = np.arange(12).reshape((3, 4)).astype(float)
        index = ([1, 1, 2, 0],
                 [0, 0, 2, 3])
        vals = [50, 50, 30, 16]
        _multiarray_tests.test_inplace_increment(a, index, vals)
        # Repeated index (1, 0) must accumulate: 4 + 50 + 50 = 104.
        assert_equal(a, [[0.00, 1., 2.0, 19.],
                         [104., 5., 6.0, 7.0],
                         [8.00, 9., 40., 11.]])
        b = np.arange(6).astype(float)
        index = (np.array([1, 2, 0]),)
        vals = [50, 4, 100.1]
        _multiarray_tests.test_inplace_increment(b, index, vals)
        assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray:
    """Checks the PyArray_AsCArray wrapper for 1-, 2- and 3-d arrays."""
    def test_1darray(self):
        array = np.arange(24, dtype=np.double)
        from_c = _multiarray_tests.test_as_c_array(array, 3)
        assert_equal(array[3], from_c)
    def test_2darray(self):
        array = np.arange(24, dtype=np.double).reshape(3, 8)
        from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
        assert_equal(array[2, 4], from_c)
    def test_3darray(self):
        array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
        from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
        assert_equal(array[1, 2, 3], from_c)
class TestConversion:
    """Comparisons between array scalars and Python scalars, plus bool()/int()
    conversion of 0-d and single-element arrays."""
    def test_array_scalar_relational_operation(self):
        # All integer
        for dt1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
            for dt2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
        # Unsigned integers
        for dt1 in 'BHILQP':
            assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            # Unsigned vs signed
            for dt2 in 'bhilqp':
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
        # Signed integers and floats
        for dt1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            for dt2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
    def test_to_bool_scalar(self):
        assert_equal(bool(np.array([False])), False)
        assert_equal(bool(np.array([True])), True)
        assert_equal(bool(np.array([[42]])), True)
        # More than one element is ambiguous as a truth value.
        assert_raises(ValueError, bool, np.array([1, 2]))
        class NotConvertible:
            def __bool__(self):
                raise NotImplementedError
        assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
        assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
        if IS_PYSTON:
            pytest.skip("Pyston disables recursion checking")
        # A self-referential object array must hit the recursion limit
        # cleanly rather than crash with a C stack overflow.
        self_containing = np.array([None])
        self_containing[0] = self_containing
        try:
            Error = RecursionError
        except NameError:
            Error = RuntimeError  # python < 3.5
        assert_raises(Error, bool, self_containing)  # previously stack overflow
        self_containing[0] = None  # resolve circular reference
    def test_to_int_scalar(self):
        # gh-9972 means that these aren't always the same
        int_funcs = (int, lambda x: x.__int__())
        for int_func in int_funcs:
            assert_equal(int_func(np.array(0)), 0)
            assert_equal(int_func(np.array([1])), 1)
            assert_equal(int_func(np.array([[42]])), 42)
            assert_raises(TypeError, int_func, np.array([1, 2]))
            # gh-9972
            assert_equal(4, int_func(np.array('4')))
            assert_equal(5, int_func(np.bytes_(b'5')))
            assert_equal(6, int_func(np.unicode_(u'6')))
            # int() must honor __trunc__ on the wrapped object.
            class HasTrunc:
                def __trunc__(self):
                    return 3
            assert_equal(3, int_func(np.array(HasTrunc())))
            assert_equal(3, int_func(np.array([HasTrunc()])))
            class NotConvertible:
                def __int__(self):
                    raise NotImplementedError
            assert_raises(NotImplementedError,
                          int_func, np.array(NotConvertible()))
            assert_raises(NotImplementedError,
                          int_func, np.array([NotConvertible()]))
class TestWhere:
    """Behaviour of np.where across dtypes, strides, byte orders and shapes."""
    def test_basic(self):
        dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
               np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=bool)
            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            # Flip one entry so the expected result mixes both branches.
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            # Strided (including negative-stride) views must behave the same.
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
    def test_exotic(self):
        # object
        assert_array_equal(np.where(True, None, None), np.array(None))
        # zero sized
        m = np.array([], dtype=bool).reshape(0, 3)
        b = np.array([], dtype=np.float64).reshape(0, 3)
        assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
        # object cast
        d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
                      0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
                      1.267, 0.229, -1.39, 0.487])
        nan = float('NaN')
        e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
                      'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
                     dtype=object)
        m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
                      0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
        r = e[:]
        r[np.where(m)] = d[np.where(m)]
        assert_array_equal(np.where(m, d, e), r)
        r = e[:]
        r[np.where(~m)] = d[np.where(~m)]
        assert_array_equal(np.where(m, e, d), r)
        assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g required by pandas)
        d = np.array([1., 2.], dtype=np.float32)
        e = float('NaN')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('-Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        # also check upcast
        e = float(1e150)
        assert_equal(np.where(True, d, e).dtype, np.float64)
    def test_ndim(self):
        c = [True, False]
        a = np.zeros((2, 25))
        b = np.ones((2, 25))
        # Condition broadcast along the second axis.
        r = np.where(np.array(c)[:,np.newaxis], a, b)
        assert_array_equal(r[0], a[0])
        assert_array_equal(r[1], b[0])
        a = a.T
        b = b.T
        r = np.where(c, a, b)
        assert_array_equal(r[:,0], a[:,0])
        assert_array_equal(r[:,1], b[:,0])
    def test_dtype_mix(self):
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        a = np.uint32(1)
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)
        a = a.astype(np.float32)
        b = b.astype(np.int64)
        assert_equal(np.where(c, a, b), r)
        # non bool mask
        c = c.astype(int)
        c[c != 0] = 34242324
        assert_equal(np.where(c, a, b), r)
        # invert
        tmpmask = c != 0
        c[c == 0] = 41247212
        c[tmpmask] = 0
        assert_equal(np.where(c, b, a), r)
    def test_foreign(self):
        # Non-native byte orders must produce the same result.
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        a = np.ones(1, dtype='>i4')
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)
        b = b.astype('>f8')
        assert_equal(np.where(c, a, b), r)
        a = a.astype('<i4')
        assert_equal(np.where(c, a, b), r)
        c = c.astype('>i4')
        assert_equal(np.where(c, a, b), r)
    def test_error(self):
        # Shape mismatches between condition and branches must raise.
        c = [True, True]
        a = np.ones((4, 5))
        b = np.ones((5, 5))
        assert_raises(ValueError, np.where, c, a, a)
        assert_raises(ValueError, np.where, c[0], a, b)
    def test_string(self):
        # gh-4778 check strings are properly filled with nulls
        a = np.array("abc")
        b = np.array("x" * 753)
        assert_equal(np.where(True, a, b), "abc")
        assert_equal(np.where(False, b, a), "abc")
        # check native datatype sized strings
        a = np.array("abcd")
        b = np.array("x" * 8)
        assert_equal(np.where(True, a, b), "abcd")
        assert_equal(np.where(False, b, a), "abcd")
    def test_empty_result(self):
        # pass empty where result through an assignment which reads the data of
        # empty arrays, error detectable with valgrind, see gh-8922
        x = np.zeros((1, 1))
        ibad = np.vstack(np.where(x == 99.))
        assert_array_equal(ibad,
                           np.atleast_2d(np.array([[],[]], dtype=np.intp)))
    def test_largedim(self):
        # invalid read regression gh-9304
        shape = [10, 2, 3, 4, 5, 6]
        np.random.seed(2)
        array = np.random.rand(*shape)
        for i in range(10):
            benchmark = array.nonzero()
            result = array.nonzero()
            assert_array_equal(benchmark, result)
if not IS_PYPY:
    # sys.getsizeof() is not valid on PyPy
    class TestSizeOf:
        """sys.getsizeof() must include the array buffer and track resizes."""
        def test_empty_array(self):
            x = np.array([])
            assert_(sys.getsizeof(x) > 0)
        def check_array(self, dtype):
            # Helper: reported size must exceed the raw data payload.
            elem_size = dtype(0).itemsize
            for length in [10, 50, 100, 500]:
                x = np.arange(length, dtype=dtype)
                assert_(sys.getsizeof(x) > length * elem_size)
        def test_array_int32(self):
            self.check_array(np.int32)
        def test_array_int64(self):
            self.check_array(np.int64)
        def test_array_float32(self):
            self.check_array(np.float32)
        def test_array_float64(self):
            self.check_array(np.float64)
        def test_view(self):
            # A view does not own its data, so it must report less.
            d = np.ones(100)
            assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
        def test_reshape(self):
            d = np.ones(100)
            assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
        @_no_tracing
        def test_resize(self):
            # In-place resize must be reflected in the reported size.
            d = np.ones(100)
            old = sys.getsizeof(d)
            d.resize(50)
            assert_(old > sys.getsizeof(d))
            d.resize(150)
            assert_(old < sys.getsizeof(d))
        def test_error(self):
            d = np.ones(100)
            assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing:
    """ndarrays are mutable and therefore must not be hashable."""

    def test_arrays_not_hashable(self):
        arr = np.ones(3)
        assert_raises(TypeError, hash, arr)

    def test_collections_hashable(self):
        empty = np.array([])
        assert_(not isinstance(empty, collections.abc.Hashable))
class TestArrayPriority:
    # This will go away when __array_priority__ is settled, meanwhile
    # it serves to check unintended changes.
    op = operator
    binary_ops = [
        op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
        op.ge, op.lt, op.le, op.ne, op.eq
        ]
    # Foo/Bar are ndarray subclasses; Bar has the higher priority of the two.
    class Foo(np.ndarray):
        __array_priority__ = 100.
        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)
    class Bar(np.ndarray):
        __array_priority__ = 101.
        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)
    # Other is not an ndarray at all, but its priority out-ranks both, and
    # every binary operator returns a fresh Other instance.
    class Other:
        __array_priority__ = 1000.
        def _all(self, other):
            return self.__class__()
        __add__ = __radd__ = _all
        __sub__ = __rsub__ = _all
        __mul__ = __rmul__ = _all
        __pow__ = __rpow__ = _all
        __div__ = __rdiv__ = _all
        __mod__ = __rmod__ = _all
        __truediv__ = __rtruediv__ = _all
        __floordiv__ = __rfloordiv__ = _all
        __and__ = __rand__ = _all
        __xor__ = __rxor__ = _all
        __or__ = __ror__ = _all
        __lshift__ = __rlshift__ = _all
        __rshift__ = __rrshift__ = _all
        __eq__ = _all
        __ne__ = _all
        __gt__ = _all
        __ge__ = _all
        __lt__ = _all
        __le__ = _all
    def test_ndarray_subclass(self):
        # Higher-priority subclass must win regardless of operand order.
        a = np.array([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)
    def test_ndarray_other(self):
        a = np.array([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)
    def test_subclass_subclass(self):
        a = self.Foo([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)
    def test_subclass_other(self):
        a = self.Foo([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero:
    """Truthiness of single-element string arrays: entries that are empty or
    consist only of whitespace/NUL padding are falsey; a NUL followed by
    further characters keeps the entry truthy."""

    def test_empty_bstring_array_is_falsey(self):
        assert_(not np.array([''], dtype=str))

    def test_whitespace_bstring_array_is_falsey(self):
        arr = np.array(['spam'], dtype=str)
        arr[0] = ' \0\0'
        assert_(not arr)

    def test_all_null_bstring_array_is_falsey(self):
        arr = np.array(['spam'], dtype=str)
        arr[0] = '\0\0\0\0'
        assert_(not arr)

    def test_null_inside_bstring_array_is_truthy(self):
        arr = np.array(['spam'], dtype=str)
        arr[0] = ' \0 \0'
        assert_(arr)
class TestUnicodeEncoding:
    """
    Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping
    issues, etc
    """
    def test_round_trip(self):
        """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """
        # gh-15363
        arr = np.zeros(shape=(), dtype="U1")
        # Every assignable code point must read back unchanged.
        for i in range(1, sys.maxunicode + 1):
            expected = chr(i)
            arr[()] = expected
            assert arr[()] == expected
            assert arr.item() == expected
    def test_assign_scalar(self):
        # gh-3258
        l = np.array(['aa', 'bb'])
        l[:] = np.unicode_('cc')
        assert_equal(l, ['cc', 'cc'])
    def test_fill_scalar(self):
        # gh-7227
        l = np.array(['aa', 'bb'])
        l.fill(np.unicode_('cc'))
        assert_equal(l, ['cc', 'cc'])
class TestUnicodeArrayNonzero:
    """Truthiness of single-element unicode arrays; mirrors the bytestring
    cases above."""
    def test_empty_ustring_array_is_falsey(self):
        assert_(not np.array([''], dtype=np.unicode_))
    def test_whitespace_ustring_array_is_falsey(self):
        a = np.array(['eggs'], dtype=np.unicode_)
        a[0] = ' \0\0'
        assert_(not a)
    def test_all_null_ustring_array_is_falsey(self):
        a = np.array(['eggs'], dtype=np.unicode_)
        a[0] = '\0\0\0\0'
        assert_(not a)
    def test_null_inside_ustring_array_is_truthy(self):
        a = np.array(['eggs'], dtype=np.unicode_)
        a[0] = ' \0 \0'
        assert_(a)
class TestFormat:
    """format() support: 0-d arrays honour format specs, 1-d arrays only
    accept the empty spec."""

    def test_0d(self):
        arr = np.array(np.pi)
        assert_equal('{:0.3g}'.format(arr), '3.14')
        assert_equal('{:0.3g}'.format(arr[()]), '3.14')

    def test_1d_no_format(self):
        arr = np.array([np.pi])
        assert_equal('{}'.format(arr), str(arr))

    def test_1d_format(self):
        # until gh-5543, ensure that the behaviour matches what it used to be
        arr = np.array([np.pi])
        assert_raises(TypeError, '{:30}'.format, arr)
from numpy.testing import IS_PYPY
class TestCTypes:
    """The ndarray.ctypes attribute: availability, the fallback path when the
    ctypes module is missing, and reference-keeping of exported pointers."""
    def test_ctypes_is_available(self):
        test_arr = np.array([[1, 2, 3], [4, 5, 6]])
        assert_equal(ctypes, test_arr.ctypes._ctypes)
        assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
    def test_ctypes_is_not_available(self):
        from numpy.core import _internal
        # Simulate an interpreter without the ctypes module.
        _internal.ctypes = None
        try:
            test_arr = np.array([[1, 2, 3], [4, 5, 6]])
            assert_(isinstance(test_arr.ctypes._ctypes,
                               _internal._missing_ctypes))
            assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
        finally:
            # Restore the real module so later tests are unaffected.
            _internal.ctypes = ctypes
    def _make_readonly(x):
        x.flags.writeable = False
        return x
    @pytest.mark.parametrize('arr', [
        np.array([1, 2, 3]),
        np.array([['one', 'two'], ['three', 'four']]),
        np.array((1, 2), dtype='i4,i4'),
        np.zeros((2,), dtype=
            np.dtype(dict(
                formats=['<i4', '<i4'],
                names=['a', 'b'],
                offsets=[0, 2],
                itemsize=6
            ))
        ),
        np.array([None], dtype=object),
        np.array([]),
        np.empty((0, 0)),
        _make_readonly(np.array([1, 2, 3])),
    ], ids=[
        '1d',
        '2d',
        'structured',
        'overlapping',
        'object',
        'empty',
        'empty-2d',
        'readonly'
    ])
    def test_ctypes_data_as_holds_reference(self, arr):
        # gh-9647
        # create a copy to ensure that pytest does not mess with the refcounts
        arr = arr.copy()
        arr_ref = weakref.ref(arr)
        ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
        # `ctypes_ptr` should hold onto `arr`
        del arr
        break_cycles()
        assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
        # but when the `ctypes_ptr` object dies, so should `arr`
        del ctypes_ptr
        if IS_PYPY:
            # Pypy does not recycle arr objects immediately. Trigger gc to
            # release arr. Cpython uses refcounts. An explicit call to gc
            # should not be needed here.
            break_cycles()
        assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
    def test_ctypes_as_parameter_holds_reference(self):
        arr = np.array([None]).copy()
        arr_ref = weakref.ref(arr)
        ctypes_ptr = arr.ctypes._as_parameter_
        # `ctypes_ptr` should hold onto `arr`
        del arr
        break_cycles()
        assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
        # but when the `ctypes_ptr` object dies, so should `arr`
        del ctypes_ptr
        if IS_PYPY:
            break_cycles()
        assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
class TestWritebackIfCopy:
    # all these tests use the WRITEBACKIFCOPY mechanism
    def test_argmax_with_out(self):
        # An 'i2' out buffer forces a cast, exercising the writeback path.
        mat = np.eye(5)
        out = np.empty(5, dtype='i2')
        res = np.argmax(mat, 0, out=out)
        assert_equal(res, range(5))
    def test_argmin_with_out(self):
        mat = -np.eye(5)
        out = np.empty(5, dtype='i2')
        res = np.argmin(mat, 0, out=out)
        assert_equal(res, range(5))
    def test_insert_noncontiguous(self):
        a = np.arange(6).reshape(2,3).T  # force non-c-contiguous
        # uses arr_insert
        np.place(a, a>2, [44, 55])
        assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
        # hit one of the failing paths
        assert_raises(ValueError, np.place, a, a>20, [])
    def test_put_noncontiguous(self):
        a = np.arange(6).reshape(2,3).T  # force non-c-contiguous
        np.put(a, [0, 2], [44, 55])
        assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
    def test_putmask_noncontiguous(self):
        a = np.arange(6).reshape(2,3).T  # force non-c-contiguous
        # uses arr_putmask
        np.putmask(a, a>2, a**2)
        assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
    def test_take_mode_raise(self):
        a = np.arange(6, dtype='int')
        out = np.empty(2, dtype='int')
        np.take(a, [0, 2], out=out, mode='raise')
        assert_equal(out, np.array([0, 2]))
    def test_choose_mod_raise(self):
        a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
        out = np.empty((3,3), dtype='int')
        choices = [-10, 10]
        np.choose(a, choices, out=out, mode='raise')
        assert_equal(out, np.array([[ 10, -10,  10],
                                    [-10,  10, -10],
                                    [ 10, -10,  10]]))
    def test_flatiter__array__(self):
        a = np.arange(9).reshape(3,3)
        b = a.T.flat
        c = b.__array__()
        # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
        del c
    def test_dot_out(self):
        # if HAVE_CBLAS, will use WRITEBACKIFCOPY
        a = np.arange(9, dtype=float).reshape(3,3)
        b = np.dot(a, a, out=a)
        assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
    def test_view_assign(self):
        from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
        arr = np.arange(9).reshape(3, 3).T
        arr_wb = npy_create_writebackifcopy(arr)
        assert_(arr_wb.flags.writebackifcopy)
        assert_(arr_wb.base is arr)
        arr_wb[...] = -100
        npy_resolve(arr_wb)
        # arr changes after resolve, even though we assigned to arr_wb
        assert_equal(arr, -100)
        # after resolve, the two arrays no longer reference each other
        assert_(arr_wb.ctypes.data != 0)
        assert_equal(arr_wb.base, None)
        # assigning to arr_wb does not get transferred to arr
        arr_wb[...] = 100
        assert_equal(arr, -100)
    @pytest.mark.leaks_references(
        reason="increments self in dealloc; ignore since deprecated path.")
    def test_dealloc_warning(self):
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning)
            arr = np.arange(9).reshape(3, 3)
            v = arr.T
            _multiarray_tests.npy_abuse_writebackifcopy(v)
            assert len(sup.log) == 1
    def test_view_discard_refcount(self):
        from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
        arr = np.arange(9).reshape(3, 3).T
        orig = arr.copy()
        if HAS_REFCOUNT:
            arr_cnt = sys.getrefcount(arr)
        arr_wb = npy_create_writebackifcopy(arr)
        assert_(arr_wb.flags.writebackifcopy)
        assert_(arr_wb.base is arr)
        arr_wb[...] = -100
        npy_discard(arr_wb)
        # arr remains unchanged after discard
        assert_equal(arr, orig)
        # after discard, the two arrays no longer reference each other
        assert_(arr_wb.ctypes.data != 0)
        assert_equal(arr_wb.base, None)
        if HAS_REFCOUNT:
            assert_equal(arr_cnt, sys.getrefcount(arr))
        # assigning to arr_wb does not get transferred to arr
        arr_wb[...] = 100
        assert_equal(arr, orig)
class TestArange:
    """Argument validation and keyword handling for np.arange."""

    def test_infinite(self):
        assert_raises_regex(
            ValueError, "size exceeded",
            np.arange, 0, np.inf
        )

    def test_nan_step(self):
        assert_raises_regex(
            ValueError, "cannot compute length",
            np.arange, 0, 1, np.nan
        )

    def test_zero_step(self):
        # Both populated and empty ranges must reject a zero step.
        for start, stop, step in [(0, 10, 0), (0.0, 10.0, 0.0),
                                  (0, 0, 0), (0.0, 0.0, 0.0)]:
            assert_raises(ZeroDivisionError, np.arange, start, stop, step)

    def test_require_range(self):
        # A stop value is mandatory; any call without one is an error.
        for bad_kwargs in ({}, dict(step=3), dict(dtype='int64'),
                           dict(start=4)):
            assert_raises(TypeError, np.arange, **bad_kwargs)

    def test_start_stop_kwarg(self):
        stop_only = np.arange(stop=3)
        zero_to_three = np.arange(start=0, stop=3)
        three_to_nine = np.arange(start=3, stop=9)
        assert len(stop_only) == 3
        assert len(zero_to_three) == 3
        assert len(three_to_nine) == 6
        assert_array_equal(stop_only, zero_to_three)
class TestArrayFinalize:
    """ Tests __array_finalize__ """
    def test_receives_base(self):
        # gh-11237
        class SavesBase(np.ndarray):
            def __array_finalize__(self, obj):
                self.saved_base = self.base
        a = np.array(1).view(SavesBase)
        assert_(a.saved_base is a.base)
    def test_bad_finalize(self):
        # Even looking up the attribute may raise; the error must propagate.
        class BadAttributeArray(np.ndarray):
            @property
            def __array_finalize__(self):
                raise RuntimeError("boohoo!")
        with pytest.raises(RuntimeError, match="boohoo!"):
            np.arange(10).view(BadAttributeArray)
    def test_lifetime_on_error(self):
        # gh-11237
        class RaisesInFinalize(np.ndarray):
            def __array_finalize__(self, obj):
                # crash, but keep this object alive
                raise Exception(self)
        # a plain object can't be weakref'd
        class Dummy: pass
        # get a weak reference to an object within an array
        obj_arr = np.array(Dummy())
        obj_ref = weakref.ref(obj_arr[()])
        # get an array that crashed in __array_finalize__
        with assert_raises(Exception) as e:
            obj_arr.view(RaisesInFinalize)
        obj_subarray = e.exception.args[0]
        del e
        assert_(isinstance(obj_subarray, RaisesInFinalize))
        # reference should still be held by obj_arr
        break_cycles()
        assert_(obj_ref() is not None, "object should not already be dead")
        del obj_arr
        break_cycles()
        assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
        del obj_subarray
        break_cycles()
        assert_(obj_ref() is None, "no references should remain")
def test_orderconverter_with_nonASCII_unicode_ordering():
    # gh-7475: a non-ASCII order character must raise cleanly.
    arr = np.arange(5)
    assert_raises(ValueError, arr.flatten, order=u'\xe2')
def test_equal_override():
    # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
    # did not respect overrides with __array_priority__ or __array_ufunc__.
    # The PR fixed this for __array_priority__ and __array_ufunc__ = None.
    class MyAlwaysEqual:
        def __eq__(self, other):
            return "eq"

        def __ne__(self, other):
            return "ne"

    class MyAlwaysEqualOld(MyAlwaysEqual):
        __array_priority__ = 10000

    class MyAlwaysEqualNew(MyAlwaysEqual):
        __array_ufunc__ = None

    structured = np.array([(0, 1), (2, 3)], dtype='i4,i4')
    for override_cls in (MyAlwaysEqualOld, MyAlwaysEqualNew):
        override = override_cls()
        # The override must win on both sides of == and !=.
        assert_equal(override == structured, 'eq')
        assert_equal(structured == override, 'eq')
        assert_equal(override != structured, 'ne')
        assert_equal(structured != override, 'ne')
@pytest.mark.parametrize(
    ["fun", "npfun"],
    [
        (_multiarray_tests.npy_cabs, np.absolute),
        (_multiarray_tests.npy_carg, np.angle)
    ]
)
@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
def test_npymath_complex(fun, npfun, x, y, test_dtype):
    # Smoketest npymath functions
    # Compare the C npymath implementation against the numpy equivalent for
    # every combination of finite/infinite/NaN real and imaginary parts.
    z = test_dtype(complex(x, y))
    got = fun(z)
    expected = npfun(z)
    assert_allclose(got, expected)
def test_npymath_real():
    # Smoketest npymath functions
    # Each C wrapper must agree with the corresponding numpy ufunc for
    # finite, infinite and NaN inputs across the real float types.
    from numpy.core._multiarray_tests import (
        npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
    funcs = {npy_log10: np.log10,
             npy_cosh: np.cosh,
             npy_sinh: np.sinh,
             npy_tan: np.tan,
             npy_tanh: np.tanh}
    vals = (1, np.inf, -np.inf, np.nan)
    types = (np.float32, np.float64, np.longdouble)
    with np.errstate(all='ignore'):
        for fun, npfun in funcs.items():
            for x, t in itertools.product(vals, types):
                z = t(x)
                got = fun(z)
                expected = npfun(z)
                assert_allclose(got, expected)
def test_uintalignment_and_alignment():
    # alignment code needs to satisfy these requirements:
    #  1. numpy structs match C struct layout
    #  2. ufuncs/casting is safe wrt to aligned access
    #  3. copy code is safe wrt to "uint alidned" access
    #
    # Complex types are the main problem, whose alignment may not be the same
    # as their "uint alignment".
    #
    # This test might only fail on certain platforms, where uint64 alignment is
    # not equal to complex64 alignment. The second 2 tests will only fail
    # for DEBUG=1.
    d1 = np.dtype('u1,c8', align=True)
    d2 = np.dtype('u4,c8', align=True)
    d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
    # Aligned dtypes must yield aligned field views; the packed one must not.
    assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
    assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
    assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
    # check that C struct matches numpy struct size
    s = _multiarray_tests.get_struct_alignments()
    for d, (alignment, size) in zip([d1,d2,d3], s):
        assert_equal(d.alignment, alignment)
        assert_equal(d.itemsize, size)
    # check that ufuncs don't complain in debug mode
    # (this is probably OK if the aligned flag is true above)
    src = np.zeros((2,2), dtype=d1)['f1']  # 4-byte aligned, often
    np.exp(src)  # assert fails?
    # check that copy code doesn't complain in debug mode
    dst = np.zeros((2,2), dtype='c8')
    dst[:,1] = src[:,1]  # assert in lowlevel_strided_loops fails?
class TestAlignment:
    # adapted from scipy._lib.tests.test__util.test__aligned_zeros
    # Checks that unusual memory alignments don't trip up numpy.
    # In particular, check RELAXED_STRIDES don't trip alignment assertions in
    # NDEBUG mode for size-0 arrays (gh-12503)
    def check(self, shape, dtype, order, align):
        # Helper: allocate with a forced alignment and verify the resulting
        # array's address, shape, dtype and contiguity flags.
        err_msg = repr((shape, dtype, order, align))
        x = _aligned_zeros(shape, dtype, order, align=align)
        if align is None:
            align = np.dtype(dtype).alignment
        assert_equal(x.__array_interface__['data'][0] % align, 0)
        if hasattr(shape, '__len__'):
            assert_equal(x.shape, shape, err_msg)
        else:
            assert_equal(x.shape, (shape,), err_msg)
        assert_equal(x.dtype, dtype)
        if order == "C":
            assert_(x.flags.c_contiguous, err_msg)
        elif order == "F":
            if x.size > 0:
                assert_(x.flags.f_contiguous, err_msg)
        elif order is None:
            assert_(x.flags.c_contiguous, err_msg)
        else:
            raise ValueError()
    def test_various_alignments(self):
        for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
            for n in [0, 1, 3, 11]:
                for order in ["C", "F", None]:
                    for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
                        if dtype == 'O':
                            # object dtype can't be misaligned
                            continue
                        for shape in [n, (1, 2, 3, n)]:
                            self.check(shape, np.dtype(dtype), order, align)
    def test_strided_loop_alignments(self):
        # particularly test that complex64 and float128 use right alignment
        # code-paths, since these are particularly problematic. It is useful to
        # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.
        for align in [1, 2, 4, 8, 12, 16, None]:
            xf64 = _aligned_zeros(3, np.float64)
            xc64 = _aligned_zeros(3, np.complex64, align=align)
            xf128 = _aligned_zeros(3, np.longdouble, align=align)
            # test casting, both to and from misaligned
            with suppress_warnings() as sup:
                sup.filter(np.ComplexWarning, "Casting complex values")
                xc64.astype('f8')
            xf64.astype(np.complex64)
            test = xc64 + xf64
            xf128.astype('f8')
            xf64.astype(np.longdouble)
            test = xf128 + xf64
            test = xf128 + xc64
            # test copy, both to and from misaligned
            # contig copy
            xf64[:] = xf64.copy()
            xc64[:] = xc64.copy()
            xf128[:] = xf128.copy()
            # strided copy
            xf64[::2] = xf64[::2].copy()
            xc64[::2] = xc64[::2].copy()
            xf128[::2] = xf128[::2].copy()
def test_getfield():
    """getfield must view the requested bytes at a given offset and reject
    out-of-range offsets or field types wider than the dtype."""
    a = np.arange(32, dtype='uint16')
    # Pick the byte index of the low-order byte for this platform.
    if sys.byteorder == 'little':
        i = 0
        j = 1
    else:
        i = 1
        j = 0
    # All values fit in one byte, so the low byte equals the value and the
    # high byte is zero.
    b = a.getfield('int8', i)
    assert_equal(b, a)
    b = a.getfield('int8', j)
    assert_equal(b, 0)
    pytest.raises(ValueError, a.getfield, 'uint8', -1)
    pytest.raises(ValueError, a.getfield, 'uint8', 16)
    pytest.raises(ValueError, a.getfield, 'uint64', 0)
| simongibbons/numpy | numpy/core/tests/test_multiarray.py | Python | bsd-3-clause | 339,352 |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a simple "negative compile" test for C++ on linux.
Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this python script to
invoke the compiler on a source file and assert that compilation fails.
For more info, see:
http://dev.chromium.org/developers/testing/no-compile-tests
"""
from __future__ import print_function
import StringIO
import ast
import os
import re
import select
import subprocess
import sys
import tempfile
import time
# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
# #ifdef NCTEST_NAME_OF_TEST // [r'expected output']
# #if defined(NCTEST_NAME_OF_TEST) // [r'expected output']
# #if NCTEST_NAME_OF_TEST // [r'expected output']
# #elif NCTEST_NAME_OF_TEST // [r'expected output']
# #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
# Matches and removes the defined() preprocesor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
# #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
# Used to grab the expectation from comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
"""
# The log message on a test completion.
LOG_TEMPLATE = """
TEST(%s, %s) took %f secs. Started at %f, ended at %f.
"""
# The GUnit test function to output for a successful or disabled test.
GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""
# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 120
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
def ValidateInput(compiler, parallelism, sourcefile_path, cflags,
                  resultfile_path):
  """Sanity-check the driver arguments before doing any real work."""
  assert os.path.isfile(compiler)
  assert parallelism >= 1
  assert type(sourcefile_path) is str
  assert type(resultfile_path) is str
  assert type(cflags) is list
  for single_flag in cflags:
    assert type(single_flag) is str
def ParseExpectation(expectation_string):
  """Extracts the expectation definition from an ifdef's trailing comment.

  See the comment on NCTEST_CONFIG_RE for examples of the format parsed here.

  Args:
    expectation_string: A string like "// [r'some_regex']"

  Returns:
    A list of compiled regular expressions describing every acceptable
    compiler output. An empty list means any output is considered valid.
  """
  assert expectation_string is not None
  match = EXTRACT_EXPECTATION_RE.match(expectation_string)
  assert match
  raw_patterns = ast.literal_eval(match.group(1))
  assert type(raw_patterns) is list
  compiled_patterns = []
  for pattern_str in raw_patterns:
    assert type(pattern_str) is str
    compiled_patterns.append(re.compile(pattern_str))
  return compiled_patterns
def ExtractTestConfigs(sourcefile_path, suite_name):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with NCTEST_CONFIG_RE to find all ifdefs that look like
  they demark one no-compile test and try to extract the test configuration
  from that.

  Args:
    sourcefile_path: The path to the source file.
    suite_name: The name of the test suite.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'NCTEST_NAME'
        suite_name: 'SOURCE_FILE_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler. If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded. If the list is empty, then we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is actually None, then this specifies a compiler sanity check test, which
    should expect a SUCCESSFUL compilation. Note the sanity-check config is
    not extracted here; main() appends it explicitly.
  """
  test_configs = []
  # Use a context manager so the file handle is closed even if a malformed
  # expectation makes ParseExpectation() raise.
  with open(sourcefile_path, 'r') as sourcefile:
    for line in sourcefile:
      match_result = NCTEST_CONFIG_RE.match(line)
      if not match_result:
        continue
      groups = match_result.groups()

      # Grab the name and remove the defined() predicate if there is one.
      name = groups[0]
      strip_result = STRIP_DEFINED_RE.match(name)
      if strip_result:
        name = strip_result.group(1)

      # Read expectations if there are any.
      test_configs.append({'name': name,
                           'suite_name': suite_name,
                           'expectations': ParseExpectation(groups[1])})
  return test_configs
def StartTest(compiler, sourcefile_path, tempfile_dir, cflags, config):
  """Start one negative compile test.

  Args:
    compiler: The path to the compiler binary to invoke.
    sourcefile_path: The path to the source file.
    tempfile_dir: A directory to store temporary data from tests.
    cflags: An array of strings with all the CFLAGS to give to gcc.
    config: A dictionary describing the test.  See ExtractTestConfigs
      for a description of the config format.

  Returns:
    A dictionary containing all the information about the started test. The
    fields in the dictionary are as follows:
      { 'proc': A subprocess object representing the compiler run.
        'cmdline': The executed command line.
        'stdout': Temporary file capturing the compiler's stdout.
        'stderr': Temporary file capturing the compiler's stderr.
        'name': The name of the test.
        'suite_name': The suite name to use when generating the gunit test
                      result.
        'terminate_timeout': The timestamp in seconds since the epoch after
                             which the test should be terminated.
        'kill_timeout': The timestamp in seconds since the epoch after which
                        the test should be given a hard kill signal.
        'started_at': A timestamp in seconds since the epoch for when this test
                      was started.
        'aborted_at': A timestamp in seconds since the epoch for when this test
                      was aborted.  If the test completed successfully,
                      this value is 0.
        'finished_at': A timestamp in seconds since the epoch for when this
                       test was successfully complete.  If the test is aborted,
                       or running, this value is 0.
        'expectations': A dictionary with the test expectations. See
                        ParseExpectation() for the structure.
        }
  """
  cmdline = [compiler]
  cmdline.extend(cflags)
  name = config['name']
  expectations = config['expectations']
  # expectations is None only for the compiler sanity-check test; that one
  # must compile as-is, so its NCTEST macro is deliberately left undefined.
  if expectations is not None:
    cmdline.append('-D%s' % name)
  # Compile only (-c), discard the object file, and force C++ mode since the
  # .nc extension would not be recognized by the compiler otherwise.
  cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++',
                  sourcefile_path])
  test_stdout = tempfile.TemporaryFile(dir=tempfile_dir)
  test_stderr = tempfile.TemporaryFile(dir=tempfile_dir)
  process = subprocess.Popen(cmdline, stdout=test_stdout, stderr=test_stderr)
  now = time.time()
  return {'proc': process,
          'cmdline': ' '.join(cmdline),
          'stdout': test_stdout,
          'stderr': test_stderr,
          'name': name,
          'suite_name': config['suite_name'],
          'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
          'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
          'started_at': now,
          'aborted_at': 0,
          'finished_at': 0,
          'expectations': expectations}
def PassTest(resultfile, resultlog, test):
  """Records a passing test (or a disabled configuration) in the outputs.

  Args:
    resultfile: File object for .cc file that results are written to.
    resultlog: File object for the log file.
    test: An instance of the dictionary returned by StartTest(), or a raw
      configuration from ExtractTestConfigs() for disabled tests.
  """
  suite = test['suite_name']
  name = test['name']
  resultfile.write(GUNIT_TEMPLATE % (suite, name))

  # Disabled configurations are never started, so they carry no timing data.
  if 'started_at' in test:
    elapsed = test['finished_at'] - test['started_at']
    resultlog.write(LOG_TEMPLATE % (suite, name, elapsed,
                                    test['started_at'], test['finished_at']))
def FailTest(resultfile, test, error, stdout=None, stderr=None):
  """Logs the result of a failed test started by StartTest().

  Each written line is a #error directive so that compiling the result file
  surfaces the failure with its captured compiler output.

  Args:
    resultfile: File object for .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest().
    error: The printable reason for the failure.
    stdout: The test's output to stdout.
    stderr: The test's output to stderr.
  """
  resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
  resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
  if stdout and len(stdout) != 0:
    resultfile.write('#error "%s stdout:"\n' % test['name'])
    for line in stdout.split('\n'):
      # Match the stderr formatting below; a previous version appended a
      # stray trailing colon to every stdout line.
      resultfile.write('#error " %s"\n' % line)

  if stderr and len(stderr) != 0:
    resultfile.write('#error "%s stderr:"\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error " %s"\n' % line)
  resultfile.write('\n')
def WriteStats(resultlog, suite_name, timings):
  """Logs the performance timings for each stage of the script.

  Args:
    resultlog: File object for the log file.
    suite_name: The name of the GUnit suite this test belongs to.
    timings: Dictionary with timestamps for each stage of the script run.
  """
  stats_template = """
TEST(%s): Started %f, Ended %f, Total %fs, Extract %fs, Compile %fs, Process %fs
"""
  started = timings['started']
  ended = timings['results_processed']
  extract_secs = timings['extract_done'] - started
  compile_secs = timings['compile_done'] - timings['extract_done']
  process_secs = ended - timings['compile_done']
  resultlog.write(stats_template % (
      suite_name, started, ended, ended - started,
      extract_secs, compile_secs, process_secs))
def ExtractTestOutputAndCleanup(test):
  """Reads the test's output from its temp files and deletes them.

  Args:
    test: An instance of the dictionary returned by StartTest().

  Returns:
    A tuple (stdout, stderr) with the captured contents of each stream.
    (The previous docstring claimed (stderr, stdout); index 0 has always
    been stdout, matching every call site.)
  """
  outputs = [None, None]
  for i, stream_name in ((0, "stdout"), (1, "stderr")):
    stream = test[stream_name]
    stream.seek(0)
    outputs[i] = stream.read()
    stream.close()

  # Return a tuple so the result matches the documented contract; callers
  # only ever unpack it, so this is backward-compatible.
  return tuple(outputs)
def ProcessTestResult(resultfile, resultlog, test):
  """Interprets and logs the result of a test started by StartTest().

  Args:
    resultfile: File object for .cc file that results are written to.
    resultlog: File object for the log file.
    test: The dictionary from StartTest() to process.
  """
  proc = test['proc']
  proc.wait()
  stdout, stderr = ExtractTestOutputAndCleanup(test)

  # A non-zero abort timestamp means the compile was timed out and signaled.
  if test['aborted_at'] != 0:
    FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
             (test['started_at'], test['aborted_at']))
    return

  # A no-compile test is expected to fail compilation; succeeding is an error.
  if proc.poll() == 0:
    FailTest(resultfile, test,
             'Unexpected successful compilation.',
             stdout, stderr)
    return

  # With no expectations, any failed compile counts as a pass.
  expectations = test['expectations']
  if len(expectations) == 0:
    PassTest(resultfile, resultlog, test)
    return

  # Otherwise at least one expectation must match stdout or stderr.
  for regexp in expectations:
    if (regexp.search(stdout) is not None or
        regexp.search(stderr) is not None):
      PassTest(resultfile, resultlog, test)
      return

  expectation_str = ', '.join(
      ["r'%s'" % regexp.pattern for regexp in expectations])
  FailTest(resultfile, test,
           'Expectations [%s] did not match output.' % expectation_str,
           stdout, stderr)
def CompleteAtLeastOneTest(executing_tests):
  """Blocks until at least one task is removed from executing_tests.

  This function removes completed tests from executing_tests, logging failures
  and output. If no tests can be removed, it will enter a poll-loop until one
  test finishes or times out. On a timeout, this function is responsible for
  terminating the process in the appropriate fashion.

  Args:
    executing_tests: A dict mapping a string containing the test name to the
      test dict return from StartTest().

  Returns:
    A list of tests that have finished.
  """
  finished_tests = []
  busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while len(finished_tests) == 0:
    # If we don't make progress for too long, assume the code is just dead.
    assert busy_loop_timeout > time.time()

    # Select on the output files to block until we have something to
    # do. We ignore the return value from select and just poll all
    # processes.
    read_set = []
    for test in executing_tests.values():
      read_set.extend([test['stdout'], test['stderr']])
    select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)

    # Now attempt to process results.
    now = time.time()
    for test in executing_tests.values():
      proc = test['proc']
      if proc.poll() is not None:
        test['finished_at'] = now
        finished_tests.append(test)
      elif test['kill_timeout'] < now:
        # The kill timeout is always later than the terminate timeout (see
        # StartTest), so it must be checked first. With the terminate check
        # first, this branch was unreachable and a process that ignored the
        # terminate signal would never be hard-killed.
        proc.kill()
        test['aborted_at'] = now
      elif test['terminate_timeout'] < now:
        proc.terminate()
        test['aborted_at'] = now

    if len(finished_tests) == 0:
      # We had output from some process but no process had
      # finished. To avoid busy looping while waiting for a process to
      # finish, insert a small 100 ms delay here.
      time.sleep(0.1)

  for test in finished_tests:
    del executing_tests[test['name']]
  return finished_tests
def main():
  """Driver entry point: runs every no-compile test found in a source file.

  Expected argv:
    <compiler> <parallelism> <sourcefile> <resultfile> -- <cflags...>

  Writes <resultfile> (a compilable .cc with one gtest per test) on success
  and <resultfile>.log with timings; exits with the sanity compile's return
  code.
  """
  if len(sys.argv) < 6 or sys.argv[5] != '--':
    print('Usage: %s <compiler> <parallelism> <sourcefile> <resultfile> '
          '-- <cflags...>' % sys.argv[0])
    sys.exit(1)
  # Force us into the "C" locale so the compiler doesn't localize its output.
  # In particular, this stops gcc from using smart quotes when in english UTF-8
  # locales. This makes the expectation writing much easier.
  os.environ['LC_ALL'] = 'C'
  compiler = sys.argv[1]
  parallelism = int(sys.argv[2])
  sourcefile_path = sys.argv[3]
  resultfile_path = sys.argv[4]
  cflags = sys.argv[6:]
  timings = {'started': time.time()}
  ValidateInput(compiler, parallelism, sourcefile_path, cflags, resultfile_path)
  # Convert filename from underscores to CamelCase.
  words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
  words = [w.capitalize() for w in words]
  suite_name = 'NoCompile' + ''.join(words)
  test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
  timings['extract_done'] = time.time()
  # Buffer all output in memory; the files on disk are only written at the
  # end, once the sanity compile's outcome is known.
  resultfile = StringIO.StringIO()
  resultlog = StringIO.StringIO()
  resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
  # Run the no-compile tests, but ensure we do not run more than |parallelism|
  # tests at once.
  timings['header_written'] = time.time()
  executing_tests = {}
  finished_tests = []
  # Emit a depfile (-MMD/-MF/-MT) alongside the result so the build system can
  # track header dependencies of the .nc source.
  cflags.extend(['-MMD', '-MF', resultfile_path + '.d', '-MT', resultfile_path])
  # The sanity test (expectations=None) must compile SUCCESSFULLY; it guards
  # against a misconfigured compiler making every no-compile test "pass".
  test = StartTest(
      compiler,
      sourcefile_path,
      os.path.dirname(resultfile_path),
      cflags,
      { 'name': 'NCTEST_SANITY',
        'suite_name': suite_name,
        'expectations': None,
      })
  executing_tests[test['name']] = test
  for config in test_configs:
    # CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
    # acts as a semaphore. We cannot use threads + a real semaphore because
    # subprocess forks, which can cause all sorts of hilarity with threads.
    if len(executing_tests) >= parallelism:
      finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
    if config['name'].startswith('DISABLED_'):
      PassTest(resultfile, resultlog, config)
    else:
      test = StartTest(compiler, sourcefile_path,
                       os.path.dirname(resultfile_path), cflags, config)
      assert test['name'] not in executing_tests
      executing_tests[test['name']] = test
  # If there are no more test to start, we still need to drain the running
  # ones.
  while len(executing_tests) > 0:
    finished_tests.extend(CompleteAtLeastOneTest(executing_tests))
  timings['compile_done'] = time.time()
  finished_tests = sorted(finished_tests, key=lambda test: test['name'])
  for test in finished_tests:
    if test['name'] == 'NCTEST_SANITY':
      test['proc'].wait()
      (stdout, stderr) = ExtractTestOutputAndCleanup(test)
      # return_code is read after this loop; the sanity test is always started
      # above, so this binding is guaranteed to happen exactly once.
      return_code = test['proc'].returncode
      if return_code != 0:
        sys.stdout.write(stdout)
        sys.stderr.write(stderr)
      continue
    ProcessTestResult(resultfile, resultlog, test)
  timings['results_processed'] = time.time()
  WriteStats(resultlog, suite_name, timings)
  with open(resultfile_path + '.log', 'w') as fd:
    fd.write(resultlog.getvalue())
  # Only emit the compilable result file when the sanity compile succeeded.
  if return_code == 0:
    with open(resultfile_path, 'w') as fd:
      fd.write(resultfile.getvalue())
  resultfile.close()
  if return_code != 0:
    print("No-compile driver failure with return_code %d. Result log:" %
          return_code)
    print(resultlog.getvalue())
  sys.exit(return_code)
# Script entry point: run the no-compile driver when executed directly.
if __name__ == '__main__':
  main()
| endlessm/chromium-browser | tools/nocompile_driver.py | Python | bsd-3-clause | 18,515 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013, LastSeal S.A.
Copyright (c) 2011-2012, Joaquin G. Duo
All rights reserved.
This code is distributed under BSD 3-clause License.
For details check the LICENSE file in the root of the project.
'''
class FrameworkObject(object):
    """Root base class for framework objects.
    Defines no behavior of its own; it exists so framework types share a
    common ancestor.
    """
def smokeTestModule():
    """Smoke test: instantiating FrameworkObject proves the module loads."""
    FrameworkObject()
# Allow running this module directly as a quick smoke test.
if __name__ == '__main__':
    smokeTestModule()
| joaduo/python-simplerpc | simplerpc/common/abstract/FrameworkObject.py | Python | bsd-3-clause | 383 |
import warnings
from collections import namedtuple
from MySQLdb.constants import FIELD_TYPE
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
# Deliberately shadow the imported FieldInfo: extend it with MySQL's "extra"
# column attribute (e.g. "auto_increment") as reported by information_schema.
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra',))
# Row layout of the information_schema.columns query issued in
# get_table_description() below.
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default')
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """MySQL implementation of Django's database introspection.
    Maps MySQL column type codes back to Django field class names and reads
    table, column, relation, index, and constraint metadata, mostly from the
    information_schema database and SHOW commands.
    """
    # Mapping of MySQLdb FIELD_TYPE codes to Django model field class names.
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'BigIntegerField',
        FIELD_TYPE.SHORT: 'SmallIntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIME: 'TimeField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }
    def get_field_type(self, data_type, description):
        """Return the Django field class name for a MySQL column, promoting
        integer columns to (Big)AutoField when the column's "extra" attribute
        reports auto_increment.
        """
        field_type = super().get_field_type(data_type, description)
        if 'auto_increment' in description.extra:
            if field_type == 'IntegerField':
                return 'AutoField'
            elif field_type == 'BigIntegerField':
                return 'BigAutoField'
        return field_type
    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        cursor.execute("SHOW FULL TABLES")
        # SHOW FULL TABLES yields (name, table_type) rows; map the type to
        # 't' (table) or 'v' (view) as TableInfo expects.
        return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
                for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # information_schema database gives more accurate results for some figures:
        # - varchar length returned by cursor.description is an internal length,
        #   not visible length (#5725)
        # - precision and scale (for decimal fields) (#5014)
        # - auto_increment is not available in cursor.description
        cursor.execute("""
            SELECT column_name, data_type, character_maximum_length, numeric_precision,
                   numeric_scale, extra, column_default
            FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
        field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
        # Fetch one row so cursor.description is populated with driver-level
        # column metadata; the values themselves are unused.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        def to_int(i):
            # information_schema may return NULL; keep None as-is.
            return int(i) if i is not None else i
        fields = []
        for line in cursor.description:
            col_name = force_text(line[0])
            # Merge driver metadata (cursor.description) with the more
            # accurate information_schema values gathered above.
            fields.append(
                FieldInfo(*(
                    (col_name,) +
                    line[1:3] +
                    (
                        to_int(field_info[col_name].max_len) or line[3],
                        to_int(field_info[col_name].num_prec) or line[4],
                        to_int(field_info[col_name].num_scale) or line[5],
                        line[6],
                        field_info[col_name].column_default,
                        field_info[col_name].extra,
                    )
                ))
            )
        return fields
    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        constraints = self.get_key_columns(cursor, table_name)
        relations = {}
        for my_fieldname, other_table, other_field in constraints:
            relations[my_fieldname] = (other_field, other_table)
        return relations
    def get_key_columns(self, cursor, table_name):
        """
        Return a list of (column_name, referenced_table_name, referenced_column_name)
        for all key columns in the given table.
        """
        key_columns = []
        cursor.execute("""
            SELECT column_name, referenced_table_name, referenced_column_name
            FROM information_schema.key_column_usage
            WHERE table_name = %s
                AND table_schema = DATABASE()
                AND referenced_table_name IS NOT NULL
                AND referenced_column_name IS NOT NULL""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns
    def get_indexes(self, cursor, table_name):
        """Deprecated single-column index introspection; superseded by
        get_constraints().
        """
        warnings.warn(
            "get_indexes() is deprecated in favor of get_constraints().",
            RemovedInDjango21Warning, stacklevel=2
        )
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        # Do a two-pass search for indexes: on first pass check which indexes
        # are multicolumn, on second pass check which single-column indexes
        # are present.
        rows = list(cursor.fetchall())
        # row[3] is SHOW INDEX's Seq_in_index; > 1 means the index spans
        # multiple columns and row[2] (Key_name) is recorded for exclusion.
        multicol_indexes = set()
        for row in rows:
            if row[3] > 1:
                multicol_indexes.add(row[2])
        indexes = {}
        for row in rows:
            if row[2] in multicol_indexes:
                continue
            # row[4] is the Column_name; row[1] is Non_unique.
            if row[4] not in indexes:
                indexes[row[4]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[2] == 'PRIMARY':
                indexes[row[4]]['primary_key'] = True
            if not row[1]:
                indexes[row[4]]['unique'] = True
        return indexes
    def get_storage_engine(self, cursor, table_name):
        """
        Retrieve the storage engine for a given table. Return the default
        storage engine if the table doesn't exist.
        """
        cursor.execute(
            "SELECT engine "
            "FROM information_schema.tables "
            "WHERE table_name = %s", [table_name])
        result = cursor.fetchone()
        if not result:
            return self.connection.features._mysql_storage_engine
        return result[0]
    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns.
        """
        constraints = {}
        # Get the actual constraint names and columns
        name_query = """
            SELECT kc.`constraint_name`, kc.`column_name`,
                kc.`referenced_table_name`, kc.`referenced_column_name`
            FROM information_schema.key_column_usage AS kc
            WHERE
                kc.table_schema = DATABASE() AND
                kc.table_name = %s
        """
        cursor.execute(name_query, [table_name])
        for constraint, column, ref_table, ref_column in cursor.fetchall():
            if constraint not in constraints:
                constraints[constraint] = {
                    'columns': OrderedSet(),
                    'primary_key': False,
                    'unique': False,
                    'index': False,
                    'check': False,
                    'foreign_key': (ref_table, ref_column) if ref_column else None,
                }
            constraints[constraint]['columns'].add(column)
        # Now get the constraint types
        type_query = """
            SELECT c.constraint_name, c.constraint_type
            FROM information_schema.table_constraints AS c
            WHERE
                c.table_schema = DATABASE() AND
                c.table_name = %s
        """
        cursor.execute(type_query, [table_name])
        for constraint, kind in cursor.fetchall():
            if kind.lower() == "primary key":
                constraints[constraint]['primary_key'] = True
                constraints[constraint]['unique'] = True
            elif kind.lower() == "unique":
                constraints[constraint]['unique'] = True
        # Now add in the indexes
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        # x[:5] keeps (Table, Non_unique, Key_name, Seq_in_index, Column_name);
        # x[10] is SHOW INDEX's Index_type column (e.g. BTREE, FULLTEXT).
        for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]:
            if index not in constraints:
                constraints[index] = {
                    'columns': OrderedSet(),
                    'primary_key': False,
                    'unique': False,
                    'check': False,
                    'foreign_key': None,
                }
            constraints[index]['index'] = True
            constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
            constraints[index]['columns'].add(column)
        # Convert the sorted sets to lists
        for constraint in constraints.values():
            constraint['columns'] = list(constraint['columns'])
        return constraints
| camilonova/django | django/db/backends/mysql/introspection.py | Python | bsd-3-clause | 9,426 |
"""Module will classes related to PV row geometries"""
import numpy as np
from pvfactors.config import COLOR_DIC
from pvfactors.geometry.base import \
BaseSide, _coords_from_center_tilt_length, PVSegment
from shapely.geometry import GeometryCollection, LineString
from pvfactors.geometry.timeseries import \
TsShadeCollection, TsLineCoords, TsSurface
from pvlib.tools import cosd, sind
class TsPVRow(object):
    """Timeseries PV row class: this class is a vectorized version of the
    PV row geometries. The coordinates and attributes (front and back sides)
    are all vectorized."""
    def __init__(self, ts_front_side, ts_back_side, xy_center, index=None,
                 full_pvrow_coords=None):
        """Initialize timeseries PV row with its front and back sides.
        Parameters
        ----------
        ts_front_side : :py:class:`~pvfactors.geometry.pvrow.TsSide`
            Timeseries front side of the PV row
        ts_back_side : :py:class:`~pvfactors.geometry.pvrow.TsSide`
            Timeseries back side of the PV row
        xy_center : tuple of float
            x and y coordinates of the PV row center point (invariant)
        index : int, optional
            index of the PV row (Default = None)
        full_pvrow_coords : \
            :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`, optional
            Timeseries coordinates of the full PV row, end to end
            (Default = None)
        """
        self.front = ts_front_side
        self.back = ts_back_side
        self.xy_center = xy_center
        self.index = index
        self.full_pvrow_coords = full_pvrow_coords
    @classmethod
    def from_raw_inputs(cls, xy_center, width, rotation_vec,
                        cut, shaded_length_front, shaded_length_back,
                        index=None, param_names=None):
        """Create timeseries PV row using raw inputs.
        Note: shading will always be zero when pv rows are flat.
        Parameters
        ----------
        xy_center : tuple of float
            x and y coordinates of the PV row center point (invariant)
        width : float
            width of the PV rows [m]
        rotation_vec : np.ndarray
            Timeseries rotation values of the PV row [deg]
        cut : dict
            Discretization scheme of the PV row. Eg {'front': 2, 'back': 4}.
            Will create segments of equal length on the designated sides.
        shaded_length_front : np.ndarray
            Timeseries values of front side shaded length [m]
        shaded_length_back : np.ndarray
            Timeseries values of back side shaded length [m]
        index : int, optional
            Index of the pv row (default = None)
        param_names : list of str, optional
            List of names of surface parameters to use when creating geometries
            (Default = None)
        Returns
        -------
        New timeseries PV row object
        """
        # Calculate full pvrow coords
        pvrow_coords = TsPVRow._calculate_full_coords(
            xy_center, width, rotation_vec)
        # Calculate normal vectors: (-dy, dx) is the row direction vector
        # (dx, dy) rotated 90 degrees counter-clockwise.
        dx = pvrow_coords.b2.x - pvrow_coords.b1.x
        dy = pvrow_coords.b2.y - pvrow_coords.b1.y
        normal_vec_front = np.array([-dy, dx])
        # Calculate front side coords
        ts_front = TsSide.from_raw_inputs(
            xy_center, width, rotation_vec, cut.get('front', 1),
            shaded_length_front, n_vector=normal_vec_front,
            param_names=param_names)
        # Calculate back side coords: the back normal is opposite the front's.
        ts_back = TsSide.from_raw_inputs(
            xy_center, width, rotation_vec, cut.get('back', 1),
            shaded_length_back, n_vector=-normal_vec_front,
            param_names=param_names)
        return cls(ts_front, ts_back, xy_center, index=index,
                   full_pvrow_coords=pvrow_coords)
    @staticmethod
    def _calculate_full_coords(xy_center, width, rotation):
        """Method to calculate the full PV row coordinates.
        Parameters
        ----------
        xy_center : tuple of float
            x and y coordinates of the PV row center point (invariant)
        width : float
            width of the PV rows [m]
        rotation : np.ndarray
            Timeseries rotation values of the PV row [deg]
        Returns
        -------
        coords: :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Timeseries coordinates of full PV row
        """
        x_center, y_center = xy_center
        radius = width / 2.
        # Calculate coords: the two end points sit half a width away from the
        # center, in opposite directions along the rotation angle.
        x1 = radius * cosd(rotation + 180.) + x_center
        y1 = radius * sind(rotation + 180.) + y_center
        x2 = radius * cosd(rotation) + x_center
        y2 = radius * sind(rotation) + y_center
        coords = TsLineCoords.from_array(np.array([[x1, y1], [x2, y2]]))
        return coords
    def surfaces_at_idx(self, idx):
        """Get all PV surface geometries in timeseries PV row for a certain
        index.
        Parameters
        ----------
        idx : int
            Index to use to generate PV surface geometries
        Returns
        -------
        list of :py:class:`~pvfactors.geometry.base.PVSurface` objects
            List of PV surfaces
        """
        pvrow = self.at(idx)
        return pvrow.all_surfaces
    def plot_at_idx(self, idx, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
                    color_illum=COLOR_DIC['pvrow_illum'],
                    with_surface_index=False):
        """Plot timeseries PV row at a certain index.
        Parameters
        ----------
        idx : int
            Index to use to plot timeseries PV rows
        ax : :py:class:`matplotlib.pyplot.axes` object
            Axes for plotting
        color_shaded : str, optional
            Color to use for plotting the shaded surfaces (Default =
            COLOR_DIC['pvrow_shaded'])
        color_illum : str, optional
            Color to use for plotting the illuminated surfaces (Default =
            COLOR_DIC['pvrow_illum'])
        with_surface_index : bool, optional
            Plot the surfaces with their index values (Default = False)
        """
        pvrow = self.at(idx)
        pvrow.plot(ax, color_shaded=color_shaded,
                   color_illum=color_illum, with_index=with_surface_index)
    def at(self, idx):
        """Generate a PV row geometry for the desired index.
        Parameters
        ----------
        idx : int
            Index to use to generate PV row geometry
        Returns
        -------
        pvrow : :py:class:`~pvfactors.geometry.pvrow.PVRow`
        """
        front_geom = self.front.at(idx)
        back_geom = self.back.at(idx)
        original_line = LineString(
            self.full_pvrow_coords.as_array[:, :, idx])
        pvrow = PVRow(front_side=front_geom, back_side=back_geom,
                      index=self.index, original_linestring=original_line)
        return pvrow
    def update_params(self, new_dict):
        """Update timeseries surface parameters of the PV row.
        Parameters
        ----------
        new_dict : dict
            Parameters to add or update for the surfaces
        """
        self.front.update_params(new_dict)
        self.back.update_params(new_dict)
    @property
    def n_ts_surfaces(self):
        """Number of timeseries surfaces in the ts PV row"""
        return self.front.n_ts_surfaces + self.back.n_ts_surfaces
    @property
    def all_ts_surfaces(self):
        """List of all timeseries surfaces"""
        return self.front.all_ts_surfaces + self.back.all_ts_surfaces
    @property
    def centroid(self):
        """Centroid point of the timeseries pv row"""
        centroid = (self.full_pvrow_coords.centroid
                    if self.full_pvrow_coords is not None else None)
        return centroid
    @property
    def length(self):
        """Length of both sides of the timeseries PV row"""
        return self.front.length + self.back.length
    @property
    def highest_point(self):
        """Timeseries point coordinates of highest point of PV row"""
        high_pt = (self.full_pvrow_coords.highest_point
                   if self.full_pvrow_coords is not None else None)
        return high_pt
class TsSide(object):
"""Timeseries side class: this class is a vectorized version of the
BaseSide geometries. The coordinates and attributes (list of segments,
normal vector) are all vectorized."""
    def __init__(self, segments, n_vector=None):
        """Initialize timeseries side using list of timeseries segments.
        Parameters
        ----------
        segments : list of :py:class:`~pvfactors.geometry.pvrow.TsSegment`
            List of timeseries segments of the side
        n_vector : np.ndarray, optional
            Timeseries normal vectors of the side (Default = None)
        """
        # Timeseries segments making up the side
        self.list_segments = segments
        # Timeseries normal vector shared by all surfaces of the side
        self.n_vector = n_vector
    @classmethod
    def from_raw_inputs(cls, xy_center, width, rotation_vec, cut,
                        shaded_length, n_vector=None, param_names=None):
        """Create timeseries side using raw PV row inputs.
        Note: shading will always be zero when PV rows are flat.
        Parameters
        ----------
        xy_center : tuple of float
            x and y coordinates of the PV row center point (invariant)
        width : float
            width of the PV rows [m]
        rotation_vec : np.ndarray
            Timeseries rotation values of the PV row [deg]
        cut : int
            Discretization scheme of the PV side.
            Will create segments of equal length.
        shaded_length : np.ndarray
            Timeseries values of side shaded length from lowest point [m]
        n_vector : np.ndarray, optional
            Timeseries normal vectors of the side
        param_names : list of str, optional
            List of names of surface parameters to use when creating geometries
            (Default = None)
        Returns
        -------
        New timeseries side object
        """
        # Rows with non-negative rotation are treated as tilted to the left.
        mask_tilted_to_left = rotation_vec >= 0
        # Create Ts segments
        x_center, y_center = xy_center
        radius = width / 2.
        segment_length = width / cut
        is_not_flat = rotation_vec != 0.
        # Calculate coords of shading point: the shade boundary sits
        # shaded_length away from the low edge, i.e. r_shade from the center.
        r_shade = radius - shaded_length
        x_sh = np.where(
            mask_tilted_to_left,
            r_shade * cosd(rotation_vec + 180.) + x_center,
            r_shade * cosd(rotation_vec) + x_center)
        y_sh = np.where(
            mask_tilted_to_left,
            r_shade * sind(rotation_vec + 180.) + y_center,
            r_shade * sind(rotation_vec) + y_center)
        # Calculate coords of each of the `cut` equal-length segments
        list_segments = []
        for i in range(cut):
            # Calculate segment coords
            r1 = radius - i * segment_length
            r2 = radius - (i + 1) * segment_length
            x1 = r1 * cosd(rotation_vec + 180.) + x_center
            y1 = r1 * sind(rotation_vec + 180.) + y_center
            x2 = r2 * cosd(rotation_vec + 180) + x_center
            y2 = r2 * sind(rotation_vec + 180) + y_center
            segment_coords = TsLineCoords.from_array(
                np.array([[x1, y1], [x2, y2]]))
            # Determine lowest and highest points of segment
            x_highest = np.where(mask_tilted_to_left, x2, x1)
            y_highest = np.where(mask_tilted_to_left, y2, y1)
            x_lowest = np.where(mask_tilted_to_left, x1, x2)
            y_lowest = np.where(mask_tilted_to_left, y1, y2)
            # Calculate illum and shaded coords; start by assuming the
            # segment is fully illuminated (zero-length shade at low point).
            x2_illum, y2_illum = x_highest, y_highest
            x1_shaded, y1_shaded, x2_shaded, y2_shaded = \
                x_lowest, y_lowest, x_lowest, y_lowest
            # Shading grows upward from the low edge; compare the shade
            # boundary height against the segment's extremities.
            mask_all_shaded = (y_sh > y_highest) & (is_not_flat)
            mask_partial_shaded = (y_sh > y_lowest) & (~ mask_all_shaded) \
                & (is_not_flat)
            # Calculate second boundary point of shade
            x2_shaded = np.where(mask_all_shaded, x_highest, x2_shaded)
            x2_shaded = np.where(mask_partial_shaded, x_sh, x2_shaded)
            y2_shaded = np.where(mask_all_shaded, y_highest, y2_shaded)
            y2_shaded = np.where(mask_partial_shaded, y_sh, y2_shaded)
            # The illuminated part starts where the shaded part ends.
            x1_illum = x2_shaded
            y1_illum = y2_shaded
            illum_coords = TsLineCoords.from_array(
                np.array([[x1_illum, y1_illum], [x2_illum, y2_illum]]))
            shaded_coords = TsLineCoords.from_array(
                np.array([[x1_shaded, y1_shaded], [x2_shaded, y2_shaded]]))
            # Create illuminated and shaded collections
            is_shaded = False
            illum = TsShadeCollection(
                [TsSurface(illum_coords, n_vector=n_vector,
                           param_names=param_names, shaded=is_shaded)],
                is_shaded)
            is_shaded = True
            shaded = TsShadeCollection(
                [TsSurface(shaded_coords, n_vector=n_vector,
                           param_names=param_names, shaded=is_shaded)],
                is_shaded)
            # Create segment
            segment = TsSegment(segment_coords, illum, shaded,
                                n_vector=n_vector, index=i)
            list_segments.append(segment)
        return cls(list_segments, n_vector=n_vector)
def surfaces_at_idx(self, idx):
"""Get all PV surface geometries in timeseries side for a certain
index.
Parameters
----------
idx : int
Index to use to generate PV surface geometries
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface` objects
List of PV surfaces
"""
side_geom = self.at(idx)
return side_geom.all_surfaces
def at(self, idx):
"""Generate a side geometry for the desired index.
Parameters
----------
idx : int
Index to use to generate side geometry
Returns
-------
side : :py:class:`~pvfactors.geometry.base.BaseSide`
"""
list_geom_segments = []
for ts_seg in self.list_segments:
list_geom_segments.append(ts_seg.at(idx))
side = BaseSide(list_geom_segments)
return side
def plot_at_idx(self, idx, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
color_illum=COLOR_DIC['pvrow_illum']):
"""Plot timeseries side at a certain index.
Parameters
----------
idx : int
Index to use to plot timeseries side
ax : :py:class:`matplotlib.pyplot.axes` object
Axes for plotting
color_shaded : str, optional
Color to use for plotting the shaded surfaces (Default =
COLOR_DIC['pvrow_shaded'])
color_shaded : str, optional
Color to use for plotting the illuminated surfaces (Default =
COLOR_DIC['pvrow_illum'])
"""
side_geom = self.at(idx)
side_geom.plot(ax, color_shaded=color_shaded, color_illum=color_illum,
with_index=False)
@property
def shaded_length(self):
"""Timeseries shaded length of the side."""
length = 0.
for seg in self.list_segments:
length += seg.shaded.length
return length
@property
def length(self):
"""Timeseries length of side."""
length = 0.
for seg in self.list_segments:
length += seg.length
return length
def get_param_weighted(self, param):
"""Get timeseries parameter for the side, after weighting by
surface length.
Parameters
----------
param : str
Name of parameter
Returns
-------
np.ndarray
Weighted parameter values
"""
return self.get_param_ww(param) / self.length
def get_param_ww(self, param):
"""Get timeseries parameter from the side's surfaces with weight, i.e.
after multiplying by the surface lengths.
Parameters
----------
param: str
Surface parameter to return
Returns
-------
np.ndarray
Timeseries parameter values multiplied by weights
Raises
------
KeyError
if parameter name not in a surface parameters
"""
value = 0.
for seg in self.list_segments:
value += seg.get_param_ww(param)
return value
def update_params(self, new_dict):
"""Update timeseries surface parameters of the side.
Parameters
----------
new_dict : dict
Parameters to add or update for the surfaces
"""
for seg in self.list_segments:
seg.update_params(new_dict)
@property
def n_ts_surfaces(self):
"""Number of timeseries surfaces in the ts side"""
n_ts_surfaces = 0
for ts_segment in self.list_segments:
n_ts_surfaces += ts_segment.n_ts_surfaces
return n_ts_surfaces
@property
def all_ts_surfaces(self):
"""List of all timeseries surfaces"""
all_ts_surfaces = []
for ts_segment in self.list_segments:
all_ts_surfaces += ts_segment.all_ts_surfaces
return all_ts_surfaces
class TsSegment(object):
    """A TsSegment is a timeseries segment that has a timeseries shaded
    collection and a timeseries illuminated collection."""

    def __init__(self, coords, illum_collection, shaded_collection,
                 index=None, n_vector=None):
        """Initialize timeseries segment using segment coordinates and
        timeseries illuminated and shaded surfaces.

        Parameters
        ----------
        coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Timeseries coordinates of full segment
        illum_collection : \
            :py:class:`~pvfactors.geometry.timeseries.TsShadeCollection`
            Timeseries collection for illuminated part of segment
        shaded_collection : \
            :py:class:`~pvfactors.geometry.timeseries.TsShadeCollection`
            Timeseries collection for shaded part of segment
        index : int, optional
            Index of segment (Default = None)
        n_vector : np.ndarray, optional
            Timeseries normal vectors of the side (Default = None)
        """
        self.coords = coords
        self.illum = illum_collection
        self.shaded = shaded_collection
        self.index = index
        self.n_vector = n_vector

    def surfaces_at_idx(self, idx):
        """Get all PV surface geometries in timeseries segment for a certain
        index.

        Parameters
        ----------
        idx : int
            Index to use to generate PV surface geometries

        Returns
        -------
        list of :py:class:`~pvfactors.geometry.base.PVSurface` objects
            List of PV surfaces
        """
        # Materialize the segment at the index and list its surfaces.
        return self.at(idx).all_surfaces

    def plot_at_idx(self, idx, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
                    color_illum=COLOR_DIC['pvrow_illum']):
        """Plot timeseries segment at a certain index.

        Parameters
        ----------
        idx : int
            Index to use to plot timeseries segment
        ax : :py:class:`matplotlib.pyplot.axes` object
            Axes for plotting
        color_shaded : str, optional
            Color to use for plotting the shaded surfaces (Default =
            COLOR_DIC['pvrow_shaded'])
        color_illum : str, optional
            Color to use for plotting the illuminated surfaces (Default =
            COLOR_DIC['pvrow_illum'])
        """
        pv_segment = self.at(idx)
        pv_segment.plot(ax, color_shaded=color_shaded,
                        color_illum=color_illum, with_index=False)

    def at(self, idx):
        """Generate a PV segment geometry for the desired index.

        Parameters
        ----------
        idx : int
            Index to use to generate PV segment geometry

        Returns
        -------
        segment : :py:class:`~pvfactors.geometry.base.PVSegment`
        """
        # Build both sub-collections at the index and assemble the segment.
        return PVSegment(illum_collection=self.illum.at(idx),
                         shaded_collection=self.shaded.at(idx),
                         index=self.index)

    @property
    def length(self):
        """Timeseries length of segment."""
        return self.illum.length + self.shaded.length

    @property
    def shaded_length(self):
        """Timeseries length of shaded part of segment."""
        return self.shaded.length

    @property
    def centroid(self):
        """Timeseries point coordinates of the segment's centroid"""
        return self.coords.centroid

    def get_param_weighted(self, param):
        """Get timeseries parameter for the segment, after weighting by
        surface length.

        Parameters
        ----------
        param : str
            Name of parameter

        Returns
        -------
        np.ndarray
            Weighted parameter values
        """
        # Normalize the weighted sum by the full segment length.
        return self.get_param_ww(param) / self.length

    def get_param_ww(self, param):
        """Get timeseries parameter from the segment's surfaces with weight,
        i.e. after multiplying by the surface lengths.

        Parameters
        ----------
        param : str
            Surface parameter to return

        Returns
        -------
        np.ndarray
            Timeseries parameter values multiplied by weights
        """
        return self.illum.get_param_ww(param) + self.shaded.get_param_ww(param)

    def update_params(self, new_dict):
        """Update timeseries surface parameters of the segment.

        Parameters
        ----------
        new_dict : dict
            Parameters to add or update for the surfaces
        """
        # Both sub-collections carry surface parameters: update each.
        for collection in (self.illum, self.shaded):
            collection.update_params(new_dict)

    @property
    def highest_point(self):
        """Timeseries point coordinates of highest point of segment"""
        return self.coords.highest_point

    @property
    def lowest_point(self):
        """Timeseries point coordinates of lowest point of segment"""
        return self.coords.lowest_point

    @property
    def all_ts_surfaces(self):
        """List of all timeseries surfaces in segment"""
        return self.illum.list_ts_surfaces + self.shaded.list_ts_surfaces

    @property
    def n_ts_surfaces(self):
        """Number of timeseries surfaces in the segment"""
        return self.illum.n_ts_surfaces + self.shaded.n_ts_surfaces
class PVRowSide(BaseSide):
    """A PV row side represents the whole surface of one side of a PV row.
    At its core it will contain a fixed number of
    :py:class:`~pvfactors.geometry.base.PVSegment` objects that will together
    constitue one side of a PV row: a PV row side can also be
    "discretized" into multiple segments"""

    def __init__(self, list_segments=None):
        """Initialize PVRowSide using its base class
        :py:class:`pvfactors.geometry.base.BaseSide`

        Parameters
        ----------
        list_segments : list of :py:class:`~pvfactors.geometry.base.PVSegment`, optional
            List of PV segments for PV row side (Default = None, treated as
            an empty list)
        """
        # BUG FIX: the previous default argument was a mutable list ([]),
        # which is created once and shared across all calls; use a None
        # sentinel and build a fresh list per call instead.
        if list_segments is None:
            list_segments = []
        super(PVRowSide, self).__init__(list_segments)
class PVRow(GeometryCollection):
    """A PV row is made of two PV row sides, a front and a back one."""

    def __init__(self, front_side=None, back_side=None,
                 index=None, original_linestring=None):
        """Initialize PV row.

        Parameters
        ----------
        front_side : :py:class:`~pvfactors.geometry.pvrow.PVRowSide`, optional
            Front side of the PV Row (Default = None, which becomes an empty
            PVRowSide)
        back_side : :py:class:`~pvfactors.geometry.pvrow.PVRowSide`, optional
            Back side of the PV Row (Default = None, which becomes an empty
            PVRowSide)
        index : int, optional
            Index of PV row (Default = None)
        original_linestring : :py:class:`shapely.geometry.LineString`, optional
            Full continuous linestring that the PV row will be made of
            (Default = None)
        """
        # BUG FIX: the defaults used to be ``PVRowSide()`` instances created
        # once at class-definition time and therefore shared by every PVRow
        # built with default arguments; build fresh empty sides per call.
        self.front = PVRowSide() if front_side is None else front_side
        self.back = PVRowSide() if back_side is None else back_side
        self.index = index
        self.original_linestring = original_linestring
        self._all_surfaces = None  # lazy cache for ``all_surfaces``
        super(PVRow, self).__init__([self.front, self.back])

    @classmethod
    def from_linestring_coords(cls, coords, shaded=False, normal_vector=None,
                               index=None, cut=None, param_names=None):
        """Create a PV row with a single PV surface and using linestring
        coordinates.

        Parameters
        ----------
        coords : list
            List of linestring coordinates for the surface
        shaded : bool, optional
            Shading status desired for the PVRow sides (Default = False)
        normal_vector : list, optional
            Normal vector for the surface (Default = None)
        index : int, optional
            Index of PV row (Default = None)
        cut : dict, optional
            Scheme to decide how many segments to create on each side.
            Eg {'front': 3, 'back': 2} will lead to 3 segments on front side
            and 2 segments on back side. (Default = None, same as {})
        param_names : list of str, optional
            Names of the surface parameters, eg reflectivity, total incident
            irradiance, temperature, etc. (Default = None, same as [])

        Returns
        -------
        :py:class:`~pvfactors.geometry.pvrow.PVRow` object
        """
        # BUG FIX: replace the mutable default arguments ({} and []) with
        # None sentinels to avoid state shared across calls.
        cut = cut or {}
        param_names = param_names or []
        index_single_segment = 0
        front_side = PVRowSide.from_linestring_coords(
            coords, shaded=shaded, normal_vector=normal_vector,
            index=index_single_segment, n_segments=cut.get('front', 1),
            param_names=param_names)
        # The back side's normal vector is opposite to the front side's.
        if normal_vector is not None:
            back_n_vec = - np.array(normal_vector)
        else:
            back_n_vec = - front_side.n_vector
        back_side = PVRowSide.from_linestring_coords(
            coords, shaded=shaded, normal_vector=back_n_vec,
            index=index_single_segment, n_segments=cut.get('back', 1),
            param_names=param_names)
        return cls(front_side=front_side, back_side=back_side, index=index,
                   original_linestring=LineString(coords))

    @classmethod
    def from_center_tilt_width(cls, xy_center, tilt, width, surface_azimuth,
                               axis_azimuth, shaded=False, normal_vector=None,
                               index=None, cut=None, param_names=None):
        """Create a PV row using mainly the coordinates of the line center,
        a tilt angle, and its width.

        Parameters
        ----------
        xy_center : tuple
            x, y coordinates of center point of desired linestring
        tilt : float
            surface tilt angle desired [deg]
        width : float
            desired width of the PV row linestring [m]
        surface_azimuth : float
            Surface azimuth of PV surface [deg]
        axis_azimuth : float
            Axis azimuth of the PV surface, i.e. direction of axis of rotation
            [deg]
        shaded : bool, optional
            Shading status desired for the PVRow sides (Default = False)
        normal_vector : list, optional
            Normal vector for the surface (Default = None)
        index : int, optional
            Index of PV row (Default = None)
        cut : dict, optional
            Scheme to decide how many segments to create on each side.
            Eg {'front': 3, 'back': 2} will lead to 3 segments on front side
            and 2 segments on back side. (Default = None, same as {})
        param_names : list of str, optional
            Names of the surface parameters, eg reflectivity, total incident
            irradiance, temperature, etc. (Default = None, same as [])

        Returns
        -------
        :py:class:`~pvfactors.geometry.pvrow.PVRow` object
        """
        # DOC FIX: the third parameter was previously documented as
        # ``length`` although the signature uses ``width``.
        coords = _coords_from_center_tilt_length(xy_center, tilt, width,
                                                 surface_azimuth, axis_azimuth)
        return cls.from_linestring_coords(coords, shaded=shaded,
                                          normal_vector=normal_vector,
                                          index=index, cut=cut,
                                          param_names=param_names)

    def plot(self, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
             color_illum=COLOR_DIC['pvrow_illum'], with_index=False):
        """Plot the surfaces of the PV Row.

        Parameters
        ----------
        ax : :py:class:`matplotlib.pyplot.axes` object
            Axes for plotting
        color_shaded : str, optional
            Color to use for plotting the shaded surfaces (Default =
            COLOR_DIC['pvrow_shaded'])
        color_illum : str, optional
            Color to use for plotting the illuminated surfaces (Default =
            COLOR_DIC['pvrow_illum'])
        with_index : bool
            Flag to annotate surfaces with their indices (Default = False)
        """
        self.front.plot(ax, color_shaded=color_shaded, color_illum=color_illum,
                        with_index=with_index)
        self.back.plot(ax, color_shaded=color_shaded, color_illum=color_illum,
                       with_index=with_index)

    @property
    def boundary(self):
        """Boundaries of the PV Row's original linestring."""
        return self.original_linestring.boundary

    @property
    def highest_point(self):
        """Highest point of the PV Row."""
        b1, b2 = self.boundary
        return b1 if b1.y > b2.y else b2

    @property
    def lowest_point(self):
        """Lowest point of the PV Row."""
        b1, b2 = self.boundary
        return b1 if b1.y < b2.y else b2

    @property
    def all_surfaces(self):
        """List of all the surfaces in the PV row (computed lazily)."""
        if self._all_surfaces is None:
            self._all_surfaces = []
            self._all_surfaces += self.front.all_surfaces
            self._all_surfaces += self.back.all_surfaces
        return self._all_surfaces

    @property
    def surface_indices(self):
        """List of all surface indices in the PV Row."""
        list_indices = []
        list_indices += self.front.surface_indices
        list_indices += self.back.surface_indices
        return list_indices

    def update_params(self, new_dict):
        """Update surface parameters for both front and back sides.

        Parameters
        ----------
        new_dict : dict
            Parameters to add or update for the surface
        """
        self.front.update_params(new_dict)
        self.back.update_params(new_dict)
| SunPower/pvfactors | pvfactors/geometry/pvrow.py | Python | bsd-3-clause | 31,109 |
#!/usr/bin/env python
"""Small experiment that prints various views and properties of sys.argv."""
from sys import argv
from sys import stdout

stdout.write('argv ')
print(argv)
stdout.write('argv[1:] ')
print(argv[1:])
stdout.write('argv[:] ')
print(argv[:])
stdout.write('len(argv) ')
print(len(argv))
stdout.write('len(argv[1:]) ')
print(len(argv[1:]))
stdout.write('len(argv[:]) ')
print(len(argv[:]))
stdout.write('type(argv) ')
print(type(argv))
# BUG FIX: argv[1] only exists when the script received at least one
# argument; guard the access (the original crashed with IndexError) and
# drop the duplicated ``testvar = argv[1]`` assignments.
if len(argv) > 1:
    testvar = argv[1]
    stdout.write('type(testvar) ')
    print(type(testvar))
    stdout.write('len(testvar) ')
    print(len(testvar))
for arg in argv:
    stdout.write(arg + ' ')
# BUG FIX: the bare ``print`` statement was a Python-2 leftover; in
# Python 3 it is a no-op expression, so call print() to emit the newline.
print()
| talapus/Ophidian | Commandline_input/argv_experiment.py | Python | bsd-3-clause | 613 |
import yaml as yaml
from astropy import units
import re
class DefaultParser:
    """Validator/parser for one node of a default-configuration dictionary.

    Reads the node's ``property_type`` plus the optional ``allowed_value``,
    ``allowed_type`` (range string), ``default`` and ``mandatory`` entries,
    and exposes validation (`is_valid`) and the effective value
    (`get_value`: the config value if set and valid, else the default).
    """

    # Registries of validation/conversion callables keyed by property type.
    # NOTE: these class-level dicts store plain functions (not bound
    # methods), so stored callables must be invoked with an explicit
    # ``self`` argument.
    __check = {}
    __convert = {}
    __list_of_leaf_types = []

    def __init__(self, default_dict):
        """Initialize from one node of the default-configuration dict."""
        self.__register_leaf('list')
        self.__register_leaf('int')
        self.__register_leaf('float')
        self.__register_leaf('quantity')
        self.__register_leaf('string')
        self.__register_leaf('container-declaration')
        self.__mandatory = False
        self.__allowed_value = None
        self.__allowed_type = None
        self.__config_value = None
        self.__path = None
        self.__default_dict = default_dict
        if 'property_type' not in default_dict:
            self.__property_type = 'arbitrary'
        else:
            self.__property_type = default_dict['property_type']
        if self.__property_type not in self.__check:
            raise ValueError
        if 'allowed_value' in default_dict:
            self.__allowed_value = self.__convert_av_in_pt(
                default_dict['allowed_value'], self.__property_type)
        if 'allowed_type' in default_dict:
            self.__allowed_type = default_dict['allowed_type']
            self.__lower, self.__upper = \
                self.__parse_allowed_type(self.__allowed_type)
        if 'default' in default_dict:
            self.set_default(default_dict['default'])
        if 'mandatory' in default_dict:
            self.__mandatory = default_dict['mandatory']
        self.is_leaf = self.__is_leaf(self.__property_type)

    def get_default(self):
        """Return the default value (AttributeError if never set)."""
        return self.__default_value

    def set_default(self, value):
        """Set the default value after validating it against constraints."""
        if value is not None:
            if self.is_valid(value):
                self.__default_value = value
            else:
                raise ValueError('Default value violates property constraint.')
        else:
            self.__default_value = None

    def is_mandatory(self):
        """Return True if this entry is flagged as mandatory."""
        return self.__mandatory

    def has_default(self):
        """Return True if a non-None default value has been set."""
        # BUG FIX: accessing an unset attribute raises AttributeError, not
        # NameError; the original except clause never triggered and the
        # exception escaped to the caller.
        try:
            return self.__default_value is not None
        except AttributeError:
            return False

    def set_path_in_dic(self, path):
        """Remember where this property sits in the configuration tree."""
        self.__path = path

    def get_path_in_dict(self):
        """Return the stored path of this property in the tree."""
        return self.__path

    def set_config_value(self, value):
        """Store the value coming from the user configuration."""
        self.__config_value = value

    def get_value(self):
        """Return the config value if valid, else the default value."""
        if self.__config_value is not None and self.is_valid(self.__config_value):
            return self.__config_value
        else:
            if self.has_default():
                return self.__default_value
            else:
                raise ValueError('No default value given.')

    def is_container(self):
        """Return True if this node declares a container property."""
        return self.__is_container(None)

    def get_container_dic(self):
        """Return the container declaration dict, if this is a container."""
        if self.__is_container(None):
            return self.__container_dic

    def update_container_dic(self, container_dic, current_entry_name):
        """Consume ``current_entry_name`` from the container's and/or lists."""
        # BUG FIX: the original used Python-2-only ``reduce``/``has_key``
        # with a ``True`` initializer (making the condition always true) and
        # removed the entry from ``current_entry_name`` instead of
        # ``container_dic['and']``.
        if any(key in container_dic for key in ('and', 'or')):
            if 'or' in container_dic:
                if current_entry_name in container_dic['or']:
                    container_dic['or'] = []
                    return container_dic
            if 'and' in container_dic:
                if current_entry_name in container_dic['and']:
                    container_dic['and'].remove(current_entry_name)
                    return container_dic

    def is_valid(self, value):
        """Check ``value`` against type, allowed values, and allowed range."""
        # Type checkers are stored as plain functions -> pass self explicitly.
        if not self.__check[self.__property_type](self, value):
            return False
        # BUG FIX: the two calls below previously passed ``self`` twice to
        # bound methods, raising TypeError whenever a constraint was set.
        if self.__allowed_value:
            if not self.__is_allowed_value(value, self.__allowed_value):
                return False
        if self.__allowed_type:
            if not self.__check_value(value, self.__lower, self.__upper):
                return False
        return True

    def __register_leaf(self, type_name):
        # Record the type as a leaf type (stray debug print removed).
        if type_name not in self.__list_of_leaf_types:
            self.__list_of_leaf_types.append(type_name)

    def __is_leaf(self, type_name):
        return type_name in self.__list_of_leaf_types

    def __is_container(self, value):
        # ``value`` is unused; the signature matches the __check registry.
        if self.__property_type == 'container-property':
            try:
                self.__container_dic = self.__default_dict['type']['containers']
                return True
            except (KeyError, TypeError):
                return False
        else:
            return False
    __check['container-property'] = __is_container

    def __is_container_declaration(self, value):
        # Placeholder: container declarations are registered as leaves but
        # have no dedicated validator.
        pass

    def __is_type_arbitrary(self, value):
        # Arbitrary properties accept anything; they are never leaves.
        self.is_leaf = False
        return True
    __check['arbitrary'] = __is_type_arbitrary

    def __is_type_list(self, value):
        self.__register_leaf('list')
        try:
            return isinstance(value, list)
        except ValueError:
            return False
    __check['list'] = __is_type_list

    def __is_type_int(self, value):
        self.__register_leaf('int')
        try:
            int(value)
            # Accept float-like values only when they are whole numbers.
            if float.is_integer(float(value)):
                return True
            else:
                return False
        except ValueError:
            return False
    __check['int'] = __is_type_int

    def __is_type_float(self, value):
        self.__register_leaf('float')
        try:
            float(value)
            return True
        except ValueError:
            return False
    __check['float'] = __is_type_float

    def __is_type_quantity(self, value):
        # A quantity is "<number> <unit>", e.g. "2 cm".
        self.__register_leaf('quantity')
        try:
            quantity_value, quantity_unit = value.strip().split()
            float(quantity_value)
            units.Unit(quantity_unit)
            return True
        except ValueError:
            return False
    __check['quantity'] = __is_type_quantity

    def __is_type_string(self, value):
        self.__register_leaf('string')
        try:
            str(value)
            return True
        except ValueError:
            return False
    __check['string'] = __is_type_string

    def __to_quantity(self, value):
        quantity_value, quantity_unit = value.strip().split()
        float(quantity_value)
        units.Unit(quantity_unit)
        return (quantity_value, quantity_unit)
    __convert['quantity'] = __to_quantity

    def __to_int(self, value):
        return int(value)
    __convert['int'] = __to_int

    def __to_float(self, value):
        return float(value)
    __convert['float'] = __to_float

    def __to_string(self, value):
        return str(value)
    __convert['string'] = __to_string

    def __to_list(self, value):
        # BUG FIX: ``basestring`` does not exist in Python 3; use ``str``.
        if isinstance(value, list):
            return value
        elif isinstance(value, str):
            return value.split()
        else:
            return []
    __convert['list'] = __to_list

    def __convert_av_in_pt(self, allowed_value, property_type):
        """Convert each allowed value to the property type."""
        # BUG FIX: the original tested ``len([])`` (always 0) and referenced
        # an undefined name ``property_value``, so it always returned [] and
        # the allowed-value constraint was silently dropped. The converters
        # are stored as plain functions -> pass self explicitly.
        if allowed_value:
            return [self.__convert[property_type](self, a)
                    for a in allowed_value]
        return []

    def __is_allowed_value(self, value, allowed_value):
        """Return True if ``value`` is among the allowed values."""
        # BUG FIX: the original referenced the undefined ``_allowed_value``.
        return value in allowed_value

    def __parse_allowed_type(self, allowed_type):
        """Parse a range string like ``'x < 5'`` into (lower, upper) bounds.

        Supported forms: ``'x < a'``/``'< a'`` and ``'a >'`` (upper bound),
        ``'x > a'``/``'> a'`` and ``'a <'`` (lower bound).
        """
        string = allowed_type.strip()
        upper = None
        lower = None
        # BUG FIX: ``str.find`` returns -1 (truthy) when absent, so the
        # original condition was almost always true; use membership tests.
        if '<' in string or '>' in string:
            # like "x < a": a is an upper limit
            # BUG FIX: anchored patterns now require at least one numeric
            # character so e.g. "10 <" no longer matches the wrong branch,
            # and the "a >" branch uses '>' (the original regex tested '<',
            # which could set both bounds from a single comparison).
            if re.compile(r'[<][\s]*[0-9.+^*eE]+$').findall(string):
                value = re.compile(r'[0-9.+^*eE]+').findall(string)[0]
                upper = float(value)
            # like "a > x": a is an upper limit
            if re.compile(r'^[\s0-9.+^*eE]+[>]$').findall(string):
                value = re.compile(r'[0-9.+^*eE]+').findall(string)[0]
                upper = float(value)
            # like "x > a": a is a lower limit
            if re.compile(r'[>][\s]*[0-9.+^*eE]+$').findall(string):
                value = re.compile(r'[0-9.+^*eE]+').findall(string)[0]
                lower = float(value)
            # like "a < x": a is a lower limit
            if re.compile(r'^[\s0-9.+^*eE]+[<]$').findall(string):
                value = re.compile(r'[0-9.+^*eE]+').findall(string)[0]
                lower = float(value)
        return lower, upper

    def __check_value(self, value, lower_lim, upper_lim):
        """Return True if ``value`` lies strictly within the open interval."""
        upper, lower = True, True
        if upper_lim is not None:
            upper = value < upper_lim
        if lower_lim is not None:
            lower = value > lower_lim
        return upper and lower
class PropertyBase:
    """Recursively wrap a default-configuration tree in DefaultParser nodes.

    Each non-leaf node expands its children into further ``PropertyBase``
    instances; leaf nodes are fully described by their ``DefaultParser``.
    """

    def __init__(self, default_dic, config_dic, default_section,
                 is_bottom_of_config_dict=False):
        """Build the property tree for ``default_dic``.

        Parameters
        ----------
        default_dic : dict
            Node of the default-configuration dictionary.
        config_dic : dict or None
            Matching node of the user configuration (may be None/missing).
        default_section : str
            Name of the current section, used as key in ``children``.
        is_bottom_of_config_dict : bool, optional
            Unused; kept for backward compatibility of the signature.
        """
        # BUG FIX: ``children`` used to be a class attribute, i.e. a single
        # dict shared and mutated by every instance; make it per-instance
        # state. The original debug prints were removed.
        self.children = {}
        self.__default_property = DefaultParser(default_dic)
        if self.__default_property.is_leaf:
            # Leaf: the DefaultParser already carries value and constraints.
            pass
        else:
            tmp = {}
            for child in default_dic.keys():
                # Descend into the matching config node when it exists;
                # otherwise pass the current config node down unchanged
                # (mirrors the original fallback behavior).
                try:
                    child_config = config_dic[child]
                except (AttributeError, TypeError, KeyError):
                    child_config = config_dic
                tmp[child] = PropertyBase(default_dic[child], child_config,
                                          child)
            self.children[default_section] = tmp
class Container(DefaultParser):
    """Parser for a 'container-property' node of the default configuration.

    Validates the container type selected in the user config against the
    declared allowed containers, then recursively parses the container's
    mandatory items into (DefaultParser-object tree, value tree) pairs.

    NOTE(review): because of Python name mangling, the double-underscore
    attributes set below (e.g. ``self.__allowed_value``) become
    ``_Container__*`` and therefore do NOT initialize the DefaultParser
    base-class state; ``DefaultParser.__init__`` is never called — confirm
    this is intended.
    """
    def __init__(self, container_default_dict, container_dict):
        """Parse ``container_dict`` (user config) against the declaration
        in ``container_default_dict``; raises ValueError on any mismatch."""
        #self.__register_leaf('list')
        #self.__register_leaf('int')
        #self.__register_leaf('float')
        #self.__register_leaf('quantity')
        #self.__register_leaf('string')
        self.__allowed_value = None
        self.__allowed_type = None
        self.__config_value = None
        self.__path = None
        self.__property_type = 'container-property'
        self.__default_container = {}
        self.__config_container = {}
        #check if it is a valid default container
        if not 'type' in container_default_dict:
            raise ValueError('The given default contaienr is no valid')
        #set allowed containers
        try:
            self.__allowed_container = container_default_dict['type']['containers']
        except:
            raise ValueError('No container names specified')
        #check if the specified container in the config is allowed
        try:
            if not container_dict['type'] in self.__allowed_container:
                raise ValueError('Wrong container type')
            else:
                # type_dict is assigned but never used afterwards.
                type_dict = container_dict['type']
        except KeyError:
            raise ValueError('No container type specified')
        #get selected container from conf
        try:
            self.__selected_container = container_dict['type']
        except KeyError:
            # NOTE(review): the None assignment is unreachable state for
            # callers since the ValueError below propagates immediately.
            self.__selected_container = None
            raise ValueError('No container type specified in config')
        #look for necessary items
        # The declaration lists required items under '_<container name>'.
        entry_name = '_' + self.__selected_container
        try:
            necessary_items = container_default_dict['type'][entry_name]
        except KeyError:
            raise ValueError('Container insufficient specified')
        # Recursive helper: walks one declaration subtree and the matching
        # config subtree, returning (DefaultParser tree, value tree).
        def parse_container_items(top_default, top_config, level_name, path):
            print('START NEW PARSE')
            tmp_conf_ob = {}
            path_in_dic = []
            tmp_conf_val = {}
            if isinstance(top_default,dict):
                print(top_default)
                default_property = DefaultParser(top_default)
                print(default_property.is_leaf)
                if not default_property.is_leaf:
                    # Branch node: recurse into each child declaration.
                    print(top_default.items())
                    for k,v in top_default.items():
                        print('>>--<<')
                        print(top_config)
                        print(k)
                        print(v)
                        tmp_conf_ob[k], tmp_conf_val[k] = parse_container_items(v, top_config, k, path + [k])
                    return tmp_conf_ob, tmp_conf_val
                else:
                    # Leaf node: look up the config value at this path, if any.
                    default_property.set_path_in_dic(path)
                    try:
                        print('>>>>>>>>>>>')
                        print(path)
                        print(top_config)
                        conf_value = get_property_by_path(top_config, path)
                        print('conf_value: %s'%conf_value)
                    except:
                        # NOTE(review): bare except silently treats any
                        # lookup failure as "no config value" — confirm.
                        conf_value = None
                    if conf_value is not None:
                        default_property.set_config_value(conf_value)
                    return default_property, default_property.get_value()
        # Follow ``path`` key by key into a nested dict.
        def get_property_by_path(conf_dict, path):
            for key in path:
                conf_dict = conf_dict[key]
            return conf_dict
        for item in necessary_items:
            if not item in container_dict.keys():
                raise ValueError('Entry %s is missing in container'%str(item))
            # NOTE(review): each iteration overwrites the previous result,
            # so only the last necessary item is kept — looks like a bug;
            # confirm whether the results should be merged instead.
            self.__default_container, self.__config_container = parse_container_items(container_default_dict[item], container_dict[item], item, [])
            pass
        #go through all items and create an conf object thereby check the conf
        self.__container_ob = self.__default_container
        self.__conf = self.__config_container
    def get_container_ob(self):
        # Tree of DefaultParser objects for the parsed container.
        return self.__container_ob
    def get_container_conf(self):
        # Tree of effective (config or default) values for the container.
        return self.__conf
class Config:
    """Top-level configuration parser.

    Builds a pure-default configuration tree and then a merged tree of
    (DefaultParser objects, effective values) from a default-configuration
    dict and a user-configuration dict.
    """
    def __init__(self, default_conf, conf):
        """Parse ``conf`` against ``default_conf``.

        Parameters
        ----------
        default_conf : dict
            Default-configuration declaration tree.
        conf : dict
            User configuration to validate and merge.
        """
        # Bookkeeping for mandatory entries: keys are path strings.
        self.mandatories = {}
        self.fulfilled = {}
        self.__create_default_conf(default_conf)
        self.__parse_config(default_conf, conf)
    def __mandatory_key(self, path):
        # Flatten a path list into a single dict key, e.g. ['a','b'] -> 'a:b'.
        return ':'.join(path)
    def register_mandatory(self, name, path):
        """Record that the entry at ``path`` is mandatory."""
        self.mandatories[self.__mandatory_key(path)] = name
    def deregister_mandatory(self, name, path):
        """Record that the mandatory entry at ``path`` was provided."""
        self.fulfilled[self.__mandatory_key(path)] = name
    def is_mandatory_fulfilled(self):
        """Return True if every registered mandatory entry was fulfilled."""
        if len(set(self.mandatories.keys()) - set(self.fulfilled.keys()))<=0:
            return True
        else:
            return False
    def __parse_config(self, default_conf, conf,):
        """Walk the declaration tree and merge in config values."""
        # NOTE(review): PF and finditem are defined but never called here —
        # they look like leftovers; confirm before removing.
        def PF( top_v):
            tmp = {}
            default_property = DefaultParser(top_v)
            print(top_v)
            if not default_property.is_leaf:
                tmp['branch_properties'] = default_property
                print(top_v)
                for k,v in top_v.items():
                    print("key is %s"%str(k))
                    tmp[k] = PF(v)
                return tmp
            else:
                return default_property
        def finditem( obj, key):
            # Depth-first search for ``key`` anywhere in a nested dict.
            if key in obj: return obj[key]
            for k, v in obj.items():
                if isinstance(v,dict):
                    item = finditem(v, key)
                    if item is not None:
                        return item
        def is_path_valid(conf_dict, path):
            # True if ``path`` can be followed key by key in ``conf_dict``.
            try:
                for key in path:
                    conf_dict = conf_dict[key]
                return True
            except KeyError:
                return False
        def get_property_by_path(conf_dict, path):
            # Follow ``path`` key by key and return the value found there.
            for key in path:
                conf_dict = conf_dict[key]
            return conf_dict
        def recursive_parser(top_v, conf, level_name, path):
            # Returns a pair (DefaultParser-object tree, value tree).
            # NOTE(review): returns None implicitly when top_v is not a
            # dict — confirm that cannot happen for valid declarations.
            tmp_conf_ob = {}
            path_in_dic = []
            tmp_conf_val = {}
            if isinstance(top_v,dict):
                default_property = DefaultParser(top_v)
                if default_property.is_mandatory():
                    # NOTE(review): ``self`` is passed as the ``name``
                    # argument here, and the entry is immediately marked
                    # fulfilled as well — looks unintended; confirm.
                    self.register_mandatory(self, path)
                    self.deregister_mandatory(self, path)
                if default_property.is_container():
                    # Containers are handled by the dedicated Container class.
                    container_conf = get_property_by_path(conf, path)
                    ccontainer = Container(top_v, container_conf)
                    return ccontainer.get_container_ob(), ccontainer.get_container_conf()
                elif not default_property.is_leaf:
                    # Branch node: recurse into every child declaration.
                    for k,v in top_v.items():
                        tmp_conf_ob[k], tmp_conf_val[k] = recursive_parser(v, conf, k, path + [k])
                    return tmp_conf_ob, tmp_conf_val
                else:
                    # Leaf node: attach the config value at this path, if any.
                    default_property.set_path_in_dic(path)
                    try:
                        conf_value = get_property_by_path(conf, path)
                    except:
                        # NOTE(review): bare except treats any lookup failure
                        # as "no config value" — confirm.
                        conf_value = None
                    if conf_value is not None:
                        default_property.set_config_value(conf_value)
                    return default_property, default_property.get_value()
        self.__conf_o, self.__conf_v = recursive_parser(default_conf, conf, 'main', [])
    def __create_default_conf(self, default_conf):
        """Build the tree of pure default values from the declaration."""
        def recursive_default_parser(top_v,level_name, path):
            # NOTE(review): container nodes fall through without an explicit
            # return, yielding None — confirm that is the intended default.
            tmp_default = {}
            path_in_dic = []
            if isinstance(top_v,dict):
                default_property = DefaultParser(top_v)
                if not default_property.is_container():
                    if not default_property.is_leaf:
                        for k,v in top_v.items():
                            tmp_default[k] = recursive_default_parser(v, k, path + [k])
                        return tmp_default
                    else:
                        default_property.set_path_in_dic(path)
                        if default_property.has_default():
                            return default_property.get_default()
                        else:
                            return None
        self.__default_config = recursive_default_parser(default_conf, 'main', [])
    def get_config(self):
        """Return the merged value tree (config values over defaults)."""
        return self.__conf_v
    def get_default_config(self):
        """Return the tree of pure default values."""
        return self.__default_config
    def get_config_object(self):
        """Return the tree of DefaultParser objects."""
        return self.__conf_o
"""
def get_config(self):
return self.conf_v
def get_default(self):
def __finditem(self, obj, key):
if key in obj: return obj[key]
for k, v in obj.items():
if isinstance(v,dict):
item = _finditem(v, key)
if item is not None:
return item
"""
| mklauser/tardis-OLD | tardis/io/default_config_parser.py | Python | bsd-3-clause | 19,137 |
#===========================================================================
# Copyright (c) 2011-2012, the PyFACT developers
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the PyFACT developers nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE PYFACT DEVELOPERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===========================================================================
# Imports
import sys, time, logging, os, datetime, math
import numpy as np
import scipy.optimize
import scipy.special
import scipy.ndimage
import pyfits
import pyfact as pf
#===========================================================================
# Functions & classes
#---------------------------------------------------------------------------
class SkyCoord:
    """Sky coordinate in RA and Dec. All units should be degree."""

    def __init__(self, ra, dec):
        """
        Sky coordinate in RA and Dec. All units should be degree.
        In the current implementation it should also work with arrays, though one has to be careful in dist.

        Parameters
        ----------
        ra : float/array
            Right ascension of the coordinate.
        dec : float/array
            Declination of the coordinate.
        """
        self.ra = ra
        self.dec = dec

    def dist(self, c):
        """
        Return the distance of the coordinates in degree following the haversine formula,
        see e.g. http://en.wikipedia.org/wiki/Great-circle_distance.

        Parameters
        ----------
        c : SkyCoord

        Returns
        -------
        distance : float
            Return the distance of the coordinates in degree following the haversine formula.

        Notes
        -----
        http://en.wikipedia.org/wiki/Great-circle_distance
        """
        # Half-angle differences in radians (deg / 360 * pi == deg/2 in rad).
        half_ddec = (self.dec - c.dec) / 360. * np.pi
        half_dra = (self.ra - c.ra) / 360. * np.pi
        # Haversine term.
        h = (np.sin(half_ddec) ** 2.
             + np.cos(self.dec / 180. * np.pi) * np.cos(c.dec / 180. * np.pi)
             * np.sin(half_dra) ** 2.)
        return 2. * np.arcsin(np.sqrt(h)) / np.pi * 180.
#---------------------------------------------------------------------------
class SkyCircle:
    """A circle on the sky."""

    def __init__(self, c, r):
        """
        A circle on the sky.

        Parameters
        ----------
        c : SkyCoord
            Coordinates of the circle center (RA, Dec).
        r : float
            Radius of the circle (deg).
        """
        self.c = c
        self.r = r

    def contains(self, c):
        """
        Checks if the coordinate lies inside the circle.

        Parameters
        ----------
        c : SkyCoord

        Returns
        -------
        contains : bool
            True if c lies in the SkyCircle.
        """
        # Inside means the angular distance to the center does not exceed
        # the circle radius.
        return self.c.dist(c) <= self.r

    def intersects(self, sc):
        """
        Checks if two sky circles overlap.

        Parameters
        ----------
        sc : SkyCircle
        """
        # Two circles overlap when their centers are closer than the sum
        # of their radii.
        return self.c.dist(sc.c) <= self.r + sc.r
#---------------------------------------------------------------------------
def skycircle_from_str(cstr):
    """Create a SkyCircle from a circle region string.

    Parameters
    ----------
    cstr : str
        Region string, e.g. ``'circle(83.6, 22.0, 0.3)'``.

    Returns
    -------
    SkyCircle
    """
    import ast
    # SECURITY FIX: the original used eval() on the region string, which
    # would execute arbitrary code from untrusted input; ast.literal_eval
    # safely parses the numeric tuple with identical results.
    x, y, r = ast.literal_eval(cstr.upper().replace('CIRCLE', ''))
    return SkyCircle(SkyCoord(x, y), r)
#---------------------------------------------------------------------------
def get_cam_acc(camdist, rmax=4., nbins=None, exreg=None, fit=False, fitfunc=None, p0=None) :
    """
    Calculates the camera acceptance histogram from a given list with camera distances (event list).

    Parameters
    ----------
    camdist : array
        Numpy array of camera distances (event list).
    rmax : float, optional
        Maximum radius for the acceptance histogram.
    nbins : int, optional
        Number of bins for the acceptance histogram (default bin width = 0.1 deg).
    exreg : array, optional
        Array of exclusion regions. Exclusion regions are given by an aray of size 2
        [r, d] with r = radius, d = distance to camera center
    fit : bool, optional
        Fit acceptance histogram (default=False).
    fitfunc : function, optional
        Custom fit function f(p, x); if None, a smoothed profile that is flat
        in the center with a power-law falloff beyond p[1] is used.
    p0 : array, optional
        Initial fit parameters matching fitfunc.

    Returns
    -------
    (n, bins, nerr, r, r_a, ex_a, fitter) : tuple
        Histogram contents, bin edges, errors on n, bin centers, bin ring
        areas, fractional excluded area per bin (None if exreg not given),
        and the ChisquareFitter instance (None unless fit=True and the fit
        could be attempted).
    """
    if not nbins :
        # default binning: 0.1 deg wide bins out to rmax
        nbins = int(rmax / .1)
    # Create camera distance histogram
    n, bins = np.histogram(camdist, bins=nbins, range=[0., rmax])
    nerr = np.sqrt(n)  # Poissonian error on the bin counts
    # Bin center array
    r = (bins[1:] + bins[:-1]) / 2.
    # Bin area (ring) array
    r_a = (bins[1:] ** 2. - bins[:-1] ** 2.) * np.pi
    # Deal with exclusion regions: ex_a accumulates, per ring, the area
    # fraction covered by the exclusion circles
    ex_a = None
    if exreg :
        ex_a = np.zeros(len(r))
        t = np.ones(len(r))
        for reg in exreg :
            # excluded area in the ring = overlap with the outer circle
            # minus overlap with the inner circle
            ex_a += (pf.circle_circle_intersection_a(bins[1:], t * reg[0], t * reg[1])
                     - pf.circle_circle_intersection_a(bins[:-1], t * reg[0], t * reg[1]))
        ex_a /= r_a
    # Fit the data
    fitter = None
    if fit :
        #fitfunc = lambda p, x: p[0] * x ** p[1] * (1. + (x / p[2]) ** p[3]) ** ((p[1] + p[4]) / p[3])
        if not fitfunc :
            fitfunc = lambda p, x: p[0] * x ** 0. * (1. + (x / p[1]) ** p[2]) ** ((0. + p[3]) / p[2])
            #fitfunc = lambda p, x: p[0] * x ** 0. * (1. + (x / p[1]) ** p[2]) ** ((0. + p[3]) / p[2]) + p[4] / (np.exp(p[5] * (x - p[6])) + 1.)
        if not p0 :
            p0 = [n[0] / r_a[0], 1.5, 3., -5.] # Initial guess for the parameters
            #p0 = [.5 * n[0] / r_a[0], 1.5, 3., -5., .5 * n[0] / r_a[0], 100., .5] # Initial guess for the parameters
        fitter = pf.ChisquareFitter(fitfunc)
        # keep only bins with data and non-degenerate area / exclusion factors
        m = (n > 0.) * (nerr > 0.) * (r_a != 0.) * ((1. - ex_a) != 0.)
        if np.sum(m) <= len(p0) :
            logging.error('Could not fit camera acceptance (dof={0}, bins={1})'.format(len(p0), np.sum(m)))
        else :
            # ok, this _should_ be improved !!!
            x, y, yerr = r[m], n[m] / r_a[m] / (1. - ex_a[m]) , nerr[m] / r_a[m] / (1. - ex_a[m])
            m = np.isfinite(x) * np.isfinite(y) * np.isfinite(yerr) * (yerr != 0.)
            if np.sum(m) <= len(p0) :
                logging.error('Could not fit camera acceptance (dof={0}, bins={1})'.format(len(p0), np.sum(m)))
            else :
                fitter.fit_data(p0, x[m], y[m], yerr[m])
    return (n, bins, nerr, r, r_a, ex_a, fitter)
#---------------------------------------------------------------------------
def get_sky_mask_circle(r, bin_size) :
    """
    Build a square 2d histogram containing a filled circle.

    The histogram has int(ceil(2 * r / bin_size)) bins per axis; bins whose
    center lies inside the circle of radius r are set to 1., all other bins
    are 0.

    Parameters
    ----------
    r : float
        Radius of the circle.
    bin_size : float
        Physical size of the bin, same units as r.

    Returns
    -------
    sky_mask : 2d numpy array
    """
    nbins = int(np.ceil(2. * r / bin_size))
    # bin center positions along one axis
    centers = np.linspace(bin_size / 2., 2. * r - bin_size / 2., nbins)
    # per-bin center offsets from the circle center (r, r)
    dist_x = np.ones((nbins, nbins)) * centers - r
    dist_y = np.transpose(np.ones((nbins, nbins)) * centers) - r
    # 1. strictly inside the circle, 0. elsewhere
    return np.where(np.sqrt(dist_x ** 2. + dist_y ** 2.) < r, 1., 0.)
#---------------------------------------------------------------------------
def get_sky_mask_ring(rmin, rmax, bin_size) :
    """
    Build a square 2d histogram containing a filled ring.

    The histogram has int(ceil(2 * rmax / bin_size)) bins per axis; bins
    whose center lies strictly between rmin and rmax from the center are
    set to 1., all other bins are 0.

    Parameters
    ----------
    rmin : float
        Inner radius of the ring.
    rmax : float
        Outer radius of the ring.
    bin_size : float
        Physical size of the bin, same units as rmin, rmax.

    Returns
    -------
    sky_mask : 2d numpy array
    """
    nbins = int(np.ceil(2. * rmax / bin_size))
    # bin center positions along one axis
    centers = np.linspace(bin_size / 2., 2. * rmax - bin_size / 2., nbins)
    sky_x = np.ones((nbins, nbins)) * centers
    sky_y = np.transpose(sky_x)
    # distance of every bin center from the ring center (rmax, rmax)
    dist = np.sqrt((sky_x - rmax) ** 2. + (sky_y - rmax) ** 2.)
    return np.where((dist < rmax) * (dist > rmin), 1., 0.)
#---------------------------------------------------------------------------
def get_exclusion_region_map(map, rarange, decrange, exreg) :
    """
    Creates a map (2d numpy histogram) with all bins inside of exclusion regions set to 0. (others 1.).
    Dec is on the 1st axis (x), RA is on the 2nd (y).

    Parameters
    ----------
    map : 2d array
        Template map; only its shape is used to define the output binning.
    rarange : array
        (ra_min, ra_max) of the map (deg).
    decrange : array
        (dec_min, dec_max) of the map (deg).
    exreg : array-type of SkyCircle
        Exclusion regions.

    Returns
    -------
    sky_mask : 2d array
        1. outside all exclusion regions, 0. inside any of them.
    """
    # NOTE: the parameter name `map` shadows the builtin; it is kept for
    # backward compatibility with existing callers.
    xnbins, ynbins = map.shape
    xstep = (decrange[1] - decrange[0]) / float(xnbins)
    ystep = (rarange[1] - rarange[0]) / float(ynbins)
    sky_mask = np.ones((xnbins, ynbins))
    for x, xval in enumerate(np.linspace(decrange[0] + xstep / 2., decrange[1] - xstep / 2., xnbins)) :
        for y, yval in enumerate(np.linspace(rarange[0] + ystep / 2., rarange[1] - ystep / 2., ynbins)) :
            for reg in exreg :
                if reg.contains(SkyCoord(yval, xval)) :
                    sky_mask[x, y] = 0.
                    # the bin is already excluded; no need to test the
                    # remaining regions
                    break
    return sky_mask
#---------------------------------------------------------------------------
def oversample_sky_map(sky, mask, exmap=None) :
    """
    Oversamples a 2d numpy histogram with a given mask.

    Parameters
    ----------
    sky : 2d array
        Sky map to be oversampled (not modified in place).
    mask : 2d array
        Oversampling kernel, e.g. from get_sky_mask_circle().
    exmap : 2d array, optional
        Exclusion map with excluded bins set to 0.
        (see get_exclusion_region_map()).

    Returns
    -------
    (sky_overs, sky_alpha) : tuple of 2d arrays
        The oversampled sky map and the map of the effective number of bins
        (alpha) that contributed to each oversampled bin.
    """
    sky = np.copy(sky)  # do not modify the caller's array
    sky_nx, sky_ny = sky.shape[0], sky.shape[1]
    # new oversampled sky plot
    sky_overs = np.zeros((sky_nx, sky_ny))
    # 2d hist keeping the number of bins used (alpha)
    sky_alpha = np.ones((sky_nx, sky_ny))
    sky_base = np.ones((sky_nx, sky_ny))
    # `exmap != None` would perform an elementwise comparison on numpy
    # arrays (ambiguous truth value); an identity check is required here
    if exmap is not None :
        sky *= exmap
        sky_base *= exmap
    scipy.ndimage.convolve(sky, mask, sky_overs, mode='constant')
    scipy.ndimage.convolve(sky_base, mask, sky_alpha, mode='constant')
    return (sky_overs, sky_alpha)
#===========================================================================
| mraue/pyfact | pyfact/map.py | Python | bsd-3-clause | 11,616 |
import unittest
from llvm.core import (Module, Type, Builder)
from .support import TestCase, tests
class TestExact(TestCase):
    """Verify that the `exact` keyword sets the exact flag on div/shift ops."""

    def make_module(self):
        """Return a fresh (module, function, builder) triple for foo(i32, i32)."""
        module = Module.new('asdfa')
        signature = Type.function(Type.void(), [Type.int(), Type.int()])
        function = module.add_function(signature, 'foo')
        builder = Builder.new(function.append_basic_block(''))
        return module, function, builder

    def has_exact(self, inst, op):
        """Assert that the printed IR of `inst` contains '<op> exact'."""
        expected = '%s exact' % op
        self.assertTrue(expected in str(inst), "exact flag does not work")

    def _test_template(self, opf, opname):
        """Build `opf` on two i32 arguments with exact=True and check the IR."""
        module, function, builder = self.make_module()
        lhs, rhs = function.args
        inst = opf(builder, lhs, rhs, exact=True)
        self.has_exact(inst, opname)

    def test_udiv_exact(self):
        self._test_template(Builder.udiv, 'udiv')

    def test_sdiv_exact(self):
        self._test_template(Builder.sdiv, 'sdiv')

    def test_lshr_exact(self):
        self._test_template(Builder.lshr, 'lshr')

    def test_ashr_exact(self):
        self._test_template(Builder.ashr, 'ashr')
# register this case with the shared test list collected by .support
tests.append(TestExact)
if __name__ == '__main__':
    unittest.main()
| llvmpy/llvmpy | llvm/tests/test_exact.py | Python | bsd-3-clause | 1,076 |
import numpy as np
import amnet
import z3
from numpy.linalg import norm
import sys
import unittest
import itertools
VISUALIZE = True # output graphviz drawings
if VISUALIZE:
    # graphviz rendering is optional; only import the vis module when enabled
    import amnet.vis
class TestSmt(unittest.TestCase):
    """End-to-end checks of amnet.smt.SmtEncoder.

    Each test builds an AMN from the atoms library, encodes it to z3, and
    (via validate_outputs) compares the SMT model's output against the
    floating-point evaluation of the AMN and, where given, a hand-written
    reference function.
    """

    @classmethod
    def setUpClass(cls):
        """Prepare shared input grids (fine and coarse float ranges)."""
        print 'Setting up test floats.'
        # fine grid (21 values); note the two linspaces deliberately overlap
        cls.floatvals = np.concatenate(
            (np.linspace(-5., 5., 11), np.linspace(-5., 5., 10)),
            axis=0
        )
        # coarse grid (5 values) for tests with higher input dimension
        cls.floatvals2 = np.concatenate(
            (np.linspace(-5., 5., 3), np.linspace(-.5, .5, 2)),
            axis=0
        )
        # minimal grid (3 values) for the largest input dimensions
        cls.floatvals3 = np.linspace(-5., 5., 3)
        cls.FPTOL = 1e-8
        # set up global z3 parameters
        # parameters from https://stackoverflow.com/a/12516269
        #z3.set_param('auto_config', False)
        #z3.set_param('smt.case_split', 5)
        #z3.set_param('smt.relevancy', 2)

    def validate_outputs(self, phi, onvals, true_f=None, verbose=False):
        """For each input tuple in onvals, check that the z3 model output of
        phi matches phi.eval (and true_f, if provided)."""
        # encode phi using default context and solver
        enc = amnet.smt.SmtEncoder(phi=phi, solver=None)
        # tap the input and output vars
        invar = enc.var_of_input()
        outvar = enc.var_of(phi)
        # check dimensions
        self.assertEqual(phi.indim, len(invar))
        self.assertEqual(phi.outdim, len(outvar))
        # go through inputs
        for val in onvals:
            # get a new value
            fpval = np.array(val)
            self.assertEqual(len(fpval), phi.indim)
            # evaluate using the Amn tree
            fpeval = phi.eval(fpval)
            self.assertEqual(len(fpeval), phi.outdim)
            if verbose:
                print 'inp:', fpval
                print 'fpeval: ', fpeval
            # compare to true floating point function, if it's provided
            if true_f is not None:
                true_eval = true_f(fpval)
                if verbose: print 'true_eval: ', true_eval
                self.assertAlmostEqual(norm(true_eval - fpeval), 0)
            # set the z3 input
            enc.solver.push()
            for i in range(len(invar)):
                enc.solver.add(invar[i] == fpval[i])
            # run z3 to check for satisfiability
            result = enc.solver.check()
            #if verbose: print enc.solver
            self.assertTrue(result == z3.sat)
            # extract the output
            model = enc.solver.model()
            smteval = np.zeros(len(outvar))
            for i in range(len(outvar)):
                smteval[i] = amnet.util.mfp(model, outvar[i])
            # check that the outputs match
            if verbose: print 'smteval: ', smteval
            self.assertAlmostEqual(norm(smteval - fpeval), 0)
            enc.solver.pop()

    def donot_test_SmtEncoder_mu_big(self):
        """Exhaustive mu test over the fine grid (disabled via name prefix)."""
        xyz = amnet.Variable(3, name='xyz')
        x = amnet.atoms.select(xyz, 0)
        y = amnet.atoms.select(xyz, 1)
        z = amnet.atoms.select(xyz, 2)
        w = amnet.Mu(x, y, z)
        def true_mu(fpin):
            x, y, z = fpin
            return x if z <= 0 else y
        self.validate_outputs(
            phi=w,
            onvals=itertools.product(self.floatvals, repeat=w.indim),
            true_f=true_mu
        )

    def test_SmtEncoder_mu_small(self):
        """mu(x, y, z) selects x when z <= 0, else y."""
        xyz = amnet.Variable(3, name='xyz')
        x = amnet.atoms.select(xyz, 0)
        y = amnet.atoms.select(xyz, 1)
        z = amnet.atoms.select(xyz, 2)
        w = amnet.Mu(x, y, z)
        def true_mu(fpin):
            x, y, z = fpin
            return x if z <= 0 else y
        self.validate_outputs(
            phi=w,
            onvals=itertools.product(self.floatvals2, repeat=w.indim),
            true_f=true_mu
        )
        if VISUALIZE: amnet.vis.quick_vis(phi=w, title='mu')

    def test_SmtEncoder_max_all_2(self):
        """max_all over a 2-vector equals python max."""
        xy = amnet.Variable(2, name='xy')
        phi_max2 = amnet.atoms.max_all(xy)
        self.assertEqual(phi_max2.indim, 2)
        def true_max2(fpin):
            x, y = fpin
            return max(x, y)
        self.validate_outputs(
            phi=phi_max2,
            onvals=itertools.product(self.floatvals, repeat=phi_max2.indim),
            true_f=true_max2
        )

    def test_SmtEncoder_min_all_2(self):
        """min_all over a 2-vector equals python min."""
        xy = amnet.Variable(2, name='xy')
        phi_min2 = amnet.atoms.min_all(xy)
        self.assertEqual(phi_min2.indim, 2)
        def true_min2(fpin):
            x, y = fpin
            return min(x, y)
        self.validate_outputs(
            phi=phi_min2,
            onvals=itertools.product(self.floatvals, repeat=phi_min2.indim),
            true_f=true_min2
        )

    def test_SmtEncoder_max_all_3_small(self):
        """max_all over a 3-vector equals python max."""
        xyz = amnet.Variable(3, name='xy')
        phi_max3 = amnet.atoms.max_all(xyz)
        self.assertEqual(phi_max3.indim, 3)
        def true_max3(fpin):
            x, y, z = fpin
            return max(x, y, z)
        self.validate_outputs(
            phi=phi_max3,
            onvals=itertools.product(self.floatvals2, repeat=phi_max3.indim),
            true_f=true_max3
        )

    def test_SmtEncoder_min_all_3_small(self):
        """min_all over a 3-vector equals python min."""
        xyz = amnet.Variable(3, name='xy')
        phi_min3 = amnet.atoms.min_all(xyz)
        self.assertEqual(phi_min3.indim, 3)
        def true_min3(fpin):
            x, y, z = fpin
            return min(x, y, z)
        self.validate_outputs(
            phi=phi_min3,
            onvals=itertools.product(self.floatvals2, repeat=phi_min3.indim),
            true_f=true_min3
        )

    def test_SmtEncoder_add_all(self):
        """add_all sums the components of a vector into a scalar."""
        xyz = amnet.Variable(3, name='xyz')
        phi_add = amnet.atoms.add_all(xyz)
        self.assertEqual(phi_add.outdim, 1)
        self.assertEqual(phi_add.indim, 3)
        def true_add(fpin):
            return sum(fpin)
        self.validate_outputs(
            phi=phi_add,
            onvals=itertools.product(self.floatvals2, repeat=phi_add.indim),
            true_f=true_add
        )

    def test_SmtEncoder_add_list(self):
        """add_list sums a list of 2-vectors elementwise."""
        xyz = amnet.Variable(2+2+2, name='xyz')
        # split the 6-dim input into three 2-dim slices
        x = amnet.Linear(np.eye(2, 6, 0), xyz)
        y = amnet.Linear(np.eye(2, 6, 2), xyz)
        z = amnet.Linear(np.eye(2, 6, 4), xyz)
        phi_add_list = amnet.atoms.add_list([x, y, z])
        self.assertEqual(x.outdim, 2)
        self.assertEqual(y.outdim, 2)
        self.assertEqual(z.outdim, 2)
        self.assertEqual(phi_add_list.outdim, 2)
        self.assertEqual(phi_add_list.indim, 6)
        def true_add(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4:6]
            return x + y + z
        self.validate_outputs(
            phi=phi_add_list,
            onvals=itertools.product(self.floatvals3, repeat=phi_add_list.indim),
            true_f=true_add
        )

    def test_SmtEncoder_triplexer(self):
        """Random triplexers agree with the floating-point reference
        amnet.atoms.fp_triplexer on random scalar inputs."""
        np.random.seed(1)
        TOTAL_RUNS=5
        #print ""
        for iter in range(TOTAL_RUNS):
            #print "Testing random triplexer [%d/%d]..." % (iter+1, TOTAL_RUNS),
            # create a random triplexer
            x = amnet.Variable(1, name='x')
            a = 3 * (2 * np.random.rand(4) - 1)
            b = 3 * (2 * np.random.rand(4) - 1)
            c = 3 * (2 * np.random.rand(4) - 1)
            d = 3 * (2 * np.random.rand(4) - 1)
            e = 3 * (2 * np.random.rand(4) - 1)
            f = 3 * (2 * np.random.rand(4) - 1)
            phi_tri = amnet.atoms.triplexer(x, a, b, c, d, e, f)
            def true_tri(fpin):
                return amnet.atoms.fp_triplexer(fpin, a, b, c, d, e, f)
            xvals = 50 * (2 * np.random.rand(100) - 1)
            onvals = itertools.product(xvals, repeat=1)
            self.validate_outputs(
                phi=phi_tri,
                onvals=onvals,
                true_f=true_tri
            )
            #print "done!"

    def test_SmtEncoder_max_aff(self):
        """max_aff(A, x, b) equals max over the rows of A*x + b."""
        np.random.seed(1)
        m = 10
        n = 4
        A = np.random.randint(-5, 6, m*n).reshape((m, n))
        b = np.random.randint(-5, 6, m).reshape((m,))
        b[np.random.randint(0, n)] = 0 # make sure there is a Linear term
        x = amnet.Variable(n, name='x')
        y = amnet.atoms.max_aff(A, x, b)
        self.assertEqual(y.indim, n)
        self.assertEqual(y.outdim, 1)
        def true_max_aff(fpin):
            vals = np.dot(A, fpin) + b
            assert len(vals) == m
            return np.max(vals)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals3, repeat=y.indim),
            true_f=true_max_aff
        )
        # visualize max_aff
        if VISUALIZE: amnet.vis.quick_vis(y, title='max_aff')

    def test_SmtEncoder_min_aff(self):
        """min_aff(A, x, b) equals min over the rows of A*x + b."""
        np.random.seed(1)
        m = 10
        n = 4
        A = np.random.randint(-5, 6, m*n).reshape((m, n))
        b = np.random.randint(-5, 6, m).reshape((m,))
        b[np.random.randint(0, n)] = 0 # make sure there is a Linear term
        x = amnet.Variable(n, name='x')
        y = amnet.atoms.min_aff(A, x, b)
        self.assertEqual(y.indim, n)
        self.assertEqual(y.outdim, 1)
        def true_min_aff(fpin):
            vals = np.dot(A, fpin) + b
            assert len(vals) == m
            return np.min(vals)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals3, repeat=y.indim),
            true_f=true_min_aff
        )
        # visualize min_aff
        if VISUALIZE: amnet.vis.quick_vis(y, title='min_aff')

    def test_SmtEncoder_dag(self):
        """A small DAG with shared subexpressions: 5x + 1 + max(y, z)."""
        xyz = amnet.Variable(3, name='xyz')
        x = amnet.atoms.select(xyz, 0)
        yz = amnet.Linear(
            np.array([[0, 1, 0], [0, 0, 1]]),
            xyz
        )
        maxyz = amnet.atoms.max_all(yz)
        twoxp1 = amnet.Affine(
            np.array([[2]]),
            x,
            np.array([1])
        )
        twox = amnet.atoms.add2(x, x)
        threex = amnet.atoms.add2(x, twox)
        fivexp1 = amnet.atoms.add2(twoxp1, threex)
        phi = amnet.atoms.add2(fivexp1, maxyz)
        def true_dag(fpin):
            x, y, z = fpin
            return 5*x + 1 + max(y, z)
        self.validate_outputs(
            phi=phi,
            onvals=itertools.product(self.floatvals2, repeat=3),
            true_f=true_dag
        )
        # visualize dag
        if VISUALIZE: amnet.vis.quick_vis(phi, title='dag')

    def test_SmtEncoder_relu_1(self):
        """relu on a scalar equals max(x, 0)."""
        x = amnet.Variable(1, name='x')
        y = amnet.atoms.relu(x)
        def true_relu(fpin):
            return max(fpin[0], 0)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals, repeat=y.indim),
            true_f=true_relu
        )

    def test_SmtEncoder_relu_2(self):
        """relu on a 3-vector equals elementwise max(x, 0)."""
        x = amnet.Variable(3, name='x')
        y = amnet.atoms.relu(x)
        def true_relu(fpin):
            return np.maximum(fpin, 0)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals2, repeat=y.indim),
            true_f=true_relu
        )
        # visualize relu
        if VISUALIZE: amnet.vis.quick_vis(y, title='relu_2')

    def test_SmtEncoder_relu_old(self):
        """The legacy relu_old construction matches elementwise max(x, 0)."""
        x = amnet.Variable(3, name='x')
        y = amnet.atoms.relu_old(x)
        def true_relu(fpin):
            return np.maximum(fpin, 0)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals2, repeat=y.indim),
            true_f=true_relu
        )
        # visualize relu_old
        if VISUALIZE: amnet.vis.quick_vis(y, title='relu_old')

    def test_SmtEncoder_gates(self):
        """and/or/xor/not gates select x or y based on the signs of z1, z2
        (a selector value <= 0 counts as 'true')."""
        xy_z1z2 = amnet.Variable(2+2+1+1, name='xyz1z2')
        x = amnet.Linear(
            np.eye(2, 6, 0),
            xy_z1z2
        )
        y = amnet.Linear(
            np.eye(2, 6, 2),
            xy_z1z2
        )
        z1 = amnet.atoms.select(xy_z1z2, 4)
        z2 = amnet.atoms.select(xy_z1z2, 5)
        phi_and = amnet.atoms.gate_and(x, y, z1, z2)
        phi_or = amnet.atoms.gate_or(x, y, z1, z2)
        phi_xor = amnet.atoms.gate_xor(x, y, z1, z2)
        phi_not = amnet.atoms.gate_not(x, y, z1)
        # check dimensions
        self.assertEqual(xy_z1z2.outdim, 6)
        self.assertEqual(x.outdim, 2)
        self.assertEqual(y.outdim, 2)
        self.assertEqual(z1.outdim, 1)
        self.assertEqual(z2.outdim, 1)
        self.assertEqual(phi_and.outdim, 2)
        self.assertEqual(phi_or.outdim, 2)
        self.assertEqual(phi_xor.outdim, 2)
        self.assertEqual(phi_not.outdim, 2)
        # true gate functions
        def true_and(fpin):
            return fpin[0:2] if (fpin[4] <= 0 and fpin[5] <= 0) else fpin[2:4]
        def true_or(fpin):
            return fpin[0:2] if (fpin[4] <= 0 or fpin[5] <= 0) else fpin[2:4]
        def true_xor(fpin):
            return fpin[0:2] if ((fpin[4] <= 0) != (fpin[5] <= 0)) else fpin[2:4]
        def true_not(fpin): # ignores last input
            return fpin[2:4] if (fpin[4] <= 0) else fpin[0:2]
        # evaluate
        vals = np.array([1, -2, -3, 4])
        sels = itertools.product([-1, 0, 1], repeat=2)
        onvals = [np.concatenate((vals, sel), axis=0) for sel in sels]
        self.validate_outputs(phi=phi_and, onvals=onvals, true_f=true_and)
        self.validate_outputs(phi=phi_or, onvals=onvals, true_f=true_or)
        self.validate_outputs(phi=phi_xor, onvals=onvals, true_f=true_xor)
        self.validate_outputs(phi=phi_not, onvals=onvals, true_f=true_not)

    def test_SmtEncoder_cmp(self):
        """cmp_* atoms select x or y based on comparing z against zero."""
        xyz = amnet.Variable(2+2+1, name='xyz')
        x = amnet.Linear(
            np.eye(2, 5, 0),
            xyz
        )
        y = amnet.Linear(
            np.eye(2, 5, 2),
            xyz
        )
        z = amnet.atoms.select(xyz, 4)
        phi_eq = amnet.atoms.cmp_eq(x, y, z)
        phi_neq = amnet.atoms.cmp_neq(x, y, z)
        phi_ge = amnet.atoms.cmp_ge(x, y, z)
        phi_gt = amnet.atoms.cmp_gt(x, y, z)
        phi_le = amnet.atoms.cmp_le(x, y, z)
        phi_lt = amnet.atoms.cmp_lt(x, y, z)
        # check dimensions
        self.assertEqual(xyz.outdim, 5)
        self.assertEqual(x.outdim, 2)
        self.assertEqual(y.outdim, 2)
        self.assertEqual(z.outdim, 1)
        self.assertEqual(phi_eq.outdim, 2)
        self.assertEqual(phi_neq.outdim, 2)
        self.assertEqual(phi_ge.outdim, 2)
        self.assertEqual(phi_gt.outdim, 2)
        self.assertEqual(phi_le.outdim, 2)
        self.assertEqual(phi_lt.outdim, 2)
        # true cmp functions
        def true_eq(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4]
            return x if z == 0 else y
        def true_neq(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4]
            return x if z != 0 else y
        def true_ge(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4]
            return x if z >= 0 else y
        def true_gt(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4]
            return x if z > 0 else y
        def true_le(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4]
            return x if z <= 0 else y
        def true_lt(fpin):
            x, y, z = fpin[0:2], fpin[2:4], fpin[4]
            return x if z < 0 else y
        # evaluate; selector values straddle zero, including duplicates of 0
        vals = np.array([1, -2, -3, 4])
        sels = itertools.product([-1.1, -0.5, 0, 0.0, 0.01, 1, 12.0], repeat=1)
        onvals = [np.concatenate((vals, sel), axis=0) for sel in sels]
        self.validate_outputs(phi=phi_eq, onvals=onvals, true_f=true_eq)
        self.validate_outputs(phi=phi_neq, onvals=onvals, true_f=true_neq)
        self.validate_outputs(phi=phi_ge, onvals=onvals, true_f=true_ge)
        self.validate_outputs(phi=phi_gt, onvals=onvals, true_f=true_gt)
        self.validate_outputs(phi=phi_le, onvals=onvals, true_f=true_le)
        self.validate_outputs(phi=phi_lt, onvals=onvals, true_f=true_lt)

    def test_SmtEncoder_identity(self):
        """identity() passes through an affine transformation unchanged."""
        x = amnet.Variable(2, name='x')
        w = np.array([[1, 2], [3, 4]])
        b = np.array([-1, -1])
        y = amnet.Affine(w, x, b)
        z = amnet.atoms.identity(y)
        self.assertEqual(y.outdim, 2)
        self.assertEqual(z.outdim, 2)
        self.assertEqual(z.indim, 2)
        def true_z(fpin):
            return np.dot(w, fpin) + b
        self.validate_outputs(
            phi=z,
            onvals=itertools.product(self.floatvals, repeat=z.indim),
            true_f=true_z
        )

    def test_SmtEncoder_absval1(self):
        """absval on a scalar equals abs(x)."""
        x = amnet.Variable(1, name='x')
        y = amnet.atoms.absval(x)
        self.assertEqual(y.outdim, 1)
        self.assertEqual(y.indim, 1)
        def true_absval(fpin):
            return abs(fpin)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals, repeat=y.indim),
            true_f = true_absval
        )
        # visualize absval1
        if VISUALIZE: amnet.vis.quick_vis(y, title='absval1')

    def test_SmtEncoder_absval3(self):
        """absval on a 3-vector equals elementwise abs."""
        x = amnet.Variable(3, name='x')
        y = amnet.atoms.absval(x)
        self.assertEqual(y.outdim, 3)
        self.assertEqual(y.indim, 3)
        def true_absval(fpin):
            x1, x2, x3 = fpin
            return np.array([abs(x1), abs(x2), abs(x3)])
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals2, repeat=y.indim),
            true_f=true_absval
        )
        # visualize absval3
        if VISUALIZE: amnet.vis.quick_vis(y, title='absval3')

    def test_SmtEncoder_sat1(self):
        """sat on a scalar clamps to [lo, hi] (default [-1, 1])."""
        x = amnet.Variable(1, name='x')
        y1 = amnet.atoms.sat(x)
        y2 = amnet.atoms.sat(x, lo=-3, hi=3)
        y3 = amnet.atoms.sat(x, lo=-2, hi=1.5)
        self.assertEqual(y1.outdim, 1)
        self.assertEqual(y1.indim, 1)
        self.assertEqual(y2.outdim, 1)
        self.assertEqual(y2.indim, 1)
        self.assertEqual(y3.outdim, 1)
        self.assertEqual(y3.indim, 1)
        # manual tests
        self.assertAlmostEqual(norm(y1.eval(np.array([-2])) - np.array([-1])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([-0.5])) - np.array([-0.5])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([0])) - np.array([0.0])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([0.6])) - np.array([0.6])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([1.6])) - np.array([1.0])), 0)
        # automatic tests
        def true_sat1(fpval, lo, hi):
            x = fpval
            if lo <= x <= hi:
                return x
            elif x < lo:
                return lo
            else:
                return hi
        self.validate_outputs(
            phi=y1,
            onvals=itertools.product(self.floatvals, repeat=y1.indim),
            true_f=lambda z: true_sat1(z, -1, 1)
        )
        self.validate_outputs(
            phi=y2,
            onvals=itertools.product(self.floatvals, repeat=y2.indim),
            true_f=lambda z: true_sat1(z, -3, 3)
        )
        self.validate_outputs(
            phi=y3,
            onvals=itertools.product(self.floatvals, repeat=y3.indim),
            true_f=lambda z: true_sat1(z, -2, 1.5)
        )
        # visualize sat1
        if VISUALIZE: amnet.vis.quick_vis(y1, title='sat1')

    def test_SmtEncoder_sat3(self):
        """sat on a 3-vector clamps each component to [lo, hi]."""
        x = amnet.Variable(3, name='x')
        y1 = amnet.atoms.sat(x)
        y2 = amnet.atoms.sat(x, lo=-3, hi=3)
        y3 = amnet.atoms.sat(x, lo=-2, hi=1.5)
        self.assertEqual(y1.outdim, 3)
        self.assertEqual(y1.indim, 3)
        self.assertEqual(y2.outdim, 3)
        self.assertEqual(y2.indim, 3)
        self.assertEqual(y3.outdim, 3)
        self.assertEqual(y3.indim, 3)
        # manual tests
        self.assertAlmostEqual(norm(y1.eval(np.array([-2, 1.6, 0.5])) - np.array([-1, 1, 0.5])), 0)
        self.assertAlmostEqual(norm(y2.eval(np.array([-2, 1.6, 0.5])) - np.array([-2, 1.6, 0.5])), 0)
        self.assertAlmostEqual(norm(y3.eval(np.array([-2, 1.6, 0.5])) - np.array([-2, 1.5, 0.5])), 0)
        # visualize sat3
        if VISUALIZE: amnet.vis.quick_vis(y1, title='sat3')
        # automatic tests
        def true_sat3(fpin, lo, hi):
            return np.clip(fpin, lo, hi)
        self.validate_outputs(
            phi=y1,
            onvals=itertools.product(self.floatvals2, repeat=y1.indim),
            true_f=lambda z: true_sat3(z, -1, 1)
        )
        self.validate_outputs(
            phi=y2,
            onvals=itertools.product(self.floatvals2, repeat=y2.indim),
            true_f=lambda z: true_sat3(z, -3, 3)
        )
        self.validate_outputs(
            phi=y3,
            onvals=itertools.product(self.floatvals2, repeat=y3.indim),
            true_f=lambda z: true_sat3(z, -2, 1.5)
        )

    def test_SmtEncoder_dz1(self):
        """dz (deadzone) on a scalar is 0 inside [lo, hi] and the overshoot
        outside."""
        x = amnet.Variable(1, name='x')
        y1 = amnet.atoms.dz(x)
        y2 = amnet.atoms.dz(x, lo=-3, hi=3)
        y3 = amnet.atoms.dz(x, lo=-2, hi=1.5)
        self.assertEqual(y1.outdim, 1)
        self.assertEqual(y1.indim, 1)
        self.assertEqual(y2.outdim, 1)
        self.assertEqual(y2.indim, 1)
        self.assertEqual(y3.outdim, 1)
        self.assertEqual(y3.indim, 1)
        # manual tests
        self.assertAlmostEqual(norm(y1.eval(np.array([-2])) - np.array([-1])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([-0.5])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([0])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([0.6])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(y1.eval(np.array([1.6])) - np.array([0.6])), 0)
        # automatic tests
        def true_dz1(fpval, lo, hi):
            x = fpval
            if lo <= x <= hi:
                return 0
            elif x < lo:
                return x-lo
            else:
                return x-hi
        self.validate_outputs(
            phi=y1,
            onvals=itertools.product(self.floatvals, repeat=y1.indim),
            true_f=lambda z: true_dz1(z, -1, 1)
        )
        self.validate_outputs(
            phi=y2,
            onvals=itertools.product(self.floatvals, repeat=y2.indim),
            true_f=lambda z: true_dz1(z, -3, 3)
        )
        self.validate_outputs(
            phi=y3,
            onvals=itertools.product(self.floatvals, repeat=y3.indim),
            true_f=lambda z: true_dz1(z, -2, 1.5)
        )
        # visualize dz1
        if VISUALIZE: amnet.vis.quick_vis(y1, title='dz1')

    def test_SmtEncoder_dz3(self):
        """dz on a 3-vector applies the deadzone elementwise."""
        x = amnet.Variable(3, name='x')
        y1 = amnet.atoms.dz(x)
        y2 = amnet.atoms.dz(x, lo=-3, hi=3)
        y3 = amnet.atoms.dz(x, lo=-2, hi=1.5)
        self.assertEqual(y1.outdim, 3)
        self.assertEqual(y1.indim, 3)
        self.assertEqual(y2.outdim, 3)
        self.assertEqual(y2.indim, 3)
        self.assertEqual(y3.outdim, 3)
        self.assertEqual(y3.indim, 3)
        # manual tests
        self.assertAlmostEqual(norm(y1.eval(np.array([-2, 1.6, 0.5])) - np.array([-1, 0.6, 0])), 0)
        self.assertAlmostEqual(norm(y2.eval(np.array([-2, 1.6, 0.5])) - np.array([0, 0, 0])), 0)
        self.assertAlmostEqual(norm(y3.eval(np.array([-2, 1.6, 0.5])) - np.array([0, 0.1, 0])), 0)
        # visualize dz3
        if VISUALIZE: amnet.vis.quick_vis(y1, title='dz3')
        # automatic tests
        def true_dz3(fpin, lo, hi):
            retv = np.array(fpin)
            retv[(retv >= lo) & (retv <= hi)] = 0
            retv[retv > hi] -= hi
            retv[retv < lo] -= lo
            return retv
        self.validate_outputs(
            phi=y1,
            onvals=itertools.product(self.floatvals2, repeat=y1.indim),
            true_f=lambda z: true_dz3(z, -1, 1)
        )
        self.validate_outputs(
            phi=y2,
            onvals=itertools.product(self.floatvals2, repeat=y2.indim),
            true_f=lambda z: true_dz3(z, -3, 3)
        )
        self.validate_outputs(
            phi=y3,
            onvals=itertools.product(self.floatvals2, repeat=y3.indim),
            true_f=lambda z: true_dz3(z, -2, 1.5)
        )

    def test_SmtEncoder_norminf1(self):
        """norminf on a scalar equals the infinity norm."""
        x = amnet.Variable(1, name='x')
        y = amnet.atoms.norminf(x)
        self.assertEqual(y.indim, 1)
        self.assertEqual(y.outdim, 1)
        # visualize norminf1
        if VISUALIZE: amnet.vis.quick_vis(y, title='norminf1')
        # automatic tests
        def true_norminf(fpin):
            self.assertEqual(len(fpin), 1)
            return norm(fpin, ord=np.inf)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals, repeat=y.indim),
            true_f=true_norminf
        )

    def test_SmtEncoder_norminf3(self):
        """norminf on a 3-vector equals the infinity norm."""
        x = amnet.Variable(3, name='x')
        y = amnet.atoms.norminf(x)
        self.assertEqual(y.indim, 3)
        self.assertEqual(y.outdim, 1)
        # visualize norminf3
        if VISUALIZE: amnet.vis.quick_vis(y, title='norminf3')
        # automatic tests
        def true_norminf(fpin):
            self.assertEqual(len(fpin), 3)
            return norm(fpin, ord=np.inf)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals2, repeat=y.indim),
            true_f=true_norminf
        )

    def test_SmtEncoder_norm11(self):
        """norm1 on a scalar equals the 1-norm."""
        x = amnet.Variable(1, name='x')
        y = amnet.atoms.norm1(x)
        self.assertEqual(y.indim, 1)
        self.assertEqual(y.outdim, 1)
        # visualize norm11
        if VISUALIZE: amnet.vis.quick_vis(y, title='norm11')
        # automatic tests
        def true_norm1(fpin):
            self.assertEqual(len(fpin), 1)
            return norm(fpin, ord=1)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals, repeat=y.indim),
            true_f=true_norm1
        )

    def test_SmtEncoder_norm13(self):
        """norm1 on a 3-vector equals the 1-norm."""
        x = amnet.Variable(3, name='x')
        y = amnet.atoms.norm1(x)
        self.assertEqual(y.indim, 3)
        self.assertEqual(y.outdim, 1)
        # visualize norm13
        if VISUALIZE: amnet.vis.quick_vis(y, title='norm13')
        # automatic tests
        def true_norm1(fpin):
            self.assertEqual(len(fpin), 3)
            return norm(fpin, ord=1)
        self.validate_outputs(
            phi=y,
            onvals=itertools.product(self.floatvals2, repeat=y.indim),
            true_f=true_norm1
        )

    def test_SmtEncoder_phase_vgc(self):
        """phase_vgc(e, edot, alpha) outputs alpha*e when e and edot have the
        same (strict) sign, 0 otherwise."""
        alpha1 = 1.5
        alpha2 = -0.7
        x = amnet.Variable(2, name='x')
        e = amnet.atoms.select(x, 0)
        edot = amnet.atoms.select(x, 1)
        phi_vgc1 = amnet.atoms.phase_vgc(e, edot, alpha=alpha1)
        phi_vgc2 = amnet.atoms.phase_vgc(e, edot, alpha=alpha2)
        self.assertEqual(phi_vgc1.indim, 2)
        self.assertEqual(phi_vgc1.outdim, 1)
        self.assertEqual(phi_vgc2.indim, 2)
        self.assertEqual(phi_vgc2.outdim, 1)
        # visualize vgc
        if VISUALIZE:
            ctx = amnet.smt.NamingContext(phi_vgc1)
            ctx.rename(e, 'e')
            ctx.rename(edot, 'edot')
            ctx.rename(phi_vgc1, 'phi_vgc1')
            amnet.vis.quick_vis(phi_vgc1, title='phase_vgc', ctx=ctx)
        # manual tests
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([1.1, 1.2])) - np.array([alpha1 * 1.1])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([1.1, -1.2])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([-1.1, -1.2])) - np.array([alpha1 * (-1.1)])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([-1.1, 1.2])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([1.1, 0])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([0, 1.2])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([-1.1, 0])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([0, -1.2])) - np.array([0])), 0)
        self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([0, 0])) - np.array([0])), 0)
        # automatic tests
        def true_phase_vgc(fpin, alpha):
            x1, x2 = fpin
            return alpha*x1 if x1*x2 > 0 else 0
        self.validate_outputs(
            phi=phi_vgc1,
            onvals=itertools.product(self.floatvals2, repeat=phi_vgc1.indim),
            true_f=lambda xi: true_phase_vgc(xi, alpha=alpha1)
        )
        self.validate_outputs(
            phi=phi_vgc2,
            onvals=itertools.product(self.floatvals2, repeat=phi_vgc2.indim),
            true_f=lambda xi: true_phase_vgc(xi, alpha=alpha2)
        )

    def test_NamingContext_multiple_contexts_for(self):
        """multiple_contexts_for assigns globally unique symbol names across
        several AMNs."""
        x = amnet.Variable(2, name='x')
        y = amnet.Variable(3, name='y')
        phi_x = amnet.atoms.max_all(x)
        phi_y = amnet.atoms.max_all(y)
        # multiple context names
        ctx_list = amnet.smt.NamingContext.multiple_contexts_for([phi_x, phi_y])
        self.assertEqual(len(ctx_list), 2)
        # make sure all names are unique
        names = []
        for ctx in ctx_list:
            names.extend(ctx.symbols.keys())
        self.assertEqual(len(names), len(set(names)))
        if VISUALIZE:
            amnet.vis.quick_vis(phi_x, title='multiple_contexts_phi_x', ctx=ctx_list[0])
            amnet.vis.quick_vis(phi_y, title='multiple_contexts_phi_y', ctx=ctx_list[1])

    def test_SmtEncoder_multiple_encode(self):
        """Three AMNs encoded into one shared solver can be linked by adding
        equality constraints between their z3 variables."""
        x = amnet.Variable(2, name='x')
        y = amnet.Variable(3, name='y')
        z = amnet.Variable(2, name='z')
        phi_x = amnet.atoms.max_all(x)
        phi_y = amnet.atoms.max_all(y)
        phi_z = amnet.atoms.max_all(z)
        # encode the AMNs
        enc_x, enc_y, enc_z = amnet.smt.SmtEncoder.multiple_encode(phi_x, phi_y, phi_z)
        solver = enc_x.solver
        if VISUALIZE:
            amnet.vis.quick_vis(phi_x, title='multiple_encode_phi_x', ctx=enc_x.ctx)
            amnet.vis.quick_vis(phi_y, title='multiple_encode_phi_y', ctx=enc_y.ctx)
            amnet.vis.quick_vis(phi_z, title='multiple_encode_phi_z', ctx=enc_z.ctx)
        # make sure solver object is the same
        self.assertTrue(enc_x.solver is solver)
        self.assertTrue(enc_y.solver is solver)
        self.assertTrue(enc_z.solver is solver)
        # link the outputs of x and y to the inputs of z
        phi_x_out = enc_x.var_of(phi_x)
        phi_y_out = enc_y.var_of(phi_y)
        z_in = enc_z.var_of_input()
        self.assertEqual(len(phi_x_out), 1)
        self.assertEqual(len(phi_y_out), 1)
        self.assertEqual(len(z_in), 2)
        # solver.add(z_in[0] == phi_x_out[0])
        # solver.add(z_in[1] == phi_y_out[0])
        amnet.util.eqv_z3(solver, z_in, [phi_x_out[0], phi_y_out[0]])
        #print "Linked solver:", solver
        # input variables to the linked network
        x_in = enc_x.var_of_input()
        y_in = enc_y.var_of_input()
        phi_z_out = enc_z.var_of(phi_z)
        self.assertEqual(len(x_in), 2)
        self.assertEqual(len(y_in), 3)
        self.assertEqual(len(phi_z_out), 1)
        # do some test cases
        def do_testcase(xf, yf, fpeval):
            # fix the network inputs, check sat, and compare the linked
            # network's output against the expected value fpeval
            solver.push()
            #print "Pre-input solver:", solver
            amnet.util.eqv_z3(solver, x_in, xf)
            amnet.util.eqv_z3(solver, y_in, yf)
            #print "Post-input solver:", solver
            # check for sat
            result = solver.check()
            self.assertTrue(result == z3.sat)
            self.assertFalse(result == z3.unsat)
            # extract the output
            model = solver.model()
            smteval = amnet.util.mfpv(model, phi_z_out)
            #print smteval
            # check that the outputs match
            self.assertAlmostEqual(norm(smteval - fpeval), 0)
            solver.pop()
        do_testcase(
            xf=np.array([1, 0]),
            yf=np.array([-1, -4, 0]),
            fpeval=np.array([1])
        )
        do_testcase(
            xf=np.array([1, 4.1]),
            yf=np.array([-1, 4.1, 0]),
            fpeval=np.array([4.1])
        )
        do_testcase(
            xf = np.array([-1, 0]),
            yf = np.array([3, -4, 5]),
            fpeval = np.array([5])
        )
        do_testcase(
            xf=np.array([-1, 0]),
            yf=np.array([3, 20, 5]),
            fpeval=np.array([20])
        )
        do_testcase(
            xf=np.array([-1, -17.1]),
            yf=np.array([0, -4, -5]),
            fpeval=np.array([0])
        )
if __name__ == '__main__':
    # run only the TestSmt case and mirror the test result in the exit code
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSmt)
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
| ipapusha/amnet | tests/test_smt.py | Python | bsd-3-clause | 32,730 |
from .functor import Functor
class _PASS(Functor):
    """A do-nothing functor that can also serve as a no-op context manager."""
    def __call__(self, *_, **__):
        # Swallow any positional/keyword arguments and return None.
        return None
    # Entering and leaving the context are the same no-op as calling.
    __enter__ = __exit__ = __call__
    def __repr__(self):
        return '<PASS>'
PASS = _PASS()
| Infinidat/infi.pyutils | infi/pyutils/functors/pass_.py | Python | bsd-3-clause | 198 |
from __future__ import print_function, division, absolute_import
from numbers import Integral
from operator import add
import os
import shutil
import sys
import traceback
import logging
import re
import pytest
from toolz import pluck
from tornado import gen
from tornado.ioloop import TimeoutError
from distributed.batched import BatchedStream
from distributed.core import rpc, dumps, loads, connect, read, write
from distributed.client import _wait
from distributed.scheduler import Scheduler
from distributed.sizeof import sizeof
from distributed.worker import Worker, error_message, logger
from distributed.utils import ignoring
from distributed.utils_test import (loop, inc, gen_cluster,
slow, slowinc, throws, current_loop, gen_test)
def test_worker_ncores():
    """The worker's executor should default to one thread per detected core."""
    from distributed.worker import _ncores
    worker = Worker('127.0.0.1', 8019)
    try:
        assert worker.executor._max_workers == _ncores
    finally:
        # The worker creates a scratch directory on construction; clean it up.
        shutil.rmtree(worker.local_dir)
def test_identity():
    """identity() reports the worker type, scheduler address and resources."""
    worker = Worker('127.0.0.1', 8019)
    info = worker.identity(None)
    assert info['type'] == 'Worker'
    assert info['scheduler'] == ('127.0.0.1', 8019)
    assert isinstance(info['ncores'], int)
    assert isinstance(info['memory_limit'], int)
def test_health():
    """host_health() returns a dict; disk/network stats require psutil."""
    worker = Worker('127.0.0.1', 8019)
    report = worker.host_health()
    assert isinstance(report, dict)
    # A second call exercises the incremental counters.
    report = worker.host_health()
    try:
        import psutil  # noqa: F401
    except ImportError:
        pass
    else:
        for key in ('disk-read', 'disk-write', 'network-recv', 'network-send'):
            assert key in report
@gen_cluster()
def test_worker_bad_args(c, a, b):
    """Workers must survive arguments that cannot be converted to str/repr.

    Also checks that a failing function is reported as an 'error' response
    (not as an argument-formatting problem) and that useful warning/debug
    log records are emitted with '< could not convert arg to str >'
    placeholders for the unprintable arguments.
    """
    aa = rpc(ip=a.ip, port=a.port)
    bb = rpc(ip=b.ip, port=b.port)
    class NoReprObj(object):
        """ This object cannot be properly represented as a string. """
        def __str__(self):
            raise ValueError("I have no str representation.")
        def __repr__(self):
            raise ValueError("I have no repr representation.")
    # Storing an unprintable object must still succeed.
    response = yield aa.compute(key='x',
                                function=dumps(NoReprObj),
                                args=dumps(()),
                                who_has={})
    assert not a.active
    assert response['status'] == 'OK'
    assert a.data['x']
    assert isinstance(response['compute_start'], float)
    assert isinstance(response['compute_stop'], float)
    assert isinstance(response['thread'], Integral)
    def bad_func(*args, **kwargs):
        1 / 0
    class MockLoggingHandler(logging.Handler):
        """Mock logging handler to check for expected logs."""
        def __init__(self, *args, **kwargs):
            self.reset()
            logging.Handler.__init__(self, *args, **kwargs)
        def emit(self, record):
            # Bucket messages by level name for easy assertions below.
            self.messages[record.levelname.lower()].append(record.getMessage())
        def reset(self):
            self.messages = {
                'debug': [],
                'info': [],
                'warning': [],
                'error': [],
                'critical': [],
            }
    hdlr = MockLoggingHandler()
    old_level = logger.level
    logger.setLevel(logging.DEBUG)
    logger.addHandler(hdlr)
    # Run a failing function whose argument ('x') is the unprintable object.
    response = yield bb.compute(key='y',
                                function=dumps(bad_func),
                                args=dumps(['x']),
                                kwargs=dumps({'k': 'x'}),
                                who_has={'x': [a.address]})
    assert not b.active
    assert response['status'] == 'error'
    # Make sure job died because of bad func and not because of bad
    # argument.
    assert isinstance(loads(response['exception']), ZeroDivisionError)
    if sys.version_info[0] >= 3:
        assert any('1 / 0' in line
                  for line in pluck(3, traceback.extract_tb(
                      loads(response['traceback'])))
                  if line)
    assert hdlr.messages['warning'][0] == " Compute Failed\n" \
        "Function: bad_func\n" \
        "args: (< could not convert arg to str >)\n" \
        "kwargs: {'k': < could not convert arg to str >}\n"
    # Dict ordering is not guaranteed, so accept either args/kwargs order.
    assert re.match(r"^Send compute response to scheduler: y, " \
        "\{.*'args': \(< could not convert arg to str >\), .*" \
        "'kwargs': \{'k': < could not convert arg to str >\}.*\}",
        hdlr.messages['debug'][0]) or \
        re.match("^Send compute response to scheduler: y, " \
        "\{.*'kwargs': \{'k': < could not convert arg to str >\}, .*" \
        "'args': \(< could not convert arg to str >\).*\}",
        hdlr.messages['debug'][0])
    logger.setLevel(old_level)
    # Now we check that both workers are still alive.
    assert not a.active
    response = yield aa.compute(key='z',
                                function=dumps(add),
                                args=dumps([1, 2]),
                                who_has={},
                                close=True)
    assert not a.active
    assert response['status'] == 'OK'
    assert a.data['z'] == 3
    assert isinstance(response['compute_start'], float)
    assert isinstance(response['compute_stop'], float)
    assert isinstance(response['thread'], Integral)
    assert not b.active
    response = yield bb.compute(key='w',
                                function=dumps(add),
                                args=dumps([1, 2]),
                                who_has={},
                                close=True)
    assert not b.active
    assert response['status'] == 'OK'
    assert b.data['w'] == 3
    assert isinstance(response['compute_start'], float)
    assert isinstance(response['compute_stop'], float)
    assert isinstance(response['thread'], Integral)
    aa.close_streams()
    bb.close_streams()
@gen_cluster()
def test_worker(c, a, b):
    """End-to-end smoke test of the worker compute RPC.

    Covers: local compute, computing with a dependency fetched from a peer
    worker (including transfer timing metadata), error reporting for a
    failing function, and scheduler bookkeeping after a worker closes.
    """
    aa = rpc(ip=a.ip, port=a.port)
    bb = rpc(ip=b.ip, port=b.port)
    result = yield aa.identity()
    assert not a.active
    response = yield aa.compute(key='x',
                                function=dumps(add),
                                args=dumps([1, 2]),
                                who_has={},
                                close=True)
    assert not a.active
    assert response['status'] == 'OK'
    assert a.data['x'] == 3
    assert isinstance(response['compute_start'], float)
    assert isinstance(response['compute_stop'], float)
    assert isinstance(response['thread'], Integral)
    # 'y' depends on 'x', which lives on worker a -> triggers a transfer.
    response = yield bb.compute(key='y',
                                function=dumps(add),
                                args=dumps(['x', 10]),
                                who_has={'x': [a.address]})
    assert response['status'] == 'OK'
    assert b.data['y'] == 13
    assert response['nbytes'] == sizeof(b.data['y'])
    assert isinstance(response['transfer_start'], float)
    assert isinstance(response['transfer_stop'], float)
    def bad_func():
        1 / 0
    response = yield bb.compute(key='z',
                                function=dumps(bad_func),
                                args=dumps(()),
                                close=True)
    assert not b.active
    assert response['status'] == 'error'
    assert isinstance(loads(response['exception']), ZeroDivisionError)
    if sys.version_info[0] >= 3:
        assert any('1 / 0' in line
                  for line in pluck(3, traceback.extract_tb(
                      loads(response['traceback'])))
                  if line)
    aa.close_streams()
    yield a._close()
    # After closing worker a the scheduler should only know about b.
    assert a.address not in c.ncores and b.address in c.ncores
    assert list(c.ncores.keys()) == [b.address]
    assert isinstance(b.address, str)
    assert b.ip in b.address
    assert str(b.port) in b.address
    bb.close_streams()
def test_compute_who_has(current_loop):
    """A worker fetches dependencies from whichever peer who_has names.

    Workers x and y hold *different* values under the same key 'a'; the
    computing worker z must use exactly the peer given in who_has, so the
    two results differ (2 vs 3).
    """
    @gen.coroutine
    def f():
        s = Scheduler()
        s.listen(0)
        x = Worker(s.ip, s.port, ip='127.0.0.1')
        y = Worker(s.ip, s.port, ip='127.0.0.1')
        z = Worker(s.ip, s.port, ip='127.0.0.1')
        x.data['a'] = 1
        y.data['a'] = 2
        yield [x._start(), y._start(), z._start()]
        zz = rpc(ip=z.ip, port=z.port)
        yield zz.compute(function=dumps(inc),
                         args=dumps(('a',)),
                         who_has={'a': [x.address]},
                         key='b')
        assert z.data['b'] == 2
        # Drop the cached copy so the next compute must fetch from y.
        if 'a' in z.data:
            del z.data['a']
        yield zz.compute(function=dumps(inc),
                         args=dumps(('a',)),
                         who_has={'a': [y.address]},
                         key='c')
        assert z.data['c'] == 3
        yield [x._close(), y._close(), z._close()]
        zz.close_streams()
    current_loop.run_sync(f, timeout=5)
@gen_cluster()
def dont_test_workers_update_center(s, a, b):
    """update_data registers keys with the scheduler; delete_data unregisters.

    (Disabled test, kept for reference: s is the scheduler, a/b workers.)
    """
    aa = rpc(ip=a.ip, port=a.port)
    response = yield aa.update_data(data={'x': dumps(1), 'y': dumps(2)})
    assert response['status'] == 'OK'
    assert response['nbytes'] == {'x': sizeof(1), 'y': sizeof(2)}
    assert a.data == {'x': 1, 'y': 2}
    assert s.who_has == {'x': {a.address},
                         'y': {a.address}}
    assert s.has_what[a.address] == {'x', 'y'}
    yield aa.delete_data(keys=['x'], close=True)
    assert not s.who_has['x']
    # BUG FIX: the original referenced an undefined name ``c`` and shadowed
    # the scheduler ``s`` with the generator variable; check the scheduler's
    # has_what mapping instead.
    assert all('x' not in keys for keys in s.has_what.values())
    aa.close_streams()
@slow
@gen_cluster()
def dont_test_delete_data_with_missing_worker(c, a, b):
    """delete_data should clean bookkeeping even when one holder is gone.

    (Disabled test: one registered holder of 'z' is a non-existent address.)
    """
    bad = '127.0.0.1:9001'  # this worker doesn't exist
    c.who_has['z'].add(bad)
    c.who_has['z'].add(a.address)
    c.has_what[bad].add('z')
    c.has_what[a.address].add('z')
    a.data['z'] = 5
    cc = rpc(ip=c.ip, port=c.port)
    yield cc.delete_data(keys=['z'])  # TODO: this hangs for a while
    assert 'z' not in a.data
    assert not c.who_has['z']
    assert not c.has_what[bad]
    assert not c.has_what[a.address]
    cc.close_streams()
@gen_cluster()
def test_upload_file(s, a, b):
    """Uploaded source files become importable by tasks and are removed on close."""
    assert not os.path.exists(os.path.join(a.local_dir, 'foobar.py'))
    assert not os.path.exists(os.path.join(b.local_dir, 'foobar.py'))
    assert a.local_dir != b.local_dir
    aa = rpc(ip=a.ip, port=a.port)
    bb = rpc(ip=b.ip, port=b.port)
    # NOTE(review): one upload sends bytes, the other str — presumably
    # deliberate, to cover both payload types; confirm before "fixing".
    yield [aa.upload_file(filename='foobar.py', data=b'x = 123'),
           bb.upload_file(filename='foobar.py', data='x = 123')]
    assert os.path.exists(os.path.join(a.local_dir, 'foobar.py'))
    assert os.path.exists(os.path.join(b.local_dir, 'foobar.py'))
    def g():
        import foobar
        return foobar.x
    yield aa.compute(function=dumps(g),
                     key='x')
    result = yield aa.get_data(keys=['x'])
    assert result == {'x': dumps(123)}
    yield a._close()
    yield b._close()
    aa.close_streams()
    bb.close_streams()
    # Closing a worker deletes its scratch directory, including the upload.
    assert not os.path.exists(os.path.join(a.local_dir, 'foobar.py'))
@gen_cluster()
def test_upload_egg(s, a, b):
    """Workers unpack an uploaded egg so tasks can import it; the egg is
    deleted along with the worker's scratch directory on close."""
    eggname = 'mytestegg-1.0.0-py3.4.egg'
    local_file = __file__.replace('test_worker.py', eggname)
    for worker in (a, b):
        assert not os.path.exists(os.path.join(worker.local_dir, eggname))
    assert a.local_dir != b.local_dir
    a_rpc = rpc(ip=a.ip, port=a.port)
    b_rpc = rpc(ip=b.ip, port=b.port)
    with open(local_file, 'rb') as f:
        payload = f.read()
    yield [a_rpc.upload_file(filename=eggname, data=payload),
           b_rpc.upload_file(filename=eggname, data=payload)]
    for worker in (a, b):
        assert os.path.exists(os.path.join(worker.local_dir, eggname))
    def g(x):
        import testegg
        return testegg.inc(x)
    yield a_rpc.compute(function=dumps(g), key='x', args=dumps((10,)))
    result = yield a_rpc.get_data(keys=['x'])
    assert result == {'x': dumps(10 + 1)}
    yield a._close()
    yield b._close()
    a_rpc.close_streams()
    b_rpc.close_streams()
    assert not os.path.exists(os.path.join(a.local_dir, eggname))
@gen_cluster()
def test_broadcast(s, a, b):
    """A scheduler broadcast should reach every connected worker."""
    scheduler_rpc = rpc(ip=s.ip, port=s.port)
    replies = yield scheduler_rpc.broadcast(msg={'op': 'ping'})
    assert replies == {a.address: b'pong', b.address: b'pong'}
    scheduler_rpc.close_streams()
@gen_test()
def test_worker_with_port_zero():
    """A worker started without an explicit port picks a real, non-privileged one."""
    s = Scheduler()
    s.listen(8007)
    worker = Worker(s.ip, s.port, ip='127.0.0.1')
    yield worker._start()
    assert isinstance(worker.port, int)
    assert worker.port > 1024
@slow
def test_worker_waits_for_center_to_come_up(current_loop):
    """Starting a worker against a missing scheduler should block (and time
    out) rather than crash."""
    @gen.coroutine
    def start_worker():
        w = Worker('127.0.0.1', 8007, ip='127.0.0.1')
        yield w._start()
    try:
        current_loop.run_sync(start_worker, timeout=4)
    except TimeoutError:
        # Expected: nothing is listening on port 8007.
        pass
@gen_cluster()
def test_worker_task(s, a, b):
    """A worker can execute a dumped (func, arg) task tuple."""
    worker_rpc = rpc(ip=a.ip, port=a.port)
    yield worker_rpc.compute(task=dumps((inc, 1)), key='x', report=False)
    assert a.data['x'] == 2
@gen_cluster()
def test_worker_task_data(s, a, b):
    """A task message may carry a literal value instead of a callable."""
    worker_rpc = rpc(ip=a.ip, port=a.port)
    yield worker_rpc.compute(task=dumps(2), key='x', report=False)
    assert a.data['x'] == 2
@gen_cluster()
def test_worker_task_bytes(s, a, b):
    """Both task tuples and separate function/args payloads are accepted."""
    worker_rpc = rpc(ip=a.ip, port=a.port)
    yield worker_rpc.compute(task=dumps((inc, 1)), key='x', report=False)
    assert a.data['x'] == 2
    yield worker_rpc.compute(function=dumps(inc), args=dumps((10,)),
                             key='y', report=False)
    assert a.data['y'] == 11
def test_error_message():
    """error_message() must cope with exceptions whose constructor signature
    differs from the args tuple they store."""
    class MyException(Exception):
        def __init__(self, a, b):
            # Store a single joined argument, unlike the two-arg constructor.
            self.args = (a + b,)
        def __str__(self):
            return "MyException(%s)" % self.args
    packed = error_message(MyException('Hello', 'World!'))
    assert 'Hello' in str(packed['exception'])
@gen_cluster()
def test_gather(s, a, b):
    """gather() copies the requested keys from peer workers into local data."""
    b.data['x'] = 1
    b.data['y'] = 2
    a_rpc = rpc(ip=a.ip, port=a.port)
    resp = yield a_rpc.gather(who_has={'x': [b.address], 'y': [b.address]})
    assert resp['status'] == 'OK'
    for key in ('x', 'y'):
        assert a.data[key] == b.data[key]
@gen_cluster()
def test_compute_stream(s, a, b):
    """Tasks sent over a compute-stream connection are answered per task."""
    stream = yield connect(a.ip, a.port)
    yield write(stream, {'op': 'compute-stream'})
    msgs = [{'op': 'compute-task', 'function': dumps(inc),
             'args': dumps((i,)), 'key': 'x-%d' % i}
            for i in range(10)]
    bstream = BatchedStream(stream, 0)
    # Send the messages in two batches of five and check each reply.
    for batch in (msgs[:5], msgs[5:]):
        for msg in batch:
            yield write(stream, msg)
        for _ in range(5):
            reply = yield read(bstream)
            assert reply['status'] == 'OK'
            assert reply['key'][0] == 'x'
    yield write(stream, {'op': 'close'})
@gen_cluster(client=True, ncores=[('127.0.0.1', 1)])
def test_active_holds_tasks(e, s, w):
    """A worker tracks in-flight task keys in ``active`` and clears them when
    the task finishes, even on error."""
    fut = e.submit(slowinc, 1, delay=0.2)
    yield gen.sleep(0.1)
    assert fut.key in w.active
    yield fut._result()
    assert fut.key not in w.active
    fut = e.submit(throws, 1)
    with ignoring(Exception):
        yield _wait([fut])
    assert not w.active
def test_io_loop(loop):
    """Scheduler and worker should adopt the IOLoop they are given."""
    scheduler = Scheduler(loop=loop)
    scheduler.listen(0)
    assert scheduler.io_loop is loop
    worker = Worker(scheduler.ip, scheduler.port, loop=loop)
    assert worker.io_loop is loop
@gen_cluster(client=True, ncores=[])
def test_spill_to_disk(e, s):
    """With a tiny memory limit, the least-recently-used key spills to disk
    and is promoted back to the fast store when accessed again."""
    np = pytest.importorskip('numpy')
    # memory_limit=1000 bytes: holds two 500-byte arrays, not three.
    w = Worker(s.ip, s.port, loop=s.loop, memory_limit=1000)
    yield w._start()
    x = e.submit(np.random.randint, 0, 255, size=500, dtype='u1', key='x')
    yield _wait(x)
    y = e.submit(np.random.randint, 0, 255, size=500, dtype='u1', key='y')
    yield _wait(y)
    assert set(w.data) == {x.key, y.key}
    assert set(w.data.fast) == {x.key, y.key}
    # Third array exceeds the limit: the oldest key ('x') spills to disk.
    z = e.submit(np.random.randint, 0, 255, size=500, dtype='u1', key='z')
    yield _wait(z)
    assert set(w.data) == {x.key, y.key, z.key}
    assert set(w.data.fast) == {y.key, z.key}
    assert set(w.data.slow) == {x.key}
    # Touching 'x' brings it back into fast storage, evicting 'y'.
    yield x._result()
    assert set(w.data.fast) == {x.key, z.key}
    assert set(w.data.slow) == {y.key}
@gen_cluster(client=True)
def test_access_key(c, s, a, b):
    """A running task can discover its own key via thread_state."""
    def get_own_key(i):
        from distributed.worker import thread_state
        return thread_state.key
    keys = ['x-%d' % i for i in range(20)]
    futures = [c.submit(get_own_key, i, key=key)
               for i, key in enumerate(keys)]
    results = yield c._gather(futures)
    assert list(results) == keys
@gen_cluster(client=True)
def test_run_dask_worker(c, s, a, b):
    """client.run exposes the worker instance via the dask_worker kwarg."""
    def get_worker_id(dask_worker=None):
        return dask_worker.id
    response = yield c._run(get_worker_id)
    assert response == {a.address: a.id, b.address: b.id}
| broxtronix/distributed | distributed/tests/test_worker.py | Python | bsd-3-clause | 16,430 |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Particle system engine'''
__docformat__ = 'restructuredtext'
import random
import pyglet
from pyglet.gl import *
import math
import copy
import numpy
import ctypes
from cocosnode import CocosNode
from euclid import Point2
# for dev and diagnostic, None means real automatic, True / False means
# return this value unconditionally
forced_point_sprites = None
def point_sprites_available():
    """Return a bool telling if OpenGL point sprites are available.

    For development and diagnostic, cocos.particle.forced_point_sprites can
    be set to force the desired return value.
    """
    if forced_point_sprites is not None:
        return forced_point_sprites
    have_point_sprites = True
    try:
        # Probe the extension by toggling it; pyglet raises on GL errors.
        glEnable(GL_POINT_SPRITE)
        glDisable(GL_POINT_SPRITE)
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed by the probe.
        have_point_sprites = False
    return have_point_sprites
class ExceptionNoEmptyParticle(Exception):
    """Raised when the particle system has no free slot for another particle."""
    pass
def rand():
    """Return a uniform random float in the half-open range [-1.0, 1.0).

    PEP 8 (E731): a named lambda is replaced by an equivalent ``def``;
    callers are unaffected.
    """
    # random.random() is in [0.0, 1.0); scaling keeps the upper bound open.
    return random.random() * 2 - 1
# PointerToNumpy by Gary Herron
# from pyglet's user list
def PointerToNumpy(a, ptype=ctypes.c_float):
    """Return a ctypes pointer of *ptype* to the data buffer of array *a*."""
    contiguous = numpy.ascontiguousarray(a)  # probably a no-op, but perhaps not
    # Ugly and undocumented, but gives direct access to the raw buffer.
    return contiguous.ctypes.data_as(ctypes.POINTER(ptype))
class Color(object):
    """Simple RGBA color holder used by the particle systems."""
    def __init__(self, r, g, b, a):
        self.r, self.g, self.b, self.a = r, g, b, a
    def to_array(self):
        """Return the color as an (r, g, b, a) tuple."""
        return self.r, self.g, self.b, self.a
class ParticleSystem( CocosNode ):
    """
    Base class for the many flavors of cocos particle systems.
    The easiest way to customize is to subclass and redefine some class
    members; see particle_systems for examples.
    If you want to use a custom texture remember it should hold only one image,
    so don't use texture = pyglet.resource.image(...) (it would produce an atlas,
    ie multiple images in a texture); using texture = pyglet.image.load(...) is fine
    """
    # type of particle
    POSITION_FREE, POSITION_GROUPED = range(2)
    #: is the particle system active ?
    active = True
    #: duration in seconds of the system. -1 is infinity
    duration = 0
    #: time elapsed since the start of the system (in seconds)
    elapsed = 0
    #: Gravity of the particles
    gravity = Point2(0.0, 0.0)
    #: position is from "superclass" CocosNode
    #: Position variance
    pos_var = Point2(0.0, 0.0)
    #: The angle (direction) of the particles measured in degrees
    angle = 0.0
    #: Angle variance measured in degrees;
    angle_var = 0.0
    #: The speed the particles will have.
    speed = 0.0
    #: The speed variance
    speed_var = 0.0
    #: Tangential acceleration
    tangential_accel = 0.0
    #: Tangential acceleration variance
    tangential_accel_var = 0.0
    #: Radial acceleration
    radial_accel = 0.0
    #: Radial acceleration variance
    radial_accel_var = 0.0
    #: Size of the particles
    size = 0.0
    #: Size variance
    size_var = 0.0
    #: How many seconds will the particle live
    life = 0
    #: Life variance
    life_var = 0
    #: Start color of the particles
    start_color = Color(0.0,0.0,0.0,0.0)
    #: Start color variance
    start_color_var = Color(0.0,0.0,0.0,0.0)
    #: End color of the particles
    end_color = Color(0.0,0.0,0.0,0.0)
    #: End color variance
    end_color_var = Color(0.0,0.0,0.0,0.0)
    #: Maximum particles
    total_particles = 0
    #: texture for the particles (must hold a single image, not an atlas)
    pic = pyglet.image.load('fire.png', file=pyglet.resource.file('fire.png'))
    texture = pic.get_texture()
    #: blend additive
    blend_additive = False
    #: color modulate
    color_modulate = True
    # position type (POSITION_FREE or POSITION_GROUPED)
    position_type = POSITION_GROUPED
    def __init__(self, fallback=None):
        """
        fallback can be None, True, False; default is None
        False: use point sprites, faster, not always available
        True: use quads, slower but always available
        None: autodetect, use the faster available
        """
        super(ParticleSystem,self).__init__()
        # Per-particle state is stored as parallel numpy arrays indexed by
        # particle slot; a negative life marks a free slot.
        # particles
        # position x 2
        self.particle_pos = numpy.zeros( (self.total_particles, 2), numpy.float32 )
        # direction x 2
        self.particle_dir = numpy.zeros( (self.total_particles, 2), numpy.float32 )
        # rad accel x 1
        self.particle_rad = numpy.zeros( (self.total_particles, 1), numpy.float32 )
        # tan accel x 1
        self.particle_tan = numpy.zeros( (self.total_particles, 1), numpy.float32 )
        # gravity x 2
        self.particle_grav = numpy.zeros( (self.total_particles, 2), numpy.float32 )
        # colors x 4
        self.particle_color = numpy.zeros( (self.total_particles, 4), numpy.float32 )
        # delta colors x 4
        self.particle_delta_color = numpy.zeros( (self.total_particles, 4), numpy.float32 )
        # life x 1 (negative == free slot)
        self.particle_life = numpy.zeros( (self.total_particles, 1), numpy.float32 )
        self.particle_life.fill(-1.0)
        # size x 1
        self.particle_size = numpy.zeros( (self.total_particles, 1), numpy.float32 )
        # start position
        self.start_pos = numpy.zeros( (self.total_particles, 2), numpy.float32 )
        #: How many particles can be emitted per second
        self.emit_counter = 0
        #: Count of particles
        self.particle_count = 0
        #: auto remove when particle finishes
        self.auto_remove_on_finish = False
        #: rendering mode; True is quads, False is point_sprites, None is auto fallback
        if fallback is None:
            fallback = not point_sprites_available()
        self.fallback = fallback
        if fallback:
            self._fallback_init()
            self.draw = self.draw_fallback
        self.schedule( self.step )
    def on_enter( self ):
        super( ParticleSystem, self).on_enter()
        #self.add_particle()
    def draw( self ):
        """Render all particles as GL point sprites from the numpy arrays."""
        glPushMatrix()
        self.transform()
        # color preserve - at least nvidia 6150SE needs that
        glPushAttrib(GL_CURRENT_BIT)
        glPointSize( self.size )
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texture.id )
        glEnable(GL_POINT_SPRITE)
        glTexEnvi( GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE )
        glEnableClientState(GL_VERTEX_ARRAY)
        vertex_ptr = PointerToNumpy( self.particle_pos )
        glVertexPointer(2,GL_FLOAT,0,vertex_ptr);
        glEnableClientState(GL_COLOR_ARRAY)
        color_ptr = PointerToNumpy( self.particle_color)
        glColorPointer(4,GL_FLOAT,0,color_ptr);
        glPushAttrib(GL_COLOR_BUFFER_BIT)
        glEnable(GL_BLEND)
        if self.blend_additive:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE);
        else:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        # mode = GLint()
        # glTexEnviv( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode )
        #
        # if self.color_modulate:
        # glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE )
        # else:
        # glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE )
        glDrawArrays(GL_POINTS, 0, self.total_particles);
        # un -blend
        glPopAttrib()
        # color restore
        glPopAttrib()
        # # restore env mode
        # glTexEnvi( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, mode)
        # disable states
        glDisableClientState(GL_COLOR_ARRAY);
        glDisableClientState(GL_VERTEX_ARRAY);
        glDisable(GL_POINT_SPRITE);
        glDisable(GL_TEXTURE_2D);
        glPopMatrix()
    def step( self, delta ):
        """Per-frame update: emit new particles, advance all live ones.

        NOTE(review): relies on ``self.emission_rate`` which is not defined
        in this base class — presumably subclasses must provide it; confirm.
        """
        # update particle count
        self.particle_count = numpy.sum( self.particle_life >= 0 )
        if self.active:
            rate = 1.0 / self.emission_rate
            self.emit_counter += delta
        # if random.random() < 0.01:
        # delta += 0.5
            while self.particle_count < self.total_particles and self.emit_counter > rate:
                self.add_particle()
                self.emit_counter -= rate
            self.elapsed += delta
            if self.duration != -1 and self.duration < self.elapsed:
                self.stop_system()
        self.update_particles( delta )
        if (not self.active and
            self.particle_count == 0 and self.auto_remove_on_finish == True):
            self.unschedule( self.step )
            self.parent.remove( self )
    def add_particle( self ):
        """
        Code calling add_particle must be either:
        be sure there is room for the particle
        or
        be prepared to catch the exception ExceptionNoEmptyParticle
        It is acceptable to try: ... except...: pass
        """
        self.init_particle()
        self.particle_count += 1
    def stop_system( self ):
        # Stop emitting; live particles keep decaying in update_particles.
        self.active = False
        self.elapsed= self.duration
        self.emit_counter = 0
    def reset_system( self ):
        # NOTE(review): sets elapsed to duration (like stop_system) rather
        # than 0, and does not re-activate the system — looks odd for a
        # "reset"; confirm intended semantics before changing.
        self.elapsed= self.duration
        self.emit_counter = 0
    def update_particles( self, delta ):
        """Vectorized advance of every particle by *delta* seconds."""
        # radial: posx + posy
        norm = numpy.sqrt( self.particle_pos[:,0] ** 2 + self.particle_pos[:,1] ** 2 )
        # XXX prevent div by 0
        norm = numpy.select( [norm==0], [0.0000001], default=norm )
        posx = self.particle_pos[:,0] / norm
        posy = self.particle_pos[:,1] / norm
        radial = numpy.array( [posx, posy] )
        tangential = numpy.array( [-posy, posx] )
        # update dir
        radial = numpy.swapaxes(radial,0,1)
        radial *= self.particle_rad
        tangential = numpy.swapaxes(tangential,0,1)
        tangential *= self.particle_tan
        self.particle_dir += (tangential + radial + self.particle_grav) * delta
        # update pos with updated dir
        self.particle_pos += self.particle_dir * delta
        # life
        self.particle_life -= delta
        # position: free or grouped
        if self.position_type == self.POSITION_FREE:
            # NOTE(review): 'tuple' shadows the builtin; rename when touching.
            tuple = numpy.array( [self.x, self.y] )
            tmp = tuple - self.start_pos
            self.particle_pos -= tmp
        # color
        self.particle_color += self.particle_delta_color * delta
        # if life < 0, set alpha in 0
        self.particle_color[:,3] = numpy.select( [self.particle_life[:,0] < 0], [0], default=self.particle_color[:,3] )
        # print self.particles[0]
        # print self.pas[0,0:4]
    def init_particle( self ):
        """Initialize one particle in the first free slot.

        Raises ExceptionNoEmptyParticle when no slot (life < 0) is free.
        """
        # position
        # p=self.particles[idx]
        a = self.particle_life < 0
        idxs = a.nonzero()
        idx = -1
        if len(idxs[0]) > 0:
            idx = idxs[0][0]
        else:
            raise ExceptionNoEmptyParticle()
        # position
        self.particle_pos[idx][0] = self.pos_var.x * rand()
        self.particle_pos[idx][1] = self.pos_var.y * rand()
        # start position
        self.start_pos[idx][0] = self.x
        self.start_pos[idx][1] = self.y
        a = math.radians( self.angle + self.angle_var * rand() )
        v = Point2( math.cos( a ), math.sin( a ) )
        s = self.speed + self.speed_var * rand()
        dir = v * s
        # direction
        self.particle_dir[idx][0] = dir.x
        self.particle_dir[idx][1] = dir.y
        # radial accel
        self.particle_rad[idx] = self.radial_accel + self.radial_accel_var * rand()
        # tangential accel
        self.particle_tan[idx] = self.tangential_accel + self.tangential_accel_var * rand()
        # life (chained assignment: 'life' gets the computed scalar too)
        life = self.particle_life[idx] = self.life + self.life_var * rand()
        # Color
        # start
        sr = self.start_color.r + self.start_color_var.r * rand()
        sg = self.start_color.g + self.start_color_var.g * rand()
        sb = self.start_color.b + self.start_color_var.b * rand()
        sa = self.start_color.a + self.start_color_var.a * rand()
        self.particle_color[idx][0] = sr
        self.particle_color[idx][1] = sg
        self.particle_color[idx][2] = sb
        self.particle_color[idx][3] = sa
        # end
        er = self.end_color.r + self.end_color_var.r * rand()
        eg = self.end_color.g + self.end_color_var.g * rand()
        eb = self.end_color.b + self.end_color_var.b * rand()
        ea = self.end_color.a + self.end_color_var.a * rand()
        # per-second color deltas so the particle reaches end_color at death
        delta_color_r = (er - sr) / life
        delta_color_g = (eg - sg) / life
        delta_color_b = (eb - sb) / life
        delta_color_a = (ea - sa) / life
        self.particle_delta_color[idx][0] = delta_color_r
        self.particle_delta_color[idx][1] = delta_color_g
        self.particle_delta_color[idx][2] = delta_color_b
        self.particle_delta_color[idx][3] = delta_color_a
        # size
        self.particle_size[idx] = self.size + self.size_var * rand()
        # gravity
        self.particle_grav[idx][0] = self.gravity.x
        self.particle_grav[idx][1] = self.gravity.y
    # Below only fallback functionality.
    # It uses quads instead of point sprites, doing a transformation
    # point sprites buffers -> quads buffer, so any change in point sprite mode
    # is automatically reflected in the fallback mode (except for changes in the
    # draw method which should be manually adapted
    def _fallback_init(self):
        # Four vertices (a quad) per particle, sharing one tex-coord pattern.
        self.vertexs = numpy.zeros((self.total_particles * 4, 2), numpy.float32)
        tex_coords_for_quad = numpy.array([[0.0, 1.0], [0.0, 0.0], [1.0, 0.0], [1.0, 1.0]], numpy.float32)
        self.tex_coords = numpy.zeros((self.total_particles * 4, 2), numpy.float32)
        all_tex_coords = self.tex_coords
        for i in xrange(0,len(self.vertexs),4):
            all_tex_coords[i : i + 4 ] = tex_coords_for_quad
        self.per_vertex_colors = numpy.zeros( (self.total_particles * 4, 4), numpy.float32)
        self.delta_pos_to_vertex = numpy.zeros((4, 2), numpy.float32)
    def draw_fallback(self):
        """Render particles as textured quads (used when point sprites are
        unavailable); installed as self.draw by __init__."""
        self.make_delta_pos_to_vertex()
        self.update_vertexs_from_pos()
        self.update_per_vertex_colors()
        glPushMatrix()
        self.transform()
        # color preserve - at least intel 945G needs that
        glPushAttrib(GL_CURRENT_BIT)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texture.id )
        glEnableClientState(GL_VERTEX_ARRAY)
        vertexs_ptr = PointerToNumpy(self.vertexs)
        glVertexPointer(2, GL_FLOAT, 0, vertexs_ptr)
        glEnableClientState(GL_COLOR_ARRAY)
        color_ptr = PointerToNumpy(self.per_vertex_colors)
        #glColorPointer(4, GL_UNSIGNED_BYTE, 0, color_ptr)
        glColorPointer(4, GL_FLOAT, 0, color_ptr)
        glEnableClientState( GL_TEXTURE_COORD_ARRAY )
        tex_coord_ptr = PointerToNumpy(self.tex_coords)
        glTexCoordPointer(2, GL_FLOAT, 0, tex_coord_ptr)
        glPushAttrib(GL_COLOR_BUFFER_BIT)
        glEnable(GL_BLEND)
        if self.blend_additive:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE);
        else:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glDrawArrays(GL_QUADS, 0, len(self.vertexs))
        # un -blend
        glPopAttrib()
        # color restore
        glPopAttrib()
        # disable states
        glDisableClientState(GL_TEXTURE_COORD_ARRAY)
        glDisableClientState(GL_COLOR_ARRAY)
        glDisableClientState(GL_VERTEX_ARRAY)
        glDisable(GL_TEXTURE_2D);
        glPopMatrix()
    def update_vertexs_from_pos(self):
        # Expand each particle center into its four quad corners.
        vertexs = self.vertexs
        delta = self.delta_pos_to_vertex
        pos = self.particle_pos
        for i, pos_i in enumerate(pos):
            i4 = i*4
            vertexs[i4:i4 + 4 ] = delta + pos_i
    def update_per_vertex_colors(self):
        # Replicate each particle color to its quad's four vertices.
        colors = self.particle_color
        per_vertex_colors = self.per_vertex_colors
        for i, color in enumerate(colors):
            i4 = i*4
            per_vertex_colors[i4:i4 + 4 ] = color
    def make_delta_pos_to_vertex(self):
        # Corner offsets for a size x size quad centered on the particle.
        size2 = self.size / 2.0
        # counter-clockwise
        self.delta_pos_to_vertex[0] = (-size2, +size2) # NW
        self.delta_pos_to_vertex[1] = (-size2, -size2) # SW
        self.delta_pos_to_vertex[2] = (+size2, -size2) # SE
        self.delta_pos_to_vertex[3] = (+size2, +size2) # NE
| eevee/cocos2d-mirror | cocos/particle.py | Python | bsd-3-clause | 17,917 |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions to retrieve properties from a window handle
These are implemented in a procedural way so as to to be
useful to other modules with the least conceptual overhead
"""
import warnings
import win32process
import win32api
import win32con
import win32gui
from ctypes import wintypes
from ctypes import WINFUNCTYPE
from ctypes import c_int
from ctypes import byref
from ctypes import sizeof
from ctypes import create_unicode_buffer
from . import win32functions
from . import win32defines
from . import win32structures
from .actionlogger import ActionLogger
#=========================================================================
def text(handle):
    """Return the text of the window.

    Uses WM_GETTEXTLENGTH/WM_GETTEXT with a timeout because some windows
    hang on these messages; returns '' when the text cannot be retrieved.
    """
    class_name = classname(handle)
    # Special-cased classes whose real text is not interesting/retrievable.
    if class_name == 'IME':
        return 'Default IME'
    if class_name == 'MSCTFIME UI':
        return 'M'
    if class_name is None:
        return ''
    #length = win32functions.SendMessage(handle, win32defines.WM_GETTEXTLENGTH, 0, 0)
    # XXX: there are some very rare cases when WM_GETTEXTLENGTH hangs!
    # WM_GETTEXTLENGTH may hang even for notepad.exe main window!
    c_length = win32structures.DWORD_PTR(0)
    # SMTO_ABORTIFHUNG + 500ms timeout guards against unresponsive windows.
    result = win32functions.SendMessageTimeout(
        handle,
        win32defines.WM_GETTEXTLENGTH,
        0,
        0,
        win32defines.SMTO_ABORTIFHUNG,
        500,
        byref(c_length)
    )
    if result == 0:
        ActionLogger().log('WARNING! Cannot retrieve text length for handle = ' + str(handle))
        return ''
    else:
        length = c_length.value
    textval = ''
    # In some rare cases, the length returned by WM_GETTEXTLENGTH is <0.
    # Guard against this by checking it is >0 (==0 is not of interest):
    if length > 0:
        # +1 for the terminating NUL expected by WM_GETTEXT.
        length += 1
        buffer_ = create_unicode_buffer(length)
        ret = win32functions.SendMessage(
            handle, win32defines.WM_GETTEXT, length, byref(buffer_))
        if ret:
            textval = buffer_.value
    return textval
#=========================================================================
def classname(handle):
    """Return the class name of the window"""
    if handle is None:
        return None
    # 256 characters maximum plus room for the terminating NULL.
    buf = create_unicode_buffer(u"", 257)
    win32functions.GetClassName(handle, buf, 256)
    return buf.value
#=========================================================================
def parent(handle):
    """Return the handle of the parent of the window"""
    parent_handle = win32functions.GetParent(handle)
    return parent_handle
#=========================================================================
def style(handle):
    """Return the style of the window"""
    window_style = win32functions.GetWindowLong(handle, win32defines.GWL_STYLE)
    return window_style
#=========================================================================
def exstyle(handle):
    """Return the extended style of the window"""
    window_exstyle = win32functions.GetWindowLong(
        handle, win32defines.GWL_EXSTYLE)
    return window_exstyle
#=========================================================================
def controlid(handle):
    """Return the ID of the control"""
    control_id = win32functions.GetWindowLong(handle, win32defines.GWL_ID)
    return control_id
#=========================================================================
def userdata(handle):
    """Return the value of any user data associated with the window"""
    data = win32functions.GetWindowLong(handle, win32defines.GWL_USERDATA)
    return data
#=========================================================================
def contexthelpid(handle):
    """Return the context help id of the window"""
    help_id = win32functions.GetWindowContextHelpId(handle)
    return help_id
#=========================================================================
def iswindow(handle):
    """Return True if the handle is a window"""
    if handle is None:
        return False
    return bool(win32functions.IsWindow(handle))
#=========================================================================
def isvisible(handle):
    """Return True if the window is visible"""
    if handle is None:
        return False
    return bool(win32functions.IsWindowVisible(handle))
#=========================================================================
def isunicode(handle):
    """Return True if the window is a Unicode window"""
    if handle is None:
        return False
    return bool(win32functions.IsWindowUnicode(handle))
#=========================================================================
def isenabled(handle):
    """Return True if the window is enabled"""
    if handle is None:
        return False
    return bool(win32functions.IsWindowEnabled(handle))
#=========================================================================
def is64bitprocess(process_id):
    """Return True if the specified process is a 64-bit process on x64

    Return False if it is only a 32-bit process running under Wow64.
    Always return False for x86.
    """
    from .sysinfo import is_x64_OS
    # Default to "32-bit" so the answer is False on x86 and on failure
    # to open the process.
    running_under_wow64 = True
    if is_x64_OS():
        process_handle = win32api.OpenProcess(
            win32con.MAXIMUM_ALLOWED, 0, process_id)
        if process_handle:
            running_under_wow64 = win32process.IsWow64Process(process_handle)
    return not running_under_wow64
#=========================================================================
def is64bitbinary(filename):
    """Check if the file is 64-bit binary"""
    import win32file
    try:
        binary_type = win32file.GetBinaryType(filename)
    except Exception as exc:
        # Non-executable or unreadable file: warn and return None so the
        # caller can distinguish "unknown" from True/False.
        warnings.warn('Cannot get binary type for file "{}". Error: {}'
                      .format(filename, exc), RuntimeWarning, stacklevel=2)
        return None
    return binary_type != win32file.SCS_32BIT_BINARY
#=========================================================================
def clientrect(handle):
    """Return the client rectangle of the control"""
    rect = win32structures.RECT()
    win32functions.GetClientRect(handle, byref(rect))
    return rect
#=========================================================================
def rectangle(handle):
    """Return the rectangle of the window"""
    window_rect = win32structures.RECT()
    win32functions.GetWindowRect(handle, byref(window_rect))
    return window_rect
#=========================================================================
def font(handle):
    """Return the font as a LOGFONTW of the window"""
    if handle is None:
        handle = 0  # make sure we don't pass window handle down as None
    # Ask the control which font it uses.
    font_handle = win32functions.SendMessage(
        handle, win32defines.WM_GETFONT, 0, 0)
    # if the fondUsed is 0 then the control is using the
    # system font (well probably not - even though that is what the docs say)
    # instead we switch to the default GUI font - which is more likely correct.
    if not font_handle:
        # So just get the default system font
        font_handle = win32functions.GetStockObject(win32defines.DEFAULT_GUI_FONT)
        # if we still don't have a font!
        # ----- ie, we're on an antiquated OS, like NT 3.51
        if not font_handle:
            # ----- On Asian platforms, ANSI font won't show.
            if win32functions.GetSystemMetrics(win32defines.SM_DBCSENABLED):
                # ----- was...(SYSTEM_FONT)
                font_handle = win32functions.GetStockObject(
                    win32defines.SYSTEM_FONT)
            else:
                # ----- was...(SYSTEM_FONT)
                font_handle = win32functions.GetStockObject(
                    win32defines.ANSI_VAR_FONT)
    # Get the Logfont structure of the font of the control
    fontval = win32structures.LOGFONTW()
    ret = win32functions.GetObject(
        font_handle, sizeof(fontval), byref(fontval))
    # The function could not get the font - this is probably
    # because the control does not have associated Font/Text
    # So we should make sure the elements of the font are zeroed.
    if not ret:
        fontval = win32structures.LOGFONTW()
    # if it is a main window
    if is_toplevel_window(handle):
        if "MS Shell Dlg" in fontval.lfFaceName or \
                fontval.lfFaceName == "System":
            # these are not usually the fonts actually used in
            # title bars so we need to get the default title bar font
            # get the title font based on the system metrics rather
            # than the font of the control itself
            ncms = win32structures.NONCLIENTMETRICSW()
            ncms.cbSize = sizeof(ncms)
            win32functions.SystemParametersInfo(
                win32defines.SPI_GETNONCLIENTMETRICS,
                sizeof(ncms),
                byref(ncms),
                0)
            # with either of the following 2 flags set the font of the
            # dialog is the small one (but there is normally no difference!)
            # BUG FIX: WS_EX_TOOLWINDOW / WS_EX_PALETTEWINDOW are *extended*
            # styles; they were previously (incorrectly) tested against the
            # plain GWL_STYLE via has_style().
            if has_exstyle(handle, win32defines.WS_EX_TOOLWINDOW) or \
                    has_exstyle(handle, win32defines.WS_EX_PALETTEWINDOW):
                fontval = ncms.lfSmCaptionFont
            else:
                fontval = ncms.lfCaptionFont
    return fontval
#=========================================================================
def processid(handle):
    """Return the ID of process that controls this window"""
    process_id = wintypes.DWORD()
    win32functions.GetWindowThreadProcessId(handle, byref(process_id))
    return process_id.value
#=========================================================================
def has_enough_privileges(process_id):
    """Check if target process has enough rights to query GUI actions"""
    access_level = win32con.PROCESS_QUERY_INFORMATION | win32con.PROCESS_VM_READ
    try:
        process_handle = win32api.OpenProcess(access_level, 0, process_id)
    except win32gui.error:
        # Access denied (or other win32 failure) means we cannot query it.
        return False
    if not process_handle:
        return False
    win32api.CloseHandle(process_handle)
    return True
#=========================================================================
def children(handle):
    """Return a list of handles to the children of this window"""
    # Filled in by the EnumChildWindows callback below.
    child_windows = []

    def append_child(hwnd, lparam):
        """Called for each child - adds child hwnd to list"""
        child_windows.append(hwnd)
        # Return True to keep enumerating.
        return True

    # ctypes prototype for the EnumChildWindows callback.
    enum_child_proc_t = WINFUNCTYPE(
        c_int,            # return type
        wintypes.HWND,    # the window handle
        wintypes.LPARAM)  # extra information

    # Loop over all the children (callback called for each).
    win32functions.EnumChildWindows(
        handle, enum_child_proc_t(append_child), 0)
    return child_windows
#=========================================================================
def has_style(handle, tocheck):
    """Return True if the control has style tocheck"""
    current_style = style(handle)
    return (current_style & tocheck) == tocheck
#=========================================================================
def has_exstyle(handle, tocheck):
    """Return True if the control has extended style tocheck"""
    current_exstyle = exstyle(handle)
    return (current_exstyle & tocheck) == tocheck
#=========================================================================
def is_toplevel_window(handle):
    """Return whether the window is a top level window or not"""
    # Fetch the style once instead of calling has_style() repeatedly.
    style_ = style(handle)
    has_overlapped = \
        style_ & win32defines.WS_OVERLAPPED == win32defines.WS_OVERLAPPED
    has_caption = \
        style_ & win32defines.WS_CAPTION == win32defines.WS_CAPTION
    is_child = \
        style_ & win32defines.WS_CHILD == win32defines.WS_CHILD
    return (has_overlapped or has_caption) and not is_child
#=========================================================================
def dumpwindow(handle):
    """Dump a window to a set of properties"""
    # Every property accessor in this module, applied to the same handle;
    # the result is keyed by the accessor's function name.
    prop_funcs = (
        text,
        classname,
        rectangle,
        clientrect,
        style,
        exstyle,
        contexthelpid,
        controlid,
        userdata,
        font,
        parent,
        processid,
        isenabled,
        isunicode,
        isvisible,
        children,
    )
    return dict((func.__name__, func(handle)) for func in prop_funcs)
| airelil/pywinauto | pywinauto/handleprops.py | Python | bsd-3-clause | 14,685 |
"""
WSGI config for dj_bookmarks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_bookmarks.settings")
# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| kennethlove/django_bookmarks | dj_bookmarks/dj_bookmarks/wsgi.py | Python | bsd-3-clause | 402 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds Section.peso (ordering weight) and
    refreshes Spanish verbose names/help texts on content/guide/section
    fields."""

    dependencies = [
        ('cacao', '0006_auto_20150119_0946'),
    ]

    operations = [
        migrations.AddField(
            model_name='section',
            name='peso',
            field=models.PositiveIntegerField(default=1, help_text=b'Entre mayor sea el peso mas al fondo se ubica', verbose_name=b'Peso de la Seccion'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='content',
            name='description',
            field=models.TextField(verbose_name=b'Descripcion'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='content',
            name='image',
            field=models.ImageField(help_text=b'Required dimensions 1563x538', upload_to=b'cacao/', verbose_name=b'Imagen', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='content',
            name='peso',
            field=models.PositiveIntegerField(unique=True, verbose_name=b'Peso del Contenido'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='content',
            name='title',
            field=models.CharField(max_length=250, verbose_name=b'Titulo'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='guide',
            name='description',
            field=models.TextField(verbose_name=b'Descripcion'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='guide',
            name='image',
            field=models.ImageField(upload_to=b'cacao/', verbose_name=b'Imagen'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='guide',
            name='name',
            field=models.CharField(max_length=250, verbose_name=b'Nombre'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='guide',
            name='number',
            field=models.IntegerField(verbose_name=b'Numero'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='section',
            name='image',
            field=models.ImageField(upload_to=b'cacao/', verbose_name=b'Imagen', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='section',
            name='title',
            field=models.CharField(max_length=250, verbose_name=b'Titulo'),
            preserve_default=True,
        ),
    ]
| CacaoMovil/guia-de-cacao-django | cacao_app/cacao/migrations/0007_auto_20150123_1600.py | Python | bsd-3-clause | 2,739 |
# -*- coding: utf-8 -*-
"""
Formulas for density calculation.
"""
from math import log10
from colormath.density_standards import (
ANSI_STATUS_T_BLUE,
ANSI_STATUS_T_GREEN,
ANSI_STATUS_T_RED,
VISUAL_DENSITY_THRESH,
ISO_VISUAL,
)
def ansi_density(color, density_standard):
    """
    Calculates density for the given SpectralColor using the spectral weighting
    function provided. For example, ANSI_STATUS_T_RED. These may be found in
    :py:mod:`colormath.density_standards`.

    :param SpectralColor color: The SpectralColor object to calculate
        density for.
    :param numpy.ndarray density_standard: NumPy array of filter of choice
        from :py:mod:`colormath.density_standards`.
    :rtype: float
    :returns: The density value for the given color and density standard.
    """
    # Spectral power readings (the spec_XXXnm attributes) as a NumPy array.
    sample = color.get_numpy_array()
    # Element-wise weighting of the sample by the filter, summed over all
    # wavelengths.
    weighted_sum = (sample * density_standard).sum()
    # Normalize by the filter's own total weight before taking the log.
    filter_total = density_standard.sum()
    return -1.0 * log10(weighted_sum / filter_total)
def auto_density(color):
    """
    Given a SpectralColor, automatically choose the correct ANSI T filter.

    :param SpectralColor color: The SpectralColor object to calculate
        density for.
    :rtype: float
    :returns: The density value, with the filter selected automatically.
    """
    blue_density = ansi_density(color, ANSI_STATUS_T_BLUE)
    green_density = ansi_density(color, ANSI_STATUS_T_GREEN)
    red_density = ansi_density(color, ANSI_STATUS_T_RED)

    densities = [blue_density, green_density, red_density]
    min_density = min(densities)
    max_density = max(densities)
    density_range = max_density - min_density

    # See comments in density_standards.py for VISUAL_DENSITY_THRESH to
    # understand what this is doing. Near-neutral colors use the ISO
    # visual filter instead of a single status T channel.
    if density_range <= VISUAL_DENSITY_THRESH:
        return ansi_density(color, ISO_VISUAL)

    # Otherwise report the strongest channel. Bug fix: the previous
    # strict-comparison chain ("blue > green and blue > red", ...) fell
    # through to red on ties (e.g. blue == green > red); max() returns
    # the correct dominant density in every case.
    return max_density
| gtaylor/python-colormath | colormath/density.py | Python | bsd-3-clause | 2,458 |
# -*- coding: utf-8 -*-
import hashlib
import math
import struct
import base64
import json
import zlib
import binascii
from Crypto.Cipher import AES
from Crypto import Random
# Static application-wide value mixed into the password before hashing.
# NOTE(review): a single hard-coded salt gives no per-user salting; the
# random IV below is the only per-message variation - confirm acceptable.
salt ='__E3S$hH%&*KL:"II<UG=_!@fc9}021jFJ|KDI.si81&^&%%^*(del?%)))+__'
fingerprint_len =4   # bytes of Adler-32 checksum in the wire format
iv_len =16           # AES-CBC IV length in bytes
randomiv_len =4      # random bytes sent on the wire, hashed into the IV
print_log =False     # set True for verbose pack()/unpack() tracing
# Given a password, return the hex representation of the first two
# bytes of its SHA-256 hash.
def fingerprintSimple(input_str):
    """Short hex fingerprint: first 2 bytes of sha256(input_str)."""
    digest = hashlib.sha256(input_str).digest()
    return binascii.hexlify(digest[:2])
def hash(input):
    """Return the raw SHA-256 digest (32 bytes) of *input*."""
    digest_maker = hashlib.sha256()
    digest_maker.update(input)
    return digest_maker.digest()
def fingerprint(input):
    """Pack the Adler-32 checksum of *input* as 4 big-endian bytes."""
    checksum = zlib.adler32(input)
    return struct.pack('!i', checksum)
def pack(pwd, dict_input):
    """Encrypt *dict_input* (a JSON-serialisable dict) under *pwd*.

    Wire format before base64: body_len(4) + fingerprint(4) +
    randomiv(4) + AES-CBC ciphertext of the space-padded JSON body.
    Returns a base64 line terminated with CRLF, or '' on any failure.
    """
    try:
        if print_log:
            print 'pack pwd=', pwd
            print 'pack dict_input=', dict_input
        input =json.dumps(dict_input)
        l =len(input)
        # Pad with spaces up to a multiple of the AES block size (16).
        output =input.ljust(int(math.ceil(l/16.0)*16), ' ')
        rndfile = Random.new()
        randomiv =rndfile.read(randomiv_len)
        # Derive the full 16-byte IV from the 4 random wire bytes.
        iv =hash(randomiv)[0:iv_len]
        if print_log:
            print 'pack iv=', repr(iv)
        # Key = sha256(static salt + password).
        key =hash(salt+pwd)
        encryptor =AES.new(key, AES.MODE_CBC, iv)
        encrypted_str = encryptor.encrypt(output)
        output =randomiv+encrypted_str
        # Fingerprint covers randomiv + ciphertext.
        fp =fingerprint(output)
        # body_len + fp + randomiv + encrypted_msg + padding
        body_len =struct.pack('!i', l)
        output =body_len+fp+output
        if print_log:
            print 'pack body_len=', l
            print 'pack randomiv=', repr(randomiv)
            print 'pack fingerprint=', repr(fp)
            print 'pack encrypted_str=%s, len=%d'% (repr(encrypted_str), len(encrypted_str))
        output =base64.b64encode(output)
        if print_log:
            print 'pack result:%s, len=%d' %(output, len(output))
        output =output+'\r\n'
        return output
    except:
        # NOTE(review): bare except maps any failure (including bugs) to
        # an empty string - confirm this best-effort behavior is intended.
        return ''
def unpack(pwd, input_str_utf8):
    """Reverse of pack(): base64-decode, verify the Adler-32 fingerprint,
    decrypt with AES-CBC and parse the JSON body.

    Returns the decoded dict, or {} on any failure (bad base64, wrong
    password, corrupted message, fingerprint mismatch).
    """
    try:
        # Strip the trailing CRLF appended by pack(), if present.
        if input_str_utf8[-2: ]=='\r\n':
            input =input_str_utf8[0: len(input_str_utf8)-2]
        else :
            input =input_str_utf8
        if print_log:
            print 'unpack input:%s, len=%d' %(input, len(input))
        input =base64.b64decode(input)
        # body_len + fp + randomiv + encrypted_msg + padding
        l, =struct.unpack('!i', input[0:4])
        if print_log:
            print 'unpack body_len=', l
        input =input[4:]
        if print_log:
            print 'unpack input fingerprint=', repr(input[0:fingerprint_len])
            print 'unpack cal fingerprint=', repr(fingerprint(input[fingerprint_len:]))
        # Reject the message when the fingerprint does not match.
        if fingerprint(input[fingerprint_len:])!=input[0:fingerprint_len]:
            return {}
        input =input[fingerprint_len:]
        # Re-derive the 16-byte IV from the 4 random wire bytes, as pack() did.
        randomiv =input[0:randomiv_len]
        iv =hash(randomiv)[0:iv_len]
        input =input[randomiv_len:]
        if print_log:
            print 'unpack randomiv=', repr(randomiv)
            print 'unpack iv=', repr(iv)
        key =hash(salt+pwd)
        decryptor =AES.new(key, AES.MODE_CBC, iv)
        output = decryptor.decrypt(input)
        # Trim the space padding back to the original body length.
        output =output[0:l]
        if print_log:
            print 'unpack, json.loads data:', output
        d =json.loads(output)
        if print_log:
            print 'unpack result:', d
        return d
    except:
        # NOTE(review): bare except maps any failure to {} - confirm.
        return {}
if __name__=='__main__':
    # Round-trip smoke test: pack a dict, then unpack it with the same key.
    d ={'k':u'大神好'}
    print 'pack input=',d
    enc =pack('qwert',d)
    print 'pack result=',enc
    d =unpack('qwert',enc)
    print 'unpack result=',d
| dungeonsnd/forwarding | EChat/pack.py | Python | bsd-3-clause | 3,621 |
from django.test import TestCase
from corehq.apps.receiverwrapper import submit_form_locally
from couchforms.models import XFormError
class CaseProcessingErrorsTest(TestCase):
    """Malformed case blocks must not save the submitted form; instead an
    XFormError doc is saved whose id matches the original instanceID."""

    def _assert_single_error(self, domain, doc_id, problem):
        """Assert exactly one XFormError in *domain* has id *doc_id* and
        the expected *problem* message.  (Extracted from the two tests
        below, which previously duplicated this query verbatim.)"""
        xform_errors = XFormError.view(
            'domain/docs',
            startkey=[domain, 'XFormError'],
            endkey=[domain, 'XFormError', {}],
            reduce=False,
            include_docs=True,
        ).all()
        related_errors = [xform_error for xform_error in xform_errors
                          if xform_error.get_id == doc_id]
        self.assertEqual(len(related_errors), 1)
        self.assertEqual(related_errors[0].problem, problem)

    def test_no_case_id(self):
        """
        submit form with a case block that has no case_id
        check that
        - it errors
        - the form is not saved under its original id
        - an XFormError is saved with the original id as orig_id
        - the error was logged (<-- is this hard to test?)
        <data xmlns="example.com/foo">
            <case case_id="">
                <update><foo>bar</foo></update>
            </case>
        </data>
        """
        submit_form_locally(
            """<data xmlns="example.com/foo">
            <meta>
                <instanceID>abc-easy-as-123</instanceID>
            </meta>
            <case case_id="" xmlns="http://commcarehq.org/case/transaction/v2">
                <update><foo>bar</foo></update>
            </case>
            </data>""",
            'my_very_special_domain',
        )
        self._assert_single_error(
            'my_very_special_domain', 'abc-easy-as-123',
            'IllegalCaseId: case_id must not be empty')

    def test_uses_referrals(self):
        """
        submit form with a case block that uses referrals
        check that
        - it errors
        - the form is not saved under its original id
        - an XFormError is saved with the original id as orig_id
        """
        submit_form_locally(
            """<data xmlns="example.com/foo">
            <meta>
                <instanceID>abc-easy-as-456</instanceID>
            </meta>
            <case case_id="123" xmlns="http://commcarehq.org/case/transaction/v2">
                <referral>
                    <referral_id>456</referral_id>
                    <open>
                        <referral_types>t1 t2</referral_types>
                    </open>
                </referral>
            </case>
            </data>""",
            'my_very_special_domain',
        )
        self._assert_single_error(
            'my_very_special_domain', 'abc-easy-as-456',
            'UsesReferrals: Sorry, referrals are no longer supported!')
| puttarajubr/commcare-hq | corehq/ex-submodules/couchforms/tests/test_errors.py | Python | bsd-3-clause | 3,166 |
from datetime import datetime
import hashlib
import os
import random
import re
import string
import time
from django import forms, dispatch
from django.conf import settings
from django.contrib.auth.models import User as DjangoUser
from django.core import validators
from django.db import models
from django.template import Context, loader
from django.utils.encoding import smart_str, smart_unicode
from django.utils.functional import lazy
import caching.base as caching
import commonware.log
from tower import ugettext as _
import amo
import amo.models
from amo.urlresolvers import reverse
from translations.fields import PurifiedField
log = commonware.log.getLogger('z.users')
def get_hexdigest(algorithm, salt, raw_password):
    """Hex digest of salt+password under the named hashlib algorithm."""
    hasher = hashlib.new(algorithm, smart_str(salt + raw_password))
    return hasher.hexdigest()
def rand_string(length):
    """Return a string of *length* random ASCII letters."""
    picks = [random.choice(string.letters) for _ in xrange(length)]
    return ''.join(picks)
def create_password(algorithm, raw_password):
    """Build an 'algorithm$salt$hash' password string with a random salt."""
    salt = get_hexdigest(algorithm, rand_string(12), rand_string(12))[:64]
    digest = get_hexdigest(algorithm, salt, raw_password)
    return '$'.join([algorithm, salt, digest])
class UserForeignKey(models.ForeignKey):
    """
    A replacement for models.ForeignKey('users.UserProfile').

    This field uses UserEmailField to make form fields key off the user's
    email instead of the primary key id.  We also hook up autocomplete
    automatically.
    """

    def __init__(self, *args, **kw):
        # Always point at UserProfile; callers only supply options.
        super(UserForeignKey, self).__init__(UserProfile, *args, **kw)

    def value_from_object(self, obj):
        # Forms display/submit the related user's email, not the pk.
        return getattr(obj, self.name).email

    def formfield(self, **kw):
        field_kwargs = {'form_class': UserEmailField}
        field_kwargs.update(kw)
        return models.Field.formfield(self, **field_kwargs)
class UserEmailField(forms.EmailField):
    """EmailField that cleans to a UserProfile instance and wires up the
    email autocomplete widget."""

    def clean(self, value):
        if value in validators.EMPTY_VALUES:
            raise forms.ValidationError(self.error_messages['required'])
        try:
            return UserProfile.objects.get(email=value)
        except UserProfile.DoesNotExist:
            raise forms.ValidationError(_('No user with that email.'))

    def widget_attrs(self, widget):
        # reverse() is wrapped lazily: URLconf may not be loaded when the
        # field is constructed.
        lazy_reverse = lazy(reverse, str)
        attrs = {'class': 'email-autocomplete'}
        attrs['data-src'] = lazy_reverse('users.ajax')
        return attrs
class UserProfile(amo.models.OnChangeMixin, amo.models.ModelBase):
    """Primary user model, backed by the legacy 'users' table inherited
    from remora; optionally linked to a django.contrib.auth User."""

    # nickname, firstname, & lastname are deprecated.
    nickname = models.CharField(max_length=255, default='', null=True,
                                blank=True)
    firstname = models.CharField(max_length=255, default='', blank=True)
    lastname = models.CharField(max_length=255, default='', blank=True)
    username = models.CharField(max_length=255, default='', unique=True)
    display_name = models.CharField(max_length=255, default='', null=True,
                                    blank=True)
    password = models.CharField(max_length=255, default='')
    email = models.EmailField(unique=True, null=True)
    averagerating = models.CharField(max_length=255, blank=True, null=True)
    bio = PurifiedField(short=False)
    confirmationcode = models.CharField(max_length=255, default='',
                                        blank=True)
    deleted = models.BooleanField(default=False)
    display_collections = models.BooleanField(default=False)
    display_collections_fav = models.BooleanField(default=False)
    emailhidden = models.BooleanField(default=True)
    homepage = models.URLField(max_length=255, blank=True, default='',
                               verify_exists=False)
    location = models.CharField(max_length=255, blank=True, default='')
    notes = models.TextField(blank=True, null=True)
    notifycompat = models.BooleanField(default=True)
    notifyevents = models.BooleanField(default=True)
    occupation = models.CharField(max_length=255, default='', blank=True)
    # This is essentially a "has_picture" flag right now
    picture_type = models.CharField(max_length=75, default='', blank=True)
    resetcode = models.CharField(max_length=255, default='', blank=True)
    resetcode_expires = models.DateTimeField(default=datetime.now, null=True,
                                             blank=True)
    sandboxshown = models.BooleanField(default=False)
    last_login_ip = models.CharField(default='', max_length=45, editable=False)
    last_login_attempt = models.DateTimeField(null=True, editable=False)
    last_login_attempt_ip = models.CharField(default='', max_length=45,
                                             editable=False)
    failed_login_attempts = models.PositiveIntegerField(default=0,
                                                        editable=False)
    # Link to the auth user; cleared (and the auth user deleted) when the
    # profile is saved with deleted=True - see save().
    user = models.ForeignKey(DjangoUser, null=True, editable=False, blank=True)

    class Meta:
        db_table = 'users'

    def __init__(self, *args, **kw):
        super(UserProfile, self).__init__(*args, **kw)
        if self.username:
            self.username = smart_unicode(self.username)

    def __unicode__(self):
        return '%s: %s' % (self.id, self.display_name or self.username)

    def get_url_path(self):
        return reverse('users.profile', args=[self.id])

    def flush_urls(self):
        """URLs to purge from cache when this profile changes."""
        urls = ['*/user/%d/' % self.id,
                self.picture_url,
                ]
        return urls

    @amo.cached_property
    def addons_listed(self):
        """Public add-ons this user is listed as author of."""
        return self.addons.reviewed().filter(addonuser__user=self,
                                             addonuser__listed=True)

    @property
    def picture_dir(self):
        # Split the numeric id into path segments so pictures are spread
        # across subdirectories instead of one huge directory.
        split_id = re.match(r'((\d*?)(\d{0,3}?))\d{1,3}$', str(self.id))
        return os.path.join(settings.USERPICS_PATH, split_id.group(2) or '0',
                            split_id.group(1) or '0')

    @property
    def picture_path(self):
        return os.path.join(self.picture_dir, str(self.id) + '.png')

    @property
    def picture_url(self):
        if not self.picture_type:
            return settings.MEDIA_URL + '/img/zamboni/anon_user.png'
        else:
            split_id = re.match(r'((\d*?)(\d{0,3}?))\d{1,3}$', str(self.id))
            # The modification timestamp busts caches after picture changes.
            return settings.USERPICS_URL % (
                split_id.group(2) or 0, split_id.group(1) or 0, self.id,
                int(time.mktime(self.modified.timetuple())))

    @amo.cached_property
    def is_developer(self):
        return self.addonuser_set.exists()

    @amo.cached_property
    def needs_tougher_password(user):
        # Editors and admins must meet stricter password requirements.
        from access.acl import action_allowed_user
        return (action_allowed_user(user, 'Editors', '%')
                or action_allowed_user(user, 'Admin', '%'))

    @property
    def name(self):
        return smart_unicode(self.display_name or self.username)

    welcome_name = name

    @property
    def last_login(self):
        """Make UserProfile look more like auth.User."""
        # Django expects this to be non-null, so fake a login attempt.
        if not self.last_login_attempt:
            self.update(last_login_attempt=datetime.now())
        return self.last_login_attempt

    @amo.cached_property
    def reviews(self):
        """All reviews that are not dev replies."""
        return self._reviews_all.filter(reply_to=None)

    def anonymize(self):
        """Strip all personally identifying data and mark as deleted."""
        log.info(u"User (%s: <%s>) is being anonymized." % (self, self.email))
        self.email = None
        self.password = "sha512$Anonymous$Password"
        self.firstname = ""
        self.lastname = ""
        self.nickname = None
        self.username = "Anonymous-%s" % self.id  # Can't be null
        self.display_name = None
        self.homepage = ""
        self.deleted = True
        self.picture_type = ""
        self.save()

    def generate_confirmationcode(self):
        """Create (once) and return the email confirmation code."""
        if not self.confirmationcode:
            self.confirmationcode = ''.join(random.sample(string.letters +
                                                          string.digits, 60))
        return self.confirmationcode

    def save(self, force_insert=False, force_update=False, using=None):
        # we have to fix stupid things that we defined poorly in remora
        if not self.resetcode_expires:
            self.resetcode_expires = datetime.now()
        # TODO POSTREMORA (maintain remora's view of user names.)
        # NOTE(review): precedence here means "(not firstname) or lastname
        # or nickname"; possibly "not (firstname or lastname or nickname)"
        # was intended - confirm before changing.
        if not self.firstname or self.lastname or self.nickname:
            self.nickname = self.name
        delete_user = None
        if self.deleted and self.user:
            delete_user = self.user
            self.user = None
            # Delete user after saving this profile.
        super(UserProfile, self).save(force_insert, force_update, using)
        if self.deleted and delete_user:
            delete_user.delete()

    def check_password(self, raw_password):
        # Legacy unsalted md5 hashes contain no '$'; verify them and
        # upgrade to the salted scheme on success.
        if '$' not in self.password:
            valid = (get_hexdigest('md5', '', raw_password) == self.password)
            if valid:
                # Upgrade an old password.
                self.set_password(raw_password)
                self.save()
            return valid
        algo, salt, hsh = self.password.split('$')
        return hsh == get_hexdigest(algo, salt, raw_password)

    def set_password(self, raw_password, algorithm='sha512'):
        self.password = create_password(algorithm, raw_password)

    def email_confirmation_code(self):
        """Send the account confirmation link to the user's email."""
        from amo.utils import send_mail
        log.debug("Sending account confirmation code for user (%s)", self)
        url = "%s%s" % (settings.SITE_URL,
                        reverse('users.confirm',
                                args=[self.id, self.confirmationcode]))
        domain = settings.DOMAIN
        t = loader.get_template('users/email/confirm.ltxt')
        c = {'domain': domain, 'url': url, }
        send_mail(_("Please confirm your email address"),
                  t.render(Context(c)), None, [self.email],
                  use_blacklist=False)

    def log_login_attempt(self, request, successful):
        """Log a user's login attempt"""
        self.last_login_attempt = datetime.now()
        self.last_login_attempt_ip = commonware.log.get_remote_addr()
        if successful:
            log.debug(u"User (%s) logged in successfully" % self)
            self.failed_login_attempts = 0
            self.last_login_ip = commonware.log.get_remote_addr()
        else:
            log.debug(u"User (%s) failed to log in" % self)
            # Capped to avoid overflowing the column.
            if self.failed_login_attempts < 16777216:
                self.failed_login_attempts += 1
        self.save()

    def create_django_user(self):
        """Make a django.contrib.auth.User for this UserProfile."""
        # Reusing the id will make our life easier, because we can use the
        # OneToOneField as pk for Profile linked back to the auth.user
        # in the future.
        self.user = DjangoUser(id=self.pk)
        self.user.first_name = ''
        self.user.last_name = ''
        self.user.username = self.email  # f
        self.user.email = self.email
        self.user.password = self.password
        self.user.date_joined = self.created
        # Users in a group with full rules get staff/superuser rights.
        if self.groups.filter(rules='*:*').count():
            self.user.is_superuser = self.user.is_staff = True
        self.user.save()
        self.save()
        return self.user

    def mobile_collection(self):
        return self.special_collection(amo.COLLECTION_MOBILE,
            defaults={'slug': 'mobile', 'listed': False,
                      'name': _('My Mobile Add-ons')})

    def favorites_collection(self):
        return self.special_collection(amo.COLLECTION_FAVORITES,
            defaults={'slug': 'favorites', 'listed': False,
                      'name': _('My Favorite Add-ons')})

    def special_collection(self, type_, defaults):
        """Get or create one of the user's built-in collections."""
        from bandwagon.models import Collection
        c, new = Collection.objects.get_or_create(
            author=self, type=type_, defaults=defaults)
        if new:
            # Do an extra query to make sure this gets transformed.
            c = Collection.objects.using('default').get(id=c.id)
        return c

    def purchase_ids(self):
        return (self.addonpurchase_set.values_list('addon_id', flat=True)
                .order_by('pk'))
@dispatch.receiver(models.signals.post_save, sender=UserProfile,
                   dispatch_uid='user.post_save')
def user_post_save(sender, instance, **kw):
    """Queue a search-index refresh for the saved user (skip raw saves)."""
    if kw.get('raw'):
        return
    from . import tasks
    tasks.index_users.delay([instance.id])
@dispatch.receiver(models.signals.post_delete, sender=UserProfile,
                   dispatch_uid='user.post_delete')
def user_post_delete(sender, instance, **kw):
    """Queue removal of the deleted user from the search index."""
    if kw.get('raw'):
        return
    from . import tasks
    tasks.unindex_users.delay([instance.id])
class UserNotification(amo.models.ModelBase):
    """Per-user opt-in/opt-out flag for a single notification type."""
    user = models.ForeignKey(UserProfile, related_name='notifications')
    notification_id = models.IntegerField()
    enabled = models.BooleanField(default=False)

    class Meta:
        db_table = 'users_notifications'

    @staticmethod
    def update_or_create(update=None, **kwargs):
        """Update rows matching **kwargs with *update*, or create one.

        Bug fix: *update* previously defaulted to a mutable ``{}`` that
        was mutated on the create path, leaking keys between calls (and
        it also mutated the caller's dict).  We now default to None and
        work on a copy.
        """
        update = dict(update) if update else {}
        rows = UserNotification.objects.filter(**kwargs).update(**update)
        if not rows:
            update.update(dict(**kwargs))
            UserNotification.objects.create(**update)
class RequestUserManager(amo.models.ManagerBase):
    """Manager that attaches RequestUser.transformer to every queryset."""

    def get_query_set(self):
        base_qs = super(RequestUserManager, self).get_query_set()
        return base_qs.transform(RequestUser.transformer)
class RequestUser(UserProfile):
    """
    A RequestUser has extra attributes we don't care about for normal users.
    """
    objects = RequestUserManager()

    def __init__(self, *args, **kw):
        super(RequestUser, self).__init__(*args, **kw)
        # Filled in lazily by transformer() for the request's user.
        self.mobile_addons = []
        self.favorite_addons = []
        self.watching = []

    class Meta:
        proxy = True

    @staticmethod
    def transformer(users):
        # We don't want to cache these things on every UserProfile; they're
        # only used by a user attached to a request.
        if not users:
            return
        from bandwagon.models import CollectionAddon, CollectionWatcher
        SPECIAL = amo.COLLECTION_SPECIAL_SLUGS.keys()
        user = users[0]
        # One query for the add-ons in all of the user's "special"
        # collections, then bucket the add-on ids by collection type.
        qs = CollectionAddon.objects.filter(
            collection__author=user, collection__type__in=SPECIAL)
        addons = dict((type_, []) for type_ in SPECIAL)
        for addon, ctype in qs.values_list('addon', 'collection__type'):
            addons[ctype].append(addon)
        user.mobile_addons = addons[amo.COLLECTION_MOBILE]
        user.favorite_addons = addons[amo.COLLECTION_FAVORITES]
        user.watching = list((CollectionWatcher.objects.filter(user=user)
                             .values_list('collection', flat=True)))
        # Touch this @cached_property so the answer is cached with the object.
        user.is_developer

    def _cache_keys(self):
        # Add UserProfile.cache_key so RequestUser gets invalidated when the
        # UserProfile is changed.
        keys = super(RequestUser, self)._cache_keys()
        return keys + (UserProfile(id=self.id).cache_key,)
class BlacklistedUsername(amo.models.ModelBase):
    """Blacklisted user usernames."""
    username = models.CharField(max_length=255, unique=True, default='')

    class Meta:
        db_table = 'users_blacklistedusername'

    def __unicode__(self):
        return self.username

    @classmethod
    def blocked(cls, username):
        """Check to see if a username is in the (cached) blacklist."""
        qs = cls.objects.all()

        def lowered_usernames():
            return [u.lower() for u in qs.values_list('username', flat=True)]

        blacklist = caching.cached_with(qs, lowered_usernames, 'blocked')
        return username.lower() in blacklist
class BlacklistedEmailDomain(amo.models.ModelBase):
    """Blacklisted user e-mail domains."""
    domain = models.CharField(max_length=255, unique=True, default='',
                              blank=False)

    def __unicode__(self):
        return self.domain

    @classmethod
    def blocked(cls, domain):
        """Return whether ``domain`` or any of its parent domains is
        blacklisted.

        Previously this fell off the end and returned None on a miss; it
        now returns an explicit False (same truthiness for callers).
        """
        qs = cls.objects.all()
        f = lambda: list(qs.values_list('domain', flat=True))
        blacklist = caching.cached_with(qs, f, 'blocked')
        # because there isn't a good way to know if the domain is
        # "example.com" or "example.co.jp", we'll re-construct it...
        # so if it's "bad.example.co.jp", the following check the
        # values in ['bad.example.co.jp', 'example.co.jp', 'co.jp']
        x = domain.lower().split('.')
        for d in ['.'.join(x[y:]) for y in range(len(x) - 1)]:
            if d in blacklist:
                return True
        return False
class PersonaAuthor(unicode):
    """Stub user until the persona authors get imported.

    The instance *is* the author's name string (subclasses ``unicode``).
    """

    @property
    def id(self):
        """I don't want to change code depending on PersonaAuthor.id, so I'm
        just hardcoding 0. The only code using this is flush_urls."""
        return 0

    @property
    def name(self):
        # The string value itself doubles as the name.
        return self

    # display_name is the exact same property object as name.
    display_name = name
class BlacklistedPassword(amo.models.ModelBase):
    """Blacklisted passwords"""
    password = models.CharField(max_length=255, unique=True, blank=False)

    def __unicode__(self):
        return self.password

    @classmethod
    def blocked(cls, password):
        # Returns a queryset; callers treat a non-empty result as "blocked".
        matches = cls.objects.filter(password=password)
        return matches
class UserHistory(amo.models.ModelBase):
    """Historical record of e-mail addresses a profile has used."""
    # The address that was replaced (see watch_email below).
    email = models.EmailField()
    user = models.ForeignKey(UserProfile, related_name='history')

    class Meta:
        db_table = 'users_history'
        # Newest change first.
        ordering = ('-created',)
@UserProfile.on_change
def watch_email(old_attr=None, new_attr=None, instance=None,
                sender=None, **kw):
    """Record a UserHistory row whenever a profile's e-mail changes.

    ``old_attr``/``new_attr`` previously used mutable ``{}`` defaults;
    swapped for None to avoid the shared-mutable-default pitfall.
    """
    old_attr = old_attr or {}
    new_attr = new_attr or {}
    new_email, old_email = new_attr.get('email'), old_attr.get('email')
    if old_email and new_email != old_email:
        log.debug('Creating user history for user: %s' % instance.pk)
        UserHistory.objects.create(email=old_email, user_id=instance.pk)
| jbalogh/zamboni | apps/users/models.py | Python | bsd-3-clause | 17,994 |
""" Defines the base class for color maps
"""
from traits.api import Enum, HasTraits, Instance
from data_range_1d import DataRange1D
class AbstractColormap(HasTraits):
    """
    Base class for color maps: objects that translate scalar data values
    into color values.
    """

    # The data-space bounds of the mapper.
    range = Instance(DataRange1D)

    # The color depth of the colors to use.
    color_depth = Enum('rgba', 'rgb')

    def map_screen(self, val):
        """
        map_screen(val) -> color

        Map an array of data values to an array of colors.  For an NxM
        input, the result is NxMx3 or NxMx4 according to **color_depth**.
        """
        raise NotImplementedError()

    def map_data(self, ary):
        """
        map_data(ary) -> color_array

        Return an array of colors corresponding to the values in *ary*.
        For an NxM input, the result is NxMx3 or NxMx4 according to
        **color_depth**.
        """
        # XXX this seems bogus: by analogy with AbstractMapper, this should map
        # colors to data values, and that will be generally hard to do well.
        # no subclass implements this - CJW
        raise NotImplementedError()

    def map_index(self, ary):
        """
        map_index(ary) -> index into color_bands

        Like map_screen(), but returns integer indices into the color map's
        color bands rather than colors.  For an NxM input the output is NxM
        integer indices.

        Not every color map can implement this; maps without a static set
        of color bands (e.g., function-defined ones) cannot.
        """
        raise NotImplementedError()

    def map_uint8(self, val):
        """
        map_uint8(val) -> rgb24 or rgba32 color

        Map a single value to a single uint8 rgb(a) color, as a length-3 or
        length-4 array according to **color_depth**.
        """
        # default implementation (not efficient)
        screen_color = self.map_screen(val)
        return (screen_color * 255.0).astype('uint8')
| burnpanck/chaco | chaco/abstract_colormap.py | Python | bsd-3-clause | 2,253 |
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
    # Optional legacy S3 helper: when importable, force subdomain-style
    # bucket URLs for django-storages.
    from S3 import CallingFormat
    AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this where even if in Dev this class is called.
    pass
from .common import Common
class Production(Common):
    """Production settings: Heroku deploy, S3 static/media, SendGrid mail."""

    # This ensures that Django will be able to detect a secure connection
    # properly on Heroku.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS

    # SECRET KEY
    SECRET_KEY = values.SecretValue()
    # END SECRET KEY

    # django-secure
    INSTALLED_APPS += ("djangosecure", )

    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # end django-secure

    # SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    # NOTE(review): "*" disables Host-header validation entirely; tighten
    # to the real domain list if possible.
    ALLOWED_HOSTS = ["*"]
    # END SITE CONFIGURATION

    INSTALLED_APPS += ("gunicorn", )

    # STORAGE CONFIGURATION
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += (
        'storages',
    )

    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = values.SecretValue()
    AWS_SECRET_ACCESS_KEY = values.SecretValue()
    AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False

    # see: https://github.com/antonagestam/collectfast
    AWS_PRELOAD_METADATA = True
    INSTALLED_APPS += ('collectfast', )

    # AWS cache settings, don't change unless you know what you're doing:
    AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
            AWS_EXPIRY, AWS_EXPIRY)
    }

    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    # END STORAGE CONFIGURATION

    # EMAIL
    DEFAULT_FROM_EMAIL = values.Value('Blog API <noreply@djangocali.com>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[Blog API] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    SERVER_EMAIL = EMAIL_HOST_USER
    # END EMAIL

    # TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # END TEMPLATE CONFIGURATION

    # CACHING
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc
    # memcacheify is painful to install on windows.
    try:
        # See: https://github.com/rdegges/django-heroku-memcacheify
        from memcacheify import memcacheify
        CACHES = memcacheify()
    except ImportError:
        CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    # END CACHING

    # Your production stuff: Below this line define 3rd party libary settings
| djangocali/blog-api | blog-api/config/production.py | Python | bsd-3-clause | 4,350 |
# perf script event handlers, generated by perf script -g python
# Licensed under the terms of the GNU GPL License version 2
# The common_* event handler fields are the most useful fields common to
# all events. They don't necessarily correspond to the 'common_*' fields
# in the format files. Those fields not available as handler params can
# be retrieved using Python functions of the form common_*(context).
# See the perf-trace-python Documentation for the list of available functions.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Nested autovivifying dict: syscalls[pid][syscall_id] -> entry time in ns.
syscalls = autodict()

def trace_begin():
    # Called once by perf before any events are processed.
    pass

def trace_end():
    # Called once by perf after the last event.
    pass
def raw_syscalls__sys_exit(event_name, context, cpu,
        s, ns, pid, comm, callchain, syscall_id, args):
    # Ignore exits whose matching sys_enter was never observed.
    if pid not in syscalls or syscall_id not in syscalls[pid]:
        return
    # Latency = exit timestamp minus the entry timestamp recorded in
    # raw_syscalls__sys_enter below.
    latency = nsecs(s, ns) - syscalls[pid][syscall_id]
    print "[%04d] %04d => %9uns" % (pid, syscall_id, latency)
def raw_syscalls__sys_enter(event_name, context, cpu,
        s, ns, pid, comm, callchain, syscall_id, ret):
    # Record the entry timestamp keyed by (pid, syscall id); paired with
    # the corresponding sys_exit to compute per-call latency.
    syscalls[pid][syscall_id] = nsecs(s, ns)
def trace_unhandled(event_name, context, event_fields_dict):
    # Catch-all for events without a specific handler; intentionally ignored.
    pass
| dterei/Scraps | perf/syscall-latency.py | Python | bsd-3-clause | 1,275 |
from pkg_resources import DistributionNotFound, get_distribution
def get_version():
    """Return the installed naima version, or "UNINSTALLED" if absent."""
    try:
        dist = get_distribution("naima")
    except DistributionNotFound:
        # package is not installed
        return "UNINSTALLED"
    return dist.version
| zblz/naima | src/naima/version.py | Python | bsd-3-clause | 241 |
from simcore import *
# Preload simulated ADC readings for motes 1-8.  Arguments appear to be
# (mote id, 0, 1, value) -- the exact meaning of the middle two parameters
# is defined by simcore.setADCValue; TODO confirm against simcore docs.
comm.setADCValue(1,0,1,400)
comm.setADCValue(2,0,1,380)
comm.setADCValue(3,0,1,362)
comm.setADCValue(4,0,1,334)
comm.setADCValue(5,0,1,412)
comm.setADCValue(6,0,1,425)
comm.setADCValue(7,0,1,445)
comm.setADCValue(8,0,1,429)
| jryans/wavelet-tinyos | compasstest/adc.py | Python | bsd-3-clause | 246 |
####
#### Give a report on the "sanity" of the users and groups YAML
#### metadata files.
####
#### Example usage to analyze the usual suspects:
#### python3 sanity-check-users-and-groups.py --help
#### Get report of current problems:
#### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml
#### Attempt to repair file (note that we go through json2yaml as libyaml output does not seem compatible with kwalify):
#### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml --repair --output /tmp/output.json && json2yaml --depth 10 /tmp/output.json > /tmp/users.yaml
#### Check new yaml:
#### kwalify -E -f metadata/users.schema.yaml /tmp/users.yaml
#### Run report on new yaml.
#### reset && python3 ./scripts/sanity-check-users-and-groups.py --users /tmp/users.yaml --groups metadata/groups.yaml
import sys
import argparse
import logging
import yaml
import json
## Logger basic setup.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger('sanity')
LOGGER.setLevel(logging.WARNING)

## Make sure we exit in a way that will get Jenkins's attention.
# Module-level failure flag: flipped by die_screaming() and checked at the
# end of main() to decide the exit code.
DIED_SCREAMING_P = False
def die_screaming(string):
    """Log an error and mark the run as failed (exit is deferred to main)."""
    global DIED_SCREAMING_P
    LOGGER.error(string)
    # Don't exit immediately: main() checks this flag at the end so every
    # problem gets reported in a single pass.
    DIED_SCREAMING_P = True
    #sys.exit(1)
def main():
    """Validate users.yaml against groups.yaml; optionally repair in place.

    Reports users with noctua permissions that lack an ORCID uri or a valid
    group, and (with --repair) rewrites old-style authorizations, fills in
    guessable groups, and revokes edit privileges from violators.
    """
    ## Deal with incoming.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='More verbose output')
    parser.add_argument('-u', '--users',
                        help='The users.yaml file to act on')
    parser.add_argument('-g', '--groups',
                        help='The groups.yaml file to act on')
    parser.add_argument("-r", "--repair", action="store_true",
                        help="Attempt to repair groups and update old permissions")
    parser.add_argument("-o", "--output",
                        help="The file to output internal structure to (if repairing)")
    args = parser.parse_args()

    if args.verbose:
        LOGGER.setLevel(logging.INFO)
        LOGGER.info('Verbose: on')

    ## Ensure targets.
    if not args.users:
        die_screaming('need a users argument')
    LOGGER.info('Will operate on users: ' + args.users)
    if not args.groups:
        die_screaming('need a groups argument')
    LOGGER.info('Will operate on groups: ' + args.groups)

    ## Read.
    users = None
    with open(args.users) as mhandle:
        users = yaml.safe_load(mhandle.read())
    groups_linear = None
    with open(args.groups) as mhandle:
        groups_linear = yaml.safe_load(mhandle.read())

    ## Switch linear groups to lookup by URI.
    groups_lookup = {}
    for group in groups_linear:
        groups_lookup[group['id']] = group['label']

    # Nicknames of violating users, bucketed by violation type.
    violations = {
        "uri": [],
        "groups": [],
    }

    ## Cycle through users and see if we find any violations.
    for index, user in enumerate(users):

        nick = user.get('nickname', '???')

        ## Update old authorizations type.
        # Migrate legacy {"noctua-go": ...} to {"noctua": {"go": ...}}.
        if args.repair:
            if user.get("authorizations", {}).get("noctua-go", False):
                print('REPAIR?: Update perms for ' + nick)
                auths = user["authorizations"]["noctua-go"]
                del user["authorizations"]["noctua-go"] # delete old way
                user["authorizations"]["noctua"] = {
                    "go": auths
                }
                users[index] = user # save new back into list

        ## Does the user have noctua perms?
        if user.get('authorizations', False):
            auth = user.get('authorizations', {})
            # Accept either the legacy or the migrated permission layout.
            if auth.get('noctua-go', False) or \
               (auth.get('noctua', False) and auth['noctua'].get('go', False)):
                #print('Has perms: ' + user.get('nickname', '???'))

                ## 1: If so, do they have a URI?
                if not user.get('uri', False):
                    die_screaming(user.get('nickname', '???') +\
                                  ' has no "uri"')
                    #print(nick + ' has no "uri"')
                    violations["uri"].append(nick)
                else:
                    ## 2: Is it an ORCID?
                    if user.get('uri', 'NIL').find('orcid') == -1:
                        die_screaming(user.get('nickname', '???') +\
                                      ' "uri" is not an ORCID.')
                        #print(nick + ' "uri" is not an ORCID.')
                        violations["uri"].append(nick)

                ## 3: If so, do they have a populated groups?
                if not user.get('groups', False) or len(user["groups"]) == 0:
                    die_screaming(user.get('nickname', '???') +\
                                  ' has no "groups"')
                    #print(nick + ' has no "groups"')
                    # Try to infer the group from the organization field.
                    if user.get("organization", False):
                        org = user["organization"]
                        print(nick + " could try org {}".format(org))
                        matching_groups = list(filter(lambda g: org == g["label"] or org == g["shorthand"], groups_linear))
                        if len(matching_groups) > 0:
                            print("REPAIR?: Use group: {}".format(matching_groups[0]["id"]))
                            if args.repair:
                                user["groups"] = [matching_groups[0]["id"]]
                                users[index] = user
                    else:
                        violations["groups"].append(nick)
                else:
                    ## 4: If so, are all entries in groups?
                    for gid in user.get('groups'):
                        if not groups_lookup.get(gid, False):
                            die_screaming(user.get('nickname', '???') +\
                                          ' has mistaken group entry: ' + gid)
                            #print(nick + ' has mistaken group entry: ' + gid)

    # Partition violators for reporting: both buckets, uri-only, groups-only.
    violates_both = set(violations["uri"]).intersection(violations["groups"])
    just_uri = set(violations["uri"]).difference(violates_both)
    just_groups = set(violations["groups"]).difference(violates_both)

    ## Check privs.
    for index, user in enumerate(users):
        if user["nickname"] in just_uri or user["nickname"] in just_groups:
            # If we have an auth with noctua-go with allow-edit set to True
            if user.get("authorizations", {}).get("noctua", {}).get("go", {}).get("allow-edit", False):
                print("REPAIR?: Revoke {} noctua-go edit privileges.".format(user["nickname"]))
                if args.repair:
                    del user["authorizations"]
                    users[index] = user

    print("\nNo URI, or no ORCID:")
    print("===================")
    print("\n".join(just_uri))
    print("\nNo Groups:")
    print("===================")
    print("\n".join(just_groups))
    print("\nBoth Bad:")
    print("===================")
    print("\n".join(violates_both))

    #print(json.dumps(users))
    #print(yaml.dump(users, default_flow_style=False))
    #yaml.dump(data, default_flow_style=False)
    # Dump the (possibly repaired) users structure as JSON for later
    # conversion back to YAML.
    if args.output:
        with open(args.output, 'w+') as fhandle:
            fhandle.write(json.dumps(users, sort_keys=True, indent=4))

    ## TODO: implement hard checks above later.
    if DIED_SCREAMING_P:
        print('Errors happened, alert the sheriff.')
        sys.exit(1)
    else:
        print('Non-failing run.')
## Script entry point.
if __name__ == '__main__':
    main()
| geneontology/go-site | scripts/sanity-check-users-and-groups.py | Python | bsd-3-clause | 7,704 |
#!/usr/bin/env python
import os
import sys
try:
    # For local development in a virtualenv:
    from funfactory import manage
except ImportError:
    # Production:
    # Add a temporary path so that we can import the funfactory
    tmp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'vendor', 'src', 'funfactory')
    sys.path.append(tmp_path)
    from funfactory import manage
    # Let the path magic happen in setup_environ() !
    sys.path.remove(tmp_path)

# Configure settings/paths before handing control to Django's manage.
manage.setup_environ(__file__)

if __name__ == "__main__":
    manage.main()
| satdav/mozillians | manage.py | Python | bsd-3-clause | 590 |
"""HaloEndpoint class"""
import cloudpassage.sanity as sanity
from .utility import Utility as utility
from .http_helper import HttpHelper
class HaloEndpoint(object):
    """Base class inherited by other specific HaloEndpoint classes."""

    default_endpoint_version = 1

    def __init__(self, session, **kwargs):
        self.session = session
        self.max_pages = 100
        self.set_endpoint_version(kwargs)

    def set_endpoint_version(self, kwargs):
        """Validate and set the endpoint version."""
        # Guard-clause form: missing key means "use the class default".
        if "endpoint_version" not in kwargs:
            self.endpoint_version = self.default_endpoint_version
            return
        version = kwargs["endpoint_version"]
        if not isinstance(version, int):
            raise TypeError("Bad endpoint version {}".format(version))
        self.endpoint_version = version

    @classmethod
    def endpoint(cls):
        """Not implemented at this level. Raises exception."""
        raise NotImplementedError

    @classmethod
    def pagination_key(cls):
        """Not implemented at this level. Raises exception."""
        raise NotImplementedError

    @classmethod
    def object_key(cls):
        """Not implemented at this level. Raises exception."""
        raise NotImplementedError

    def list_all(self, **kwargs):
        """Lists all objects of this type.

        Returns:
            list: List of objects (represented as dictionary-type objects)

        Note:
            This method supports query parameters via keyword arguments.
        """
        helper = HttpHelper(self.session)
        query_params = utility.sanitize_url_params(kwargs)
        return helper.get_paginated(self.endpoint(), self.pagination_key(),
                                    self.max_pages, params=query_params)

    def describe(self, object_id):
        """Get the detailed configuration by ID

        Args:
            object_id (str): ID to retrieve detailed configuration
                information for

        Returns:
            dict: dictionary object representing the entire object.
        """
        helper = HttpHelper(self.session)
        target = "%s/%s" % (self.endpoint(), object_id)
        return helper.get(target)[self.object_key()]

    def create(self, object_body):
        """Create from JSON document.

        Returns the ID of the new object
        """
        helper = HttpHelper(self.session)
        body = utility.policy_to_dict(object_body)
        created = helper.post(self.endpoint(), body)
        return created[self.object_key()]["id"]

    def delete(self, object_id):
        """Delete by ID.  Success returns None"""
        sanity.validate_object_id(object_id)
        helper = HttpHelper(self.session)
        helper.delete("%s/%s" % (self.endpoint(), object_id))
        return None

    def update(self, object_body):
        """Update.  Success returns None"""
        helper = HttpHelper(self.session)
        body = utility.policy_to_dict(object_body)
        object_id = body[self.object_key()]["id"]
        sanity.validate_object_id(object_id)
        helper.put("%s/%s" % (self.endpoint(), object_id), body)
        return None
| cloudpassage/cloudpassage-halo-python-sdk | cloudpassage/halo_endpoint.py | Python | bsd-3-clause | 3,416 |
#(c) 2016-2018 by Authors
#This file is a part of Flye program.
#Released under the BSD license (see LICENSE file)
"""
Created on Wed Jan 4 03:50:31 2017
@author: jeffrey_yuan
"""
from __future__ import absolute_import
from __future__ import division
import os
import logging
from itertools import combinations, product
import copy
import multiprocessing, signal
import flye.polishing.alignment as flye_aln
from flye.utils.sam_parser import SynchronizedSamReader, Alignment
import flye.utils.fasta_parser as fp
import flye.config.py_cfg as config
import flye.polishing.polish as pol
import flye.trestle.divergence as div
import flye.trestle.trestle_config as trestle_config
from flye.six.moves import range
from flye.six.moves import zip
logger = logging.getLogger()
def resolve_repeats(args, trestle_dir, repeats_info, summ_file,
                    resolved_repeats_seqs):
    """Resolve unbridged repeats in parallel, one subprocess per repeat.

    Writes resolved repeat sequences to ``resolved_repeats_seqs`` (fasta)
    and a per-repeat report to ``summ_file``.
    """
    all_file_names = define_file_names()
    all_labels, initial_file_names = all_file_names[0], all_file_names[2]
    all_resolved_reps_dict = {}
    all_summaries = []
    init_summary(summ_file)

    #1. Process repeats from graph - generates a folder for each repeat
    logger.debug("Finding unbridged repeats")
    process_outputs = process_repeats(args.reads, repeats_info,
                                      trestle_dir, all_labels,
                                      initial_file_names)
    repeat_list, repeat_edges, all_edge_headers = process_outputs
    logger.info("Simple unbridged repeats: %d", len(repeat_list))
    #if not repeat_list:
    #    return

    #Resolve every repeat in a separate thread
    def _thread_worker(func_args, log_file, results_queue, error_queue):
        # Runs inside a multiprocessing.Process; results and errors are
        # shipped back through the manager queues.
        try:
            #each thred logs to a separate file
            log_formatter = \
                logging.Formatter("[%(asctime)s] %(name)s: %(levelname)s: "
                                  "%(message)s", "%Y-%m-%d %H:%M:%S")
            file_handler = logging.FileHandler(log_file, mode="a")
            file_handler.setFormatter(log_formatter)
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
            logger.addHandler(file_handler)

            result = resolve_each_repeat(*func_args)
            results_queue.put(result)

        except Exception as e:
            error_queue.put(e)

    # Process repeats in chunks of at most args.threads at a time.
    job_chunks = [repeat_list[i:i + args.threads]
                  for i in range(0, len(repeat_list), args.threads)]
    for job_chunk in job_chunks:
        manager = multiprocessing.Manager()
        results_queue = manager.Queue()
        error_queue = manager.Queue()

        # Split the available threads among the repeats in this chunk.
        repeat_threads = max(1, args.threads // len(job_chunk))
        # Ignore SIGINT while forking so children inherit the disposition;
        # restored for the parent right after the processes are created.
        orig_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
        threads = []
        for rep_id in sorted(job_chunk):
            func_args = (rep_id, repeat_edges, all_edge_headers, args, trestle_dir,
                         repeats_info, all_file_names, repeat_threads)
            log_file = os.path.join(trestle_dir,
                                    "repeat_{0}".format(rep_id), "log.txt")
            threads.append(multiprocessing.Process(target=_thread_worker,
                                                   args=(func_args, log_file,
                                                         results_queue, error_queue)))
        signal.signal(signal.SIGINT, orig_sigint)

        for t in threads:
            t.start()
        try:
            for t in threads:
                t.join()
                if t.exitcode == -9:
                    logger.error("Looks like the system ran out of memory")
                if t.exitcode != 0:
                    raise Exception("One of the processes exited with code: {0}"
                                    .format(t.exitcode))
        except KeyboardInterrupt:
            for t in threads:
                t.terminate()
            raise

        # Worker exceptions are logged but deliberately not fatal.
        while not error_queue.empty():
            logger.warning("Non-critical error in trestle thread: " + str(error_queue.get()))
        #if not error_queue.empty():
        #    raise error_queue.get()

        while not results_queue.empty():
            resolved_dict, summary_list = results_queue.get()
            all_resolved_reps_dict.update(resolved_dict)
            all_summaries.extend(summary_list)

    fp.write_fasta_dict(all_resolved_reps_dict, resolved_repeats_seqs)
    num_resolved = 0
    for summ_items in all_summaries:
        # summ_items[6] is the "bridged" flag produced by the worker.
        if summ_items[6]:
            num_resolved += 1
        update_summary(summ_items, summ_file)
    logger.info("Resolved: %d", num_resolved)
def resolve_each_repeat(rep_id, repeat_edges, all_edge_headers, args,
trestle_dir, repeats_info, all_file_names,
num_threads):
SUB_THRESH = trestle_config.vals["sub_thresh"]
DEL_THRESH = trestle_config.vals["del_thresh"]
INS_THRESH = trestle_config.vals["ins_thresh"]
MAX_ITER = trestle_config.vals["max_iter"]
MIN_ALN_RATE = trestle_config.vals["min_aln_rate"]
NUM_POL_ITERS = trestle_config.vals["num_pol_iters"]
ORIENT_CONFIG = trestle_config.vals["orientations_to_run"]
zero_it = 0
(all_labels, pol_dir_names, initial_file_names,
pre_file_names, div_file_names, aln_names,
middle_file_names, output_file_names) = all_file_names
repeat_label, side_labels = all_labels
pol_temp_name, pol_ext_name, pol_cons_name = pol_dir_names
(template_name, extended_name, repeat_reads_name,
pre_partitioning_name) = initial_file_names
pre_edge_reads_name, pre_read_aln_name, partitioning_name = pre_file_names
div_freq_name, div_pos_name, div_summ_name = div_file_names
(reads_template_aln_name, cons_temp_aln_name,
cut_cons_temp_aln_name, reads_cons_aln_name) = aln_names
(confirmed_pos_name, edge_reads_name,
cut_cons_name, cons_vs_cons_name) = middle_file_names
(side_stats_name, int_stats_name, int_confirmed_pos_name,
resolved_rep_name, res_vs_res_name) = output_file_names
logger.info("Resolving repeat %d: %s",
rep_id, repeats_info[rep_id].repeat_path)
repeat_dir = os.path.join(trestle_dir,
repeat_label.format(rep_id))
run_orientations = []
if ORIENT_CONFIG == "forward":
run_orientations = [("forward", rep_id)]
elif ORIENT_CONFIG == "reverse":
run_orientations = [("reverse", -rep_id)]
elif ORIENT_CONFIG == "both":
run_orientations = [("forward", rep_id), ("reverse", -rep_id)]
repeat_bridged = False
resolved_dict = {}
summary_list = []
for orientation, rep in run_orientations:
logger.debug("Orientation: " + orientation)
orient_dir = os.path.join(repeat_dir, orientation)
template = os.path.join(orient_dir, template_name)
extended = os.path.join(orient_dir, extended_name)
repeat_reads = os.path.join(orient_dir, repeat_reads_name)
term_bool = {s:False for s in side_labels}
#2. Polish template and extended templates
logger.debug("Polishing templates")
pol_temp_dir = os.path.join(orient_dir, pol_temp_name)
if not os.path.isdir(pol_temp_dir):
os.mkdir(pol_temp_dir)
polished_template, _ = \
pol.polish(template, [repeat_reads], pol_temp_dir, NUM_POL_ITERS,
num_threads, args.platform, output_progress=False)
if not os.path.getsize(polished_template):
for side in side_labels:
term_bool[side] = True
polished_extended = {}
pol_ext_dir = os.path.join(orient_dir, pol_ext_name)
for side in side_labels:
for edge_id in repeat_edges[rep][side]:
if not os.path.isdir(pol_ext_dir.format(side, edge_id)):
os.mkdir(pol_ext_dir.format(side, edge_id))
pol_output, _ = \
pol.polish(extended.format(side, edge_id), [repeat_reads],
pol_ext_dir.format(side, edge_id), NUM_POL_ITERS,
num_threads, args.platform,
output_progress=False)
polished_extended[(side, edge_id)] = pol_output
if not os.path.getsize(pol_output):
term_bool[side] = True
#3. Find divergent positions
logger.debug("Estimating divergence")
frequency_path = os.path.join(orient_dir, div_freq_name)
position_path = os.path.join(orient_dir, div_pos_name)
summary_path = os.path.join(orient_dir, div_summ_name)
#logger.info("running Minimap2")
alignment_file = os.path.join(orient_dir, reads_template_aln_name)
template_len = 0.0
if os.path.getsize(polished_template):
flye_aln.make_alignment(polished_template, [repeat_reads],
num_threads, orient_dir, args.platform,
alignment_file, reference_mode=True, sam_output=True)
template_info = flye_aln.get_contigs_info(polished_template)
template_len = template_info[str(rep)].length
logger.debug("Finding tentative divergence positions")
div.find_divergence(alignment_file, polished_template,
template_info, frequency_path, position_path,
summary_path, MIN_ALN_RATE,
args.platform, num_threads,
SUB_THRESH, DEL_THRESH, INS_THRESH)
read_endpoints = find_read_endpoints(alignment_file,
polished_template)
avg_cov = find_coverage(frequency_path)
#4. Initialize paths, variables, and stats
pre_partitioning = os.path.join(orient_dir, pre_partitioning_name)
pre_edge_reads = os.path.join(orient_dir, pre_edge_reads_name)
pre_read_align = os.path.join(orient_dir, pre_read_aln_name)
partitioning = os.path.join(orient_dir, partitioning_name)
cons_align = os.path.join(orient_dir, cons_temp_aln_name)
cut_cons_align = os.path.join(orient_dir, cut_cons_temp_aln_name)
read_align = os.path.join(orient_dir, reads_cons_aln_name)
confirmed_pos_path = os.path.join(orient_dir, confirmed_pos_name)
edge_reads = os.path.join(orient_dir, edge_reads_name)
cut_cons = os.path.join(orient_dir, cut_cons_name)
polishing_dir = os.path.join(orient_dir, pol_cons_name)
cons_vs_cons = os.path.join(orient_dir, cons_vs_cons_name)
side_stats = os.path.join(orient_dir, side_stats_name)
integrated_stats = os.path.join(orient_dir, int_stats_name)
int_confirmed_path = os.path.join(orient_dir,
int_confirmed_pos_name)
resolved_rep_path = os.path.join(orient_dir, resolved_rep_name)
res_vs_res = os.path.join(orient_dir, res_vs_res_name)
#5. Re-align reads to extended and initialize partitioning 0
logger.debug("Checking initial set of edge reads")
for side in side_labels:
for edge_id in repeat_edges[rep][side]:
write_edge_reads(zero_it, side, edge_id,
repeat_reads,
pre_partitioning.format(side),
pre_edge_reads.format(side, edge_id))
flye_aln.make_alignment(polished_extended[(side, edge_id)],
[pre_edge_reads.format(side, edge_id)],
num_threads, orient_dir, args.platform,
pre_read_align.format(side, edge_id),
reference_mode=True, sam_output=True)
init_partitioning(repeat_edges[rep][side],
side, pre_partitioning.format(side),
pre_read_align, polished_extended,
partitioning.format(zero_it, side))
cut_consensus = {}
side_it = {s:0 for s in side_labels}
iter_pairs = []
edge_below_cov = {s:False for s in side_labels}
dup_part = {s:False for s in side_labels}
prev_partitionings = {s:set() for s in side_labels}
#6. Initialize stats
for side in side_labels:
edge_below_cov[side] = init_side_stats(
rep, side, repeat_edges, args.min_overlap,
position_path,
partitioning.format(zero_it, side),
prev_partitionings[side],
template_len,
side_stats.format(side))
init_int_stats(rep, repeat_edges, zero_it, position_path,
partitioning, repeat_reads, template_len,
avg_cov, integrated_stats)
#7. Start iterations
logger.debug("Iterative procedure")
for it in range(1, MAX_ITER + 1):
both_break = True
for side in side_labels:
if (edge_below_cov[side] or dup_part[side] or
term_bool[side]):
continue
else:
logger.debug("Iteration %d, '%s'", it, side)
both_break = False
for edge_id in sorted(repeat_edges[rep][side]):
#7a. Call consensus on partitioned reads
pol_con_dir = polishing_dir.format(
it, side, edge_id)
curr_reads = edge_reads.format(it, side, edge_id)
write_edge_reads(
it, side, edge_id,
repeat_reads,
partitioning.format(it - 1, side),
curr_reads)
curr_extended = polished_extended[(side, edge_id)]
logger.debug("\tPolishing '%s %s' reads", side, edge_id)
if not os.path.isdir(pol_con_dir):
os.mkdir(pol_con_dir)
pol_con_out, _ = \
pol.polish(curr_extended, [curr_reads], pol_con_dir,
NUM_POL_ITERS, num_threads, args.platform,
output_progress=False)
#7b. Cut consensus where coverage drops
cutpoint = locate_consensus_cutpoint(
side, read_endpoints,
curr_reads)
if os.path.getsize(pol_con_out):
cons_al_file = cons_align.format(it, side, edge_id)
flye_aln.make_alignment(polished_template, [pol_con_out],
num_threads, orient_dir,
args.platform, cons_al_file,
reference_mode=True, sam_output=True)
else:
term_bool[side] = True
curr_cut_cons = cut_cons.format(it, side, edge_id)
cut_consensus[(it, side, edge_id)] = curr_cut_cons
if os.path.isfile(cons_al_file):
truncate_consensus(side, cutpoint, cons_al_file,
polished_template,
pol_con_out, curr_cut_cons)
else:
term_bool[side] = True
#7c. Align consensuses to template
# and reads to consensuses
if os.path.isfile(curr_cut_cons):
cut_cons_al_file = cut_cons_align.format(it, side, edge_id)
flye_aln.make_alignment(polished_template, [curr_cut_cons],
num_threads, orient_dir,
args.platform, cut_cons_al_file,
reference_mode=True, sam_output=True)
read_al_file = read_align.format(it, side, edge_id)
flye_aln.make_alignment(curr_cut_cons, [repeat_reads],
num_threads, orient_dir,
args.platform, read_al_file,
reference_mode=True, sam_output=True)
else:
term_bool[side] = True
#7d. Partition reads using divergent positions
logger.debug("\tPartitioning '%s' reads", side)
partition_reads(repeat_edges[rep][side], it, side,
position_path, cut_cons_align,
polished_template, read_align,
cut_consensus, confirmed_pos_path,
partitioning, all_edge_headers[rep])
#7e. Write stats file for current iteration
edge_pairs = sorted(combinations(repeat_edges[rep][side],
2))
for edge_one, edge_two in edge_pairs:
cons_one = cut_consensus[(it, side, edge_one)]
cons_two = cut_consensus[(it, side, edge_two)]
if (not os.path.isfile(cons_one) or
not os.path.isfile(cons_two)):
continue
cons_cons_file = cons_vs_cons.format(
it, side, edge_one,
it, side, edge_two)
flye_aln.make_alignment(cons_two, [cons_one],
num_threads, orient_dir,
args.platform, cons_cons_file,
reference_mode=True, sam_output=True)
side_stat_outputs = update_side_stats(
repeat_edges[rep][side], it, side,
cut_cons_align, polished_template,
confirmed_pos_path.format(it, side),
partitioning.format(it, side),
prev_partitionings[side],
side_stats.format(side))
edge_below_cov[side], dup_part[side] = side_stat_outputs
side_it[side] = it
iter_pairs.append((side_it[side_labels[0]],
side_it[side_labels[1]]))
update_int_stats(rep, repeat_edges, side_it, cut_cons_align,
polished_template,
template_len,
confirmed_pos_path, int_confirmed_path,
partitioning, integrated_stats)
if both_break:
break
#8. Finalize stats files
logger.debug("Writing stats files")
for side in side_labels:
finalize_side_stats(repeat_edges[rep][side], side_it[side],
side, cut_cons_align, polished_template,
cons_vs_cons, cut_consensus,
confirmed_pos_path.format(side_it[side],
side),
partitioning.format(side_it[side], side),
edge_below_cov[side],
dup_part[side], term_bool[side],
side_stats.format(side))
final_int_outputs = finalize_int_stats(rep, repeat_edges, side_it,
cut_cons_align,
polished_template,
template_len, cons_vs_cons,
cut_consensus,
int_confirmed_path,
partitioning,
integrated_stats,
resolved_rep_path)
bridged, repeat_seqs, summ_vals = final_int_outputs
#9. Generate summary and resolved repeat file
logger.debug("Generating summary and resolved repeat file")
avg_div = 0.0
both_resolved_present = False
if bridged:
res_inds = list(range(len(repeat_edges[rep]["in"])))
for res_one, res_two in sorted(combinations(res_inds, 2)):
res_one_path = resolved_rep_path.format(rep, res_one)
res_two_path = resolved_rep_path.format(rep, res_two)
if (os.path.isfile(res_one_path) and
os.path.isfile(res_two_path)):
both_resolved_present = True
repeat_bridged = True
flye_aln.make_alignment(res_two_path, [res_one_path],
num_threads, orient_dir,
args.platform,
res_vs_res.format(rep, res_one, res_two),
reference_mode=True, sam_output=True)
if both_resolved_present:
avg_div = int_stats_postscript(rep, repeat_edges,
integrated_stats,
resolved_rep_path,
res_vs_res)
if both_resolved_present:
resolved_dict.update(repeat_seqs)
summary_list.append((rep, repeats_info[rep].repeat_path, template_len,
avg_cov, summ_vals, avg_div,
both_resolved_present))
remove_unneeded_files(repeat_edges, rep, side_labels, side_it,
orient_dir, template, extended, pol_temp_dir,
pol_ext_dir, pre_edge_reads,
pre_partitioning, pre_read_align,
partitioning, cons_align, cut_cons_align,
read_align, confirmed_pos_path, edge_reads,
cut_cons, polishing_dir, cons_vs_cons,
int_confirmed_path, repeat_reads,
frequency_path, alignment_file,
NUM_POL_ITERS, iter_pairs)
if repeat_bridged:
logger.info("Repeat successfully resolved")
else:
logger.info("Repeat not resolved")
return resolved_dict, summary_list
def define_file_names():
    """Return the directory/file name templates used for trestle output.

    The templates are grouped into tuples by processing stage and
    returned as one 8-tuple: (labels, polishing dir names, initial file
    names, pre-partitioning file names, divergence file names, alignment
    file names, middle file names, output file names).
    """
    all_labels = ("repeat_{0}", ["in", "out"])
    pol_dir_names = ("Polishing.Template",
                     "Polishing.Extended.{0}.{1}",
                     "Polishing.Consensus.{0}.{1}.{2}")
    initial_file_names = ("template.fasta",
                          "extended_templates.{0}.{1}.fasta",
                          "repeat_reads.fasta",
                          "pre_partitioning.{0}.txt")
    pre_file_names = ("pre_edge_reads.{0}.{1}.txt",
                      "pre_edge_reads.{0}.{1}.vs.extended.minimap.bam",
                      "partitioning.{0}.{1}.txt")
    div_file_names = ("divergence_frequencies.txt",
                      "divergent_positions.txt",
                      "divergence_summary.txt")
    aln_names = ("reads.vs.template.minimap.bam",
                 "uncut_consensus.{0}.{1}.{2}.vs.template.minimap.bam",
                 "consensus.{0}.{1}.{2}.vs.template.minimap.bam",
                 "reads.vs.consensus.{0}.{1}.{2}.minimap.bam")
    middle_file_names = ("confirmed_positions.{0}.{1}.txt",
                         "edge_reads.{0}.{1}.{2}.fasta",
                         "consensus.{0}.{1}.{2}.fasta",
                         "consensus.{0}.{1}.{2}.vs."
                         "consensus.{3}.{4}.{5}.minimap.bam")
    output_file_names = ("stats_from_{0}.txt",
                         "stats_integrated.txt",
                         "integrated_confirmed_positions.{0}.{1}.txt",
                         "resolved_repeat_{0}.copy.{1}.fasta",
                         "resolved_repeat_{0}.copy.{1}.vs.{2}.minimap.bam")
    return (all_labels, pol_dir_names, initial_file_names,
            pre_file_names, div_file_names, aln_names,
            middle_file_names, output_file_names)
#Process Repeats functions
class ProcessingException(Exception):
    """Raised when repeat pre-processing encounters invalid input."""
def process_repeats(reads, repeats_dict, work_dir, all_labels,
                    initial_file_names):
    """
    Generates repeat dirs and files given reads, repeats_dump and
    graph_edges files. Only returns repeats between min_mult and max_mult
    """
    # Nothing to do: mirror _process_repeats_impl's result shape
    # (repeat_list, repeat_edges, all_edge_headers).
    if not repeats_dict:
        return [], {}, {}
    #creates a separate process to make sure that
    #read dictionary is released after the function exits
    manager = multiprocessing.Manager()
    return_queue = manager.Queue()
    # Ignore SIGINT while the child process is created so the child
    # starts with SIGINT ignored; the parent's original handler is
    # restored immediately after, so only the parent reacts to Ctrl-C.
    orig_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
    thread = multiprocessing.Process(target=_process_repeats_impl,
                                     args=(reads, repeats_dict,
                                           work_dir, all_labels,
                                           initial_file_names,
                                           return_queue))
    signal.signal(signal.SIGINT, orig_sigint)
    thread.start()
    try:
        thread.join()
        # exitcode -9 => child killed by SIGKILL, typically the kernel
        # OOM killer.
        if thread.exitcode == -9:
            logger.error("Looks like the system ran out of memory")
        if thread.exitcode != 0:
            raise Exception("One of the processes exited with code: {0}"
                            .format(thread.exitcode))
    except KeyboardInterrupt:
        thread.terminate()
        raise
    # Result tuple put on the queue by the child at the end of
    # _process_repeats_impl.
    return return_queue.get()
def _process_repeats_impl(reads, repeats_dict, work_dir, all_labels,
                          initial_file_names, return_queue):
    """
    This function is called in a separate process.

    Builds per-repeat working directories plus the template,
    extended-template, repeat-reads and pre-partitioning files, and puts
    (repeat_list, repeat_edges, all_edge_headers) on return_queue.
    Raises ProcessingException on malformed read headers or missing
    sequences.
    """
    MIN_MULT = trestle_config.vals["min_mult"]
    MAX_MULT = trestle_config.vals["max_mult"]
    FLANKING_LEN = trestle_config.vals["flanking_len"]
    ORIENT_CONFIG = trestle_config.vals["orientations_to_run"]
    repeat_label, side_labels = all_labels
    (template_name, extended_name, repeat_reads_name,
     pre_partitioning_name) = initial_file_names
    reads_dict = {}
    for read_file in reads:
        reads_dict.update(fp.read_sequence_dict(read_file))
    #orig_graph = fp.read_sequence_dict(graph_edges)
    #graph_dict = {int(h.split('_')[1]):orig_graph[h] for h in orig_graph}
    if not reads_dict:
        raise ProcessingException("No reads found from {0}".format(reads))
    #if not graph_dict:
    #    raise ProcessingException("No edges found from {0}".format(
    #        graph_edges))
    repeat_list = []
    repeat_edges = {}
    all_edge_headers = {}
    for rep in sorted(repeats_dict, reverse=True):
        #Checks multiplicity of repeat and presence of reverse strand
        #One run processes both forward and reverse strand of repeat
        if rep <= 0:
            continue
        valid_repeat = True
        if -rep not in repeats_dict:
            logger.debug("Repeat %s missing reverse strand", rep)
            valid_repeat = False
        elif (repeats_dict[rep].multiplicity < MIN_MULT or
              repeats_dict[rep].multiplicity > MAX_MULT or
              repeats_dict[-rep].multiplicity < MIN_MULT or
              repeats_dict[-rep].multiplicity > MAX_MULT):
            logger.debug("Repeat %s multiplicity not in range: %s",
                         rep, repeats_dict[rep].multiplicity)
            valid_repeat = False
        #if rep not in graph_dict:
        #    logger.debug("Repeat {0} missing from graph file".format(rep))
        #    valid_repeat = False
        if not valid_repeat:
            continue
        #Makes repeat dirs
        repeat_dir = os.path.join(work_dir, repeat_label.format(rep))
        if not os.path.isdir(repeat_dir):
            os.mkdir(repeat_dir)
        repeat_list.append(rep)
        run_orientations = []
        if ORIENT_CONFIG == "forward":
            run_orientations = [("forward", rep)]
        elif ORIENT_CONFIG == "reverse":
            run_orientations = [("reverse", -rep)]
        elif ORIENT_CONFIG == "both":
            run_orientations = [("forward", rep), ("reverse", -rep)]
        for curr_label, curr_rep in run_orientations:
            orient_path = os.path.join(repeat_dir, curr_label)
            if not os.path.isdir(orient_path):
                os.mkdir(orient_path)
            template_path = os.path.join(orient_path, template_name)
            extended_path = os.path.join(orient_path, extended_name)
            repeat_reads_path = os.path.join(orient_path, repeat_reads_name)
            partitioning_path = os.path.join(orient_path,
                                             pre_partitioning_name)
            in_label = side_labels[0]
            out_label = side_labels[1]
            repeat_edges[curr_rep] = {in_label:[], out_label:[]}
            #(mult, all_reads_list, inputs_dict,
            # outputs_dict) = repeats_dict[curr_rep]
            #mult = repeats_dict[curr_rep].multiplicity
            all_reads_list = repeats_dict[curr_rep].all_reads
            inputs_dict = repeats_dict[curr_rep].in_reads
            outputs_dict = repeats_dict[curr_rep].out_reads
            template_dict = {}
            extended_dicts = {}
            repeat_reads_dict = {}
            #Partitioning parts: id_num, Partitioned/Tied/None,
            #edge_id, top_score, total_score, Header
            partitioning = {in_label:[], out_label:[]}
            read_id = 0
            template_seq = repeats_dict[curr_rep].sequences["template"]
            #if curr_label == "reverse":
            #    template_seq = fp.reverse_complement(graph_dict[rep])
            template_dict[curr_rep] = template_seq
            all_edge_headers[curr_rep] = {}
            out_headers = set()
            #Headers will be in the form -h or +h,
            #edge_dict is in the form >[Input,Output]_edge##_h,
            #rev_comp of read will be written if the header is -h
            for edge_id in inputs_dict:
                repeat_edges[curr_rep][in_label].append(edge_id)
                extended_dicts[(in_label, edge_id)] = {}
                headers = inputs_dict[edge_id]
                for header in headers:
                    if (not header) or (header[0] != '+' and header[0] != '-'):
                        raise ProcessingException(
                            "Input read format not recognized: {0}".format(
                                header))
                    if header[1:] not in reads_dict:
                        raise ProcessingException(
                            "Read header {0} not in any of {1}".format(
                                header[1:], reads))
                    if header[1:] not in all_edge_headers[curr_rep]:
                        status_label = "Partitioned"
                        edge_label = str(edge_id)
                        score = 1
                        total_score = 0
                        partitioning[in_label].append((read_id, status_label,
                                                       edge_label, score,
                                                       total_score,
                                                       header[1:]))
                        all_edge_headers[curr_rep][header[1:]] = read_id
                        read_id += 1
                extend_in_header = "Extended_Template_Input_{0}".format(
                    edge_id)
                #if edge_id > 0:
                #    edge_seq = graph_dict[edge_id]
                #elif edge_id < 0:
                #    edge_seq = fp.reverse_complement(graph_dict[-edge_id])
                edge_seq = repeats_dict[curr_rep].sequences[edge_id]
                extended_seq = edge_seq[-FLANKING_LEN:]
                extended_dicts[(in_label, edge_id)][extend_in_header] = (
                    extended_seq + template_seq)
            for edge_id in outputs_dict:
                repeat_edges[curr_rep][out_label].append(edge_id)
                extended_dicts[(out_label, edge_id)] = {}
                headers = outputs_dict[edge_id]
                for header in headers:
                    if (not header) or (header[0] != '+' and header[0] != '-'):
                        raise ProcessingException(
                            "Output read format not recognized: {0}".format(
                                header))
                    if header[1:] not in reads_dict:
                        raise ProcessingException(
                            "Read header {0} not in any of {1}".format(
                                header[1:], reads))
                    curr_read_id = read_id
                    if header[1:] not in all_edge_headers[curr_rep]:
                        status_label = "None"
                        edge_label = "NA"
                        score = 0
                        total_score = 0
                        partitioning[in_label].append((read_id, status_label,
                                                       edge_label, score,
                                                       total_score,
                                                       header[1:]))
                        all_edge_headers[curr_rep][header[1:]] = read_id
                        read_id += 1
                    else:
                        curr_read_id = all_edge_headers[curr_rep][header[1:]]
                    if header[1:] not in out_headers:
                        status_label = "Partitioned"
                        edge_label = str(edge_id)
                        score = 1
                        total_score = 0
                        partitioning[out_label].append((curr_read_id,
                                                        status_label,
                                                        edge_label, score,
                                                        total_score,
                                                        header[1:]))
                        out_headers.add(header[1:])
                extend_out_header = "Extended_Template_Output_{0}".format(
                    edge_id)
                #if edge_id > 0:
                #    edge_seq = graph_dict[edge_id]
                #elif edge_id < 0:
                #    edge_seq = fp.reverse_complement(graph_dict[-edge_id])
                edge_seq = repeats_dict[curr_rep].sequences[edge_id]
                extended_seq = edge_seq[:FLANKING_LEN]
                extended_dicts[(out_label, edge_id)][extend_out_header] = (
                    template_seq + extended_seq)
            #Need to reiterate over in_headers to add in_headers to
            #out-partitioning while avoiding double-adding ones in both
            for edge_id in inputs_dict:
                headers = inputs_dict[edge_id]
                for header in headers:
                    if header[1:] not in out_headers:
                        curr_read_id = all_edge_headers[curr_rep][header[1:]]
                        status_label = "None"
                        edge_label = "NA"
                        score = 0
                        total_score = 0
                        partitioning[out_label].append((curr_read_id,
                                                        status_label,
                                                        edge_label, score,
                                                        total_score,
                                                        header[1:]))
            for header in all_reads_list:
                if (not header) or (header[0] != '+' and header[0] != '-'):
                    raise ProcessingException(
                        "All reads format not recognized: {0}".format(header))
                if header[1:] not in reads_dict:
                    raise ProcessingException(
                        "Read header {0} not in any of {1}".format(
                            header[1:], reads))
                seq = reads_dict[header[1:]]
                if header[0] == '-':
                    seq = fp.reverse_complement(seq)
                repeat_reads_dict[header[1:]] = seq
                curr_read_id = read_id
                if header[1:] not in all_edge_headers[curr_rep]:
                    all_edge_headers[curr_rep][header[1:]] = read_id
                    read_id += 1
                    status_label = "None"
                    edge_label = "NA"
                    score = 0
                    total_score = 0
                    partitioning[in_label].append((curr_read_id, status_label,
                                                   edge_label, score,
                                                   total_score, header[1:]))
                    status_label = "None"
                    edge_label = "NA"
                    score = 0
                    total_score = 0
                    partitioning[out_label].append((curr_read_id, status_label,
                                                    edge_label, score,
                                                    total_score, header[1:]))
            if template_dict and list(template_dict.values())[0]:
                fp.write_fasta_dict(template_dict, template_path)
            for edge in extended_dicts:
                if extended_dicts[edge] and list(extended_dicts[edge].values())[0]:
                    extended_edge_path = extended_path.format(edge[0],
                                                              edge[1])
                    fp.write_fasta_dict(extended_dicts[edge],
                                        extended_edge_path)
            if repeat_reads_dict and list(repeat_reads_dict.values())[0]:
                fp.write_fasta_dict(repeat_reads_dict, repeat_reads_path)
            for side in side_labels:
                _write_partitioning_file(partitioning[side],
                                         partitioning_path.format(side))
            if not template_dict:
                raise ProcessingException("No template {0} found".format(
                    curr_rep))
            for edge in extended_dicts:
                # Bug fix: validate each extended-template dict; the
                # original re-checked template_dict here, so an empty
                # extended dict was never detected despite the message.
                if not extended_dicts[edge]:
                    raise ProcessingException(
                        "No extended template {0} {1} {2} found".format(
                            curr_rep, edge[0], edge[1]))
            if not repeat_reads_dict:
                raise ProcessingException("No repeat reads {0} found".format(
                    curr_rep))
            for side in side_labels:
                if not partitioning[side]:
                    raise ProcessingException(
                        "Empty partitioning file {0}".format(
                            partitioning_path.format(side)))
    return_queue.put((repeat_list, repeat_edges, all_edge_headers))
def _write_partitioning_file(part_list, part_path):
with open(part_path, "w") as f:
header_labels = ["Read_ID", "Status", "Edge", "Top Score",
"Total Score", "Header"]
spaced_header = ["{:11}".format(h) for h in header_labels]
f.write("\t".join(spaced_header))
f.write("\n")
for read_label in sorted(part_list):
spaced_label = ["{:11}".format(h) for h in read_label]
f.write("\t".join(spaced_label))
f.write("\n")
def _read_partitioning_file(partitioning_file):
part_list = []
with open(partitioning_file, "r") as f:
for i, line in enumerate(f):
if i > 0:
line = line.strip()
tokens = [t.strip() for t in line.split("\t")]
for int_ind in [0, 3, 4]:
tokens[int_ind] = int(tokens[int_ind])
part_list.append(tuple(tokens))
return part_list
def find_coverage(frequency_file):
    """Return the mean of the "Cov" column of a divergence-frequency
    file, or 0.0 when the file does not exist."""
    if not os.path.isfile(frequency_file):
        return 0.0
    header, freqs = div.read_frequency_path(frequency_file)
    cov_ind = header.index("Cov")
    return _mean([row[cov_ind] for row in freqs])
def write_edge_reads(it, side, edge_id, all_reads, partitioning, out_file):
    """Collect the reads assigned to *edge_id* in *partitioning* and
    write them to *out_file* with iteration/side/edge-tagged headers.
    Nothing is written when no (non-empty) read is selected."""
    reads_by_header = fp.read_sequence_dict(all_reads)
    header_template = "Read_{0}|Iter_{1}|Side_{2}|Edge_{3}|{4}"
    selected = {}
    part_rows = _read_partitioning_file(partitioning)
    for read_id, status, edge, _, _, header in part_rows:
        # Skip unassigned rows; "NA" is tested first so int() is safe.
        if status != "Partitioned" or edge == "NA" or int(edge) != edge_id:
            continue
        tagged = header_template.format(read_id, it, side, edge_id, header)
        selected[tagged] = reads_by_header[header]
    if selected and list(selected.values())[0]:
        fp.write_fasta_dict(selected, out_file)
def init_partitioning(edges, side, pre_partitioning, pre_read_align, extended,
                      partitioning):
    """Create the initial read partitioning for one side of a repeat.

    A read is assigned to an edge when its pre-alignment overlaps the
    flanking region of that edge's extended template; every other read
    is written out unassigned ("None").
    """
    FLANKING_LEN = trestle_config.vals["flanking_len"]
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    #dict from read_header to edge
    read_to_edge = {}
    for edge in edges:
        outside_flank = 0
        aligns = _read_alignment(pre_read_align.format(side, edge),
                                 extended[(side, edge)], CONS_ALN_RATE)
        if aligns and aligns[0]:
            for aln in aligns[0]:
                # Original read header is the last "|"-separated field.
                read_header = aln.qry_id.split("|")[-1]
                in_flank = ((side == "in" and
                             aln.trg_start < FLANKING_LEN) or
                            (side == "out" and
                             aln.trg_end >= aln.trg_len - FLANKING_LEN))
                if in_flank:
                    read_to_edge[read_header] = str(edge)
                else:
                    outside_flank += 1
        logger.debug("Side %s, edge %s, non-overlap reads = %d",
                     side, edge, outside_flank)
    new_rows = []
    for read_id, _, _, _, _, header in _read_partitioning_file(pre_partitioning):
        if header in read_to_edge:
            new_rows.append((read_id, "Partitioned", read_to_edge[header],
                             1, 0, header))
        else:
            new_rows.append((read_id, "None", "NA", 0, 0, header))
    _write_partitioning_file(new_rows, partitioning)
#Cut Consensus Functions
def find_read_endpoints(alignment_file, template):
    """Map each read id to its (trg_start, trg_end) on the template,
    keeping only the first alignment encountered per read."""
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    aligns = _read_alignment(alignment_file, template, CONS_ALN_RATE)
    endpoints = {}
    if not (aligns and aligns[0]):
        logger.debug("No read alignment to template, no read_endpoints")
        return endpoints
    for aln in aligns[0]:
        endpoints.setdefault(aln.qry_id, (aln.trg_start, aln.trg_end))
    return endpoints
def locate_consensus_cutpoint(side, read_endpoints, edge_read_file):
    """Find the template coordinate at which to cut an edge consensus.

    Builds a per-position read-coverage profile from the endpoints of
    the reads listed in *edge_read_file*, then scans with a 100-base
    window for mean coverage >= min_edge_cov: right-to-left for the
    "in" side (returning the window's end), left-to-right for the
    "out" side (returning the window's start). Returns -1 when no
    window reaches the coverage threshold.
    """
    MIN_EDGE_COV = trestle_config.vals["min_edge_cov"]
    all_endpoints = []
    max_endpoint = 0
    edge_reads = fp.read_sequence_dict(edge_read_file)
    for edge_header in edge_reads:
        # Edge-read headers are "Read_..|Iter_..|Side_..|Edge_..|<orig>";
        # the last field is the original read header.
        parts = edge_header.split("|")
        read_header = parts[-1]
        if read_header in read_endpoints:
            endpoint = read_endpoints[read_header]
            if max(endpoint) > max_endpoint:
                max_endpoint = max(endpoint)
            all_endpoints.append(endpoint)
    coverage = [0 for _ in range(max_endpoint + 1)]
    for start, end in all_endpoints:
        # End coordinate treated as exclusive.
        for x in range(start, end):
            coverage[x] += 1
    window_len = 100
    cutpoint = -1
    for i in range(len(coverage) - window_len):
        if side == "in":
            # Slide the window from the right end leftwards.
            window_start = (len(coverage) - window_len) - i
            window_end = len(coverage) - i
            if window_start < 0:
                window_start = 0
            if window_end > len(coverage):
                window_end = len(coverage)
            avg_cov = _mean(coverage[window_start:window_end])
            if avg_cov >= MIN_EDGE_COV:
                cutpoint = window_end
                break
        elif side == "out":
            # Slide the window from the left end rightwards.
            window_start = i
            window_end = i + window_len
            if window_start < 0:
                window_start = 0
            if window_end > len(coverage):
                window_end = len(coverage)
            avg_cov = _mean(coverage[window_start:window_end])
            if avg_cov >= MIN_EDGE_COV:
                cutpoint = window_start
                break
    return cutpoint
def truncate_consensus(side, cutpoint, cons_al_file, template,
                       polished_consensus, cut_cons_file):
    """Cut the polished consensus at the template *cutpoint* and write
    the kept piece ("in": prefix, "out": suffix) to *cut_cons_file*.
    Nothing is written when there is no cutpoint, no alignment of the
    consensus to the template, or no mappable consensus coordinate."""
    if cutpoint == -1:
        logger.debug("No cutpoint for consensus file")
        return
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    cons_al = _read_alignment(cons_al_file, template, CONS_ALN_RATE)
    if not (cons_al and cons_al[0]):
        logger.debug("No cons alignment to template, no cut consensus")
        return
    consensus_endpoint = _find_consensus_endpoint(cutpoint, cons_al, side)
    if consensus_endpoint == -1:
        return
    cons_seqs = fp.read_sequence_dict(polished_consensus)
    cons_head = list(cons_seqs.keys())[0]
    consensus = list(cons_seqs.values())[0]
    if side == "in":
        start, end = 0, consensus_endpoint
    elif side == "out":
        start, end = consensus_endpoint, len(consensus)
    cut_head = "".join([cons_head, "|{0}_{1}".format(start, end)])
    fp.write_fasta_dict({cut_head: consensus[start:end]}, cut_cons_file)
def _find_consensus_endpoint(cutpoint, aligns, side):
    """Translate a template *cutpoint* into a consensus coordinate.

    First tries the collapsed alignment; if the cutpoint lies outside
    it, falls back to the individual alignments (the primary one plus
    sufficiently long supplementary ones) and picks the candidate
    endpoint closest to the cutpoint. Returns -1 when no alignment
    covers or flanks the cutpoint.
    """
    consensus_endpoint = -1
    #first try collapsing
    coll_aln = _collapse_cons_aln(aligns)
    if cutpoint >= coll_aln.trg_start and cutpoint < coll_aln.trg_end:
        trg_aln, _ = _index_mapping(coll_aln.trg_seq)
        _, aln_qry = _index_mapping(coll_aln.qry_seq)
        cutpoint_minus_start = cutpoint - coll_aln.trg_start
        aln_ind = trg_aln[cutpoint_minus_start]
        qry_ind = aln_qry[aln_ind]
        consensus_endpoint = qry_ind + coll_aln.qry_start
    else:
        #otherwise try each alignment
        MIN_SUPP_ALN_LEN = trestle_config.vals["min_supp_align_len"]
        #save tuples of cutpoint distance, cutpoint
        aln_endpoints = []
        for i, aln in enumerate(aligns[0]):
            if i == 0 or len(aln.trg_seq) >= MIN_SUPP_ALN_LEN:
                if cutpoint >= aln.trg_start and cutpoint < aln.trg_end:
                    trg_aln, _ = _index_mapping(aln.trg_seq)
                    _, aln_qry = _index_mapping(aln.qry_seq)
                    cutpoint_minus_start = cutpoint - aln.trg_start
                    if cutpoint_minus_start < 0:
                        logger.warning("%s %s %s %s %s", aln.qry_id, aln.trg_id, side,
                                       cutpoint, cutpoint_minus_start)
                        aln_ind = trg_aln[0]
                    elif cutpoint_minus_start >= len(trg_aln):
                        logger.warning("%s %s %s %s %s", aln.qry_id, aln.trg_id, side,
                                       cutpoint, cutpoint_minus_start)
                        aln_ind = trg_aln[-1]
                    else:
                        aln_ind = trg_aln[cutpoint_minus_start]
                    qry_ind = aln_qry[aln_ind]
                    # Bug fix: offset by this alignment's own query
                    # start (all other indices in this branch come from
                    # `aln`), not the collapsed alignment's qry_start.
                    endpoint = qry_ind + aln.qry_start
                    aln_endpoints.append((0, endpoint))
                elif side == "in" and cutpoint >= aln.trg_end:
                    endpoint = aln.qry_end
                    distance = cutpoint - aln.trg_end
                    aln_endpoints.append((distance, endpoint))
                elif side == "out" and cutpoint < aln.trg_start:
                    endpoint = aln.qry_start
                    distance = aln.trg_start - cutpoint
                    aln_endpoints.append((distance, endpoint))
        if aln_endpoints:
            consensus_endpoint = sorted(aln_endpoints)[0][1]
    return consensus_endpoint
#Partition Reads Functions
def partition_reads(edges, it, side, position_path, cons_align_path,
                    template, read_align_path, consensuses,
                    confirmed_pos_path, part_file,
                    headers_to_id):
    """Re-partition reads among edges for iteration *it* of one side.

    Confirms divergent template positions from the consensus-vs-template
    alignments, writes them to confirmed_pos_path, then classifies reads
    by their alignments to the cut consensuses and writes part_file.
    If any required alignment/consensus file is missing (skip_bool), the
    previous iteration's positions and partitioning are reused instead.
    """
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    BUFFER_COUNT = trestle_config.vals["buffer_count"]
    skip_bool = False
    _, pos = div.read_positions(position_path)
    cons_aligns = {}
    for edge_id in edges:
        if not os.path.isfile(cons_align_path.format(it, side, edge_id)):
            skip_bool = True
        else:
            cons_aligns[edge_id] = _read_alignment(cons_align_path.format(it,
                                                                          side,
                                                                          edge_id),
                                                   template,
                                                   CONS_ALN_RATE)
        # skip_bool is tested first so the dict lookup below is never
        # reached for an edge whose alignment file was missing.
        if (skip_bool or
                not cons_aligns or
                not cons_aligns[edge_id] or
                not cons_aligns[edge_id][0]):
            logger.debug("No cons alignment found for edge %s", edge_id)
            skip_bool = True
    if skip_bool:
        if it <= 1:
            # First iteration: no previous confirmed positions to fall
            # back on, start from the raw divergent positions.
            confirmed_pos = {"total":[], "sub":[], "ins":[], "del":[]}
            rejected_pos = {"total":[], "sub":[], "ins":[], "del":[]}
            consensus_pos = pos
        else:
            previous_pos = _read_confirmed_positions(
                confirmed_pos_path.format(it - 1, side))
            confirmed_pos, rejected_pos, consensus_pos = previous_pos
    else:
        #collapse multiple consensus alignments to the template
        coll_cons_aligns = {}
        for edge_id in cons_aligns:
            aln = cons_aligns[edge_id]
            coll_cons_aligns[edge_id] = _collapse_cons_aln(aln)
        curr_pos = _evaluate_positions(pos, coll_cons_aligns, side)
        confirmed_pos, rejected_pos, consensus_pos = curr_pos
    _write_confirmed_positions(confirmed_pos, rejected_pos, pos,
                               confirmed_pos_path.format(it, side))
    read_aligns = {}
    for edge_id in edges:
        if (not os.path.isfile(read_align_path.format(it, side, edge_id)) or
                not os.path.isfile(consensuses[(it, side, edge_id)])):
            skip_bool = True
        elif not skip_bool:
            read_aligns[edge_id] = _read_alignment(
                read_align_path.format(it, side,
                                       edge_id),
                consensuses[(it, side, edge_id)],
                CONS_ALN_RATE)
        if (skip_bool or
                not read_aligns or
                not read_aligns[edge_id] or
                not read_aligns[edge_id][0]):
            logger.debug("No read alignment found for edge %s", edge_id)
            skip_bool = True
    if skip_bool:
        # Fall back to the previous iteration's partitioning verbatim.
        partitioning = _read_partitioning_file(part_file.format(it - 1, side))
    else:
        partitioning = _classify_reads(read_aligns, consensus_pos,
                                       headers_to_id, BUFFER_COUNT)
    _write_partitioning_file(partitioning, part_file.format(it, side))
def _read_alignment(alignment, target_path, min_aln_rate):
    """Read all alignment chunks for the target sequences from a
    SAM/BAM file; returns a list of per-contig alignment lists.

    min_aln_rate is accepted for interface compatibility but is not
    used by this reader.
    """
    reader = SynchronizedSamReader(alignment,
                                   fp.read_sequence_dict(target_path),
                                   config.vals["max_read_coverage"])
    chunks = []
    while not reader.is_eof():
        ctg_id, ctg_aln = reader.get_chunk()
        if ctg_id is None:
            break
        chunks.append(ctg_aln)
    reader.close()
    return chunks
def _collapse_cons_aln(cons_aligns):
    """Merge the alignments in cons_aligns[0] into one, folding in each
    alignment whose overlap with the running collapsed alignment stays
    within the configured maximum. Returns None for an empty list."""
    max_overlap = trestle_config.vals["max_supp_align_overlap"]
    merged = None
    for candidate in cons_aligns[0]:
        if merged is None:
            merged = candidate
        elif _overlap(merged, candidate) <= max_overlap:
            merged = _collapse(merged, candidate)
    return merged
def _overlap(aln_one, aln_two):
qry_overlap_lens = []
if (aln_one.qry_start >= aln_two.qry_start and
aln_one.qry_start < aln_two.qry_end):
if aln_one.qry_end >= aln_two.qry_end:
qry_overlap_lens.append(aln_two.qry_end - aln_one.qry_start)
else:
qry_overlap_lens.append(aln_one.qry_end - aln_one.qry_start)
if (aln_one.qry_end > aln_two.qry_start and
aln_one.qry_end <= aln_two.qry_end):
if aln_one.qry_start <= aln_two.qry_start:
qry_overlap_lens.append(aln_one.qry_end - aln_two.qry_start)
else:
qry_overlap_lens.append(aln_one.qry_end - aln_one.qry_start)
if (aln_two.qry_start >= aln_one.qry_start and
aln_two.qry_start < aln_one.qry_end):
if aln_two.qry_end >= aln_one.qry_end:
qry_overlap_lens.append(aln_one.qry_end - aln_two.qry_start)
else:
qry_overlap_lens.append(aln_two.qry_end - aln_two.qry_start)
if (aln_two.qry_end > aln_one.qry_start and
aln_two.qry_end <= aln_one.qry_end):
if aln_two.qry_start <= aln_one.qry_start:
qry_overlap_lens.append(aln_two.qry_end - aln_one.qry_start)
else:
qry_overlap_lens.append(aln_two.qry_end - aln_two.qry_start)
qry_len = 0
if qry_overlap_lens:
qry_len = min(qry_overlap_lens)
trg_overlap_lens = []
if (aln_one.trg_start >= aln_two.trg_start and
aln_one.trg_start < aln_two.trg_end):
if aln_one.trg_end >= aln_two.trg_end:
trg_overlap_lens.append(aln_two.trg_end - aln_one.trg_start)
else:
trg_overlap_lens.append(aln_one.trg_end - aln_one.trg_start)
if (aln_one.trg_end > aln_two.trg_start and
aln_one.trg_end <= aln_two.trg_end):
if aln_one.trg_start <= aln_two.trg_start:
trg_overlap_lens.append(aln_one.trg_end - aln_two.trg_start)
else:
trg_overlap_lens.append(aln_one.trg_end - aln_one.trg_start)
if (aln_two.trg_start >= aln_one.trg_start and
aln_two.trg_start < aln_one.trg_end):
if aln_two.trg_end >= aln_one.trg_end:
trg_overlap_lens.append(aln_one.trg_end - aln_two.trg_start)
else:
trg_overlap_lens.append(aln_two.trg_end - aln_two.trg_start)
if (aln_two.trg_end > aln_one.trg_start and
aln_two.trg_end <= aln_one.trg_end):
if aln_two.trg_start <= aln_one.trg_start:
trg_overlap_lens.append(aln_two.trg_end - aln_one.trg_start)
else:
trg_overlap_lens.append(aln_two.trg_end - aln_two.trg_start)
trg_len = 0
if trg_overlap_lens:
trg_len = min(trg_overlap_lens)
return max([qry_len, trg_len])
def _collapse(aln_one, aln_two):
    """Merge two alignments into one combined Alignment.

    Merging only happens when both alignments are forward-sign and
    their overlap is within max_supp_align_overlap; otherwise a deep
    copy of aln_one is returned unchanged. The two symmetric branches
    below handle whichever alignment starts first on BOTH axes; if
    neither ordering holds, aln_one's copy is returned as well.
    """
    MAX_SUPP_ALIGN_OVERLAP = trestle_config.vals["max_supp_align_overlap"]
    out_aln = copy.deepcopy(aln_one)
    if (aln_one.qry_sign == "-" or aln_two.qry_sign == "-" or
            _overlap(aln_one, aln_two) > MAX_SUPP_ALIGN_OVERLAP):
        return out_aln
    if (aln_one.qry_start <= aln_two.qry_start and
            aln_one.trg_start <= aln_two.trg_start):
        # aln_one leads on both axes: merge its sequences first.
        qry_merge_outs = _merge_alns(aln_one.qry_start, aln_one.qry_end,
                                     aln_one.qry_seq, aln_two.qry_start,
                                     aln_two.qry_end, aln_two.qry_seq)
        one_qry_seq, two_qry_seq, out_qry_end = qry_merge_outs
        trg_merge_outs = _merge_alns(aln_one.trg_start, aln_one.trg_end,
                                     aln_one.trg_seq, aln_two.trg_start,
                                     aln_two.trg_end, aln_two.trg_seq)
        one_trg_seq, two_trg_seq, out_trg_end = trg_merge_outs
        # Pad the shorter side with gaps so qry and trg alignment
        # strings stay the same length after merging.
        fill_qry = ""
        fill_trg = ""
        qry_lens = len(one_qry_seq) + len(two_qry_seq)
        trg_lens = len(one_trg_seq) + len(two_trg_seq)
        if qry_lens > trg_lens:
            diff = qry_lens - trg_lens
            fill_trg = "-" * diff
        elif trg_lens > qry_lens:
            diff = trg_lens - qry_lens
            fill_qry = "-" * diff
        out_qry_seq = "".join([one_qry_seq, fill_qry, two_qry_seq])
        out_trg_seq = "".join([one_trg_seq, fill_trg, two_trg_seq])
        # Error rate is the target-length-weighted average of the two.
        out_err_rate = ((aln_one.err_rate * len(aln_one.trg_seq) +
                         aln_two.err_rate * len(aln_two.trg_seq)) /
                        (len(aln_one.trg_seq) + len(aln_two.trg_seq)))
        out_aln = Alignment(aln_one.qry_id, aln_one.trg_id, aln_one.qry_start,
                            out_qry_end, aln_one.qry_sign, aln_one.qry_len,
                            aln_one.trg_start, out_trg_end, aln_one.trg_sign,
                            aln_one.trg_len, out_qry_seq, out_trg_seq,
                            out_err_rate, is_secondary=False)
        return out_aln
    elif (aln_two.qry_start <= aln_one.qry_start and
          aln_two.trg_start <= aln_one.trg_start):
        # Mirror case: aln_two leads on both axes.
        qry_merge_outs = _merge_alns(aln_two.qry_start, aln_two.qry_end,
                                     aln_two.qry_seq, aln_one.qry_start,
                                     aln_one.qry_end, aln_one.qry_seq)
        two_qry_seq, one_qry_seq, out_qry_end = qry_merge_outs
        trg_merge_outs = _merge_alns(aln_two.trg_start, aln_two.trg_end,
                                     aln_two.trg_seq, aln_one.trg_start,
                                     aln_one.trg_end, aln_one.trg_seq)
        two_trg_seq, one_trg_seq, out_trg_end = trg_merge_outs
        fill_qry = ""
        fill_trg = ""
        qry_lens = len(two_qry_seq) + len(one_qry_seq)
        trg_lens = len(two_trg_seq) + len(one_trg_seq)
        if qry_lens > trg_lens:
            diff = qry_lens - trg_lens
            fill_trg = "-" * diff
        elif trg_lens > qry_lens:
            diff = trg_lens - qry_lens
            fill_qry = "-" * diff
        out_qry_seq = "".join([two_qry_seq, fill_qry, one_qry_seq])
        out_trg_seq = "".join([two_trg_seq, fill_trg, one_trg_seq])
        out_err_rate = ((aln_one.err_rate * len(aln_one.trg_seq) +
                         aln_two.err_rate * len(aln_two.trg_seq)) /
                        (len(aln_one.trg_seq) + len(aln_two.trg_seq)))
        out_aln = Alignment(aln_one.qry_id, aln_one.trg_id, aln_two.qry_start,
                            out_qry_end, aln_one.qry_sign, aln_one.qry_len,
                            aln_two.trg_start, out_trg_end, aln_one.trg_sign,
                            aln_one.trg_len, out_qry_seq, out_trg_seq,
                            out_err_rate, is_secondary=False)
        return out_aln
    return out_aln
def _merge_alns(first_start, first_end, first_seq,
second_start, second_end, second_seq):
first_out_seq = first_seq
second_out_seq = second_seq
out_end = second_end
if first_end <= second_start:
fill_qry_seq = "N" * (second_start - first_end)
first_out_seq = "".join([first_seq, fill_qry_seq])
second_out_seq = second_seq
else:
if first_end < second_end:
overlap = first_end - second_start
two_cut_ind = _overlap_to_aln_ind(overlap, second_seq)
first_out_seq = first_seq
second_out_seq = second_seq[two_cut_ind:]
else:
first_out_seq = first_seq
second_out_seq = ""
out_end = first_end
return first_out_seq, second_out_seq, out_end
def _overlap_to_aln_ind(overlap, aln):
num_bases = 0
for i, base in enumerate(aln):
if base != "-":
num_bases += 1
if num_bases == overlap:
return i + 1
return len(aln)
class EdgeAlignment(object):
    """Mutable cursor over one consensus-vs-template alignment, walked
    column by column by _evaluate_positions."""
    __slots__ = ("edge_id", "qry_seq", "trg_seq", "qry_start", "trg_start",
                 "trg_end", "in_alignment", "curr_aln_ind", "curr_qry_ind",
                 "curr_qry_nuc", "curr_trg_nuc", "curr_ins_nuc")
    def __init__(self, edge_id, qry_seq, trg_seq, qry_start, trg_start,
                 trg_end):
        self.edge_id = edge_id
        # NOTE(review): shift_gaps presumably normalizes gap placement
        # so qry/trg columns line up -- confirm in flye_aln.shift_gaps.
        self.qry_seq = flye_aln.shift_gaps(trg_seq, qry_seq)
        self.trg_seq = flye_aln.shift_gaps(self.qry_seq, trg_seq)
        self.qry_start = qry_start
        self.trg_start = trg_start
        self.trg_end = trg_end
        # Cursor state: valid only while the template index lies in
        # [trg_start, trg_end); -1 / "" mean "not positioned yet".
        self.in_alignment = False
        self.curr_aln_ind = -1
        self.curr_qry_ind = -1
        self.curr_qry_nuc = ""
        self.curr_trg_nuc = ""
        self.curr_ins_nuc = ""
    def reset_nucs(self):
        """Clear the per-column nucleotide state before the next column."""
        self.curr_qry_nuc = ""
        self.curr_trg_nuc = ""
        self.curr_ins_nuc = ""
def _evaluate_positions(pos, cons_aligns, side):
    """Classify tentative divergent positions as confirmed or rejected.

    Sweeps every template coordinate covered by the edge consensus
    alignments, advancing an EdgeAlignment cursor per edge in lockstep.
    A tentative position (from ``pos``) is confirmed when at least two
    in-scope edges disagree there (insertion, deletion, or substitution);
    otherwise it is rejected. Positions are only judged in the region
    conservatively covered by ALL edges (before min end for side "in",
    after max start for side "out").

    :param pos: dict with "total"/"sub"/"ins"/"del" lists of tentative
        divergent template positions.
    :param cons_aligns: mapping edge_id -> alignment object with qry_seq,
        trg_seq, qry_start, trg_start, trg_end attributes.
    :param side: "in" or "out" — which flank of the repeat is processed.
    :returns: (confirmed_pos, rejected_pos, consensus_pos) where the first
        two mirror the structure of ``pos`` and consensus_pos maps each
        edge_id to the consensus (query) coordinates of confirmed events.
    """
    #Includes insertions!
    confirmed_pos = {"total":[], "sub":[], "ins":[], "del":[]}
    rejected_pos = {"total":[], "sub":[], "ins":[], "del":[]}
    consensus_pos = {e:[] for e in cons_aligns}

    # One stateful cursor per edge, with gaps re-normalized on construction.
    alns = {}
    for edge_id in cons_aligns:
        orig_aln = cons_aligns[edge_id]
        alns[edge_id] = EdgeAlignment(edge_id, orig_aln.qry_seq,
                                      orig_aln.trg_seq, orig_aln.qry_start,
                                      orig_aln.trg_start, orig_aln.trg_end)

    min_start_edge = min([alns[e].trg_start for e in alns])
    max_end_edge = max([alns[e].trg_end for e in alns])
    #end indices for conservatively defining confirmed positions
    min_end_edge = min([alns[e].trg_end for e in alns])
    max_start_edge = max([alns[e].trg_start for e in alns])

    for trg_ind in range(min_start_edge, max_end_edge):
        # Activate/deactivate each edge's cursor for this template column.
        for edge_id in alns:
            aln = alns[edge_id]
            if aln.trg_start == trg_ind:
                aln.curr_aln_ind = 0
                aln.curr_qry_ind = aln.qry_start
                aln.in_alignment = True

            if aln.trg_start > trg_ind or aln.trg_end <= trg_ind:
                aln.in_alignment = False

            if aln.in_alignment:
                # Consume target gaps: query bases opposite them are
                # insertions accumulated into curr_ins_nuc.
                while aln.trg_seq[aln.curr_aln_ind] == "-":
                    if aln.qry_seq[aln.curr_aln_ind] != "-":
                        aln.curr_ins_nuc += aln.qry_seq[aln.curr_aln_ind]
                        aln.curr_qry_ind += 1
                    aln.curr_aln_ind += 1
                aln.curr_qry_nuc = aln.qry_seq[aln.curr_aln_ind]
                aln.curr_trg_nuc = aln.trg_seq[aln.curr_aln_ind]

        if trg_ind in pos["total"]:
            # Only judge positions covered by every edge (conservative zone).
            if ((side == "in" and trg_ind < min_end_edge) or
                    (side == "out" and trg_ind >= max_start_edge)):
                ins_confirmed = False
                del_confirmed = False
                sub_confirmed = False
                qry_nuc = ""
                trg_nuc = ""
                for edge_id in alns:
                    aln = alns[edge_id]
                    if aln.in_alignment:
                        #Directly add positions only to consensuses
                        # where insertions occur
                        #Add the position prior to curr_qry_ind to
                        # account for insertion
                        if aln.curr_ins_nuc:
                            ins_confirmed = True
                            consensus_pos[edge_id].append(aln.curr_qry_ind - 1)

                        # Any disagreement between edges (ignoring Ns)
                        # confirms the event type.
                        if qry_nuc and qry_nuc != aln.curr_qry_nuc:
                            if qry_nuc != "N" and aln.curr_qry_nuc != "N":
                                if qry_nuc == "-":
                                    del_confirmed = True
                                else:
                                    sub_confirmed = True
                        else:
                            qry_nuc = aln.curr_qry_nuc
                        # Target nucleotides should agree across edges;
                        # a mismatch indicates an inconsistent alignment.
                        if (trg_nuc and trg_nuc != aln.curr_trg_nuc and
                                trg_nuc != "N" and aln.curr_trg_nuc != "N"):
                            logger.debug("Inconsistent trg_nuc, %s %s %s %s",
                                         edge_id, trg_ind, trg_nuc,
                                         aln.curr_trg_nuc)
                        trg_nuc = aln.curr_trg_nuc
                if ins_confirmed or del_confirmed or sub_confirmed:
                    confirmed_pos["total"].append(trg_ind)
                    #Add positions to consensuses for only subs/deletions
                    if del_confirmed or sub_confirmed:
                        for edge_id in alns:
                            aln = alns[edge_id]
                            if aln.in_alignment:
                                consensus_pos[edge_id].append(aln.curr_qry_ind)
                    if trg_ind in pos["ins"]:
                        if ins_confirmed:
                            confirmed_pos["ins"].append(trg_ind)
                        else:
                            rejected_pos["ins"].append(trg_ind)
                    if trg_ind in pos["del"]:
                        if del_confirmed:
                            confirmed_pos["del"].append(trg_ind)
                        else:
                            rejected_pos["del"].append(trg_ind)
                    if trg_ind in pos["sub"]:
                        if sub_confirmed:
                            confirmed_pos["sub"].append(trg_ind)
                        else:
                            rejected_pos["sub"].append(trg_ind)
                else:
                    rejected_pos["total"].append(trg_ind)
                    if trg_ind in pos["ins"]:
                        rejected_pos["ins"].append(trg_ind)
                    if trg_ind in pos["del"]:
                        rejected_pos["del"].append(trg_ind)
                    if trg_ind in pos["sub"]:
                        rejected_pos["sub"].append(trg_ind)

        # Advance all active cursors past this template column.
        for edge_id in alns:
            aln = alns[edge_id]
            if aln.in_alignment:
                if aln.qry_seq[aln.curr_aln_ind] != "-":
                    aln.curr_qry_ind += 1
                aln.curr_aln_ind += 1
                aln.reset_nucs()

    return confirmed_pos, rejected_pos, consensus_pos
def _write_confirmed_positions(confirmed, rejected, pos, out_file):
with open(out_file, 'w') as f:
f.write(">Confirmed_total_positions_{0}\n"
.format(len(confirmed["total"])))
f.write(",".join([str(x) for x in sorted(confirmed["total"])]) + "\n")
f.write(">Confirmed_sub_positions_{0}\n".format(len(confirmed["sub"])))
f.write(",".join([str(x) for x in sorted(confirmed["sub"])]) + "\n")
f.write(">Confirmed_del_positions_{0}\n".format(len(confirmed["del"])))
f.write(",".join([str(x) for x in sorted(confirmed["del"])]) + "\n")
f.write(">Confirmed_ins_positions_{0}\n".format(len(confirmed["ins"])))
f.write(",".join([str(x) for x in sorted(confirmed["ins"])]) + "\n")
f.write(">Rejected_total_positions_{0}\n".format(len(rejected["total"])))
f.write(",".join([str(x) for x in sorted(rejected["total"])]) + "\n")
f.write(">Rejected_sub_positions_{0}\n".format(len(rejected["sub"])))
f.write(",".join([str(x) for x in sorted(rejected["sub"])]) + "\n")
f.write(">Rejected_del_positions_{0}\n".format(len(rejected["del"])))
f.write(",".join([str(x) for x in sorted(rejected["del"])])+ "\n")
f.write(">Rejected_ins_positions_{0}\n".format(len(rejected["ins"])))
f.write(",".join([str(x) for x in sorted(rejected["ins"])]) + "\n")
f.write(">Tentative_total_positions_{0}\n".format(len(pos["total"])))
f.write(",".join([str(x) for x in sorted(pos["total"])]) + "\n")
f.write(">Tentative_sub_positions_{0}\n".format(len(pos["sub"])))
f.write(",".join([str(x) for x in sorted(pos["sub"])]) + "\n")
f.write(">Tentative_del_positions_{0}\n".format(len(pos["del"])))
f.write(",".join([str(x) for x in sorted(pos["del"])]) + "\n")
f.write(">Tentative_ins_positions_{0}\n".format(len(pos["ins"])))
f.write(",".join([str(x) for x in sorted(pos["ins"])]) + "\n")
def _read_confirmed_positions(confirmed_file):
confirmed = {"total":[], "sub":[], "ins":[], "del":[]}
rejected = {"total":[], "sub":[], "ins":[], "del":[]}
pos = {"total":[], "sub":[], "ins":[], "del":[]}
with open(confirmed_file, "r") as f:
for i, line in enumerate(f):
line = line.strip()
if i == 1 and line:
confirmed["total"] = [int(x) for x in line.split(",")]
elif i == 3 and line:
confirmed["sub"] = [int(x) for x in line.split(",")]
elif i == 5 and line:
confirmed["del"] = [int(x) for x in line.split(",")]
elif i == 7 and line:
confirmed["ins"] = [int(x) for x in line.split(",")]
elif i == 9 and line:
rejected["total"] = [int(x) for x in line.split(",")]
elif i == 11 and line:
rejected["sub"] = [int(x) for x in line.split(",")]
elif i == 13 and line:
rejected["del"] = [int(x) for x in line.split(",")]
elif i == 15 and line:
rejected["ins"] = [int(x) for x in line.split(",")]
elif i == 17 and line:
pos["total"] = [int(x) for x in line.split(",")]
elif i == 19 and line:
pos["sub"] = [int(x) for x in line.split(",")]
elif i == 21 and line:
pos["del"] = [int(x) for x in line.split(",")]
elif i == 23 and line:
pos["ins"] = [int(x) for x in line.split(",")]
return confirmed, rejected, pos
def _classify_reads(read_aligns, consensus_pos,
                    headers_to_id, buffer_count):
    """Score each read against every edge consensus and partition it.

    A read's score for an edge is the number of confirmed consensus
    positions where the read agrees with that edge's consensus. A read is
    "Partitioned" to the edge whose score beats all others by more than
    ``buffer_count``; scores within ``buffer_count`` of the top produce a
    "Tied" read; an all-zero total yields "None".

    :returns: list of (read_id, status, edge_label, top_score, total_score,
        header) tuples covering every header in ``headers_to_id``.
    """
    #Includes insertion positions where an insertion occurs right before the
    #position for the read.
    #partitioning format same as above:
    #list of (read_id, status, edge_id, top_score, total_score, header)
    partitioning = []

    read_scores = {}
    for edge_id in read_aligns:
        read_counts = {}
        for aln in read_aligns[edge_id][0]:
            read_header = aln.qry_id
            cons_header = aln.trg_id
            #Unmapped segments will not be scored
            if cons_header == "*":
                continue
            if read_header not in read_scores:
                read_scores[read_header] = {}
            # NOTE(review): this re-zeroes the (read, edge) score on EVERY
            # alignment of the read, so a supplementary alignment discards
            # the score accumulated by the primary one — confirm intended.
            read_scores[read_header][edge_id] = 0
            if read_header not in read_counts:
                read_counts[read_header] = 1
            else:
                read_counts[read_header] += 1
            #Any alignments after the first supplementary will not be scored
            if read_counts[read_header] > 2:
                continue
            positions = consensus_pos[edge_id]
            # trg_aln maps consensus (ungapped) coords to alignment coords.
            trg_aln, _ = _index_mapping(aln.trg_seq)
            for pos in positions:
                if pos >= aln.trg_start and pos < aln.trg_end:
                    pos_minus_start = pos - aln.trg_start
                    aln_ind = trg_aln[pos_minus_start]
                    if aln.qry_seq[aln_ind] == aln.trg_seq[aln_ind]:
                        read_scores[read_header][edge_id] += 1
    #Iterate through all read_headers so partitioning will be a complete set
    for read_header in headers_to_id:
        read_id = headers_to_id[read_header]
        if read_header in read_scores:
            tie_bool = False
            top_edge = 0
            top_score = 0
            total_score = 0
            for edge_id in read_scores[read_header]:
                edge_score = read_scores[read_header][edge_id]
                #print edge_id, edge_score, top_score
                if edge_score - buffer_count > top_score:
                    # Clear winner: beats the old top by more than the buffer.
                    top_edge = edge_id
                    top_score = edge_score
                    tie_bool = False
                elif (edge_score - buffer_count <= top_score and
                      edge_score >= top_score):
                    # New top, but within the buffer of the old one: a tie.
                    top_score = edge_score
                    tie_bool = True
                elif (edge_score >= top_score - buffer_count and
                      edge_score < top_score):
                    # Close runner-up within the buffer: also a tie.
                    tie_bool = True
                total_score += edge_score
            if total_score == 0:
                status_label = "None"
                edge_label = "NA"
            elif tie_bool:
                status_label = "Tied"
                edge_label = "NA"
            else:
                status_label = "Partitioned"
                edge_label = str(top_edge)
            partitioning.append((read_id, status_label, edge_label,
                                 top_score, total_score, read_header))
        else:
            # Reads with no scored alignments are reported as unassigned.
            status_label = "None"
            edge_label = "NA"
            top_score = 0
            total_score = 0
            partitioning.append((read_id, status_label, edge_label,
                                 top_score, total_score, read_header))
    return partitioning
def _index_mapping(aln):
#Given a genomic index, return the alignment index of the alignment
al_inds = []
#Given an alignment index, return the genomic index at that position
gen_inds = []
for i,b in enumerate(aln):
gen_inds.append(len(al_inds))
if b != '-':
al_inds.append(i)
return al_inds, gen_inds
def init_side_stats(rep, side, repeat_edges, min_overlap, position_path,
                    partitioning, prev_parts, template_len, stats_file):
    """Create the per-side stats file and write its preamble.

    Records the trestle configuration, the tentative divergent-position
    counts (via div.read_positions), the initial per-edge read partition,
    and the column header for subsequent per-iteration rows. Also seeds
    ``prev_parts`` with the initial partitioning (mutated in place).

    :returns: True if any edge starts below min_edge_cov coverage
        (the iteration-loop break condition).
    """
    SUB_THRESH = trestle_config.vals["sub_thresh"]
    DEL_THRESH = trestle_config.vals["del_thresh"]
    INS_THRESH = trestle_config.vals["ins_thresh"]
    FLANKING_LEN = trestle_config.vals["flanking_len"]
    BUFFER_COUNT = trestle_config.vals["buffer_count"]
    MAX_ITER = trestle_config.vals["max_iter"]
    MIN_EDGE_COV = trestle_config.vals["min_edge_cov"]
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]

    _, pos = div.read_positions(position_path)
    #Count partitioned reads
    edge_below_cov = False
    part_list = _read_partitioning_file(partitioning)
    edge_reads, _, _ = _get_partitioning_info(part_list,
                                              repeat_edges[rep][side])
    #Check break condition for iteration loop
    for edge in repeat_edges[rep][side]:
        if edge_reads[edge] < MIN_EDGE_COV:
            edge_below_cov = True
    prev_parts.add(tuple(part_list))
    #Prepare header for iteration stats
    #Iter,Rep Lens,Confirmed/Rejected Pos,Partitioned Reads
    header_labels = ["Iter"]
    for edge in sorted(repeat_edges[rep][side]):
        header_labels.extend(["Rep Len {0}".format(edge)])
    header_labels.extend(["Confirmed Pos", "Rejected Pos"])
    for edge in sorted(repeat_edges[rep][side]):
        header_labels.extend(["#Reads {0}".format(edge)])
    header_labels.extend(["#Tied", "#Unassigned"])
    spaced_header = ["{:11}".format(h) for h in header_labels]
    #Write stats output
    with open(stats_file, 'w') as f:
        f.write("{0:25}\t{1}\n".format("Repeat:", rep))
        f.write("{0:25}\t'{1}'\n".format("Side:", side))
        f.write("{0:25}\t".format("Edges:"))
        f.write(", ".join([str(x) for x in sorted(repeat_edges[rep][side])]) + "\n")
        f.write("{0:25}\t{1}\n\n".format("Template Length:", template_len))
        f.write("Initial Option Values\n")
        f.write("{0:25}\t{1}\n".format("min_overlap:", min_overlap))
        f.write("{0:25}\t{1}\n".format("sub_thresh:", SUB_THRESH))
        f.write("{0:25}\t{1}\n".format("del_thresh:", DEL_THRESH))
        f.write("{0:25}\t{1}\n".format("ins_thresh:", INS_THRESH))
        f.write("{0:25}\t{1}\n".format("flanking_len:", FLANKING_LEN))
        f.write("{0:25}\t{1}\n".format("buffer_count:", BUFFER_COUNT))
        f.write("{0:25}\t{1}\n".format("max_iter:", MAX_ITER))
        f.write("{0:25}\t{1}\n".format("min_edge_cov:", MIN_EDGE_COV))
        f.write("{0:25}\t{1}\n".format("cons_aln_rate:", CONS_ALN_RATE))
        f.write("\n")
        f.write("The following numbers are calculated based on moving ")
        f.write("into the repeat from the '{0}' direction\n\n".format(side))
        f.write("{0}\n".format("Divergent Positions:"))
        f.write("{0:25}\t{1}\n".format("Total", len(pos["total"])))
        f.write("{0:25}\t{1}\n".format("Substitutions", len(pos["sub"])))
        f.write("{0:25}\t{1}\n".format("Deletions", len(pos["del"])))
        f.write("{0:25}\t{1}\n".format("Insertions", len(pos["ins"])))
        f.write("\n")
        f.write("{0:25}\t{1}\n".format("Total Starting Reads:",
                                       sum(edge_reads.values())))
        for edge in sorted(repeat_edges[rep][side]):
            f.write("{0}{1}{2:18}\t{3}\n".format("Edge ", edge,
                                                 " starting reads:",
                                                 edge_reads[edge]))
        f.write("\n\n")
        f.write("\t".join(spaced_header))
        f.write("\n")
    return edge_below_cov
def update_side_stats(edges, it, side, cons_align_path, template,
                      confirmed_pos_path, partitioning, prev_parts,
                      stats_file):
    """Append one per-iteration row to the side stats file.

    The row holds: iteration number, per-edge repeat length (from the
    consensus-vs-template alignment), confirmed/rejected position counts,
    per-edge read counts, and tied/unassigned counts. ``prev_parts`` is
    updated in place with this iteration's partitioning.

    :returns: (edge_below_cov, dup_part) — the two iteration-loop break
        conditions: an edge fell below min_edge_cov, or the partitioning
        repeated a previous iteration exactly.
    """
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    MIN_EDGE_COV = trestle_config.vals["min_edge_cov"]
    #Write stats for each iteration
    #Iter,Rep Lens,Confirmed/Rejected Pos,Partitioned Reads
    stats_out = [str(it)]
    for edge_id in sorted(edges):
        rep_len = 0
        if os.path.isfile(cons_align_path.format(it, side, edge_id)):
            cons_align = _read_alignment(cons_align_path.format(it, side,
                                                                edge_id),
                                         template,
                                         CONS_ALN_RATE)
            if cons_align and cons_align[0]:
                # Repeat length is measured from the consensus end that
                # extends into the repeat for this side.
                if side == "in":
                    rep_len = (cons_align[0][0].qry_len -
                               cons_align[0][0].qry_start)
                elif side == "out":
                    rep_len = cons_align[0][0].qry_end
        stats_out.extend([str(rep_len)])
    confirmed_total = 0
    rejected_total = 0
    if it > 0:
        confirmed, rejected, _ = _read_confirmed_positions(confirmed_pos_path)
        confirmed_total = len(confirmed["total"])
        rejected_total = len(rejected["total"])
    stats_out.extend([str(confirmed_total),
                      str(rejected_total)])
    edge_below_cov = False
    dup_part = False
    part_list = _read_partitioning_file(partitioning)
    edge_reads, tied_reads, unassigned_reads = _get_partitioning_info(part_list, edges)
    for edge_id in sorted(edges):
        stats_out.extend([str(edge_reads[edge_id])])
    stats_out.extend([str(tied_reads), str(unassigned_reads)])
    #Check break conditions for iteration loop
    for edge in edges:
        if edge_reads[edge] < MIN_EDGE_COV:
            edge_below_cov = True
    if tuple(part_list) in prev_parts:
        dup_part = True
    else:
        prev_parts.add(tuple(part_list))
    spaced_header = ["{:11}".format(x) for x in stats_out]
    with open(stats_file, "a") as f:
        f.write("\t".join(spaced_header))
        f.write("\n")
    return edge_below_cov, dup_part
def finalize_side_stats(edges, it, side, cons_align_path, template,
                        cons_vs_cons_path, consensuses, confirmed_pos_path,
                        partitioning, edge_below_cov, dup_part, term_bool,
                        stats_file):
    """Append the final summary section to the side stats file.

    Written sections: why the iteration terminated; each edge consensus'
    alignment interval on the template; pairwise edge-vs-edge alignment
    intervals with divergence rates; confirmed/rejected/remaining position
    fractions per event type; and the final read-partitioning breakdown.
    """
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    MAX_ITER = trestle_config.vals["max_iter"]

    with open(stats_file, "a") as f:
        f.write("\n\n")
        f.write("{0:26}\t{1}\n\n".format("Final Iter:", it))
        f.write("Iteration terminated because:\n")
        if it == MAX_ITER:
            f.write("Max iter reached\n")
        if edge_below_cov:
            f.write("Edge coverage fell below min_edge_cov\n")
        if dup_part:
            f.write("Partitioning was identical to a previous iteration\n")
        if term_bool:
            f.write("Encountered empty consensus sequence or alignment\n")
        f.write("\n")
        #Write out alignment indices for edges vs template
        # limit_ind tracks the most conservative template boundary across
        # edges: the minimum end for side "in", the maximum start for "out".
        limit_ind = None
        limit_label = ""
        if side == "in":
            limit_label = "Min Template End"
        elif side == "out":
            limit_label = "Max Template Start"
        for edge_id in sorted(edges):
            qry_start = 0
            qry_end = 0
            qry_len = 0
            trg_start = 0
            trg_end = 0
            trg_len = 0
            curr_cons_path = cons_align_path.format(it, side, edge_id)
            if os.path.isfile(curr_cons_path):
                cons_align = _read_alignment(curr_cons_path,
                                             template,
                                             CONS_ALN_RATE)
                if cons_align and cons_align[0]:
                    #collapse multiple consensus alignments
                    coll_cons = _collapse_cons_aln(cons_align)
                    qry_start = coll_cons.qry_start
                    qry_end = coll_cons.qry_end
                    qry_len = coll_cons.qry_len
                    trg_start = coll_cons.trg_start
                    trg_end = coll_cons.trg_end
                    trg_len = coll_cons.trg_len
                    if limit_ind is None or (
                            (side == "in" and trg_end < limit_ind) or
                            (side == "out" and trg_start >= limit_ind)):
                        if side == "in":
                            limit_ind = trg_end
                        elif side == "out":
                            limit_ind = trg_start
            f.write("Edge {0}|Template Alignment\n".format(edge_id))
            f.write("{0}{1}{2:20}\t{3:5}-{4:5} of {5:5}\n".format(
                    "Edge ", edge_id, ":",
                    qry_start, qry_end, qry_len))
            f.write("{0:26}\t{1:5}-{2:5} of {3:5}\n".format("Template:",
                    trg_start, trg_end, trg_len))
            f.write("\n")
        f.write("{0:26}\t{1}\n".format(limit_label, limit_ind))
        f.write("(End of positions considered)\n\n")
        #Write out alignment indices for edges vs edges
        edge_pairs = sorted(combinations(edges, 2))
        for edge_one, edge_two in edge_pairs:
            qry_start = 0
            qry_end = 0
            qry_len = 0
            trg_start = 0
            trg_end = 0
            trg_len = 0
            qry_seq = ""
            trg_seq = ""
            if (os.path.isfile(cons_vs_cons_path.format(it, side, edge_one,
                                                        it, side, edge_two)) and
                    os.path.isfile(consensuses[(it, side, edge_two)])):
                cons_vs_cons = _read_alignment(cons_vs_cons_path.format(
                                                    it, side, edge_one,
                                                    it, side, edge_two),
                                               consensuses[(it, side,
                                                            edge_two)],
                                               CONS_ALN_RATE)
                if cons_vs_cons and cons_vs_cons[0]:
                    qry_start = cons_vs_cons[0][0].qry_start
                    qry_end = cons_vs_cons[0][0].qry_end
                    qry_len = cons_vs_cons[0][0].qry_len
                    trg_start = cons_vs_cons[0][0].trg_start
                    trg_end = cons_vs_cons[0][0].trg_end
                    trg_len = cons_vs_cons[0][0].trg_len
                    qry_seq = cons_vs_cons[0][0].qry_seq
                    trg_seq = cons_vs_cons[0][0].trg_seq
            f.write("Edge {0}|Edge {1} Alignment\n".format(edge_one, edge_two))
            f.write("{0}{1}{2:20}\t{3:5}-{4:5} of {5:5}\n".format(
                    "Edge ", edge_one, ":",
                    qry_start, qry_end, qry_len))
            f.write("{0}{1}{2:20}\t{3:5}-{4:5} of {5:5}\n".format(
                    "Edge ", edge_two, ":",
                    trg_start, trg_end, trg_len))
            div_rate = _calculate_divergence(qry_seq, trg_seq)
            f.write("{0:26}\t{1:.4f}\n".format("Divergence Rate:", div_rate))
            f.write("\n")
        #Write overall position stats
        types = ["total", "sub", "del", "ins"]
        confirmed = {t:[] for t in types}
        rejected = {t:[] for t in types}
        pos = {t:[] for t in types}
        if it > 0:
            confirmed_pos_output = _read_confirmed_positions(confirmed_pos_path)
            confirmed, rejected, pos = confirmed_pos_output
        if side == "in":
            largest_pos = -1
            if confirmed["total"]:
                largest_pos = max(confirmed["total"])
            f.write("{0:26}\t{1}\n".format("Largest Confirmed Position:",
                                           largest_pos))
        elif side == "out":
            smallest_pos = -1
            if confirmed["total"]:
                smallest_pos = min(confirmed["total"])
            f.write("{0:26}\t{1}\n".format("Smallest Confirmed Position:",
                                           smallest_pos))
        # Positions neither confirmed nor rejected remain undecided.
        remainings = {}
        for typ in types:
            remainings[typ] = len(pos[typ]) - (len(confirmed[typ]) +
                                               len(rejected[typ]))
        type_strs = ["Total", "Sub", "Del", "Ins"]
        for typ, typ_str in zip(types, type_strs):
            confirmed_frac = 0.0
            rejected_frac = 0.0
            remaining_frac = 0.0
            if len(pos[typ]) != 0:
                confirmed_frac = len(confirmed[typ]) / float(len(pos[typ]))
                rejected_frac = len(rejected[typ]) / float(len(pos[typ]))
                remaining_frac = remainings[typ] / float(len(pos[typ]))
            f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format(
                    "Confirmed {0} Positions:".format(typ_str),
                    len(confirmed[typ]),
                    len(pos[typ]),
                    confirmed_frac))
            f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format(
                    "Rejected {0} Positions:".format(typ_str),
                    len(rejected[typ]),
                    len(pos[typ]),
                    rejected_frac))
            f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format(
                    "Remaining {0} Positions:".format(typ_str),
                    remainings[typ],
                    len(pos[typ]),
                    remaining_frac))
            f.write("\n")
        f.write("\n")
        #Write overall partitioning stats
        part_list = _read_partitioning_file(partitioning)
        edge_reads = {edge:0 for edge in edges}
        tied_reads = 0
        unassigned_reads = 0
        total_reads = len(part_list)
        for _, status, edge, _, _, _ in part_list:
            if status == "Partitioned" and edge != "NA":
                edge_reads[int(edge)] += 1
            elif status == "Tied":
                tied_reads += 1
            elif status == "None":
                unassigned_reads += 1
            else:
                exception_str = "Unknown status {0} in partitioning file {1}"
                raise Exception(exception_str.format(status, partitioning))
        for edge_id in sorted(edges):
            f.write("{0}{1}{2:13}\t{3}/{4} = {5:.4f}\n".format(
                    "Total Edge ", edge_id, " Reads:",
                    edge_reads[edge_id], total_reads,
                    edge_reads[edge_id] / float(total_reads)))
        f.write("{0:26}\t{1}/{2} = {3:.4f}\n".format("Total Tied Reads:",
                tied_reads, total_reads,
                tied_reads / float(total_reads)))
        f.write("{0:26}\t{1}/{2} = {3:.4f}\n".format("Total Unassigned Reads:",
                unassigned_reads, total_reads,
                unassigned_reads / float(total_reads)))
        f.write("\n")
def init_int_stats(rep, repeat_edges, zero_it, position_path, partitioning,
                   all_reads_file, template_len, cov, int_stats_file):
    """Create the integrated (both-sides) stats file and write its preamble.

    Records repeat metadata (template length, coverage, read counts, read
    N50), the per-side per-edge starting read counts, and the column header
    for subsequent per-iteration rows (one bridging-read column per in/out
    edge pair, ordered by itertools.product of sorted edge labels).
    """
    #Count edge reads
    side_reads = {}
    total_reads = 0
    all_side_reads = 0
    internal_reads = 0
    for side in sorted(repeat_edges[rep]):
        part_list = _read_partitioning_file(partitioning.format(zero_it, side))
        total_reads = len(part_list)
        partitioning_outputs = _get_partitioning_info(part_list,
                                                      repeat_edges[rep][side])
        side_reads[side], _, _ = partitioning_outputs
        all_side_reads += sum(side_reads[side].values())
    # Reads assigned to neither flank are internal to the repeat.
    internal_reads = total_reads - all_side_reads
    all_reads_n50 = _n50(all_reads_file)
    #Prepare header for iterative integrated stats
    #in/out Iter,Mean in/out/gap Len,Confirmed/Rejected Pos,Bridging Reads
    header_labels = []
    for side in sorted(repeat_edges[rep]):
        header_labels.extend(["{0} Iter".format(side)])
    header_labels.extend(["in Len", "Gap Len", "out Len"])
    header_labels.extend(["Confirmed", "Rejected"])
    side_edges = []
    for side in sorted(repeat_edges[rep]):
        side_edges.append([])
        for edge in sorted(repeat_edges[rep][side]):
            side_edges[-1].append("{0}{1}".format(side,edge))
    for edge_pair in sorted(product(*side_edges)):
        header_labels.extend(["{0}".format("|".join(edge_pair))])
    spaced_header = ["{:8}".format(x) for x in header_labels]
    #Write to file
    with open(int_stats_file, 'w') as f:
        f.write("{0:16}\t{1}\n".format("Repeat:", rep))
        f.write("{0:16}\t{1}\n".format("Template Length:", template_len))
        f.write("{0:16}\t{1:.2f}\n".format("Avg Coverage:", cov))
        f.write("{0:16}\t{1}\n".format("# All Reads:", total_reads))
        f.write("{0:16}\t{1}\n\n".format("All Reads N50:", all_reads_n50))
        edge_headers = ["Side", " Edge", "# Reads"]
        spaced_edge_header = ["{:5}".format(h) for h in edge_headers]
        f.write("\t".join(spaced_edge_header))
        f.write("\n")
        for side in sorted(repeat_edges[rep]):
            for edge_id in sorted(repeat_edges[rep][side]):
                edge_values = [side, edge_id, side_reads[side][edge_id]]
                spaced_values = ["{:6}".format(h) for h in edge_values]
                f.write("\t".join(spaced_values))
                f.write("\n")
        f.write("{0:12}\t {1}\n".format("Internal", internal_reads))
        f.write("\n\n")
        f.write("\t".join(spaced_header))
        f.write("\n")
def update_int_stats(rep, repeat_edges, side_it, cons_align_path, template,
                     template_len, confirmed_pos_path, int_confirmed_path,
                     partitioning, int_stats_file):
    """Append one per-iteration row to the integrated stats file.

    The row holds: current in/out iteration numbers, median in/gap/out
    lengths on the template, integrated confirmed/rejected position counts
    (also written to ``int_confirmed_path``), and one bridging-read count
    per in/out edge pair (reads partitioned on BOTH flanks).
    """
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    stats_out = []
    #Add side iters
    for side in sorted(repeat_edges[rep]):
        stats_out.extend([str(side_it[side])])
    #Find median in, out, and gap lengths
    medians = {s:0 for s in repeat_edges[rep]}
    for side in sorted(repeat_edges[rep]):
        trg_limits = []
        for edge_id in sorted(repeat_edges[rep][side]):
            curr_cons_path = cons_align_path.format(side_it[side],
                                                    side, edge_id)
            if os.path.isfile(curr_cons_path):
                cons_align = _read_alignment(curr_cons_path,
                                             template,
                                             CONS_ALN_RATE)
                if cons_align and cons_align[0]:
                    # How far this edge's consensus reaches into the
                    # template from its own side.
                    if side == "in":
                        trg_limits.append(cons_align[0][0].trg_end)
                    elif side == "out":
                        trg_limits.append(template_len -
                                          cons_align[0][0].trg_start)
        if trg_limits:
            medians[side] = _get_median(trg_limits)
    # Negative gap_len means the two flanks overlap on the template.
    gap_len = template_len - (medians["in"] + medians["out"])
    stats_out.extend([str(medians["in"]), str(gap_len), str(medians["out"])])
    #Add confirmed and rejected reads
    in_confirmed_path = confirmed_pos_path.format(side_it["in"], "in")
    out_confirmed_path = confirmed_pos_path.format(side_it["out"], "out")
    types = ["total", "sub", "del", "ins"]
    int_confirmed = {t:[] for t in types}
    int_rejected = {t:[] for t in types}
    pos = {t:[] for t in types}
    if side_it["in"] > 0 and side_it["out"] > 0:
        all_in_pos = _read_confirmed_positions(in_confirmed_path)
        all_out_pos = _read_confirmed_positions(out_confirmed_path)
        confirmed_pos_outputs = _integrate_confirmed_pos(all_in_pos,
                                                         all_out_pos)
        int_confirmed, int_rejected, pos = confirmed_pos_outputs
    elif side_it["in"] > 0:
        all_in_pos = _read_confirmed_positions(in_confirmed_path)
        int_confirmed, int_rejected, pos = all_in_pos
    elif side_it["out"] > 0:
        all_out_pos = _read_confirmed_positions(out_confirmed_path)
        int_confirmed, int_rejected, pos = all_out_pos
    _write_confirmed_positions(int_confirmed, int_rejected, pos,
                               int_confirmed_path.format(side_it["in"],
                                                         side_it["out"]))
    stats_out.extend([str(len(int_confirmed["total"])),
                      str(len(int_rejected["total"]))])
    #Get bridging reads for each pair of in/out edges
    side_headers_dict = {}
    all_headers = set()
    for side in sorted(repeat_edges[rep]):
        side_headers_dict[side] = {}
        part_list = _read_partitioning_file(partitioning.format(side_it[side],
                                                                side))
        for _, status, edge, _, _, header in part_list:
            all_headers.add(header)
            if status == "Partitioned" and edge != "NA":
                side_headers_dict[side][header] = (side, int(edge))
    bridging_reads = {}
    side_edges = []
    for side in sorted(repeat_edges[rep]):
        side_edges.append([])
        for edge in sorted(repeat_edges[rep][side]):
            side_edges[-1].append((side, edge))
    for edge_pair in sorted(product(*side_edges)):
        bridging_reads[edge_pair] = 0
    # A read bridges when it was partitioned on both the in and out side.
    for header in all_headers:
        if (header in side_headers_dict["in"] and
                header in side_headers_dict["out"]):
            in_edge = side_headers_dict["in"][header]
            out_edge = side_headers_dict["out"][header]
            bridging_reads[(in_edge, out_edge)] += 1
    for edge_pair in sorted(bridging_reads):
        #stats_out.extend(["{0}".format(edge_pair)])
        stats_out.extend([str(bridging_reads[edge_pair])])
    spaced_header = ["{:8}".format(x) for x in stats_out]
    #Write to file
    with open(int_stats_file, "a") as f:
        f.write("\t".join(spaced_header))
        f.write("\n")
def finalize_int_stats(rep, repeat_edges, side_it, cons_align_path, template,
template_len, cons_vs_cons_path, consensuses,
int_confirmed_path, partitioning, int_stats_file,
resolved_seq_file):
CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
MIN_BRIDGE_COUNT = trestle_config.vals["min_bridge_count"]
MIN_BRIDGE_FACTOR = trestle_config.vals["min_bridge_factor"]
#Resolved repeat seqs to be returned, NOT written
resolved_repeats = {}
summ_vals = []
with open(int_stats_file, "a") as f:
f.write("\n\n")
for side in sorted(repeat_edges[rep]):
f.write("{0}'{1}'{2:8}\t{3}\n"
.format("Final ", side, " Iter:", side_it[side]))
f.write("\n\n")
#Overall confirmed and rejected positions
types = ["total", "sub", "del", "ins"]
int_confirmed = {t:[] for t in types}
int_rejected = {t:[] for t in types}
pos = {t:[] for t in types}
if side_it["in"] > 0 or side_it["out"] > 0:
int_confirmed, int_rejected, pos = _read_confirmed_positions(
int_confirmed_path.format(side_it["in"], side_it["out"]))
remainings = {}
for typ in types:
remainings[typ] = len(pos[typ]) - (len(int_confirmed[typ]) +
len(int_rejected[typ]))
type_strs = ["Total", "Sub", "Del", "Ins"]
for typ, typ_str in zip(types, type_strs):
confirmed_frac = 0.0
rejected_frac = 0.0
remaining_frac = 0.0
if len(pos[typ]) != 0:
confirmed_frac = len(int_confirmed[typ]) / float(len(pos[typ]))
rejected_frac = len(int_rejected[typ]) / float(len(pos[typ]))
remaining_frac = remainings[typ] / float(len(pos[typ]))
f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format(
"Confirmed {0} Positions:".format(typ_str),
len(int_confirmed[typ]),
len(pos[typ]),
confirmed_frac))
f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format(
"Rejected {0} Positions:".format(typ_str),
len(int_rejected[typ]),
len(pos[typ]),
rejected_frac))
f.write("{0:26}\t{1}/{2} = {3:.3f}\n".format(
"Remaining {0} Positions:".format(typ_str),
remainings[typ],
len(pos[typ]),
remaining_frac))
f.write("\n")
f.write("\n")
#Basic stats for confirmed positions
av_div = 0.0
if template_len != 0:
av_div = len(int_confirmed["total"]) / float(template_len)
position_gaps = [0 for _ in range(len(int_confirmed["total"]) + 1)]
curr_pos = 0
for i, p in enumerate(int_confirmed["total"]):
position_gaps[i] = p - curr_pos
curr_pos = p
position_gaps[-1] = template_len - curr_pos
mean_position_gap = _mean(position_gaps)
max_position_gap = max(position_gaps)
f.write("{0:26}\t{1}\n".format("Template Length:", template_len))
f.write("{0:26}\t{1}\n".format("# Confirmed Positions:",
len(int_confirmed["total"])))
f.write("{0:26}\t{1:.4f}\n".format("Confirmed Pos Avg Divergence:",
av_div))
f.write("{0:26}\t{1:.2f}\n".format("Mean Confirmed Pos Gap:",
mean_position_gap))
f.write("{0:26}\t{1}\n".format("Max Confirmed Pos Gap:",
max_position_gap))
f.write("\n\n")
summ_vals.extend([len(int_confirmed["total"]), max_position_gap])
#Write bridging reads
side_headers_dict = {}
all_headers = set()
for side in sorted(repeat_edges[rep]):
side_headers_dict[side] = {}
part_list = _read_partitioning_file(partitioning.format(
side_it[side], side))
for _, status, edge, _, _, header in part_list:
all_headers.add(header)
if status == "Partitioned" and edge != "NA":
side_headers_dict[side][header] = (side, int(edge))
bridging_reads = {}
side_edges = []
for side in sorted(repeat_edges[rep]):
side_edges.append([])
for edge in sorted(repeat_edges[rep][side]):
side_edges[-1].append((side, edge))
for edge_pair in sorted(product(*side_edges)):
bridging_reads[edge_pair] = 0
for header in all_headers:
if (header in side_headers_dict["in"] and
header in side_headers_dict["out"]):
in_edge = side_headers_dict["in"][header]
out_edge = side_headers_dict["out"][header]
bridging_reads[(in_edge, out_edge)] += 1
for edge_pair in sorted(bridging_reads):
pair_label = "|".join(["{0}{1}".format(x[0], x[1]) for x in edge_pair])
f.write("{0}{1:21}\t{2}\n".format(pair_label, " Bridging Reads:",
bridging_reads[edge_pair]))
f.write("\n\n")
#Write combos which are sets of bridging reads
all_combos = _get_combos(side_edges[0], side_edges[1])
combo_support = [0 for _ in all_combos]
for i, combo in enumerate(all_combos):
for edge_pair in combo:
if edge_pair in bridging_reads:
combo_support[i] += bridging_reads[edge_pair]
for i, combo in enumerate(all_combos):
f.write("{0} {1}\n".format("Combo", i))
coms = ["|".join(["".join([str(z) for z in x]) for x in y]) for y in combo]
combo_edges = " + ".join(coms)
f.write("{0:12}\t{1}\n".format("Resolution:", combo_edges))
f.write("{0:12}\t{1}\n\n".format("Support:", combo_support[i]))
#Bridging conditions
bridged = False
bridged_edges = None
combo_inds = list(zip(combo_support, list(range(len(combo_support)))))
sorted_combos = sorted(combo_inds, reverse=True)
if (len(sorted_combos) > 1 and
sorted_combos[0][0] >= MIN_BRIDGE_COUNT and
sorted_combos[0][0] >= sorted_combos[1][0] * MIN_BRIDGE_FACTOR):
bridged = True
bridged_edges = all_combos[sorted_combos[0][1]]
best_combo = sorted_combos[0][1]
best_support = sorted_combos[0][0]
best_against = 0
second_combo = -1
second_support = 0
if len(sorted_combos) > 1:
for support, _ in sorted_combos[1:]:
best_against += support
second_combo = sorted_combos[1][1]
second_support = sorted_combos[1][0]
if bridged:
f.write("BRIDGED\n")
f.write("Bridging Combo: {0}\n".format(best_combo))
br_ct_str = "{0} (min_bridge_count)".format(MIN_BRIDGE_COUNT)
br_diff_str = "{0} * {1} (Combo {2} * min_bridge_factor)".format(
second_support, MIN_BRIDGE_FACTOR, second_combo)
f.write("Support = {0}\t> {1}\n{2:12}\t> {3}\n".format(
best_support, br_ct_str, "", br_diff_str))
f.write("Resolution:\n")
for edge_pair in sorted(bridged_edges):
f.write("{0[0]} {0[1]:2} {1:3} {2[0]} {2[1]}\n"
.format(edge_pair[0], "->", edge_pair[1]))
f.write("\n\n")
else:
f.write("UNBRIDGED\n")
f.write("Best combo {0}\n".format(best_combo))
f.write("{0:20}\t{1}\n".format("min_bridge_count",
MIN_BRIDGE_COUNT))
f.write("{0:20}\t{1}\n\n\n".format("min_bridge_factor",
MIN_BRIDGE_FACTOR))
summ_vals.extend([bridged, best_support, best_against])
#If not bridged, find in/gap/out lengths and divergence rates
if not bridged:
#Write median in, out, and gap lengths
side_lens = {s:0 for s in repeat_edges[rep]}
for side in sorted(repeat_edges[rep]):
trg_limits = []
for edge_id in sorted(repeat_edges[rep][side]):
curr_cons_path = cons_align_path.format(side_it[side],
side, edge_id)
if os.path.isfile(curr_cons_path):
cons_align = _read_alignment(curr_cons_path,
template,
CONS_ALN_RATE)
if cons_align and cons_align[0]:
if side == "in":
trg_limits.append(cons_align[0][0].trg_end)
elif side == "out":
trg_limits.append(template_len -
cons_align[0][0].trg_start)
if trg_limits:
side_lens[side] = _get_median(trg_limits)
gap_len = template_len - (side_lens["in"] + side_lens["out"])
f.write("{0:30}\t{1}\n".format("Median in Sequence Length:",
side_lens["in"]))
f.write("{0:30}\t{1}\n".format("Median out Sequence Length:",
side_lens["out"]))
f.write("{0:30}\t{1}\n\n".format("Median Gap/Overlap Length:",
gap_len))
#Write mean in and out divergence rates
div_rates = {s:[] for s in repeat_edges[rep]}
for side in sorted(repeat_edges[rep]):
side_pairs = sorted(combinations(repeat_edges[rep][side], 2))
for edge_one, edge_two in side_pairs:
cons_cons_file = cons_vs_cons_path.format(
side_it[side], side, edge_one,
side_it[side], side, edge_two)
if (os.path.isfile(cons_cons_file) and
os.path.isfile(consensuses[(side_it[side],
side, edge_two)])):
cons_vs_cons = _read_alignment(cons_cons_file,
consensuses[(side_it[side],
side, edge_two)],
CONS_ALN_RATE)
if cons_vs_cons and cons_vs_cons[0]:
edge_rate = _calculate_divergence(
cons_vs_cons[0][0].qry_seq,
cons_vs_cons[0][0].trg_seq)
div_rates[side].append(edge_rate)
mean_in_div = 0.0
if div_rates["in"]:
mean_in_div = _mean(div_rates["in"])
mean_out_div = 0.0
if div_rates["out"]:
mean_out_div = _mean(div_rates["out"])
weighted_mean_div = 0.0
if side_lens["in"] + side_lens["out"] != 0:
weighted_mean_div = ((mean_in_div*side_lens["in"] +
mean_out_div*side_lens["out"]) /
float(side_lens["in"] + side_lens["out"]))
f.write("{0:30}\t{1}\n".format("Mean in Divergence Rate:",
mean_in_div))
f.write("{0:30}\t{1}\n".format("Mean out Divergence Rate:",
mean_out_div))
f.write("{0:30}\t{1}\n\n".format("Weighted Mean Divergence Rate:",
weighted_mean_div))
res_str = "No resolution so no resolved file for repeat {0}\n\n"
f.write(res_str.format(rep))
#for i, edge in enumerate(sorted(repeat_edges[rep]["in"])):
#header = "Repeat_{0}_unbridged_copy_{1}".format(rep, i)
#resolved_repeats[header] = ""
#seq_dict = {header:""}
#fp.write_fasta_dict(seq_dict, resolved_seq_file.format(i))
summ_vals.extend(["*", "*"])
#If bridged, find overlap and construct repeat copy sequences
else:
#Find end of repeat as min/max of in/out cons_vs_cons alignments
edge_limits = {}
for side in sorted(repeat_edges[rep]):
side_pairs = sorted(combinations(repeat_edges[rep][side], 2))
for edge_one, edge_two in side_pairs:
cons_cons_file = cons_vs_cons_path.format(
side_it[side], side, edge_one,
side_it[side], side, edge_two)
if (os.path.isfile(cons_cons_file) and
os.path.isfile(consensuses[(side_it[side],
side, edge_two)])):
cons_vs_cons = _read_alignment(cons_cons_file,
consensuses[(side_it[side],
side, edge_two)],
CONS_ALN_RATE)
if cons_vs_cons and cons_vs_cons[0]:
#collapse multiple consensus alignments
coll_cons = _collapse_cons_aln(cons_vs_cons)
one_start = coll_cons.qry_start
one_end = coll_cons.qry_end
two_start = coll_cons.trg_start
two_end = coll_cons.trg_end
if side == "in":
if (side, edge_one) not in edge_limits:
edge_limits[(side, edge_one)] = one_start
elif one_start < edge_limits[(side, edge_one)]:
edge_limits[(side, edge_one)] = one_start
if (side, edge_two) not in edge_limits:
edge_limits[(side, edge_two)] = two_start
elif two_start < edge_limits[(side, edge_two)]:
edge_limits[(side, edge_two)] = two_start
elif side == "out":
if (side, edge_one) not in edge_limits:
edge_limits[(side, edge_one)] = one_end
elif one_end > edge_limits[(side, edge_one)]:
edge_limits[(side, edge_one)] = one_end
if (side, edge_two) not in edge_limits:
edge_limits[(side, edge_two)] = two_end
elif two_end > edge_limits[(side, edge_two)]:
edge_limits[(side, edge_two)] = two_end
#For each edge_pair, find starting and ending indices of
#in, out, and template sequences to construct sequences
summ_resolution = []
resolved_sequences = []
for i, edge_pair in enumerate(sorted(bridged_edges)):
f.write("Repeat Copy {0}\n".format(i))
f.write("{0[0]} {0[1]:2} {1:3} {2[0]} {2[1]}\n".format(
edge_pair[0],
"->",
edge_pair[1]))
in_start = None
out_end = None
out_align = None
in_align = None
for side, edge_id in edge_pair:
if side == "in" and (side, edge_id) in edge_limits:
in_start = edge_limits[(side, edge_id)]
elif side == "out" and (side, edge_id) in edge_limits:
out_end = edge_limits[(side, edge_id)]
if os.path.isfile(cons_align_path.format(side_it[side],
side,
edge_id)):
cons_align = _read_alignment(
cons_align_path.format(side_it[side],
side,
edge_id),
template,
CONS_ALN_RATE)
if cons_align and cons_align[0]:
#collapse multiple consensus alignments
coll_cons_align = _collapse_cons_aln(cons_align)
if side == "in":
in_align = coll_cons_align
elif side == "out":
out_align = coll_cons_align
if not in_align:
in_start = 0
in_end = 0
temp_start = 0
#if in_start is None:
# in_start = 0
else:
in_start = in_align.qry_start
in_end = in_align.qry_end
temp_start = in_align.trg_end
#if in_start is None:
# in_start = in_align.qry_start
#f.write("CHECK: in qry {0} - {1} of {2}\n".format(in_align.qry_start,
# in_align.qry_end, in_align.qry_len))
#f.write("CHECK: in trg {0} - {1} of {2}\n".format(in_align.trg_start,
# in_align.trg_end, in_align.trg_len))
if not out_align:
temp_end = 0
out_start = 0
out_end = 0
#if out_end is None:
# out_end = 0
out_qry_seq = ""
out_trg_seq = ""
out_trg_end = 0
out_qry_end = 0
else:
temp_end = out_align.trg_start
out_start = out_align.qry_start
out_end = out_align.qry_end
#if out_end is None:
# out_end = out_align.qry_end
out_qry_seq = out_align.qry_seq
out_trg_seq = out_align.trg_seq
out_trg_end = out_align.trg_end
out_qry_end = out_align.qry_end
#f.write("CHECK: out qry {0} - {1} of {2}\n".format(out_align.qry_start,
# out_align.qry_end, out_align.qry_len))
#f.write("CHECK: out trg {0} - {1} of {2}\n".format(out_align.trg_start,
# out_align.trg_end, out_align.trg_len))
f.write("Alignment Indices:\n")
f.write("{0:10}\t{1:5} - {2:5}\n".format("in",
in_start, in_end))
#f.write("{0:10}\t{1:5} - {2:5}\n".format("Template",
#temp_start,
#temp_end))
f.write("{0:10}\t{1:5} - {2:5}\n".format("out",
out_start, out_end))
#Report gap/overlap length
gap_len = temp_end - temp_start
if gap_len >= 0:
f.write("{0}\t{1}\n".format("Gap between edges:", gap_len))
else:
f.write("{0}\t{1}\n\n".format("Overlap between edges:",
-gap_len))
#in sequence used to represent overlapping segment
#print check of overlapping segment
new_temp_end = temp_start
new_out_start = None
_, out_aln_qry = _index_mapping(out_qry_seq)
out_trg_aln, _ = _index_mapping(out_trg_seq)
in_edge = edge_pair[0][1]
out_edge = edge_pair[1][1]
if temp_start >= out_trg_end:
#f.write("CHECK, unhelpful case, temp_start {0}\n".format(temp_start))
new_out_start = out_qry_end
else:
#f.write("CHECK: temp_start {0}, len(out_trg_aln) {1}\n".format(temp_start, len(out_trg_aln)))
temp_trg_start = temp_start - temp_end
if temp_trg_start < len(out_trg_aln):
out_aln_ind = out_trg_aln[temp_trg_start]
#f.write("CHECK: out_aln_ind {0}, len(out_aln_qry) {1}\n".format(out_aln_ind, len(out_aln_qry)))
if out_aln_ind < len(out_aln_qry):
new_out_start = (out_start +
out_aln_qry[out_aln_ind])
#f.write("CHECK: new_out_start {0}\n".format(new_out_start))
#_check_overlap(
# consensuses[(side_it["in"], "in", in_edge)],
# template,
# consensuses[(side_it["out"], "out", out_edge)],
# -gap_len, in_start, in_end, temp_start, temp_end,
# out_start, out_end,
# new_out_start, in_align.qry_seq, in_align.trg_seq,
# out_align.qry_seq, out_align.trg_seq, out_trg_aln,
# out_aln_trg, out_qry_aln, out_aln_qry,
# out_align.trg_end, out_align.qry_end,
# in_align, out_align)
temp_end = new_temp_end
if new_out_start:
out_start = new_out_start
f.write("Adjusted Alignment Indices:\n")
f.write("{0:10}\t{1:5} - {2:5}\n".format("in",
in_start, in_end))
if temp_start != new_temp_end:
f.write("{0:10}\t{1:5} - {2:5}\n".format("Template",
temp_start,
new_temp_end))
f.write("{0:10}\t{1:5} - {2:5}\n\n\n".format("out",
new_out_start,
out_end))
in_edge = edge_pair[0][1]
out_edge = edge_pair[1][1]
#header = "_".join(["Repeat_{0}".format(rep),
# "bridged_copy_{0}".format(i),
# "in_{0}_{1}_{2}".format(in_edge,
# in_start,
# in_end),
# "template_{0}_{1}".format(temp_start,
# temp_end),
# "out_{0}_{1}_{2}".format(out_edge,
# out_start,
# out_end)])
header = "repeat_{0}_path_{1}_{2}".format(rep, in_edge, out_edge)
copy_seq = ""
if side_it["in"] > 0 and side_it["out"] > 0:
copy_seq = _construct_repeat_copy(
consensuses[(side_it["in"], "in", in_edge)],
template,
consensuses[(side_it["out"], "out", out_edge)],
in_start, in_end,
temp_start, temp_end,
out_start, out_end)
resolved_repeats[header] = copy_seq
if copy_seq:
seq_dict = {header:copy_seq}
fp.write_fasta_dict(seq_dict,
resolved_seq_file.format(rep, i))
#in_str = "".join(["in", str(in_edge)])
#out_str = "".join(["out", str(out_edge)])
#summ_resolution.append("|".join([in_str, out_str]))
summ_resolution.append("{0},{1}".format(in_edge,out_edge))
resolved_sequences.append(header)
#summ_vals.extend(["+".join(summ_resolution)])
summ_vals.append(":".join(summ_resolution))
summ_vals.append(":".join(resolved_sequences))
return bridged, resolved_repeats, summ_vals
def int_stats_postscript(rep, repeat_edges, integrated_stats,
                         resolved_rep_path, res_vs_res):
    """Append pairwise alignments between resolved repeat copies to the
    integrated stats file and return the mean divergence rate.

    Args:
        rep: repeat id.
        repeat_edges: map of repeat -> side ("in"/"out") -> edge ids.
        integrated_stats: path of the stats file to append to.
        resolved_rep_path: format template for resolved-copy FASTA files.
        res_vs_res: format template for copy-vs-copy alignment files.

    Returns:
        Mean divergence rate over all compared copy pairs (0 if none).
    """
    CONS_ALN_RATE = trestle_config.vals["cons_aln_rate"]
    divs = []
    with open(integrated_stats, "a") as f:
        res_inds = list(range(len(repeat_edges[rep]["in"])))
        f.write("Resolved Repeat Sequence Alignments\n")
        for res_one, res_two in sorted(combinations(res_inds, 2)):
            # Defaults reported when the alignment files are missing/empty.
            qry_start = 0
            qry_end = 0
            qry_len = 0
            trg_start = 0
            trg_end = 0
            trg_len = 0
            qry_seq = ""
            trg_seq = ""
            # Bug fix: the original passed the whole expression
            # `a and b` (two path strings) to a single os.path.isfile()
            # call; since a non-empty string is truthy, only the second
            # path was ever tested.  Check both files individually.
            if (os.path.isfile(res_vs_res.format(rep, res_one, res_two)) and
                    os.path.isfile(resolved_rep_path.format(rep, res_two))):
                res_align = _read_alignment(res_vs_res.format(rep, res_one,
                                                              res_two),
                                            resolved_rep_path.format(rep,
                                                                     res_two),
                                            CONS_ALN_RATE)
                if res_align and res_align[0]:
                    qry_start = res_align[0][0].qry_start
                    qry_end = res_align[0][0].qry_end
                    qry_len = res_align[0][0].qry_len
                    trg_start = res_align[0][0].trg_start
                    trg_end = res_align[0][0].trg_end
                    trg_len = res_align[0][0].trg_len
                    qry_seq = res_align[0][0].qry_seq
                    trg_seq = res_align[0][0].trg_seq
            f.write("Copy {0}|Copy {1}\n".format(res_one, res_two))
            f.write("{0}{1}{2:16}\t{3:5}-{4:5} of {5:5}\n".format(
                "Copy ", res_one, ":",
                qry_start, qry_end, qry_len))
            f.write("{0}{1}{2:16}\t{3:5}-{4:5} of {5:5}\n".format(
                "Copy ", res_two, ":",
                trg_start, trg_end, trg_len))
            div_rate = _calculate_divergence(qry_seq, trg_seq)
            divs.append(div_rate)
            f.write("{0:26}\t{1:.4f}\n".format("Divergence Rate:", div_rate))
        f.write("\n")
    return _mean(divs)
def _get_partitioning_info(part_list, edges):
edge_reads = {edge:0 for edge in edges}
tied_reads = 0
unassigned_reads = 0
for _, status, edge, _, _, _ in part_list:
if status == "Partitioned" and edge != "NA":
edge_reads[int(edge)] += 1
elif status == "Tied":
tied_reads += 1
elif status == "None":
unassigned_reads += 1
else:
exception_str = "Unknown status {0} in partitioning file"
raise Exception(exception_str.format(status))
return edge_reads, tied_reads, unassigned_reads
def _calculate_divergence(qry_seq, trg_seq):
if not qry_seq or not trg_seq:
return 0.0
curr_del = 0
curr_ins = 0
match_count = 0
mis_count = 0
del_count = 0
ins_count = 0
for q, t in zip(qry_seq, trg_seq):
if q == t:
match_count += 1
if curr_del != 0:
del_count += 1
curr_del = 0
if curr_ins != 0:
ins_count += 1
curr_ins = 0
elif q == "-" and t != "-":
curr_del += 1
if curr_ins != 0:
ins_count += 1
curr_ins = 0
elif q != "-" and t == "-":
curr_ins += 1
if curr_del != 0:
del_count += 1
curr_del = 0
elif q != t:
mis_count += 1
if curr_del != 0:
del_count += 1
curr_del = 0
if curr_ins != 0:
ins_count += 1
curr_ins = 0
else:
raise Exception("No alignment conditions fit, {0} {1}".format(q, t))
if curr_del != 0:
del_count += 1
curr_del = 0
if curr_ins != 0:
ins_count += 1
curr_ins = 0
indel_sim_rate = 0.0
total = match_count + mis_count + del_count + ins_count
if total != 0:
indel_sim_rate = match_count / float(total)
return 1 - indel_sim_rate
def _n50(reads_file):
    """Compute the N50 statistic of the sequences in *reads_file*.

    N50 is the length L such that sequences of length >= L cover at
    least half the total sequence length.  Returns 0 for an empty file.
    """
    reads_dict = fp.read_sequence_dict(reads_file)
    read_lengths = sorted([len(x) for x in reads_dict.values()], reverse=True)
    # Hoisted out of the loop: the original recomputed sum(read_lengths)
    # on every iteration, turning the scan into O(n^2).
    half_total = sum(read_lengths) // 2
    summed_len = 0
    n50 = 0
    for l in read_lengths:
        summed_len += l
        if summed_len >= half_total:
            n50 = l
            break
    return n50
def _get_median(lst):
if not lst:
raise ValueError("_get_median() arg is an empty sequence")
sorted_list = sorted(lst)
if len(lst) % 2 == 1:
return sorted_list[len(lst) // 2]
else:
mid1 = sorted_list[(len(lst) // 2) - 1]
mid2 = sorted_list[(len(lst) // 2)]
return mid1 + mid2 // 2
def _integrate_confirmed_pos(all_in_pos, all_out_pos):
in_conf, in_rej, in_pos = all_in_pos
out_conf, out_rej, _ = all_out_pos
integrated_confirmed = {"total":[], "sub":[], "ins":[], "del":[]}
integrated_rejected = {"total":[], "sub":[], "ins":[], "del":[]}
for pos in sorted(in_pos["total"]):
for pos_type in in_conf:
if pos in in_conf[pos_type] or pos in out_conf[pos_type]:
integrated_confirmed[pos_type].append(pos)
elif pos in in_rej[pos_type] or pos in out_rej[pos_type]:
integrated_rejected[pos_type].append(pos)
return integrated_confirmed, integrated_rejected, in_pos
def _get_combos(in_list, out_list):
    """Materialize every complete matching of in-edges to distinct out-edges."""
    return list(_combo_helper(in_list, out_list))
def _combo_helper(in_list, out_list):
if not in_list or not out_list:
yield []
return
else:
in1 = in_list[0]
for j in range(len(out_list)):
combo = (in1, out_list[j])
for rest in _combo_helper(in_list[1:],
out_list[:j] + out_list[j + 1:]):
yield [combo] + rest
def _get_aln_end(aln_start, aln_seq):
return aln_start+len(aln_seq.replace("-",""))
"""
def _check_overlap(in_file, temp_file, out_file, overlap, in_start, in_end,
temp_start, temp_end, out_start, out_end, new_out_start,
in_qry, in_trg, out_qry, out_trg, out_trg_aln, out_aln_trg,
out_qry_aln, out_aln_qry, out_trg_end, out_qry_end,
in_align, out_align):
in_dict = fp.read_sequence_dict(in_file)
in_seq = in_dict.values()[0]
temp_dict = fp.read_sequence_dict(temp_file)
temp_seq = temp_dict.values()[0]
out_dict = fp.read_sequence_dict(out_file)
out_seq = out_dict.values()[0]
for i in range(len(out_qry)/50-1, len(out_qry)/50+1):
aln_ind_st = i*50
aln_ind_end = (i+1)*50
if aln_ind_end > len(out_qry):
aln_ind_end = len(out_qry)
print 'ALN inds', aln_ind_st, aln_ind_end
qry_ind_st = out_aln_qry[aln_ind_st]
if aln_ind_end < len(out_aln_qry):
qry_ind_end = out_aln_qry[aln_ind_end]
else:
qry_ind_end = out_aln_qry[-1]
print 'QRY inds', qry_ind_st, qry_ind_end
trg_ind_st = out_aln_trg[aln_ind_st]
if aln_ind_end < len(out_aln_trg):
trg_ind_end = out_aln_trg[aln_ind_end]
else:
trg_ind_end = out_aln_trg[-1]
print 'TRG inds', trg_ind_st, trg_ind_end
print "TRG ALN", out_trg_aln[trg_ind_st:trg_ind_end]
print "ALN TRG", out_aln_trg[aln_ind_st:aln_ind_end]
print "QRY ALN", out_qry_aln[qry_ind_st:qry_ind_end]
print "ALN QRY", out_aln_qry[aln_ind_st:aln_ind_end]
print "QRY SEQ", out_qry[aln_ind_st:aln_ind_end]
print "TRG SEQ", out_trg[aln_ind_st:aln_ind_end]
print
print 'In end, in template end',in_end,temp_start
print 'AR In qry end',in_qry[-10:]
print 'AR In trg end',in_trg[-10:]
print 'Out old start, old end, new start, out template start', out_start,
print out_end, new_out_start, temp_end
print "Out_trg_end", out_trg_end
print "Out_qry_end", out_qry_end
print "In align qry inds", in_align.qry_start, in_align.qry_end,
print in_align.qry_len
print "In align trg inds", in_align.trg_start, in_align.trg_end,
print in_align.trg_len
print "Out align qry inds", out_align.qry_start, out_align.qry_end,
print out_align.qry_len
print "Out align trg inds", out_align.trg_start, out_align.trg_end,
print out_align.trg_len
print
print "Overlap:\t{0}".format(overlap)
print "In seq(-30 to end):\t{0}".format(in_seq[in_end-30:in_end])
temp_end_seq = temp_seq[temp_start-30:temp_start]
print "Template seq(-30 to end):\t{0}".format(temp_end_seq)
#print "Out seq:\t{0}".format(out_seq[out_start:out_end])
#print "AR In seq:\t{0}".format(in_seq[in_start-10:in_end+10])
#print "AR Template seq:\t{0}".format(temp_seq[temp_end:temp_start+10])
#print "AR Out seq:\t{0}".format(out_seq[out_start:out_end+10])
pre_new_out = out_seq[new_out_start-30:new_out_start]
post_new_out = out_seq[new_out_start:new_out_start+30]
print "New out seq(-30 to new start):\t{0}".format(pre_new_out)
print "New out seq(new_start to +30):\t{0}".format(post_new_out)
print
"""
def _construct_repeat_copy(in_file, temp_file, out_file, in_start, in_end,
                           temp_start, temp_end, out_start, out_end):
    """Stitch a repeat copy from slices of the in/template/out sequences.

    Each file is expected to hold at least one sequence; its first
    sequence is sliced by the given coordinates.  Returns "" when any
    file is missing or empty.
    """
    paths = (in_file, temp_file, out_file)
    if not all(os.path.isfile(p) for p in paths):
        return ""
    seq_dicts = [fp.read_sequence_dict(p) for p in paths]
    if not all(seq_dicts):
        return ""
    in_seq, temp_seq, out_seq = (list(d.values())[0] for d in seq_dicts)
    return (in_seq[in_start:in_end] +
            temp_seq[temp_start:temp_end] +
            out_seq[out_start:out_end])
def init_summary(summary_file):
    """Create (truncate) the summary file and write its header row."""
    header_labels = ["Repeat_ID", "Path", "Template", "Cov",
                     "#Conf_Pos", "Max_Pos_Gap", "Bridged?",
                     "Support", "Against", "Avg_Div", "Resolution",
                     "Sequences"]
    # Every column is left-justified in a 13-character field.
    header_line = " ".join("{:<13}".format(str(label))
                           for label in header_labels)
    with open(summary_file, "w") as out:
        out.write(header_line + "\n")
def update_summary(summ_items, summary_file):
    """Append one formatted row of repeat-resolution stats to the summary.

    *summ_items* bundles the repeat id, graph path, template length,
    coverage, the per-repeat summary values, mean divergence, and whether
    both resolved files are present.
    """
    (rep_id, graph_path, template_len, avg_cov, summ_vals,
     avg_div, both_resolved_present) = summ_items
    (confirmed_pos, max_pos_gap, bridged,
     support, against, resolution, sequences) = tuple(summ_vals)
    row = [rep_id,
           ",".join(str(p) for p in graph_path),
           template_len,
           "{:.4f}".format(avg_cov),
           confirmed_pos,
           max_pos_gap,
           # A repeat only counts as bridged if both copies were resolved.
           bridged and both_resolved_present,
           support,
           against,
           "{:.4f}".format(avg_div),
           resolution,
           sequences]
    with open(summary_file, "a") as out:
        out.write(" ".join("{:<13}".format(str(col)) for col in row) + "\n")
def remove_unneeded_files(repeat_edges, rep, side_labels, side_it, orient_dir,
                          template, extended, pol_temp_dir, pol_ext_dir,
                          pre_edge_reads, pre_partitioning, pre_read_align,
                          partitioning, cons_align, cut_cons_align,
                          read_align, confirmed_pos_path, edge_reads,
                          cut_cons, polishing_dir, cons_vs_cons,
                          int_confirmed_path, repeat_reads, frequency_path,
                          alignment_file, num_pol_iters, iter_pairs):
    """Clean up intermediate outputs for one repeat.

    Final products worth keeping are moved into an "additional_output"
    subdirectory of *orient_dir*; all per-iteration intermediates are
    deleted, and the (then empty) polishing directories are removed.
    Most string parameters are format templates that expand to
    per-side/per-edge/per-iteration file paths; *side_it* maps each side
    label to its last completed iteration number.
    """
    add_dir_name = "additional_output"
    add_dir = os.path.join(orient_dir, add_dir_name)
    if not os.path.isdir(add_dir):
        os.mkdir(add_dir)
    # Keep the final polished template/extended sequences under stable names.
    pol_name = "polished_{0}.fasta".format(num_pol_iters)
    pol_template = "polished_template.fasta"
    pol_ext = "polished_extended.{0}.{1}.fasta"
    pol_temp_file = os.path.join(pol_temp_dir, pol_name)
    if os.path.exists(pol_temp_file):
        os.rename(pol_temp_file, os.path.join(add_dir, pol_template))
    for side in side_labels:
        for edge_id in repeat_edges[rep][side]:
            pol_ext_file = os.path.join(pol_ext_dir.format(side, edge_id),
                                        pol_name)
            if os.path.exists(pol_ext_file):
                os.rename(pol_ext_file,
                          os.path.join(add_dir,
                                       pol_ext.format(side, edge_id)))
    # Build the delete/move work lists up front, before touching the disk.
    files_to_remove = [template]
    dirs_to_remove = [pol_temp_dir]
    files_to_move = [repeat_reads, frequency_path, alignment_file]
    if os.path.exists(pol_temp_dir):
        for fil in os.listdir(pol_temp_dir):
            files_to_remove.append(os.path.join(pol_temp_dir, fil))
    for side in side_labels:
        for edge_id in repeat_edges[rep][side]:
            files_to_remove.append(extended.format(side, edge_id))
            curr_pol_ext_dir = pol_ext_dir.format(side, edge_id)
            dirs_to_remove.append(curr_pol_ext_dir)
            if os.path.exists(curr_pol_ext_dir):
                for fil in os.listdir(curr_pol_ext_dir):
                    files_to_remove.append(os.path.join(curr_pol_ext_dir, fil))
            files_to_remove.append(pre_edge_reads.format(side, edge_id))
            files_to_remove.append(pre_read_align.format(side, edge_id))
            # Intermediates from every completed iteration are disposable.
            for it in range(1, side_it[side] + 1):
                files_to_remove.append(cons_align.format(it, side, edge_id))
                files_to_remove.append(read_align.format(it, side, edge_id))
                files_to_remove.append(edge_reads.format(it, side, edge_id))
                pol_cons = polishing_dir.format(it, side, edge_id)
                dirs_to_remove.append(pol_cons)
                if os.path.exists(pol_cons):
                    for fil in os.listdir(pol_cons):
                        files_to_remove.append(os.path.join(pol_cons, fil))
            # Keep only the last iteration's cut-consensus outputs.
            for it in range(1, side_it[side]):
                files_to_remove.append(cut_cons_align.format(it, side, edge_id))
                files_to_remove.append(cut_cons.format(it, side, edge_id))
            it = side_it[side]
            files_to_move.append(cut_cons_align.format(it, side, edge_id))
            files_to_move.append(cut_cons.format(it, side, edge_id))
        # Same policy for the consensus-vs-consensus comparisons per side.
        edge_pairs = sorted(combinations(repeat_edges[rep][side], 2))
        for edge_one, edge_two in edge_pairs:
            for it in range(1, side_it[side]):
                cons_cons_file = cons_vs_cons.format(it, side, edge_one,
                                                     it, side, edge_two)
                files_to_remove.append(cons_cons_file)
            it = side_it[side]
            cons_cons_file = cons_vs_cons.format(it, side, edge_one,
                                                 it, side, edge_two)
            files_to_move.append(cons_cons_file)
        files_to_remove.append(pre_partitioning.format(side))
        for it in range(1, side_it[side]):
            files_to_remove.append(partitioning.format(it, side))
            files_to_remove.append(confirmed_pos_path.format(it, side))
        # Keep the initial and final partitioning snapshots.
        for it in [0, side_it[side]]:
            files_to_move.append(partitioning.format(it, side))
        it = side_it[side]
        files_to_move.append(confirmed_pos_path.format(it, side))
    # Only the last integrated confirmed-positions file survives.
    last_conf_pos = int_confirmed_path.format(side_it[side_labels[0]],
                                              side_it[side_labels[1]])
    for it1, it2 in iter_pairs:
        curr_conf_pos = int_confirmed_path.format(it1, it2)
        if curr_conf_pos != last_conf_pos:
            files_to_remove.append(curr_conf_pos)
        else:
            files_to_move.append(curr_conf_pos)
    # Execute the plan: deletes first, then directory removals (rmdir only
    # succeeds on now-empty directories), then moves into add_dir.
    for f in files_to_remove:
        if os.path.exists(f):
            os.remove(f)
    for d in dirs_to_remove:
        if os.path.exists(d):
            os.rmdir(d)
    for f in files_to_move:
        if os.path.exists(f):
            split_path = os.path.split(f)
            new_file = os.path.join(split_path[0], add_dir_name, split_path[1])
            os.rename(f, new_file)
def _mean(lst):
if not lst:
return 0
return sum(lst) / len(lst)
| fenderglass/ABruijn | flye/trestle/trestle.py | Python | bsd-3-clause | 138,871 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/perf/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
import re
import sys
def _CommonChecks(input_api, output_api):
  """Performs common checks, which includes running pylint."""
  results = []
  saved_sys_path = sys.path
  try:
    # Modules in tools/perf depend on telemetry, so put it on sys.path
    # for the duration of the checks.
    sys.path = [os.path.join(os.pardir, 'telemetry')] + sys.path
    results += input_api.canned_checks.RunPylint(
        input_api, output_api, black_list=[], pylintrc='pylintrc')
    results += _CheckJson(input_api, output_api)
    results += _CheckWprShaFiles(input_api, output_api)
  finally:
    sys.path = saved_sys_path
  return results
def _CheckWprShaFiles(input_api, output_api):
  """Check whether the wpr sha files have matching URLs."""
  from catapult_base import cloud_storage
  results = []
  for affected_file in input_api.AffectedFiles(include_deletes=False):
    path = affected_file.AbsoluteLocalPath()
    if not path.endswith('wpr.sha1'):
      continue
    expected_hash = cloud_storage.ReadHash(path)
    # Look for the recording's hash in every known bucket.
    uploaded = False
    for bucket in cloud_storage.BUCKET_ALIASES.itervalues():
      if cloud_storage.Exists(bucket, expected_hash):
        uploaded = True
        break
    if uploaded:
      continue
    wpr_filename = path[:-5]
    results.append(output_api.PresubmitError(
        'The file matching %s is not in Cloud Storage yet.\n'
        'You can upload your new WPR archive file with the command:\n'
        'depot_tools/upload_to_google_storage.py --bucket '
        '<Your pageset\'s bucket> %s.\nFor more info: see '
        'http://www.chromium.org/developers/telemetry/'
        'record_a_page_set#TOC-Upload-the-recording-to-Cloud-Storage' %
        (path, wpr_filename)))
  return results
def _CheckJson(input_api, output_api):
  """Checks whether JSON files in this change can be parsed.

  Returns a one-element error list for the first unparsable JSON file,
  or an empty list when all JSON files parse.
  """
  for affected_file in input_api.AffectedFiles(include_deletes=False):
    filename = affected_file.AbsoluteLocalPath()
    if os.path.splitext(filename)[1] != '.json':
      continue
    try:
      # Bug fix: the original passed open(filename) directly to
      # json.load and never closed the handle; use a context manager
      # so the file is closed deterministically.
      with open(filename) as json_file:
        input_api.json.load(json_file)
    except ValueError:
      return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]
  return []
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run when a change is uploaded for review."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run when a change is committed."""
  return list(_CommonChecks(input_api, output_api))
def _IsBenchmarksModified(change):
"""Checks whether CL contains any modification to Telemetry benchmarks."""
for affected_file in change.AffectedFiles():
affected_file_path = affected_file.LocalPath()
file_path, _ = os.path.splitext(affected_file_path)
if (os.path.join('tools', 'perf', 'benchmarks') in file_path or
os.path.join('tools', 'perf', 'measurements') in file_path):
return True
return False
def PostUploadHook(cl, change, output_api):
  """git cl upload will call this hook after the issue is created/modified.

  This hook adds extra try bots list to the CL description in order to run
  Telemetry benchmarks on Perf trybots in addtion to CQ trybots if the CL
  contains any changes to Telemetry benchmarks.
  """
  rietveld_obj = cl.RpcServer()
  issue = cl.issue
  original_description = rietveld_obj.get_description(issue)
  already_tagged = re.search(
      r'^CQ_EXTRA_TRYBOTS=.*', original_description, re.M | re.I)
  if not _IsBenchmarksModified(change) or already_tagged:
    return []
  trybots = ['tryserver.chromium.perf:%s' % s for s in (
      'linux_perf_bisect',
      'mac_perf_bisect',
      'win_perf_bisect',
      'android_nexus5_perf_bisect',
  )]
  description = original_description
  description += '\nCQ_EXTRA_TRYBOTS=%s' % ';'.join(trybots)
  results = [output_api.PresubmitNotifyResult(
      'Automatically added Perf trybots to run Telemetry benchmarks on CQ.')]
  if description != original_description:
    rietveld_obj.update_description(issue, description)
  return results
| Just-D/chromium-1 | tools/perf/PRESUBMIT.py | Python | bsd-3-clause | 4,403 |
#!/usr/bin/env python
# -*- coding: utf-8 -*_
| fuzhouch/amberalertcn | server/amberalertcn/api/__init__.py | Python | bsd-3-clause | 47 |
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor emitting MySQL-flavored DDL for the PyMySQL backend.

    Overrides the generic templates with MySQL syntax (``MODIFY`` instead
    of ``ALTER COLUMN``, ``DROP FOREIGN KEY``, ``DROP PRIMARY KEY``, etc.).
    """

    sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"

    sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
    sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
    sql_alter_column_type = "MODIFY %(column)s %(type)s"
    sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"

    sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"

    sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
    sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"

    sql_delete_index = "DROP INDEX %(name)s ON %(table)s"

    alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
    alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'

    sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
    # NOTE: the original defined sql_delete_pk twice with the same value;
    # the redundant duplicate has been removed.
    sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"

    def quote_value(self, value):
        """Escape a literal value using the live connection's escaping."""
        return self.connection.escape(value)

    def skip_default(self, field):
        """
        MySQL doesn't accept default values for longtext and longblob
        and implicitly treats these columns as nullable.
        """
        return field.db_type(self.connection) in {'longtext', 'longblob'}

    def add_field(self, model, field):
        """Add the column, then backfill a one-off default if MySQL ignored it."""
        super(DatabaseSchemaEditor, self).add_field(model, field)
        # Simulate the effect of a one-off default for column types where
        # MySQL silently dropped the DEFAULT clause (see skip_default).
        if self.skip_default(field) and field.default not in {None, NOT_PROVIDED}:
            effective_default = self.effective_default(field)
            self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
                'table': self.quote_name(model._meta.db_table),
                'column': self.quote_name(field.column),
            }, [effective_default])
| cyaninc/django-mysql-pymysql | src/mysql_pymysql/schema.py | Python | bsd-3-clause | 2,073 |
#!/usr/bin/env python
###
# Copyright (c) 2002-2007 Systems in Motion
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
###
# This is an example from the Inventor Mentor,
# chapter 5, example 3.
#
# This example creates a TriangleStripSet. It creates
# a pennant-shaped flag.
#
import sys
from pivy.coin import *
from pivy.sogui import *
##############################################################
## CODE FOR The Inventor Mentor STARTS HERE
#
# Positions of all of the vertices:
#
# Flat list of (x, y, z) vertex coordinates; consecutive pairs form the
# zig-zag of the triangle strips (flag first, then the pole).
vertexPositions = (
( 0, 12, 0 ), ( 0, 15, 0),
(2.1, 12.1, -.2 ), ( 2.1, 14.6, -.2),
( 4, 12.5, -.7 ), ( 4, 14.5, -.7),
(4.5, 12.6, -.8 ), ( 4.5, 14.4, -.8),
( 5, 12.7, -1 ), ( 5, 14.4, -1),
(4.5, 12.8, -1.4 ), ( 4.5, 14.6, -1.4),
( 4, 12.9, -1.6 ), ( 4, 14.8, -1.6),
(3.3, 12.9, -1.8 ), ( 3.3, 14.9, -1.8),
( 3, 13, -2.0 ), ( 3, 14.9, -2.0),
(3.3, 13.1, -2.2 ), ( 3.3, 15.0, -2.2),
( 4, 13.2, -2.5 ), ( 4, 15.0, -2.5),
( 6, 13.5, -2.2 ), ( 6, 14.8, -2.2),
( 8, 13.4, -2 ), ( 8, 14.6, -2),
( 10, 13.7, -1.8 ), ( 10, 14.4, -1.8),
( 12, 14, -1.3 ), ( 12, 14.5, -1.3),
( 15, 14.9, -1.2 ), ( 15, 15, -1.2),
(-.5, 15, 0 ), ( -.5, 0, 0), # the flagpole
( 0, 15, .5 ), ( 0, 0, .5),
( 0, 15, -.5 ), ( 0, 0, -.5),
(-.5, 15, 0 ), ( -.5, 0, 0)
)
# Number of vertices in each strip.
numVertices = (
32, # flag
8 # pole
)
# Colors for the 12 faces
colors = (
( .5, .5, 1 ), # purple flag
( .4, .4, .4 ), # grey flagpole
)
# set this variable to 0 if you want to use the other method
# (1 selects the Inventor 2.1 SoVertexProperty path in makePennant).
IV_STRICT = 1
# Routine to create a scene graph representing a pennant.
def makePennant():
    """Build and return the scene graph (an SoSeparator) for the pennant."""
    result = SoSeparator()

    # A shape hints tells the ordering of polygons.
    # This insures double sided lighting.
    myHints = SoShapeHints()
    myHints.vertexOrdering = SoShapeHints.COUNTERCLOCKWISE
    result.addChild(myHints)

    if IV_STRICT:
        # This is the preferred code for Inventor 2.1
        # Using the new SoVertexProperty node is more efficient
        myVertexProperty = SoVertexProperty()

        # Define colors for the strips
        # (packed RGBA, one color per strip part)
        for i in range(2):
            myVertexProperty.orderedRGBA.set1Value(i, SbColor(colors[i]).getPackedValue())

        myVertexProperty.materialBinding = SoMaterialBinding.PER_PART

        # Define coordinates for vertices
        myVertexProperty.vertex.setValues(0, 40, vertexPositions)

        # Define the TriangleStripSet, made of two strips.
        myStrips = SoTriangleStripSet()
        myStrips.numVertices.setValues(0, 2, numVertices)

        myStrips.vertexProperty = myVertexProperty
        result.addChild(myStrips)

    else:
        # Pre-2.1 style: separate material/binding/coordinate nodes
        # added to the graph ahead of the strip set.
        # Define colors for the strips
        myMaterials = SoMaterial()
        myMaterials.diffuseColor.setValues(0, 2, colors)
        result.addChild(myMaterials)
        myMaterialBinding = SoMaterialBinding()
        myMaterialBinding.value = SoMaterialBinding.PER_PART
        result.addChild(myMaterialBinding)

        # Define coordinates for vertices
        myCoords = SoCoordinate3()
        myCoords.point.setValues(0, 40, vertexPositions)
        result.addChild(myCoords)

        # Define the TriangleStripSet, made of two strips.
        myStrips = SoTriangleStripSet()
        myStrips.numVertices.setValues(0, 2, numVertices)
        result.addChild(myStrips)

    return result
## CODE FOR The Inventor Mentor ENDS HERE
##############################################################
def main():
    """Initialize the GUI toolkit, build the scene, and run the viewer loop."""
    # Initialize Inventor and the GUI binding.
    myWindow = SoGui.init(sys.argv[0])
    # Idiom fix: compare to None with 'is None' rather than '== None'.
    if myWindow is None:
        sys.exit(1)

    root = makePennant()

    myViewer = SoGuiExaminerViewer(myWindow)
    myViewer.setSceneGraph(root)
    myViewer.setTitle("Triangle Strip Set: Pennant")
    myViewer.show()
    myViewer.viewAll()

    SoGui.show(myWindow)
    SoGui.mainLoop()

if __name__ == "__main__":
    main()
| bleepbloop/Pivy | examples/Mentor/05.3.TriangleStripSet.py | Python | isc | 4,616 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Rafael Bodill http://github.com/rafi
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import requests
from beets import ui
from beets import dbcore
from beets import config
from beets import plugins
from beets.dbcore import types
API_URL = 'http://ws.audioscrobbler.com/2.0/'
class LastImportPlugin(plugins.BeetsPlugin):
    """Beets plugin that imports play counts from a Last.fm library."""
    def __init__(self):
        super(LastImportPlugin, self).__init__()
        # Credentials are shared with other lastfm plugins, so they live
        # in the global config rather than under this plugin's section.
        lastfm_defaults = {'user': '', 'api_key': ''}
        config['lastfm'].add(lastfm_defaults)
        config['lastfm']['api_key'].redact = True
        # Plugin-local tuning knobs.
        self.config.add({'per_page': 500, 'retry_limit': 3})
        self.item_types = {'play_count': types.INTEGER}
    def commands(self):
        """Expose the ``lastimport`` CLI subcommand."""
        subcommand = ui.Subcommand('lastimport',
                                   help='import last.fm play-count')
        def run(lib, opts, args):
            import_lastfm(lib, self._log)
        subcommand.func = run
        return [subcommand]
def import_lastfm(lib, log):
    """Fetch the configured user's Last.fm library page by page and copy
    play counts onto matching items in ``lib``.

    :param lib: beets library to update.
    :param log: plugin logger (str.format-style message templates).
    :raises ui.UserError: if no last.fm user is configured or the API
        reports an empty library.
    """
    user = config['lastfm']['user'].get(unicode)
    per_page = config['lastimport']['per_page'].get(int)
    if not user:
        raise ui.UserError('You must specify a user name for lastimport')
    log.info('Fetching last.fm library for @{0}', user)
    page_total = 1
    page_current = 0
    found_total = 0
    unknown_total = 0
    retry_limit = config['lastimport']['retry_limit'].get(int)
    # Iterate through a yet to be known page total count
    while page_current < page_total:
        log.info('Querying page #{0}{1}...',
                 page_current + 1,
                 '/{}'.format(page_total) if page_total > 1 else '')
        for retry in range(0, retry_limit):
            page = fetch_tracks(user, page_current + 1, per_page)
            if 'tracks' in page:
                # The API reports the real page count on every response.
                page_total = int(page['tracks']['@attr']['totalPages'])
                if page_total < 1:
                    # It means nothing to us!
                    raise ui.UserError('Last.fm reported no data.')
                track = page['tracks']['track']
                found, unknown = process_tracks(lib, track, log)
                found_total += found
                unknown_total += unknown
                break
            else:
                log.error('ERROR: unable to read page #{0}',
                          page_current + 1)
                log.debug('API response: {}', page)
                # BUG FIX: the original tested ``retry < retry_limit``,
                # which is always true inside ``range(0, retry_limit)``,
                # so the final failure was reported as another retry and
                # the FAIL branch below was unreachable.
                if retry < retry_limit - 1:
                    log.info(
                        'Retrying page #{0}... ({1}/{2} retry)',
                        page_current + 1, retry + 1, retry_limit
                    )
                else:
                    # BUG FIX: the original passed a second format string
                    # ('tried {1} times') as a positional argument, which
                    # mangled the message, and logged the 0-based index.
                    log.error('FAIL: unable to fetch page #{0}, '
                              'tried {1} times', page_current + 1, retry + 1)
        page_current += 1
    log.info('... done!')
    log.info('finished processing {0} song pages', page_total)
    log.info('{0} unknown play-counts', unknown_total)
    log.info('{0} play-counts imported', found_total)
def fetch_tracks(user, page, limit):
    """Request one page of ``library.gettracks`` for *user* from Last.fm.

    Returns the decoded JSON payload as a dict.
    NOTE(review): ``bytes(...)`` relies on Python 2 semantics, where it
    is an alias for ``str`` — under Python 3 this would misbehave.
    """
    return requests.get(API_URL, params={
        'method': 'library.gettracks',
        'user': user,
        'api_key': plugins.LASTFM_KEY,
        'page': bytes(page),
        'limit': bytes(limit),
        'format': 'json',
    }).json()
def process_tracks(lib, tracks, log):
    """Match each Last.fm track against ``lib`` and store its play count.

    Matching is attempted in order: MusicBrainz track id, then
    artist/title/album substrings, then artist/title, then artist/title
    with a typographic apostrophe substituted into the title.
    Returns a ``(found, unknown)`` tuple of counts.
    """
    total = len(tracks)
    total_found = 0
    total_fails = 0
    log.info('Received {0} tracks in this page, processing...', total)
    for num in xrange(0, total):
        song = ''
        trackid = tracks[num]['mbid'].strip()
        artist = tracks[num]['artist'].get('name', '').strip()
        title = tracks[num]['name'].strip()
        album = ''
        if 'album' in tracks[num]:
            album = tracks[num]['album'].get('name', '').strip()
        log.debug(u'query: {0} - {1} ({2})', artist, title, album)
        # First try to query by musicbrainz's trackid
        if trackid:
            song = lib.items(
                dbcore.query.MatchQuery('mb_trackid', trackid)
            ).get()
        # Otherwise try artist/title/album
        if not song:
            log.debug(u'no match for mb_trackid {0}, trying by '
                      u'artist/title/album', trackid)
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title),
                dbcore.query.SubstringQuery('album', album)
            ])
            song = lib.items(query).get()
        # If not, try just artist/title
        if not song:
            log.debug(u'no album match, trying by artist/title')
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title)
            ])
            song = lib.items(query).get()
        # Last resort, try just replacing to utf-8 quote
        if not song:
            title = title.replace("'", u'\u2019')
            log.debug(u'no title match, trying utf-8 single quote')
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title)
            ])
            song = lib.items(query).get()
        if song:
            # Play counts are overwritten, not accumulated.
            count = int(song.get('play_count', 0))
            new_count = int(tracks[num]['playcount'])
            log.debug(u'match: {0} - {1} ({2}) '
                      u'updating: play_count {3} => {4}',
                      song.artist, song.title, song.album, count, new_count)
            song['play_count'] = new_count
            song.store()
            total_found += 1
        else:
            total_fails += 1
            log.info(u' - No match: {0} - {1} ({2})',
                     artist, title, album)
    if total_fails > 0:
        log.info('Acquired {0}/{1} play-counts ({2} unknown)',
                 total_found, total, total_fails)
    return total_found, total_fails
| kareemallen/beets | beetsplug/lastimport.py | Python | mit | 6,738 |
from wagtailmenus.utils.version import get_version, get_stable_branch_name
# major.minor.patch.release.number
# release must be one of alpha, beta, rc, or final
VERSION = (3, 1, 0, "alpha", 0)
# PEP 440 version string derived from VERSION.
__version__ = get_version(VERSION)
# Name of the stable release branch for this version line.
stable_branch_name = get_stable_branch_name(VERSION)
# Legacy (pre-Django 3.2) app configuration hook.
default_app_config = "wagtailmenus.apps.WagtailMenusConfig"
def get_main_menu_model_string():
    """Return the main menu model as a dotted ``app.Model`` string.

    Handy for third-party code that must reference the model (for
    example in foreign keys) without importing the model class itself.
    """
    from wagtailmenus.conf import settings
    model_string = settings.MAIN_MENU_MODEL
    return model_string
def get_flat_menu_model_string():
    """Return the flat menu model as a dotted ``app.Model`` string.

    Handy for third-party code that must reference the model (for
    example in foreign keys) without importing the model class itself.
    """
    from wagtailmenus.conf import settings
    model_string = settings.FLAT_MENU_MODEL
    return model_string
def get_main_menu_model():
    """Return the configured main menu model class.

    Resolves the ``WAGTAILMENUS_MAIN_MENU_MODEL`` setting, falling back
    to the built-in :class:`~wagtailmenus.models.MainMenu` model when no
    custom model is defined.
    """
    from wagtailmenus.conf import settings
    menu_model = settings.models.MAIN_MENU_MODEL
    return menu_model
def get_flat_menu_model():
    """
    Get the model from the ``WAGTAILMENUS_FLAT_MENU_MODEL`` setting.
    Useful for developers extending wagtailmenus who need the actual
    model. Defaults to the standard
    :class:`~wagtailmenus.models.FlatMenu` model if no custom model is
    defined.
    """
    from wagtailmenus.conf import settings
    return settings.models.FLAT_MENU_MODEL
| rkhleics/wagtailmenus | wagtailmenus/__init__.py | Python | mit | 1,864 |
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import collections
import math
import multiprocessing
import os
import sys
import Image
import Qtrac
# Per-image outcome: job/copy/scale counters plus the produced file name.
Result = collections.namedtuple("Result", "todo copied scaled name")
def main():
    """Entry point: parse the command line, run the scaler, summarize."""
    size, smooth, source, target, concurrency = handle_commandline()
    Qtrac.report("starting...")
    was_canceled = False
    try:
        scale(size, smooth, source, target, concurrency)
    except KeyboardInterrupt:
        # Ctrl-C stops the run but still prints a (partial) summary.
        Qtrac.report("canceling...")
        was_canceled = True
    summarize(concurrency, was_canceled)
def handle_commandline():
    """Parse command-line arguments.

    Returns a 5-tuple ``(size, smooth, source, target, concurrency)``
    where *source* and *target* are absolute paths.  Creates the target
    directory if needed and rejects identical source/target paths.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--concurrency", type=int,
            default=multiprocessing.cpu_count(),
            help="specify the concurrency (for debugging and "
                "timing) [default: %(default)d]")
    parser.add_argument("-s", "--size", default=400, type=int,
            help="make a scaled image that fits the given dimension "
                "[default: %(default)d]")
    parser.add_argument("-S", "--smooth", action="store_true",
            help="use smooth scaling (slow but good for text)")
    parser.add_argument("source",
            help="the directory containing the original .xpm images")
    parser.add_argument("target",
            help="the directory for the scaled .xpm images")
    args = parser.parse_args()
    source = os.path.abspath(args.source)
    target = os.path.abspath(args.target)
    if source == target:
        # BUG FIX: the original called args.error(), but the parsed
        # Namespace has no error() method; only the parser does.
        parser.error("source and target must be different")
    # Use the normalized absolute path consistently (the original mixed
    # args.target and target here).
    if not os.path.exists(target):
        os.makedirs(target)
    return args.size, args.smooth, source, target, args.concurrency
def scale(size, smooth, source, target, concurrency):
    """Feed every scaling job into the coroutine pipeline, round-robin."""
    pipeline = create_pipeline(size, smooth, concurrency)
    for index, job in enumerate(get_jobs(source, target)):
        source_image, target_image = job
        # Address each job to one worker by cycling through worker ids.
        pipeline.send((source_image, target_image, index % concurrency))
def create_pipeline(size, smooth, concurrency):
    """Chain one scaler coroutine per worker, all sharing a results sink."""
    sink = results()
    pipeline = None
    for worker_id in range(concurrency):
        # Each new scaler forwards unmatched jobs to the previous chain.
        pipeline = scaler(pipeline, sink, size, smooth, worker_id)
    return pipeline
def get_jobs(source, target):
    """Yield ``(source_path, target_path)`` pairs, one per entry in *source*."""
    return ((os.path.join(source, entry), os.path.join(target, entry))
            for entry in os.listdir(source))
@Qtrac.coroutine
def scaler(receiver, sink, size, smooth, me):
    """Coroutine worker: handle jobs addressed to *me*, forward the rest.

    Receives ``(sourceImage, targetImage, who)`` tuples.  When ``who``
    equals this worker's id the image is scaled and the Result is sent
    to *sink*; otherwise the tuple is passed down the chain to
    *receiver* (if any).
    """
    while True:
        sourceImage, targetImage, who = (yield)
        if who == me:
            try:
                result = scale_one(size, smooth, sourceImage, targetImage)
                sink.send(result)
            except Image.Error as err:
                # Report the failure but keep the worker alive.
                Qtrac.report(str(err), True)
        elif receiver is not None:
            receiver.send((sourceImage, targetImage, who))
@Qtrac.coroutine
def results():
    """Sink coroutine: accumulate totals as attributes on the function.

    Counters are stored on ``results`` itself (``results.todo`` etc.) so
    summarize() can read them after the run completes.
    """
    while True:
        result = (yield)
        results.todo += result.todo
        results.copied += result.copied
        results.scaled += result.scaled
        Qtrac.report("{} {}".format("copied" if result.copied else "scaled",
                os.path.basename(result.name)))
# Initialize the module-level counters read by summarize().
results.todo = results.copied = results.scaled = 0
def scale_one(size, smooth, sourceImage, targetImage):
    """Scale one image file; copy it unchanged if it already fits *size*.

    Returns a ``Result(todo, copied, scaled, name)`` tuple.
    """
    oldImage = Image.from_file(sourceImage)
    if oldImage.width <= size and oldImage.height <= size:
        # Already small enough: just copy.
        oldImage.save(targetImage)
        return Result(1, 1, 0, targetImage)
    else:
        if smooth:
            scale = min(size / oldImage.width, size / oldImage.height)
            newImage = oldImage.scale(scale)
        else:
            # Fast path: integer subsampling instead of smooth scaling.
            stride = int(math.ceil(max(oldImage.width / size,
                                       oldImage.height / size)))
            newImage = oldImage.subsample(stride)
        newImage.save(targetImage)
        return Result(1, 0, 1, targetImage)
def summarize(concurrency, canceled):
    """Report the totals accumulated on the results() sink."""
    message = "copied {} scaled {} ".format(results.copied, results.scaled)
    skipped = results.todo - (results.copied + results.scaled)
    if skipped:
        message += "skipped {} ".format(skipped)
    message += "using {} coroutines".format(concurrency)
    if canceled:
        message += " [canceled]"
    Qtrac.report(message)
    print()
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| nwiizo/workspace_2017 | pipng/imagescale-c.py | Python | mit | 4,916 |
import sys
import cStringIO
import OpenSSL
from libpathod import pathod, version
from netlib import tcp, http
from netlib.exceptions import HttpException, TlsException
import tutils
class TestPathod(object):
    """Unit tests for the Pathod server object itself (no daemon)."""
    def test_logging(self):
        s = cStringIO.StringIO()
        p = pathod.Pathod(("127.0.0.1", 0), logfp=s)
        assert len(p.get_log()) == 0
        id = p.add_log(dict(s="foo"))
        assert p.log_by_id(id)
        assert len(p.get_log()) == 1
        p.clear_log()
        assert len(p.get_log()) == 0
        # The in-memory log is a bounded ring buffer of size LOGBUF.
        for _ in range(p.LOGBUF + 1):
            p.add_log(dict(s="foo"))
        assert len(p.get_log()) <= p.LOGBUF
class TestNoWeb(tutils.DaemonTests):
    """Daemon started with the built-in web interface disabled."""
    noweb = True
    def test_noweb(self):
        assert self.get("200:da").status_code == 200
        # Web paths are rejected with pathod's 800 error code.
        assert self.getpath("/").status_code == 800
class TestTimeout(tutils.DaemonTests):
    """Daemon with a very short connection timeout."""
    timeout = 0.01
    def test_noweb(self):
        # FIXME: Add float values to spec language, reduce test timeout to
        # increase test performance
        # This is a bodge - we have some platform difference that causes
        # different exceptions to be raised here.
        tutils.raises(Exception, self.pathoc, ["get:/:p1,1"])
        assert self.d.last_log()["type"] == "timeout"
class TestNoApi(tutils.DaemonTests):
    """Daemon started with the JSON API disabled."""
    noapi = True
    def test_noapi(self):
        # API endpoints disappear; the web page loses its log section.
        assert self.getpath("/log").status_code == 404
        r = self.getpath("/")
        assert r.status_code == 200
        assert not "Log" in r.content
class TestNotAfterConnect(tutils.DaemonTests):
    """Plain-text daemon that does not upgrade to SSL after CONNECT."""
    ssl = False
    ssloptions = dict(
        not_after_connect=True
    )
    def test_connect(self):
        r, _ = self.pathoc(
            [r"get:'http://foo.com/p/202':da"],
            connect_to=("localhost", self.d.port)
        )
        assert r[0].status_code == 202
class TestCustomCert(tutils.DaemonTests):
    """SSL daemon serving a custom certificate for all hosts."""
    ssl = True
    ssloptions = dict(
        certs=[("*", tutils.test_data.path("data/testkey.pem"))],
    )
    def test_connect(self):
        r, _ = self.pathoc([r"get:/p/202"])
        r = r[0]
        assert r.status_code == 202
        assert r.sslinfo
        # The served chain must carry the custom cert's subject.
        assert "test.com" in str(r.sslinfo.certchain[0].get_subject())
class TestSSLCN(tutils.DaemonTests):
    """SSL daemon with an explicitly configured certificate CN."""
    ssl = True
    ssloptions = dict(
        cn="foo.com"
    )
    def test_connect(self):
        r, _ = self.pathoc([r"get:/p/202"])
        r = r[0]
        assert r.status_code == 202
        assert r.sslinfo
        assert r.sslinfo.certchain[0].get_subject().CN == "foo.com"
class TestNohang(tutils.DaemonTests):
    """Daemon with pauses ('p' spec tokens) disabled."""
    nohang = True
    def test_nohang(self):
        r = self.get("200:p0,0")
        assert r.status_code == 800
        l = self.d.last_log()
        assert "Pauses have been disabled" in l["response"]["msg"]
class TestHexdump(tutils.DaemonTests):
    """Daemon configured to hexdump traffic to its log."""
    hexdump = True
    def test_hexdump(self):
        # Smoke test only: the request must simply succeed under hexdump.
        r = self.get(r"200:b'\xf0'")
class TestNocraft(tutils.DaemonTests):
    """Daemon with response crafting disabled."""
    nocraft = True
    def test_nocraft(self):
        r = self.get(r"200:b'\xf0'")
        assert r.status_code == 800
        assert "Crafting disabled" in r.content
class CommonTests(tutils.DaemonTests):
    """Behavior shared by the plain and SSL daemons.

    Subclassed by TestDaemon and TestDaemonSSL, which set ``ssl``.
    """
    def test_binarydata(self):
        r = self.get(r"200:b'\xf0'")
        l = self.d.last_log()
        # FIXME: Other binary data elements
    def test_sizelimit(self):
        # 1 gigabyte body exceeds the configured size limit.
        r = self.get("200:b@1g")
        assert r.status_code == 800
        l = self.d.last_log()
        assert "too large" in l["response"]["msg"]
    def test_preline(self):
        r, _ = self.pathoc([r"get:'/p/200':i0,'\r\n'"])
        assert r[0].status_code == 200
    def test_info(self):
        assert tuple(self.d.info()["version"]) == version.IVERSION
    def test_logs(self):
        assert self.d.clear_log()
        assert not self.d.last_log()
        rsp = self.get("202:da")
        assert len(self.d.log()) == 1
        assert self.d.clear_log()
        assert len(self.d.log()) == 0
    def test_disconnect(self):
        # 'd200' disconnects after 200 bytes of a 100k body.
        rsp = self.get("202:b@100k:d200")
        assert len(rsp.content) < 200
    def test_parserr(self):
        rsp = self.get("400:msg,b:")
        assert rsp.status_code == 800
    def test_static(self):
        rsp = self.get("200:b<file")
        assert rsp.status_code == 200
        assert rsp.content.strip() == "testfile"
    def test_anchor(self):
        rsp = self.getpath("anchor/foo")
        assert rsp.status_code == 202
    def test_invalid_first_line(self):
        # Raw socket write of a garbage request line.
        c = tcp.TCPClient(("localhost", self.d.port))
        c.connect()
        if self.ssl:
            c.convert_to_ssl()
        c.wfile.write("foo\n\n\n")
        c.wfile.flush()
        l = self.d.last_log()
        assert l["type"] == "error"
        assert "foo" in l["msg"]
    def test_invalid_content_length(self):
        tutils.raises(
            HttpException,
            self.pathoc,
            ["get:/:h'content-length'='foo'"]
        )
        l = self.d.last_log()
        assert l["type"] == "error"
        assert "Unparseable Content Length" in l["msg"]
    def test_invalid_headers(self):
        tutils.raises(HttpException, self.pathoc, ["get:/:h'\t'='foo'"])
        l = self.d.last_log()
        assert l["type"] == "error"
        assert "Invalid headers" in l["msg"]
    def test_access_denied(self):
        rsp = self.get("=nonexistent")
        assert rsp.status_code == 800
    def test_source_access_denied(self):
        # Absolute paths outside the static directory are refused.
        rsp = self.get("200:b</foo")
        assert rsp.status_code == 800
        assert "File access denied" in rsp.content
    def test_proxy(self):
        r, _ = self.pathoc([r"get:'http://foo.com/p/202':da"])
        assert r[0].status_code == 202
    def test_websocket(self):
        r, _ = self.pathoc(["ws:/p/"], ws_read_limit=0)
        assert r[0].status_code == 101
        r, _ = self.pathoc(["ws:/p/ws"], ws_read_limit=0)
        assert r[0].status_code == 101
    def test_websocket_frame(self):
        r, _ = self.pathoc(
            ["ws:/p/", "wf:f'wf:b\"test\"':pa,1"],
            ws_read_limit=1
        )
        assert r[1].payload == "test"
    def test_websocket_frame_reflect_error(self):
        r, _ = self.pathoc(
            ["ws:/p/", "wf:-mask:knone:f'wf:b@10':i13,'a'"],
            ws_read_limit=1,
            timeout=1
        )
        # FIXME: Race Condition?
        assert "Parse error" in self.d.text_log()
    def test_websocket_frame_disconnect_error(self):
        self.pathoc(["ws:/p/", "wf:b@10:d3"], ws_read_limit=0)
        assert self.d.last_log()
class TestDaemon(CommonTests):
    """CommonTests against a plain-text daemon, plus CONNECT behavior."""
    ssl = False
    def test_connect(self):
        r, _ = self.pathoc(
            [r"get:'http://foo.com/p/202':da"],
            connect_to=("localhost", self.d.port),
            ssl=True
        )
        assert r[0].status_code == 202
    def test_connect_err(self):
        # CONNECT without the SSL upgrade must fail against this daemon.
        tutils.raises(
            HttpException,
            self.pathoc,
            [r"get:'http://foo.com/p/202':da"],
            connect_to=("localhost", self.d.port)
        )
class TestDaemonSSL(CommonTests):
    """CommonTests against an SSL daemon, plus handshake failure cases."""
    ssl = True
    def test_ssl_conn_failure(self):
        c = tcp.TCPClient(("localhost", self.d.port))
        c.rbufsize = 0
        c.wbufsize = 0
        c.connect()
        # Send garbage before the handshake to break the negotiation.
        c.wfile.write("\0\0\0\0")
        tutils.raises(TlsException, c.convert_to_ssl)
        l = self.d.last_log()
        assert l["type"] == "error"
        assert "SSL" in l["msg"]
    def test_ssl_cipher(self):
        r, _ = self.pathoc([r"get:/p/202"])
        assert r[0].status_code == 202
        assert self.d.last_log()["cipher"][1] > 0
class TestHTTP2(tutils.DaemonTests):
    """HTTP/2 over TLS; only runs when OpenSSL was built with ALPN."""
    ssl = True
    noweb = True
    noapi = True
    nohang = True
    # ALPN is required to negotiate HTTP/2; skip the test otherwise.
    if OpenSSL._util.lib.Cryptography_HAS_ALPN:
        def test_http2(self):
            r, _ = self.pathoc(["GET:/"], ssl=True, use_http2=True)
            assert r[0].status_code == 800
| Kriechi/pathod | test/test_pathod.py | Python | mit | 7,878 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible draft-schematic template for casual boots #12."""
    result = Intangible()
    result.template = "object/draft_schematic/clothing/shared_clothing_boots_casual_12.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
'''
split a file into two randomly, line by line.
Usage: split.py <input file> <output file 1> <output file 2> [<probability of writing to the first file>]'
'''
import csv
import sys
import random
# Probability that a row goes to the FIRST output file (default 0.9).
try:
    P = float( sys.argv[4] )
except IndexError:
    P = 0.9
print "P = %s" % ( P )
input_file = sys.argv[1]
output_file1 = sys.argv[2]
output_file2 = sys.argv[3]
i = open( input_file )
o1 = open( output_file1, 'wb' )
o2 = open( output_file2, 'wb' )
reader = csv.reader( i )
writer1 = csv.writer( o1 )
writer2 = csv.writer( o2 )
#headers = reader.next()
#writer1.writerow( headers )
#writer2.writerow( headers )
counter = 0
for line in reader:
    # Draw once per row and route it to one of the two writers.
    r = random.random()
    if r > P:
        writer2.writerow( line )
    else:
        writer1.writerow( line )
    counter += 1
    # Progress indicator every 100k rows (Python 2 print statement).
    if counter % 100000 == 0:
        print counter
| zygmuntz/kaggle-advertised-salaries | split.py | Python | mit | 825 |
from allauth.socialaccount.providers.base import AuthAction, ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class Scope(object):
    """OAuth2 scope values requested from the YNAB API."""
    ACCESS = 'read-only'
class YNABAccount(ProviderAccount):
    """Social account wrapper for YNAB; no custom behaviour needed."""
    pass
class YNABProvider(OAuth2Provider):
    """allauth OAuth2 provider for You Need A Budget (YNAB)."""
    id = 'ynab'
    name = 'YNAB'
    account_class = YNABAccount
    def get_default_scope(self):
        """Request read-only access by default."""
        return [Scope.ACCESS]
    def get_auth_params(self, request, action):
        """Force account selection and consent when re-authenticating."""
        params = super(YNABProvider, self).get_auth_params(request, action)
        if action == AuthAction.REAUTHENTICATE:
            params['prompt'] = 'select_account consent'
        return params
    def extract_uid(self, data):
        """Return the YNAB user id from the API payload as a string."""
        return str(data['data']['user']['id'])
provider_classes = [YNABProvider]
| lukeburden/django-allauth | allauth/socialaccount/providers/ynab/provider.py | Python | mit | 852 |
#! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from copy import deepcopy
from quex.core_engine.state_machine.core import *
import quex.core_engine.state_machine.nfa_to_dfa as nfa_to_dfa
import quex.core_engine.state_machine.hopcroft_minimization as hopcroft
def do(the_state_machine, pre_context_state_machine):
    """Sets up a pre-condition to the given state machine. This process
       is entirely different from any sequentialization or paralellization
       of state machines. Here, the state machine representing the pre-
       condition ist **not** webbed into the original state machine!

       Instead, the following happens:

          -- the pre-condition state machine is inverted, because
             it is to be walked through backwards.
          -- the inverted state machine is marked with the state machine id
             of the_state_machine.
          -- the original state machine will refere to the inverse
             state machine of the pre-condition.
          -- the initial state origins and the origins of the acceptance
             states are marked as 'pre-conditioned' indicating the id
             of the inverted state machine of the pre-condition.

       Returns ``the_state_machine``, modified in place.
    """
    #___________________________________________________________________________________________
    # (*) do some consistency checking
    assert the_state_machine.__class__.__name__ == "StateMachine"
    assert pre_context_state_machine.__class__.__name__ == "StateMachine"
    # -- state machines with no states are senseless here.
    assert not the_state_machine.is_empty()
    assert not pre_context_state_machine.is_empty()
    # -- trivial pre-conditions should be added last, for simplicity
    assert not the_state_machine.core().pre_context_begin_of_line_f(), \
        "This function was not designed to deal with trivially pre-conditioned state machines." + \
        "Please, make sure the trivial pre-conditioning happens *after* regular pre-conditions."
    #___________________________________________________________________________________________
    # (*) invert the state machine of the pre-condition
    #     (inverse NFA -> DFA -> minimized DFA)
    inverse_pre_context = pre_context_state_machine.get_inverse()
    inverse_pre_context = nfa_to_dfa.do(inverse_pre_context)
    inverse_pre_context = hopcroft.do(inverse_pre_context)
    # (*) let the state machine refer to it
    #     [Is this necessary? Is it not enough that the acceptance origins point to it? <fschaef>]
    the_state_machine.core().set_pre_context_sm(inverse_pre_context)
    pre_context_sm_id = inverse_pre_context.get_id()
    # (*) create origin data, in case where there is none yet create new one.
    #     (do not delete, otherwise existing information gets lost)
    #     Only acceptance states carry the pre-context id.
    for state in the_state_machine.states.values():
        if not state.is_acceptance(): continue
        state.core().set_pre_context_id(pre_context_sm_id)
    return the_state_machine
| grit-engine/grit-engine | dependencies/quex-0.34.1/quex/core_engine/state_machine/setup_pre_context.py | Python | mit | 3,013 |
# Epsilon-greedy action selection snippet.
# NOTE(review): `base`, `np` and `self` come from the surrounding
# StreamingBandit execution context -- this file is not a standalone module.
e = .1  # exploration probability
mean_list = base.List(self.get_theta(key="treatment"), base.Mean, ["control", "treatment"])
if np.random.binomial(1,e) == 1:
    # Explore: pick an arm uniformly at random.
    self.action["treatment"] = mean_list.random()
    # presumably e * (1/2) for a uniform pick over two arms -- TODO confirm
    self.action["propensity"] = 0.1*0.5
else:
    # Exploit: pick the arm with the highest estimated mean.
    self.action["treatment"] = mean_list.max()
self.action["propensity"] = (1-e) | Nth-iteration-labs/streamingbandit | app/defaults/E-Greedy/get_action.py | Python | mit | 312 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 04 13:10:20 2015
@author: bayevskihk
"""
import sys
def main(argv):
    """Convert build/obj/*.vh hex listings into zero-padded .rom images.

    ``argv`` must hold two integers: the data-section start address and
    the end address (both counted in 32-bit words).
    """
    number = len(argv)
    data_addr = 0
    last_addr = 0
    if(number != 2):
        return
    else:
        try:
            data_addr = int(argv[0])
            last_addr = int(argv[1])
        except:
            # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
            print ("Wrong arguments")
            return
    # text.rom is padded to the data-section start, data.rom to the remainder.
    if(parse("build/obj/text.vh", "build/text.rom", data_addr) < 0):
        print ("Wrong text file")
        return
    if(parse("build/obj/data.vh", "build/data.rom", last_addr - data_addr) < 0):
        print ("Wrong text file")
        return
    print ("Convertion was successfull")
def parse(file_name, rom_name, addr_last):
    """Convert one Verilog-hex listing into a .rom image file.

    Groups each line's byte tokens four at a time into 32-bit words,
    then zero-pads the output up to ``addr_last`` words.  Returns 0 on
    success and -1 if the input lacks an initial '@' address marker.
    """
    hex_file = open(file_name, 'r')
    rom_file = open(rom_name, 'w')
    # rom_file.truncate()
    hex_parts = hex_file.readline()
    line = ""
    try:
        # The first line must carry an '@address' marker.
        hex_parts.index("@")
    except:
        return -1
    attached = 0
    words = 0
    rom_file.write("@00000000\n");
    while(1):
        hex_parts = hex_file.readline()
        # hex_parts = hex_parts.translate({None: "\n"})
        hex_parts = hex_parts.split();
        if(len(hex_parts) < 4):
            break
        for part in hex_parts:
            if(len(part) == 0):
                continue
            line += part
            attached += 1
            # Four byte tokens form one 32-bit output word.
            if(attached == 4):
                attached = 0
                words += 1
                rom_file.write(line + "\n")
                line = ""
    # Zero-fill the remainder of the ROM image.
    for i in range(addr_last - words):
        rom_file.write("00000000\n")
    rom_file.close()
    return 0
# Pass only the user-supplied arguments (drop the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
| chebykinn/university | circuitry/lab4/convert.py | Python | mit | 1,703 |
import conv
import tools
from ..api.clss import api
from ..sql.clss import sql
from pandas import DataFrame
import time as tm
class data(object):
    """Aggregates exchange trade data and derived OLHCV price bars.

    ``trd`` and ``prc`` are accumulating pandas DataFrames; ``jobs``
    holds the registered (exchange, symbol) polling jobs that are run
    through the api helper and persisted via the sql helper.
    """
    def __init__(self):
        self.a = api()          # REST API job runner
        self.s = sql()          # SQL persistence layer
        self.jobs = []          # registered polling jobs
        self.trd = DataFrame()  # accumulated raw trades
        self.prc = DataFrame()  # accumulated price bars
    def add_trades(self, exchange, symbol, limit='', since='',
                   auto_since='no', ping_limit=1.0):
        """Register a recurring trade-download job for one market."""
        job = {'exchange':exchange,'symbol':symbol}
        self.a.add_job(exchange, symbol, 'trades', limit=limit, since=since,
                       auto_since=auto_since, ping_limit=ping_limit)
        self.jobs.append(job)
    def get_trades(self, exchange='', symbol='', start=''):
        """Load stored trades from SQL into ``self.trd`` (deduplicated)."""
        trd = self.s.select('trades',exchange=exchange,
                            symbol=symbol,start=start)
        self.trd = self.trd.append(trd)
        self.trd = self.trd.drop_duplicates(['tid','exchange'])
    def run_trades(self, exchange, symbol):
        """Poll the exchange API once and append the new trades."""
        self.trd = self.trd.append(self.a.run(exchange,symbol,'trades'))
        self.trd = self.trd.drop_duplicates(['tid','exchange'])
    def run_loop(self, time, to_sql=60, log='no'):
        """Poll all registered jobs for ``time`` seconds.

        Flushes accumulated rows to SQL every ``to_sql`` seconds.
        """
        dump = tm.time() + to_sql
        end = tm.time() + time
        while tm.time() < end:
            for job in self.jobs:
                self.run_trades(job['exchange'], job['symbol'])
            if tm.time() > dump:
                dump = tm.time() + to_sql
                self.to_sql(log)
    def get_price(self, exchange='', symbol='',
                  freq='', start=''):
        """Load stored price bars from SQL into ``self.prc``; return them."""
        prc = self.s.select('price',exchange=exchange,symbol=symbol,
                            freq=freq, start=start)
        self.prc = self.prc.append(prc)
        self.prc = self.prc.drop_duplicates(['timestamp','exchange',
                                             'symbol','freq'])
        return prc
    def run_price(self, exchange, symbol, freq, label='left',
                  from_sql='no', start=''):
        """Build OLHCV bars at ``freq`` from in-memory (or SQL) trades."""
        if from_sql == 'yes':
            self.get_trades(exchange, symbol, start=start)
            # get_trades already applied exchange, symbol checks
            trd = self.trd
        else:
            trd = self.trd
            # NOTE: '<>' is the Python 2 inequality operator.
            if exchange <> '':
                trd = self.trd[self.trd.exchange==exchange]
            if symbol <> '':
                trd = self.trd[self.trd.symbol==symbol]
        trd = tools.date_index(trd)
        if len(trd.index) > 0:
            prc = conv.olhcv(trd, freq, label=label)
            self.prc = self.prc.append(prc)
            self.prc = self.prc.drop_duplicates(['timestamp','exchange',
                                            'symbol','freq'])
    def to_sql(self, log='no'):
        """Insert rows not yet marked 'sent' into SQL, then mark them all."""
        if 'sent' in self.trd:
            trd = self.trd[self.trd['sent']<>'yes']
        else:
            trd = self.trd
        if 'sent' in self.prc:
            prc = self.prc[self.prc['sent']<>'yes']
        else:
            prc = self.prc
        self.s.insert('trades', trd)
        self.s.insert('price', prc)
        if log == 'yes':
            print trd
            print prc
        self.trd['sent'] = 'yes'
        self.prc['sent'] = 'yes'
| rosspalmer/bitQuant | bitquant/data/clss.py | Python | mit | 3,127 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
A commandline tool for parsing RDF in different formats and serializing the
resulting graph to a chosen format.
"""
import sys
from optparse import OptionParser
import logging
import rdflib
from rdflib import plugin
from rdflib.store import Store
from rdflib.graph import Graph
from rdflib.namespace import Namespace, RDF, RDFS, OWL, XSD
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdfextras.tools.pathutils import guess_format
# Default rdflib store: an in-memory store with no connection string.
STORE_CONNECTION = ''
STORE_TYPE = 'IOMemory'
# Fallback formats when none is given and guessing fails.
DEFAULT_INPUT_FORMAT = 'xml'
DEFAULT_OUTPUT_FORMAT = 'n3'
# Namespace prefixes pre-bound on the output graph.
NS_BINDINGS = {
    'rdf': RDF,
    'rdfs': RDFS,
    'owl': OWL,
    'xsd': XSD,
    'dc': "http://purl.org/dc/elements/1.1/",
    'dct': "http://purl.org/dc/terms/",
    'foaf': "http://xmlns.com/foaf/0.1/",
    'wot': "http://xmlns.com/wot/0.1/"
}
def parse_and_serialize(input_files, input_format, guess,
                        outfile, output_format, ns_bindings,
                        store_conn=STORE_CONNECTION, store_type=STORE_TYPE):
    """Parse all *input_files* into one graph and serialize it to *outfile*.

    '-' reads from stdin; when *guess* is set and no input format is
    given, the format is inferred from each file's extension.  When
    *outfile* is None (--no-out), parsing happens but nothing is written.
    """
    store = plugin.get(store_type, Store)()
    store.open(store_conn)
    graph = Graph(store)
    for prefix, uri in ns_bindings.items():
        graph.namespace_manager.bind(prefix, uri, override=False)
    for fpath in input_files:
        use_format, kws = _format_and_kws(input_format)
        if fpath == '-':
            fpath = sys.stdin
        elif not input_format and guess:
            use_format = guess_format(fpath) or DEFAULT_INPUT_FORMAT
        graph.parse(fpath, format=use_format, **kws)
    if outfile:
        output_format, kws = _format_and_kws(output_format)
        graph.serialize(destination=outfile, format=output_format, base=None, **kws)
    store.rollback()
def _format_and_kws(fmt):
"""
>>> _format_and_kws("fmt")
('fmt', {})
>>> _format_and_kws("fmt:+a")
('fmt', {'a': True})
>>> _format_and_kws("fmt:a")
('fmt', {'a': True})
>>> _format_and_kws("fmt:+a,-b")
('fmt', {'a': True, 'b': False})
>>> _format_and_kws("fmt:c=d")
('fmt', {'c': 'd'})
"""
fmt, kws = fmt, {}
if fmt and ':' in fmt:
fmt, kwrepr = fmt.split(':')
for kw in kwrepr.split(','):
if '=' in kw:
k, v = kw.split('=')
kws[k] = v
elif kw.startswith('-'):
kws[kw[1:]] = False
elif kw.startswith('+'):
kws[kw[1:]] = True
else: # same as "+"
kws[kw] = True
return fmt, kws
def make_option_parser():
    """Build the OptionParser describing rdfpipe's command line."""
    parser_names = _get_plugin_names(Parser)
    serializer_names = _get_plugin_names(Serializer)
    # Example shown in the help text for format keyword arguments.
    kw_example = "FORMAT:(+)KW1,-KW2,KW3=VALUE"
    oparser = OptionParser(
        "%prog [-h] [-i INPUT_FORMAT] [-o OUTPUT_FORMAT] [--ns=PFX=NS ...] [-] [FILE ...]",
        description=__doc__.strip() + (
            " Reads file system paths, URLs or from stdin if '-' is given."
            " The result is serialized to stdout."),
        version="%prog " + "(using rdflib %s)" % rdflib.__version__)
    oparser.add_option('-i', '--input-format',
        type=str, #default=DEFAULT_INPUT_FORMAT,
        help="Format of the input document(s)."
        " Available input formats are: %s." % parser_names +
        " If no format is given, it will be guessed from the file name extension."
        " Keywords to parser can be given after format like: %s." % kw_example
        ,
        metavar="INPUT_FORMAT")
    oparser.add_option('-o', '--output-format',
        type=str, default=DEFAULT_OUTPUT_FORMAT,
        help="Format of the graph serialization."
        " Available output formats are: %s."
        % serializer_names +
        " Default format is: '%default'." +
        " Keywords to serializer can be given after format like: %s." % kw_example
        ,
        metavar="OUTPUT_FORMAT")
    oparser.add_option('--ns',
        action="append", type=str,
        help="Register a namespace binding (QName prefix to a base URI). "
        "This can be used more than once.",
        metavar="PREFIX=NAMESPACE")
    oparser.add_option('--no-guess', dest='guess',
        action='store_false', default=True,
        help="Don't guess format based on file suffix.")
    oparser.add_option('--no-out',
        action='store_true', default=False,
        help="Don't output the resulting graph (useful for checking validity of input).")
    oparser.add_option('-w', '--warn',
        action='store_true', default=False,
        help="Output warnings to stderr (by default only critical errors).")
    return oparser
# Comma-separated names of all registered rdflib plugins of *kind*.
_get_plugin_names = lambda kind: ", ".join(p.name for p in plugin.plugins(kind=kind))
def main():
    """Command-line entry point: parse options and run the pipeline."""
    oparser = make_option_parser()
    opts, args = oparser.parse_args()
    if len(args) < 1:
        oparser.print_usage()
        oparser.exit()
    # Only critical errors unless -w/--warn raises verbosity to warnings.
    if opts.warn:
        loglevel = logging.WARNING
    else:
        loglevel = logging.CRITICAL
    logging.basicConfig(level=loglevel)
    # Start from the default bindings and layer user-supplied ones on top.
    ns_bindings = dict(NS_BINDINGS)
    if opts.ns:
        for ns_kw in opts.ns:
            pfx, uri = ns_kw.split('=')
            ns_bindings[pfx] = uri
    outfile = sys.stdout
    if opts.no_out:
        outfile = None
    parse_and_serialize(args, opts.input_format, opts.guess,
                        outfile, opts.output_format, ns_bindings)
| bhavanaananda/DataStage | test/RDFDatabank/rdfextras/tools/rdfpipe.py | Python | mit | 5,524 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Delete record tests."""
from __future__ import absolute_import, print_function
from flask import url_for
from helpers import get_json, record_url
from invenio_pidstore.models import PersistentIdentifier
from mock import patch
from sqlalchemy.exc import SQLAlchemyError
def test_valid_delete(app, indexed_records):
    """Test VALID record delete request (DELETE .../records/<record_id>)."""
    # Exercise the endpoint both without and with an Accept header.
    header_variants = ([], [('Accept', 'video/mp4')])
    for record_index, headers in enumerate(header_variants):
        pid, record = indexed_records[record_index]
        with app.test_client() as client:
            delete_res = client.delete(record_url(pid), headers=headers)
            assert delete_res.status_code == 204
            # A deleted record is reported as gone (410), not missing.
            assert client.get(record_url(pid)).status_code == 410
def test_delete_deleted(app, indexed_records):
    """Test deleting a previously deleted record."""
    pid, record = indexed_records[0]
    with app.test_client() as client:
        res = client.delete(record_url(pid))
        assert res.status_code == 204
        # Second delete: the record is gone (410) with an error payload.
        res = client.delete(record_url(pid))
        assert res.status_code == 410
        data = get_json(res)
        assert 'message' in data
        assert data['status'] == 410
def test_delete_notfound(app, indexed_records):
    """Test INVALID record delete request (DELETE .../records/<record_id>)."""
    with app.test_client() as client:
        # Check that DELETE with a non-existing id will return 404.
        # (The original comment said GET, but the request below is a DELETE.)
        res = client.delete(url_for(
            'invenio_records_rest.recid_item', pid_value=0))
        assert res.status_code == 404
def test_delete_with_sqldatabase_error(app, indexed_records):
    """Test record delete request (DELETE .../records/<record_id>) when the
    database raises: the transaction must roll back and the record survive.
    (The original docstring incorrectly said GET.)
    """
    pid, record = indexed_records[0]
    with app.test_client() as client:
        def raise_error():
            raise SQLAlchemyError()
        # Force an SQLAlchemy error that will rollback the transaction.
        with patch.object(PersistentIdentifier, 'delete',
                          side_effect=raise_error):
            res = client.delete(record_url(pid))
            assert res.status_code == 500
    with app.test_client() as client:
        # The failed delete must not have removed the record.
        res = client.get(record_url(pid))
        assert res.status_code == 200
| tiborsimko/invenio-records-rest | tests/test_views_item_delete.py | Python | mit | 2,501 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentMSOX92804A(agilent90000):
    "Agilent Infiniium MSOX92804A IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # setdefault so a subclass can pre-set its own instrument ID before
        # this __init__ chain runs.
        self.__dict__.setdefault('_instrument_id', 'MSOX92804A')
        super(agilentMSOX92804A, self).__init__(*args, **kwargs)
        # Model-specific capabilities: 4 analog + 16 digital channels.
        self._analog_channel_count = 4
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        # 28 GHz analog bandwidth.
        self._bandwidth = 28e9
        # Rebuild channel objects to reflect the counts set above.
        self._init_channels()
| alexforencich/python-ivi | ivi/agilent/agilentMSOX92804A.py | Python | mit | 1,692 |
from django.contrib.auth import authenticate, get_user_model
from graphene import AbstractType, relay, Field, String, ObjectType, Union, List
from users.jwt_schema import TokensSuccess
from users.jwt_util import get_jwt_token
from users.schema.definitions import Viewer
class Error(ObjectType):
    """A single user-facing form validation error.

    https://medium.com/@tarkus/validation-and-user-errors-in-graphql-mutations-39ca79cd00bf#.ts99uxfnr
    """
    # Name of the offending form field (e.g. 'email'); may be omitted.
    key = String()
    # Human-readable error description; always present.
    message = String(required=True)
class FormErrors(ObjectType):
    """Container for the list of Error objects produced by a form submit.

    https://medium.com/@tarkus/validation-and-user-errors-in-graphql-mutations-39ca79cd00bf#.ts99uxfnr
    """
    # All validation errors collected for the submitted form.
    errors = List(Error)
class AuthFormUnion(Union):
    """Returns either token error or token success"""
    # GraphQL union: resolves to Viewer on success, FormErrors on failure.
    class Meta:
        types = (Viewer, FormErrors)
class LoginMutation(relay.ClientIDMutation):
    """Authenticate a user by email/password.

    Returns a Viewer (user + JWT tokens) on success, or FormErrors
    describing which field failed otherwise.
    """

    class Input:
        email = String(required=True)
        password = String(required=True)

    auth_form_payload = Field(AuthFormUnion)

    @classmethod
    def mutate_and_get_payload(cls, input, context, info):
        email = input.get('email')
        password = input.get('password')
        user_exists = get_user_model().objects.filter(email=email)
        errors = []
        if not user_exists:
            error = Error(
                key='email',
                message='A user with this email doesn\'t exist.')
            errors.append(error)
            return LoginMutation(FormErrors(errors))
        user_password_correct = user_exists[0].check_password(password)
        if not user_password_correct:
            error = Error(key='password', message='Password is incorrect')
            errors.append(error)
            return LoginMutation(FormErrors(errors))
        user = authenticate(username=email, password=password)
        jwt_token = get_jwt_token(user)
        if user and jwt_token:
            tokens = TokensSuccess(
                jwt_token
            )
            viewer = Viewer(
                user=user,
                tokens=tokens
            )
            return LoginMutation(viewer)
        # BUG FIX: the original fell through and implicitly returned None
        # when authenticate() or token generation failed (e.g. an inactive
        # user), producing a silent null payload. Surface an explicit
        # form error instead.
        errors.append(Error(
            key='email',
            message='Unable to log in with the provided credentials.'))
        return LoginMutation(FormErrors(errors))
class SignupUserMutation(relay.ClientIDMutation):
    """Create a new account for the given email.

    Returns a logged-in Viewer on success or FormErrors when the email
    is already taken.
    """

    class Input:
        email = String(required=True)
        password = String(required=True)

    auth_form_payload = Field(AuthFormUnion)

    @classmethod
    def mutate_and_get_payload(cls, input, context, info):
        email = input.get('email')
        password = input.get('password')
        existing = get_user_model().objects.filter(email=email)
        if existing:
            # Duplicate email: report a single field-level error.
            duplicate_error = Error(
                key='email',
                message='A user with this email already exists.')
            return SignupUserMutation(FormErrors([duplicate_error]))
        new_user = get_user_model().objects.create_user(email=email, password=password)
        viewer = Viewer(
            user=new_user,
            tokens=TokensSuccess(token=get_jwt_token(new_user))
        )
        return SignupUserMutation(viewer)
class UserMutations(AbstractType):
    """Root authentication mutations: login and signup."""
    login = LoginMutation.Field()
    signup = SignupUserMutation.Field()
| ncrmro/reango | server/users/schema/mutations.py | Python | mit | 3,252 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Ship template for the tier-2 Black Sun medium ship (s03)."""
    result = Ship()
    result.template = "object/ship/shared_blacksun_medium_s03_tier2.iff"
    result.attribute_template_id = -1
    result.stfName("","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
# Experiment name is derived from this script's filename (e.g. 'e405').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
# Base data-source configuration shared by all experiments below; each
# exp_* function deep-copies and tweaks it.
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television'
        # 'dish washer',
        # ['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],
    max_input_power=1000,
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=512,
    # random_window=64,
    output_one_appliance=False,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    # skip_probability=0.9,
    one_target_per_seq=False,
    n_seq_per_batch=64,
    subsample_target=4,
    include_diff=False,
    include_power=True,
    clip_appliance_power=True,
    target_is_prediction=False,
    # independently_center_inputs=True,
    # standardise_input=True,
    # standardise_targets=True,
    # unit_variance_targets=True,
    input_padding=2,
    lag=0
    # classification=True
    # reshape_target_to_2D=True
    # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
    #              'std': np.array([ 0.12636775], dtype=np.float32)},
    # target_stats={
    #     'mean': np.array([ 0.04066789, 0.01881946,
    #                        0.24639061, 0.17608672, 0.10273963],
    #                      dtype=np.float32),
    #     'std': np.array([ 0.11449792, 0.07338708,
    #                       0.26608968, 0.33463112, 0.21250485],
    #                      dtype=np.float32)}
)
N = 50
# Base network/training configuration; each exp_* function deep-copies and
# overrides (notably loss_function and the learning-rate schedule).
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    # loss_function=lambda x, t: mse(x, t).mean(),
    loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-1,
    learning_rate_changes_by_iteration={
        1000: 1e-2,
        2000: 1e-3
        # 800: 1e-4
        # 500: 1e-3
        # 4000: 1e-03,
        # 6000: 5e-06,
        # 7000: 1e-06
        # 2000: 5e-06
        # 3000: 1e-05
        # 7000: 5e-06,
        # 10000: 1e-06,
        # 15000: 5e-07,
        # 50000: 1e-07
    },
    do_save_activations=True,
    # auto_reshape=False,
    # plotter=CentralOutputPlotter
    # plotter=MDNPlotter
)
def exp_a(name):
    """Baseline: tanh hidden layers, softplus output, MSE loss.

    Resumes training from the parameters saved at iteration 2000.
    """
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    net.load_params(2000)
    return net
def exp_b(name):
    """Like exp_a but the first layer has large weights (Uniform(25)) and
    sane (Normal) biases; trains from scratch.
    """
    # tanh and softplus output
    # sane inits for other layers
    # just large weights for first layer, sane biases
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_c(name):
    """Like exp_a but the first layer has large biases (Uniform(25)) and
    sane (Normal) weights; trains from scratch.
    """
    # tanh and softplus output
    # sane inits for other layers
    # just large biases for first layer, sane weights
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'b': Uniform(25),
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_d(name):
    """Like exp_a but with batch normalisation inserted after each dense,
    recurrent and convolutional layer (dense/conv layers use identity
    activations, the nonlinearity moves into the BatchNormLayer).
    """
    # tanh and softplus output
    # sane inits for other layers
    # batch norm
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': identity,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': BatchNormLayer,
            'axes': (0, 1),
            'nonlinearity': tanh
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': identity,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BatchNormLayer,
            'axes': (0, 1),
            'nonlinearity': tanh
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,  # need nonlinearity for hid_to_hid
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': BatchNormLayer,
            'axes': (0, 1),
            'nonlinearity': identity
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': identity,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': BatchNormLayer,
            'nonlinearity': tanh
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': BatchNormLayer,
            'nonlinearity': tanh,
            'axes': (0, 1)
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_e(name):
    """Like exp_a but with max_input_power raised to 5900 W; trains from
    scratch.
    """
    # like a but with max power = 5900W
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=5900
    ))
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_f(name):
    """Like exp_e (5900 W cap) but disaggregating all 5 appliances; resumes
    from the parameters saved at iteration 1000.
    """
    # like a but with max power = 5900W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=5900
    ))
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    net.load_params(1000)
    return net
def exp_g(name):
    """Like exp_f but with max_input_power=1000 W; trains from scratch."""
    # like a but with max power = 1000W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=1000
    ))
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_h(name):
    """Like exp_f (5900 W, 5 appliances) plus skip_probability=0.9; trains
    from scratch.
    """
    # like a but with max power = 5900W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=5900,
        skip_probability=0.9
    ))
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def exp_i(name):
    """Like exp_g (1000 W, 5 appliances) plus skip_probability=0.9; trains
    from scratch.
    """
    # like a but with max power = 1000W and 5 appliances
    # tanh and softplus output
    # sane inits for other layers
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        standardise_targets=True,
        unit_variance_targets=True,
        max_input_power=1000,
        skip_probability=0.9
    ))
    source_dict_copy['appliances'] = [
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ]
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source,
        loss_function=lambda x, t: mse(x, t).mean(),
        learning_rate=1e-3,
        learning_rate_changes_by_iteration={
            1000: 1e-4,
            2000: 1e-5
        }
    ))
    net_dict_copy['layers_config']= [
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Uniform(25),
            'b': Uniform(25)
        },
        {
            'type': DenseLayer,
            'num_units': 50,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50)),
            'b': Normal(std=1/sqrt(50))
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 40,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(50))
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 80,
            'W_in_to_hid': Normal(std=1/sqrt(50)),
            'gradient_steps': GRADIENT_STEPS,
            'nonlinearity': tanh,
            'learn_init': False,
            'precompute_input': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run experiments f-i in sequence, logging failures and freeing the
    training activations between runs.
    """
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    # EXPERIMENTS = list('abcdefghi')
    EXPERIMENTS = list('fghi')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): eval() executes a string built by
            # init_experiment (e.g. "exp_f('e405f')"). Acceptable only
            # because the string is generated locally, never from user input.
            net = eval(func_call)
            run_experiment(net, epochs=5000)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception:
            # (was `except Exception as exception:` with the binding unused)
            logger.exception("Exception")
            # raise
        else:
            # Free the largest cached structure before the next experiment.
            del net.source.train_activations
            gc.collect()
        finally:
            logging.shutdown()
# Script entry point.
if __name__ == "__main__":
    main()
| JackKelly/neuralnilm_prototype | scripts/e405.py | Python | mit | 27,430 |
# coding=utf-8
import json
import re
import responses
import pytest
import mapbox
def test_geocoder_default_name():
    """Default name is set"""
    # With no argument the geocoder targets the 'mapbox.places' dataset.
    assert mapbox.Geocoder().name == 'mapbox.places'
def test_geocoder_name():
    """Named dataset name is set"""
    # An explicit dataset name passed to the constructor is kept verbatim.
    assert mapbox.Geocoder('mapbox.places-permanent').name == 'mapbox.places-permanent'
def _check_coordinate_precision(coord, precision):
"""Coordinate precision is <= specified number of digits"""
if '.' not in coord:
return True
else:
return len(coord.split('.')[-1]) <= precision
@responses.activate
def test_geocoder_forward():
    """Forward geocoding works"""
    # Register a canned response for the exact URL (query string included)
    # so no real network traffic occurs.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?access_token=pk.test',
        match_querystring=True,
        body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(access_token='pk.test').forward('1600 pennsylvania ave nw')
    assert response.status_code == 200
    assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
@responses.activate
def test_geocoder_forward_geojson():
    """Forward geocoding .geojson method works"""
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?access_token=pk.test',
        match_querystring=True,
        body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(access_token='pk.test').forward('1600 pennsylvania ave nw')
    assert response.status_code == 200
    # .geojson() must be an alias for the parsed JSON body.
    assert response.geojson() == response.json()
@responses.activate
def test_geocoder_reverse():
    """Reverse geocoding works"""
    lon, lat = -77.4371, 37.5227
    body = json.dumps({"query": [lon, lat]})
    # The mocked URL embeds the lon,lat pair exactly as the client sends it.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?access_token=pk.test'.format(lon, lat),
        match_querystring=True,
        body=body,
        status=200,
        content_type='application/json')
    response = mapbox.Geocoder(access_token='pk.test').reverse(lon=lon, lat=lat)
    assert response.status_code == 200
    assert response.json()['query'] == [lon, lat]
@responses.activate
def test_geocoder_reverse_geojson():
    """Reverse geocoding geojson works"""
    lon, lat = -77.4371, 37.5227
    body = json.dumps({"query": [lon, lat]})
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?access_token=pk.test'.format(lon, lat),
        match_querystring=True,
        body=body,
        status=200,
        content_type='application/json')
    response = mapbox.Geocoder(access_token='pk.test').reverse(lon=lon, lat=lat)
    assert response.status_code == 200
    # .geojson() must be an alias for the parsed JSON body.
    assert response.geojson() == response.json()
def test_geocoder_place_types():
    """Place types are enumerated"""
    # Sorted items pin both the full key set and each description string.
    assert sorted(mapbox.Geocoder().place_types.items()) == [
        ('address', "A street address with house number. Examples: 1600 Pennsylvania Ave NW, 1051 Market St, Oberbaumstrasse 7."),
        ('country', "Sovereign states and other political entities. Examples: United States, France, China, Russia."),
        ('district', "Second order administrative division. Only used when necessary. Examples: Tianjin, Beijing"),
        ('locality', "A smaller area within a place that possesses official status and boundaries. Examples: Oakleigh (Melbourne)"),
        ('neighborhood', 'A smaller area within a place, often without formal boundaries. Examples: Montparnasse, Downtown, Haight-Ashbury.'),
        ('place', "City, town, village or other municipality relevant to a country's address or postal system. Examples: Cleveland, Saratoga Springs, Berlin, Paris."),
        ('poi', "Places of interest including commercial venues, major landmarks, parks, and other features. Examples: Subway Restaurant, Yosemite National Park, Statue of Liberty."),
        ('poi.landmark', "Places of interest that are particularly notable or long-lived like parks, places of worship and museums. A strict subset of the poi place type. Examples: Yosemite National Park, Statue of Liberty."),
        ('postcode', "Postal code, varies by a country's postal system. Examples: 20009, CR0 3RL."),
        ('region', "First order administrative divisions within a country, usually provinces or states. Examples: California, Ontario, Essonne.")]
def test_validate_country_codes_err():
    """_validate_country_codes rejects unknown country codes."""
    # BUG FIX: the original try/except passed silently when no exception was
    # raised; pytest.raises makes a missing exception fail the test.
    with pytest.raises(mapbox.InvalidCountryCodeError) as excinfo:
        mapbox.Geocoder()._validate_country_codes(('us', 'bogus'))
    assert str(excinfo.value) == "bogus"
def test_validate_country():
    """Valid ISO codes are joined into a single 'country' query parameter."""
    validated = mapbox.Geocoder()._validate_country_codes(('us', 'br'))
    assert validated == {'country': 'us,br'}
def test_validate_place_types_err():
    """_validate_place_types rejects unknown place types."""
    # BUG FIX: the original try/except passed silently when no exception was
    # raised; pytest.raises makes a missing exception fail the test.
    with pytest.raises(mapbox.InvalidPlaceTypeError) as excinfo:
        mapbox.Geocoder()._validate_place_types(('address', 'bogus'))
    assert str(excinfo.value) == "bogus"
def test_validate_place_types():
    """Valid place types are joined into a single 'types' query parameter."""
    validated = mapbox.Geocoder()._validate_place_types(('address', 'poi'))
    assert validated == {'types': 'address,poi'}
@responses.activate
def test_geocoder_forward_types():
    """Type filtering of forward geocoding works"""
    # The mocked URL requires the comma-joined `types` filter to appear in
    # the query string exactly as the client serializes it.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?types=address,country,place,poi.landmark,postcode,region&access_token=pk.test',
        match_querystring=True,
        body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').forward(
            '1600 pennsylvania ave nw',
            types=('address', 'country', 'place', 'poi.landmark', 'postcode', 'region'))
    assert response.status_code == 200
    assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
@responses.activate
def test_geocoder_reverse_types():
    """Type filtering of reverse geocoding works"""
    lon, lat = -77.4371, 37.5227
    body = json.dumps({"query": [lon, lat]})
    # The mocked URL requires the comma-joined `types` filter in the query.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?types=address,country,place,poi.landmark,postcode,region&access_token=pk.test'.format(lon, lat),
        match_querystring=True,
        body=body,
        status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').reverse(
            lon=lon, lat=lat,
            types=('address', 'country', 'place', 'poi.landmark', 'postcode', 'region'))
    assert response.status_code == 200
    assert response.json()['query'] == [lon, lat]
@responses.activate
def test_geocoder_forward_proximity():
    """Proximity parameter works"""
    # lon=0, lat=0 must be serialized as proximity=0.0,0.0 in the query.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?proximity=0.0,0.0&access_token=pk.test',
        match_querystring=True,
        body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').forward(
            '1600 pennsylvania ave nw', lon=0, lat=0)
    assert response.status_code == 200
    assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
@responses.activate
def test_geocoder_proximity_rounding():
    """Proximity parameter is rounded to 3 decimal places"""
    # match_querystring=False: the assertion is made on the request URL
    # afterwards, not by the mock's matcher.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json',
        match_querystring=False,
        body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').forward(
            '1600 pennsylvania ave nw', lon=0.123456, lat=0.987654)
    # check coordinate precision for proximity flag
    match = re.search(r'[&\?]proximity=([^&$]+)', response.url)
    assert match is not None
    # The comma may appear raw or percent-encoded (%2C) depending on the
    # URL serialization, hence the two-way split.
    for coord in re.split(r'(%2C|,)', match.group(1)):
        assert _check_coordinate_precision(coord, 3)
@responses.activate
def test_geocoder_forward_bbox():
    """Bbox parameter works"""
    # The bbox tuple must serialize to a %2C-separated value in the
    # query string for this mock to match.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/washington.json?bbox=-78.3284%2C38.6039%2C-78.0428%2C38.7841&access_token=pk.test',
        match_querystring=True,
        body='{"query": ["washington"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').forward(
            'washington', bbox=(-78.3284,38.6039,-78.0428,38.7841))
    assert response.status_code == 200
    assert response.json()['query'] == ["washington"]
@responses.activate
def test_geocoder_forward_limit():
    """Limit parameter works"""
    # The mocked body pretends the server honored limit=3 by returning
    # exactly three features.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/washington.json?limit=3&access_token=pk.test',
        match_querystring=True,
        body='{"query": ["washington"], "features": [1, 2, 3]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').forward(
            'washington', limit=3)
    assert response.status_code == 200
    assert len(response.json()['features']) == 3
@responses.activate
def test_geocoder_reverse_limit():
    """Limit parameter works"""
    lon, lat = -77.4371, 37.5227
    body = json.dumps({"query": [lon, lat],
                       "features": [{'name': 'place'}]})
    # Reverse geocoding with a limit requires a single type (see the
    # requires_onetype test below), so types=place is included here.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/{0},{1}.json?access_token=pk.test&limit=1&types=place'.format(lon, lat),
        match_querystring=True,
        body=body,
        status=200,
        content_type='application/json')
    service = mapbox.Geocoder(access_token='pk.test')
    response = service.reverse(lon=lon, lat=lat, limit=1, types=['place'])
    assert response.status_code == 200
    assert len(response.json()['features']) == 1
@responses.activate
def test_geocoder_reverse_limit_requires_onetype():
    """Limit requires a single type"""
    lon, lat = -77.123456789, 37.987654321
    service = mapbox.Geocoder(access_token='pk.test')
    # No types at all: rejected client-side before any HTTP request.
    with pytest.raises(mapbox.InvalidPlaceTypeError):
        service.reverse(lon=lon, lat=lat, limit=1)
    # More than one type: also rejected.
    with pytest.raises(mapbox.InvalidPlaceTypeError):
        service.reverse(lon=lon, lat=lat, limit=1, types=['places', 'country'])
@responses.activate
def test_geocoder_reverse_rounding():
    """Reverse geocoding parameters are rounded to 5 decimal places"""
    lon, lat = -77.123456789, 37.987654321
    body = json.dumps({"query": [lon, lat]})
    # Raw string: '/' needs no escaping in a regex, and '\/' inside a
    # non-raw literal is an invalid string escape (SyntaxWarning on
    # modern Pythons).  The pattern matches any places URL so the mock
    # catches the request regardless of the rounded coordinates.
    responses.add(
        responses.GET,
        re.compile(r'https://api\.mapbox\.com/geocoding/v5/mapbox\.places/.+\.json'),
        match_querystring=False,
        body=body,
        status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').reverse(
            lon=lon, lat=lat)
    # check coordinate precision for reverse geocoding coordinates
    match = re.search(r'\/([\-\d\.\,]+)\.json', response.url)
    assert match is not None
    # The comma may appear raw or percent-encoded (%2C).
    for coord in re.split(r'(%2C|,)', match.group(1)):
        assert _check_coordinate_precision(coord, 5)
@responses.activate
def test_geocoder_unicode():
    """Forward geocoding works with non-ascii inputs

    Specifically, the URITemplate needs to utf-8 encode all inputs
    """
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/Florian%C3%B3polis%2C%20Brazil.json?access_token=pk.test',
        match_querystring=True,
        body='{}', status=200,
        content_type='application/json')
    query = "Florianópolis, Brazil"
    try:
        query = query.decode('utf-8')  # Python 2: bytes -> unicode
    except AttributeError:
        # Python 3: str has no decode().  Catching only AttributeError
        # (instead of a bare except) avoids swallowing unrelated errors
        # such as KeyboardInterrupt.
        pass
    response = mapbox.Geocoder(access_token='pk.test').forward(query)
    assert response.status_code == 200
@responses.activate
def test_geocoder_forward_country():
    """Country parameter of forward geocoding works"""
    # The country list must serialize to country=us for this mock
    # (match_querystring=True) to match.
    responses.add(
        responses.GET,
        'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?country=us&access_token=pk.test',
        match_querystring=True,
        body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
        content_type='application/json')
    response = mapbox.Geocoder(
        access_token='pk.test').forward('1600 pennsylvania ave nw', country=['us'])
    assert response.status_code == 200
| ravik/mapbox-baseSDK | tests/test_geocoder.py | Python | mit | 12,916 |
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    '''Convert an address string into a 16-byte IPv6-mapped bytearray.

    Supported forms: "<name>.onion" (OnionCat mapping), dotted IPv4,
    IPv6 (with "::" compression), and the old little-endian
    0xDDBBCCAA pnSeeds format.  Raises ValueError on anything else.
    '''
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # Bug fix: this previously read "% s" where `s` was
            # undefined, raising NameError instead of the intended
            # ValueError naming the bad address.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        # Pad the "::" gap with zero bytes to reach 16 bytes total.
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    '''Parse "<host>[:<port>]" or "[<ipv6>][:<port>]" into a
    (16-byte address bytearray, port) tuple, using *defaultport*
    when no explicit port is given.
    '''
    # Raw string: '\[' inside a non-raw literal is an invalid string
    # escape (SyntaxWarning on modern Pythons); the regex itself is
    # unchanged.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    '''Write a C array named *structname* to *g*, one SeedSpec6 entry
    per non-empty address line read from *f*.  Lines may carry trailing
    "#" comments; *defaultport* is used when a line has no explicit port.
    '''
    entries = []
    for raw_line in f:
        # Strip trailing comments and surrounding whitespace.
        hash_pos = raw_line.find('#')
        if hash_pos != -1:
            raw_line = raw_line[0:hash_pos]
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        (host, port) = parse_spec(raw_line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        entries.append(' {{%s}, %i}' % (hoststr, port))
    # Join with ",\n" instead of tracking a first-entry flag; the
    # emitted bytes are identical.
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    # Emit a complete chainparamsseeds.h to stdout, built from the two
    # node-list files in the directory given on the command line.
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef H_CHAINPARAMSSEEDS\n')
    g.write('#define H_CHAINPARAMSSEEDS\n')
    g.write('// List of fixed seed nodes for the bitcoin network\n')
    g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
    g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
    g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    # Mainnet and testnet use different default ports (15714 / 25714).
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 15714)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 25714)
    g.write('#endif\n')
| TheAltcoinBoard/XAB-withoutSecp256k1 | share/seeds/generate-seeds.py | Python | mit | 4,318 |
def saveData(X, f_out, colfmt='%i'):
    '''
    Quick alias for saving data matrices as comma-delimited text.

    If X and f_out are tuples, each matrix X[i] is saved to the
    corresponding destination f_out[i].

    Parameters
    ----------
    X : array-like or tuple of array-like
        Matrix (or matrices) to save.
    f_out : str/file or tuple of str/file
        Output destination(s); length must match X when tuples.
    colfmt : str
        numpy.savetxt format applied to every column (default '%i').
    '''
    import numpy as np
    if isinstance(X, tuple):
        # Explicit error instead of `assert`, which is stripped under
        # `python -O` and would then silently drop the check.
        if len(X) != len(f_out):
            raise ValueError('X and f_out must have the same length')
        for matrix, target in zip(X, f_out):
            np.savetxt(target, matrix, delimiter=',', fmt=colfmt)
    else:
        np.savetxt(f_out, X, delimiter=',', fmt=colfmt)
| mattdelhey/kaggle-galaxy | saveData.py | Python | mit | 440 |
"""Zwave util methods."""
import logging
from . import const
_LOGGER = logging.getLogger(__name__)
def check_node_schema(node, schema):
    """Check if node matches the passed node schema."""
    # Each rule: (schema key, node attribute, label used in the log line).
    # Rules are evaluated in order and the first mismatch short-circuits,
    # exactly like the original chained `if` blocks.
    rules = (
        (const.DISC_NODE_ID, 'node_id', 'node_id'),
        (const.DISC_GENERIC_DEVICE_CLASS, 'generic', 'generic_device_class'),
        (const.DISC_SPECIFIC_DEVICE_CLASS, 'specific', 'specific_device_class'),
    )
    for schema_key, attr, label in rules:
        if schema_key not in schema:
            continue
        actual = getattr(node, attr)
        if actual not in schema[schema_key]:
            _LOGGER.debug("node.%s %s not in %s %s",
                          attr, actual, label, schema[schema_key])
            return False
    return True
def check_value_schema(value, schema):
    """Check if the value matches the passed value schema."""

    def _in_schema(schema_key, attr):
        # Membership rule: value.<attr> must be listed in schema[key].
        if schema_key not in schema:
            return True
        actual = getattr(value, attr)
        if actual not in schema[schema_key]:
            _LOGGER.debug("value.%s %s not in %s %s",
                          attr, actual, attr, schema[schema_key])
            return False
        return True

    def _is_schema(schema_key, attr):
        # Identity rule: value.<attr> must be exactly schema[key].
        if schema_key not in schema:
            return True
        actual = getattr(value, attr)
        if actual is not schema[schema_key]:
            _LOGGER.debug("value.%s %s not %s",
                          attr, actual, schema[schema_key])
            return False
        return True

    # Short-circuit `and` preserves the original check order and the
    # early-exit behavior of the chained `if` blocks.
    return (_in_schema(const.DISC_COMMAND_CLASS, 'command_class') and
            _in_schema(const.DISC_TYPE, 'type') and
            _in_schema(const.DISC_GENRE, 'genre') and
            _is_schema(const.DISC_READONLY, 'is_read_only') and
            _is_schema(const.DISC_WRITEONLY, 'is_write_only') and
            _in_schema(const.DISC_LABEL, 'label') and
            _in_schema(const.DISC_INDEX, 'index') and
            _in_schema(const.DISC_INSTANCE, 'instance'))
| miniconfig/home-assistant | homeassistant/components/zwave/util.py | Python | mit | 3,178 |
import glob, csv, re, shutil, mustache, time
import numpy as np
# NOTE(review): Python 2 only (reader.next(), file()); would need
# next(reader) / open() to run under Python 3.
# Pick the most recent odds CSV dump and extract its timestamp from the
# filename (raw/odds<timestamp>.csv).
oddsfile = list(sorted(glob.glob('raw/odds*.csv')))[-1]
timestamp = re.search('s(.*?)\.', oddsfile).group(1)
with open(oddsfile) as infile:
    reader = csv.reader(infile)
    header = reader.next()
    teams = [row for row in reader]
# Coerce odds columns to floats, dropping empty cells and clamping
# impossible (<1) odds up to 1; first two columns are name and group.
fixed = []
for team in teams:
    t = team[0:2]
    for odd in team[2:]:
        if odd:
            o = float(odd)
            # betdaq lists some impossible odds. WTF?
            if o < 1: o = 1.
            t.append(o)
    fixed.append(t)
teams = fixed
# Summarise each team's odds; a team with no odds at all has been
# eliminated and is skipped.
summary = []
for team in teams:
    odds = team[2:]
    try:
        max_ = max(odds)
    except ValueError:
        #nobody is offering odds on this team, they're eliminated, skip them
        continue
    min_ = min(odds)
    mean = np.mean(odds)
    median = np.median(odds)
    summary.append(team[:2] + [max_, min_, mean, median])
# Write the timestamped summary CSV ...
summaryfile = "raw/summary%s.csv" % timestamp
with file(summaryfile, 'w') as outfile:
    w = csv.writer(outfile)
    w.writerow(['name', 'group', 'max', 'min', 'mean', 'median'])
    for row in summary:
        w.writerow(row)
# ... keep a stable-named copy, and re-render the static index page.
shutil.copy2(summaryfile, "summary.csv")
last_updated = time.strftime("%b %d %Y %H:%M")
context = {"last_updated": last_updated}
out = mustache.render(file("index.mustache.html").read(), context)
file("index.html", 'w').write(out)
| llimllib/champsleagueviz | europa/stats.py | Python | mit | 1,359 |
# -*- coding: utf-8 -*-
""" S3 Profile
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from gluon.html import *
from gluon.http import redirect
from gluon.storage import Storage
from s3crud import S3CRUD
from s3data import S3DataList
from s3resource import S3FieldSelector
# =============================================================================
class S3Profile(S3CRUD):
    """
        Interactive Method Handler for Profile Pages

        Configure widgets using s3db.configure(tablename, profile_widgets=[])

        @ToDo: Make more configurable:
            * Currently assumes a max of 2 widgets per row
            * Currently uses Bootstrap classes
            * Currently uses internal widgets rather than S3Method widgets
    """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            API entry point

            @param r: the S3Request instance
            @param attr: controller attributes for the request
        """

        if r.http in ("GET", "POST", "DELETE"):
            if r.record:
                output = self.profile(r, **attr)
            else:
                # Redirect to the List View
                redirect(r.url(method=""))
        else:
            # NOTE(review): r.error() appears to abort the request; if it
            # ever returned normally, `output` below would be unbound.
            r.error(405, r.ERROR.BAD_METHOD)
        return output

    # -------------------------------------------------------------------------
    def profile(self, r, **attr):
        """
            Generate a Profile page

            @param r: the S3Request instance
            @param attr: controller attributes for the request
        """

        tablename = self.tablename
        get_config = current.s3db.get_config

        # Page Title
        title = get_config(tablename, "profile_title")
        if not title:
            try:
                title = r.record.name
            except:
                title = current.T("Profile Page")

        # Page Header
        header = get_config(tablename, "profile_header")
        if not header:
            header = H2(title, _class="profile_header")

        output = dict(title=title,
                      header=header)

        # Get the page widgets
        widgets = get_config(tablename, "profile_widgets")

        # Index the widgets by their position in the config
        # (the index is used for unique list IDs and Ajax update URLs)
        for index, widget in enumerate(widgets):
            widget["index"] = index

        if r.representation == "dl":
            # Ajax-update of one datalist
            get_vars = r.get_vars
            index = r.get_vars.get("update", None)
            if index:
                try:
                    index = int(index)
                except ValueError:
                    datalist = ""
                else:
                    # @ToDo: Check permissions to the Resource & do something different if no permission
                    datalist = self._datalist(r, widgets[index], **attr)
            # NOTE(review): if "update" is absent/falsy, `datalist` is
            # unbound here (NameError) — confirm callers always pass it.
            output["item"] = datalist
        else:
            # Default page-load: lay the widgets out two per row
            rows = []
            if widgets:
                append = rows.append
                odd = True
                for widget in widgets:
                    w_type = widget["type"]
                    if odd:
                        # Start a new Bootstrap row for every odd widget
                        row = DIV(_class="row profile")
                    colspan = widget.get("colspan", 1)
                    if w_type == "map":
                        row.append(self._map(r, widget, **attr))
                        if colspan == 2:
                            append(row)
                    elif w_type == "comments":
                        row.append(self._comments(r, widget, **attr))
                        if colspan == 2:
                            append(row)
                    elif w_type == "datalist":
                        row.append(self._datalist(r, widget, **attr))
                        if colspan == 2:
                            append(row)
                    else:
                        # NOTE(review): bare `raise` with no active
                        # exception raises RuntimeError/TypeError rather
                        # than a meaningful "unsupported widget type"
                        # error — confirm intent.
                        raise
                    if odd:
                        odd = False
                    else:
                        odd = True
                    append(row)
            else:
                # Method not supported for this resource
                # @ToDo Some kind of 'Page not Configured'?
                r.error(405, r.ERROR.BAD_METHOD)

            output["rows"] = rows

        current.response.view = self._view(r, "profile.html")
        return output

    # -------------------------------------------------------------------------
    @staticmethod
    def _resolve_context(context, id):
        """
            Resolve a context filter into an S3FieldSelector query

            @param context: the context (as a string)
            @param id: the record_id
        """

        if context == "location":
            # Show records linked to this Location & all it's Child Locations
            s = "(location)$path"
            # This version doesn't serialize_url
            #m = ("%(id)s/*,*/%(id)s/*" % dict(id=id)).split(",")
            #filter = (S3FieldSelector(s).like(m)) | (S3FieldSelector(s) == id)
            # Match the record itself and any "/"-separated path that
            # contains it (gis location paths are ancestor ID lists)
            m = ("%(id)s,%(id)s/*,*/%(id)s/*,*/%(id)s" % dict(id=id)).split(",")
            m = [f.replace("*", "%") for f in m]
            filter = S3FieldSelector(s).like(m)
        # @ToDo:
        #elif context == "organisation":
        #    # Show records linked to this Organisation and all it's Branches
        #    s = "(%s)" % context
        #    filter = S3FieldSelector(s) == id
        else:
            # Normal: show just records linked directly to this master resource
            s = "(%s)" % context
            filter = S3FieldSelector(s) == id
        return filter

    # -------------------------------------------------------------------------
    def _comments(self, r, widget, **attr):
        """
            Generate a Comments widget

            @param r: the S3Request instance
            @param widget: the widget as a tuple: (label, type, icon)
            @param attr: controller attributes for the request

            @ToDo: Configurable to use either Disqus or internal Comments
        """

        label = widget.get("label", "")
        if label:
            label = current.T(label)
        icon = widget.get("icon", "")
        if icon:
            icon = TAG[""](I(_class=icon), " ")

        # Render the widget (the comments body itself is an empty
        # placeholder <div class="thumbnail"> filled in client-side)
        output = DIV(H4(icon,
                        label,
                        _class="profile-sub-header"),
                     DIV(_class="thumbnail"),
                     _class="span12")
        return output

    # -------------------------------------------------------------------------
    def _datalist(self, r, widget, **attr):
        """
            Generate a dataList

            @param r: the S3Request instance
            @param widget: the widget as a tuple: (label, tablename, icon, filter)
            @param attr: controller attributes for the request
        """

        T = current.T
        s3db = current.s3db
        id = r.id
        context = widget.get("context", None)
        if context:
            context = self._resolve_context(context, id)
            s3db.context = context
        tablename = widget.get("tablename", None)
        resource = s3db.resource(tablename, context=True)
        table = resource.table

        # Config Options:
        # 1st choice: Widget
        # 2nd choice: get_config
        # 3rd choice: Default
        config = resource.get_config
        list_fields = widget.get("list_fields",
                                 config("list_fields", None))
        list_layout = widget.get("list_layout",
                                 config("list_layout", None))
        orderby = widget.get("orderby",
                             config("list_orderby",
                                    ~resource.table.created_on))

        filter = widget.get("filter", None)
        if filter:
            resource.add_filter(filter)

        # Use the widget-index to create a unique ID
        listid = "profile-list-%s-%s" % (tablename, widget["index"])

        # Page size
        pagesize = 4
        representation = r.representation
        if representation == "dl":
            # Ajax-update
            get_vars = r.get_vars
            record_id = get_vars.get("record", None)
            if record_id is not None:
                # Ajax-update of a single record
                resource.add_filter(S3FieldSelector("id") == record_id)
                start, limit = 0, 1
            else:
                # Ajax-update of full page
                start = get_vars.get("start", None)
                limit = get_vars.get("limit", None)
                if limit is not None:
                    try:
                        start = int(start)
                        limit = int(limit)
                    except ValueError:
                        start, limit = 0, pagesize
                else:
                    start = None
        else:
            # Page-load
            start, limit = 0, pagesize

        # Ajax-delete items?
        if representation == "dl" and r.http in ("DELETE", "POST"):
            if "delete" in r.get_vars:
                return self._dl_ajax_delete(r, resource)
            else:
                r.error(405, r.ERROR.BAD_METHOD)

        # dataList
        datalist, numrows, ids = resource.datalist(fields=list_fields,
                                                   start=start,
                                                   limit=limit,
                                                   listid=listid,
                                                   orderby=orderby,
                                                   layout=list_layout)

        # Render the list
        ajaxurl = r.url(vars={"update": widget["index"]},
                        representation="dl")
        data = datalist.html(ajaxurl=ajaxurl,
                             pagesize=pagesize
                             )
        if numrows == 0:
            # Empty-state card shown instead of list items
            msg = P(I(_class="icon-folder-open-alt"),
                    BR(),
                    S3CRUD.crud_string(tablename,
                                       "msg_no_match"),
                    _class="empty_card-holder")
            data.insert(1, msg)

        if representation == "dl":
            # This is an Ajax-request, so we don't need the wrapper
            current.response.view = "plain.html"
            return data

        # Interactive only below here
        label = widget.get("label", "")
        if label:
            label = T(label)
        icon = widget.get("icon", "")
        if icon:
            icon = TAG[""](I(_class=icon), " ")

        # Permission to create new items?
        insert = widget.get("insert", True)
        if insert and current.auth.s3_has_permission("create", table):
            #if r.tablename = "org_organisation":
            # @ToDo: Special check for creating resources on Organisation profile
            if filter:
                # NOTE(review): serialize_url is passed the filter itself
                # here, but `resource` everywhere else — confirm this is
                # intentional and not a typo for serialize_url(resource).
                vars = filter.serialize_url(filter)
            else:
                vars = Storage()
            vars.refresh = listid
            if context:
                filters = context.serialize_url(resource)
                for f in filters:
                    vars[f] = filters[f]
            default = widget.get("default", None)
            if default:
                k, v = default.split("=", 1)
                vars[k] = v
            title_create = widget.get("title_create", None)
            if title_create:
                title_create = T(title_create)
            else:
                title_create = S3CRUD.crud_string(tablename, "title_create")
            c, f = tablename.split("_", 1)
            c = widget.get("create_controller", c)
            f = widget.get("create_function", f)
            create = A(I(_class="icon icon-plus-sign small-add"),
                       _href=URL(c=c, f=f, args=["create.popup"], vars=vars),
                       _class="s3_modal",
                       _title=title_create,
                       )
        else:
            create = ""

        if numrows > pagesize:
            # Button to display the rest of the records in a Modal
            more = numrows - pagesize
            vars = {}
            if context:
                filters = context.serialize_url(resource)
                for f in filters:
                    vars[f] = filters[f]
            if filter:
                filters = filter.serialize_url(resource)
                for f in filters:
                    vars[f] = filters[f]
            c, f = tablename.split("_", 1)
            url = URL(c=c, f=f, args=["datalist.popup"],
                      vars=vars)
            more = DIV(A(BUTTON("%s (%s)" % (T("see more"), more),
                                _class="btn btn-mini",
                                _type="button",
                                ),
                         _class="s3_modal",
                         _href=url,
                         _title=label,
                         ),
                       _class="more_profile")
        else:
            more = ""

        # Render the widget
        output = DIV(create,
                     H4(icon,
                        label,
                        _class="profile-sub-header"),
                     DIV(data,
                         more,
                         _class="card-holder"),
                     _class="span6")
        return output

    # -------------------------------------------------------------------------
    def _map(self, r, widget, **attr):
        """
            Generate a Map widget

            @param r: the S3Request instance
            @param widget: the widget as a tuple: (label, type, icon)
            @param attr: controller attributes for the request
        """

        from s3gis import Marker

        T = current.T
        db = current.db
        s3db = current.s3db

        label = widget.get("label", "")
        if label:
            label = current.T(label)
        icon = widget.get("icon", "")
        if icon:
            icon = TAG[""](I(_class=icon), " ")
        context = widget.get("context", None)
        if context:
            context = self._resolve_context(context, r.id)
            cserialize_url = context.serialize_url

        height = widget.get("height", 383)
        width = widget.get("width", 568) # span6 * 99.7%
        bbox = widget.get("bbox", {})

        # Default to showing all the resources in datalist widgets as separate layers
        ftable = s3db.gis_layer_feature
        mtable = s3db.gis_marker
        feature_resources = []
        fappend = feature_resources.append
        widgets = s3db.get_config(r.tablename, "profile_widgets")
        s3dbresource = s3db.resource
        for widget in widgets:
            if widget["type"] != "datalist":
                continue
            show_on_map = widget.get("show_on_map", True)
            if not show_on_map:
                continue
            # @ToDo: Check permission to access layer (both controller/function & also within Map Config)
            tablename = widget["tablename"]
            listid = "profile-list-%s-%s" % (tablename, widget["index"])
            layer = dict(name = T(widget["label"]),
                         id = listid,
                         active = True,
                         )
            filter = widget.get("filter", None)
            marker = widget.get("marker", None)
            if marker:
                marker = db(mtable.name == marker).select(mtable.image,
                                                          mtable.height,
                                                          mtable.width,
                                                          limitby=(0, 1)).first()
            layer_id = None
            layer_name = widget.get("layer", None)
            if layer_name:
                row = db(ftable.name == layer_name).select(ftable.layer_id,
                                                           limitby=(0, 1)).first()
                if row:
                    layer_id = row.layer_id
            if layer_id:
                # Existing feature layer: pass only a filter querystring
                layer["layer_id"] = layer_id
                resource = s3dbresource(tablename)
                filter_url = ""
                first = True
                if context:
                    filters = cserialize_url(resource)
                    for f in filters:
                        sep = "" if first else "&"
                        filter_url = "%s%s%s=%s" % (filter_url, sep, f, filters[f])
                        first = False
                if filter:
                    filters = filter.serialize_url(resource)
                    for f in filters:
                        sep = "" if first else "&"
                        filter_url = "%s%s%s=%s" % (filter_url, sep, f, filters[f])
                        first = False
                if filter_url:
                    layer["filter"] = filter_url
            else:
                # No named layer: build a full GeoJSON URL for the table
                layer["tablename"] = tablename
                map_url = widget.get("map_url", None)
                if not map_url:
                    # Build one
                    c, f = tablename.split("_", 1)
                    map_url = URL(c=c, f=f, extension="geojson")
                    resource = s3dbresource(tablename)
                    first = True
                    if context:
                        filters = cserialize_url(resource)
                        for f in filters:
                            sep = "?" if first else "&"
                            map_url = "%s%s%s=%s" % (map_url, sep, f, filters[f])
                            first = False
                    if filter:
                        filters = filter.serialize_url(resource)
                        for f in filters:
                            sep = "?" if first else "&"
                            map_url = "%s%s%s=%s" % (map_url, sep, f, filters[f])
                            first = False
                layer["url"] = map_url

            if marker:
                layer["marker"] = marker

            fappend(layer)

        map = current.gis.show_map(height=height,
                                   width=width,
                                   bbox=bbox,
                                   collapsed=True,
                                   feature_resources=feature_resources,
                                   )

        # Button to go full-screen
        fullscreen = A(I(_class="icon icon-fullscreen"),
                       _href=URL(c="gis", f="map_viewing_client"),
                       _class="gis_fullscreen_map-btn",
                       # If we need to support multiple maps on a page
                       #_map="default",
                       _title=T("View full screen"),
                       )
        s3 = current.response.s3
        if s3.debug:
            script = "/%s/static/scripts/S3/s3.gis.fullscreen.js" % current.request.application
        else:
            script = "/%s/static/scripts/S3/s3.gis.fullscreen.min.js" % current.request.application
        s3.scripts.append(script)

        # Render the widget
        output = DIV(fullscreen,
                     H4(icon,
                        label,
                        _class="profile-sub-header"),
                     DIV(map,
                         _class="card-holder"),
                     _class="span6")
        return output
# END =========================================================================
| sahildua2305/eden | modules/s3/s3profile.py | Python | mit | 20,693 |
import string
from universe import error
from universe.vncdriver import constants
class VNCEvent(object):
    """Marker base class for events sent through the VNC driver."""
    pass
def keycode(key):
    """Translate a key name (or a single character) to a VNC keysym."""
    if key in constants.KEYMAP:
        # Named key (e.g. modifier or special key) from the keymap table.
        return constants.KEYMAP.get(key)
    if len(key) == 1:
        # Single characters map directly to their ordinal.
        return ord(key)
    raise error.Error('Not sure how to translate to keycode: {!r}'.format(key))
class KeyEvent(VNCEvent):
    """A VNC key press or release event."""

    # Reverse lookup table (keysym -> human-readable name), built from
    # the keymap plus every printable single character.
    _keysym_to_name = {}
    for key, value in constants.KEYMAP.items():
        _keysym_to_name[value] = key
    for c in string.printable:
        _keysym_to_name[ord(c)] = c

    @classmethod
    def build(cls, keys, down=None):
        """Build a key combination, such as:

        ctrl-t

        With down=None, emits presses in order followed by releases in
        reverse order; down=True/False emits only presses/releases.
        """
        codes = []
        for key in keys.split('-'):
            key = keycode(key)
            codes.append(key)

        events = []
        if down is None or down:
            for code in codes:
                events.append(cls(code, down=True))
        if down is None or not down:
            # Release in reverse order so modifiers come up last
            for code in reversed(codes):
                events.append(cls(code, down=False))
        return events

    @classmethod
    def by_name(cls, key, down=None):
        # Convenience constructor from a key name rather than a keysym.
        return cls(keycode(key), down=down)

    def __init__(self, key, down=True):
        # TODO: validate key
        self.key = key
        self.down = bool(down)

    def compile(self):
        # Wire-format tuple consumed by the VNC driver.
        return 'KeyEvent', self.key, self.down

    def __repr__(self):
        if self.down:
            direction = 'down'
        else:
            direction = 'up'
        name = self._keysym_to_name.get(self.key)
        if not name:
            name = '0x{:x}'.format(self.key)
        else:
            name = '{} (0x{:x})'.format(name, self.key)
        return 'KeyEvent<key={} direction={}>'.format(name, direction)

    def __str__(self):
        return repr(self)

    def __hash__(self):
        # Hash consistent with __eq__ below (same key and direction).
        return (self.key, self.down).__hash__()

    def __eq__(self, other):
        return type(other) == type(self) and \
               other.key == self.key and \
               other.down == self.down

    @property
    def key_name(self):
        """Human readable name"""
        return self._keysym_to_name.get(self.key)
class PointerEvent(VNCEvent):
    """A VNC pointer (mouse) event: cursor position plus button state."""

    def __init__(self, x, y, buttonmask=0):
        # buttonmask is a bitfield; bit N set means button N is pressed.
        self.x = x
        self.y = y
        self.buttonmask = buttonmask

    def compile(self):
        """Return the wire-format tuple consumed by the VNC driver."""
        return 'PointerEvent', self.x, self.y, self.buttonmask

    def __repr__(self):
        return 'PointerEvent<x={} y={} buttonmask={}>'.format(
            self.x, self.y, self.buttonmask)

    def __str__(self):
        return repr(self)
| rht/universe | universe/spaces/vnc_event.py | Python | mit | 2,568 |
import os
import logging
import decimal
import base64
import json
from datetime import datetime
from lib import config, util, util_bitcoin
ASSET_MAX_RETRY = 3
D = decimal.Decimal
def parse_issuance(db, message, cur_block_index, cur_block):
    """Apply a counterpartyd issuance message to the tracked_assets and
    asset_extended_info mongo collections.

    Handles the four issuance variants in order: lock, ownership
    transfer, description change, and (re-)issuance of quantity.
    Returns True when an issuance was applied, None otherwise.
    """
    if message['status'] != 'valid':
        return

    def modify_extended_asset_info(asset, description):
        """adds an asset to asset_extended_info collection if the description is a valid json link. or, if the link
        is not a valid json link, will remove the asset entry from the table if it exists"""
        if util.is_valid_url(description, suffix='.json', allow_no_protocol=True):
            db.asset_extended_info.update({'asset': asset},
                {'$set': {
                    'info_url': description,
                    'info_status': 'needfetch',
                    'fetch_info_retry': 0, # retry ASSET_MAX_RETRY times to fetch info from info_url
                    'info_data': {},
                    'errors': []
                }}, upsert=True)
            #^ valid info_status settings: needfetch, valid, invalid, error
            #additional fields will be added later in events, once the asset info is pulled
        else:
            db.asset_extended_info.remove({ 'asset': asset })
            #remove any saved asset image data
            imagePath = os.path.join(config.DATA_DIR, config.SUBDIR_ASSET_IMAGES, asset + '.png')
            if os.path.exists(imagePath):
                os.remove(imagePath)

    tracked_asset = db.tracked_assets.find_one(
        {'asset': message['asset']}, {'_id': 0, '_history': 0})
    #^ pulls the tracked asset without the _id and history fields. This may be None

    if message['locked']: #lock asset
        assert tracked_asset is not None
        # Push the pre-change state onto _history to allow block rollbacks
        db.tracked_assets.update(
            {'asset': message['asset']},
            {"$set": {
                '_at_block': cur_block_index,
                '_at_block_time': cur_block['block_time_obj'],
                '_change_type': 'locked',
                'locked': True,
             },
             "$push": {'_history': tracked_asset } }, upsert=False)
        logging.info("Locking asset %s" % (message['asset'],))
    elif message['transfer']: #transfer asset
        assert tracked_asset is not None
        db.tracked_assets.update(
            {'asset': message['asset']},
            {"$set": {
                '_at_block': cur_block_index,
                '_at_block_time': cur_block['block_time_obj'],
                '_change_type': 'transferred',
                'owner': message['issuer'],
             },
             "$push": {'_history': tracked_asset } }, upsert=False)
        logging.info("Transferring asset %s to address %s" % (message['asset'], message['issuer']))
    elif message['quantity'] == 0 and tracked_asset is not None: #change description
        db.tracked_assets.update(
            {'asset': message['asset']},
            {"$set": {
                '_at_block': cur_block_index,
                '_at_block_time': cur_block['block_time_obj'],
                '_change_type': 'changed_description',
                'description': message['description'],
             },
             "$push": {'_history': tracked_asset } }, upsert=False)
        # Re-evaluate the extended-info URL whenever the description changes
        modify_extended_asset_info(message['asset'], message['description'])
        logging.info("Changing description for asset %s to '%s'" % (message['asset'], message['description']))
    else: #issue new asset or issue addition qty of an asset
        if not tracked_asset: #new issuance
            tracked_asset = {
                '_change_type': 'created',
                '_at_block': cur_block_index, #the block ID this asset is current for
                '_at_block_time': cur_block['block_time_obj'],
                #^ NOTE: (if there are multiple asset tracked changes updates in a single block for the same
                # asset, the last one with _at_block == that block id in the history array is the
                # final version for that asset at that block
                'asset': message['asset'],
                'owner': message['issuer'],
                'description': message['description'],
                'divisible': message['divisible'],
                'locked': False,
                'total_issued': message['quantity'],
                'total_issued_normalized': util_bitcoin.normalize_quantity(message['quantity'], message['divisible']),
                '_history': [] #to allow for block rollbacks
            }
            db.tracked_assets.insert(tracked_asset)
            logging.info("Tracking new asset: %s" % message['asset'])
            modify_extended_asset_info(message['asset'], message['description'])
        else: #issuing additional of existing asset
            assert tracked_asset is not None
            db.tracked_assets.update(
                {'asset': message['asset']},
                {"$set": {
                    '_at_block': cur_block_index,
                    '_at_block_time': cur_block['block_time_obj'],
                    '_change_type': 'issued_more',
                 },
                 "$inc": {
                     'total_issued': message['quantity'],
                     'total_issued_normalized': util_bitcoin.normalize_quantity(message['quantity'], message['divisible'])
                 },
                 "$push": {'_history': tracked_asset} }, upsert=False)
            logging.info("Adding additional %s quantity for asset %s" % (
                util_bitcoin.normalize_quantity(message['quantity'], message['divisible']), message['asset']))
    return True
def inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, new_status='error', errors=None):
    """Record a failed extended-info fetch attempt for ``asset``.

    Increments the asset's retry counter and stores the supplied error list.
    Once ``max_retry`` attempts have accumulated, the asset is flagged with
    ``new_status`` so it stops being re-fetched. The updated document is
    persisted via ``db.asset_extended_info.save``.

    ``errors`` previously defaulted to a mutable ``[]`` shared across all
    calls (the classic mutable-default-argument bug: any caller mutating the
    stored list would silently alter the default for later calls). A ``None``
    sentinel with a fresh list per call is backward-compatible.
    """
    if errors is None:
        errors = []
    asset['fetch_info_retry'] += 1
    asset['errors'] = errors
    if asset['fetch_info_retry'] == max_retry:
        asset['info_status'] = new_status
    db.asset_extended_info.save(asset)
def sanitize_json_data(data):
    """Sanitize user-supplied asset-info fields in place and return the dict.

    ``asset`` is always present and always sanitized; the remaining fields
    are optional and sanitized only when provided.
    """
    data['asset'] = util.sanitize_eliteness(data['asset'])
    for optional_key in ('description', 'website', 'pgpsig'):
        if optional_key in data:
            data[optional_key] = util.sanitize_eliteness(data[optional_key])
    return data
def process_asset_info(db, asset, info_data):
    """Validate fetched extended-asset JSON and persist it on success.

    Returns ``(True, None)`` when ``info_data`` is accepted, or
    ``(False, errors)`` when it is rejected — in which case the asset's
    retry bookkeeping is bumped and it is flagged ``'invalid'``.
    """
    # Sanity checks: only assets awaiting a fetch should reach this point,
    # and their URL was already validated before fetching.
    assert asset['info_status'] == 'needfetch'
    assert 'info_url' in asset
    assert util.is_valid_url(asset['info_url'], allow_no_protocol=True) #already validated in the fetch

    validation_errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)
    if not isinstance(info_data, dict) or 'asset' not in info_data:
        validation_errors.append('Invalid data format')
    elif asset['asset'] != info_data['asset']:
        validation_errors.append('asset field does not match asset name')

    if validation_errors:
        # Rejected: count the attempt and mark the asset invalid.
        inc_fetch_retry(db, asset, new_status='invalid', errors=validation_errors)
        return (False, validation_errors)

    asset['info_status'] = 'valid'
    # Fetch any associated image as a second-level resource.
    # TODO: parallelize this 2nd level asset image fetching (e.g. compose a
    # list here and process it later on).
    if 'image' in info_data:
        info_data['valid_image'] = util.fetch_image(
            info_data['image'], config.SUBDIR_ASSET_IMAGES, asset['asset'],
            fetch_timeout=5)

    asset['info_data'] = sanitize_json_data(info_data)
    db.asset_extended_info.save(asset)
    return (True, None)
def _ensure_http_protocol(url):
    """Return ``url`` prefixed with ``http://`` unless it already carries an
    http(s) scheme. (Info URLs may or may not include a protocol.)"""
    if url.startswith(('http://', 'https://')):
        return url
    return 'http://' + url

def fetch_all_asset_info(db):
    """Stream-fetch enhanced info for every asset marked ``'needfetch'``.

    Composes the list of info URLs (normalizing missing protocols), then
    hands them to ``util.stream_fetch``; the completion hook processes each
    response, updating retry counters on failure and validating/storing the
    info on success.

    The protocol-prefix normalization was previously duplicated verbatim in
    the hook and in the URL-composition loop; it is now factored into
    ``_ensure_http_protocol`` so the two sites cannot drift apart.
    """
    assets = list(db.asset_extended_info.find({'info_status': 'needfetch'}))
    asset_info_urls = []

    def asset_fetch_complete_hook(urls_data):
        # Called once util.stream_fetch has retrieved every URL (or failed).
        logging.info("Enhanced asset info fetching complete. %s unique URLs fetched. Processing..." % len(urls_data))
        for asset in assets:
            logging.debug("Looking at asset %s: %s" % (asset, asset['info_url']))
            if asset['info_url']:
                info_url = _ensure_http_protocol(asset['info_url'])
                assert info_url in urls_data
                if not urls_data[info_url][0]: #request was not successful
                    inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, errors=[urls_data[info_url][1]])
                    logging.warn("Fetch for asset at %s not successful: %s (try %i of %i)" % (
                        info_url, urls_data[info_url][1], asset['fetch_info_retry'], ASSET_MAX_RETRY))
                else:
                    result = process_asset_info(db, asset, urls_data[info_url][1])
                    if not result[0]:
                        logging.info("Processing for asset %s at %s not successful: %s" % (asset['asset'], info_url, result[1]))
                    else:
                        logging.info("Processing for asset %s at %s successful" % (asset['asset'], info_url))

    #compose and fetch all info URLs in all assets with them
    for asset in assets:
        if not asset['info_url']: continue
        if asset.get('disabled', False):
            logging.info("ExtendedAssetInfo: Skipping disabled asset %s" % asset['asset'])
            continue
        #may or may not end with .json. may or may not start with http:// or https://
        asset_info_urls.append(_ensure_http_protocol(asset['info_url']))

    asset_info_urls_str = ', '.join(asset_info_urls)
    asset_info_urls_str = (asset_info_urls_str[:2000] + ' ...') if len(asset_info_urls_str) > 2000 else asset_info_urls_str #truncate if necessary
    if len(asset_info_urls):
        logging.info('Fetching enhanced asset info for %i assets: %s' % (len(asset_info_urls), asset_info_urls_str))
        util.stream_fetch(asset_info_urls, asset_fetch_complete_hook,
            fetch_timeout=10, max_fetch_size=4*1024, urls_group_size=20, urls_group_time_spacing=20,
            per_request_complete_callback=lambda url, data: logging.debug("Asset info URL %s retrieved, result: %s" % (url, data)))
def get_escrowed_balances(addresses):
    """Compute funds currently locked in escrow for the given addresses.

    Escrow sources: open orders, pending order matches (both sides), open
    bets, pending bet matches (both sides), open RPS games, and pending RPS
    matches (both sides). Bets and RPS always escrow config.XCP.

    Returns a mapping ``{address: {asset: total_escrowed_quantity}}``.

    The nine copy-pasted query/bind/call stanzas are collapsed into a data
    table plus a single execution loop so the queries cannot drift apart.
    """
    holder = ','.join('?' * len(addresses))

    # Each entry: (SQL with the IN-list placeholder filled in, bindings
    # appended after the address list itself).
    queries = [
        ('''SELECT (source || '_' || give_asset) AS source_asset, source AS address, give_asset AS asset, SUM(give_remaining) AS quantity
            FROM orders
            WHERE source IN ({}) AND status = ? AND give_asset != ?
            GROUP BY source_asset'''.format(holder),
         ['open', 'BTC']),
        ('''SELECT (tx0_address || '_' || forward_asset) AS source_asset, tx0_address AS address, forward_asset AS asset, SUM(forward_quantity) AS quantity
            FROM order_matches
            WHERE tx0_address IN ({}) AND forward_asset != ? AND status = ?
            GROUP BY source_asset'''.format(holder),
         ['BTC', 'pending']),
        ('''SELECT (tx1_address || '_' || backward_asset) AS source_asset, tx1_address AS address, backward_asset AS asset, SUM(backward_quantity) AS quantity
            FROM order_matches
            WHERE tx1_address IN ({}) AND backward_asset != ? AND status = ?
            GROUP BY source_asset'''.format(holder),
         ['BTC', 'pending']),
        ('''SELECT source AS address, '{}' AS asset, SUM(wager_remaining) AS quantity
            FROM bets
            WHERE source IN ({}) AND status = ?
            GROUP BY address'''.format(config.XCP, holder),
         ['open']),
        ('''SELECT tx0_address AS address, '{}' AS asset, SUM(forward_quantity) AS quantity
            FROM bet_matches
            WHERE tx0_address IN ({}) AND status = ?
            GROUP BY address'''.format(config.XCP, holder),
         ['pending']),
        ('''SELECT tx1_address AS address, '{}' AS asset, SUM(backward_quantity) AS quantity
            FROM bet_matches
            WHERE tx1_address IN ({}) AND status = ?
            GROUP BY address'''.format(config.XCP, holder),
         ['pending']),
        ('''SELECT source AS address, '{}' AS asset, SUM(wager) AS quantity
            FROM rps
            WHERE source IN ({}) AND status = ?
            GROUP BY address'''.format(config.XCP, holder),
         ['open']),
        ('''SELECT tx0_address AS address, '{}' AS asset, SUM(wager) AS quantity
            FROM rps_matches
            WHERE tx0_address IN ({}) AND status IN (?, ?, ?)
            GROUP BY address'''.format(config.XCP, holder),
         ['pending', 'pending and resolved', 'resolved and pending']),
        ('''SELECT tx1_address AS address, '{}' AS asset, SUM(wager) AS quantity
            FROM rps_matches
            WHERE tx1_address IN ({}) AND status IN (?, ?, ?)
            GROUP BY address'''.format(config.XCP, holder),
         ['pending', 'pending and resolved', 'resolved and pending']),
    ]

    results = []
    for sql, extra_bindings in queries:
        results += util.call_jsonrpc_api(
            "sql", {'query': sql, 'bindings': addresses + extra_bindings},
            abort_on_error=True)['result']

    # Aggregate per address, per asset.
    escrowed_balances = {}
    for row in results:
        per_asset = escrowed_balances.setdefault(row['address'], {})
        per_asset[row['asset']] = per_asset.get(row['asset'], 0) + row['quantity']
    return escrowed_balances
| ClearingHouse/clearblockd | lib/components/assets.py | Python | mit | 14,545 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytest
@pytest.fixture
def fda_application(conn, organization):
    """Insert a sample FDA application row and yield its database id.

    Depends on the ``conn`` and ``organization`` fixtures; the inserted row
    references the organization created by the latter.
    """
    record = {
        'id': 'ANDA018659',
        'organisation_id': organization,
        'drug_name': 'ALLOPURINOL',
        'active_ingredients': 'ALLOPURINOL',
    }
    return conn['database']['fda_applications'].insert(record)
| arthurSena/processors | tests/fixtures/api/fda_applications.py | Python | mit | 543 |
from redispy import command
import unittest
class TestCommand(unittest.TestCase):
    """Tests for redispy command objects."""

    def test_auth(self):
        # The AUTH connection command must report its Redis command id.
        auth_command = command.ConnectionAuth()
        self.assertEqual(auth_command.get_id(), 'AUTH')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| whiteclover/redis-py | t/commandtest.py | Python | mit | 236 |